Merge remote-tracking branch 'tint/main' into HEAD

Integrates Tint repo into Dawn

KIs:
- Building docs for Tint is turned off, because it fails due to lack
  of annotations in Dawn source files.
- Dawn CQ needs to be updated to run Tint specific tests
- Significant post-merge cleanup needed

R=bclayton,cwallez
BUG=dawn:1339

Change-Id: I6c9714a0030934edd6c51f3cac4684dcd59d1ea3
diff --git a/.clang-format b/.clang-format
index 2fb833a..ff58eea 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,2 +1,20 @@
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Chromium
+Standard: Cpp11
+
+AllowShortFunctionsOnASingleLine: false
+
+ColumnLimit: 100
+
+# Use 4 space indents
+IndentWidth: 4
+ObjCBlockIndentWidth: 4
+AccessModifierOffset: -2
+
+CompactNamespaces: true
+
+# This should result in only one indentation level with compacted namespaces
+NamespaceIndentation: All
+
+# Use this option once clang-format 6 is out.
+IndentPPDirectives: AfterHash
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..d31b156
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,4 @@
+*     text=auto
+*.sh  eol=lf
+*.gn  eol=lf
+*.gni eol=lf
diff --git a/.gitignore b/.gitignore
index 19bafab..42d8414 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,31 +1,123 @@
-.cipd
-.DS_Store
-.gclient
-.gclient_entries
-.vs
+*.pyc
+
+# Directories added by gclient sync and the GN build
+/.cipd
+/.gclient
+/.gclient_entries
+/build
+/buildtools
+/testing
+/third_party/abseil-cpp/
+/third_party/angle
+/third_party/benchmark
+/third_party/binutils
+/third_party/catapult
+/third_party/clang-format
+/third_party/cpplint
+/third_party/glfw
+/third_party/googletest
+/third_party/gpuweb
+/third_party/gpuweb-cts
+/third_party/jinja2
+/third_party/jsoncpp
+/third_party/llvm-build
+/third_party/markupsafe
+/third_party/node
+/third_party/node-addon-api
+/third_party/node-api-headers
+/third_party/protobuf
+/third_party/swiftshader
+/third_party/vulkan-deps
+/third_party/vulkan_memory_allocator
+/third_party/webgpu-cts
+/third_party/zlib
+/tools/clang
+/tools/cmake
+/tools/golang
+/tools/memory
+/out
+
+# Modified from https://www.gitignore.io/api/vim,macos,linux,emacs,windows,sublimetext,visualstudio,visualstudiocode,intellij
+
+### Emacs ###
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+
+### Linux ###
+.fuse_hidden*
+.directory
+.Trash-*
+.nfs*
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+._*
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### SublimeText ###
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+*.sublime-workspace
+*.sublime-project
+sftp-config.json
+GitHub.sublime-settings
+
+### Vim ###
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+Session.vim
+.netrwhist
+tags
+
+### VisualStudio ###
+.vs/*
+
+### VisualStudioCode ###
 .vscode/*
 !.vscode/tasks.json
+
+### Windows ###
+Thumbs.db
+ehthumbs.db
+ehthumbs_vista.db
+Desktop.ini
+$RECYCLE.BIN/
+
+### Intellij ###
 .idea
+
+### Dawn node tools binaries
+src/dawn/node/tools/bin/
+
+### Cached node transpiled tools
+/.node_transpile_work_dir
+
+# Misc inherited from Tint
+/test.wgsl
 coverage.summary
 default.profraw
 lcov.info
-
-/buildtools
 /cmake-build-*/
-/out
 /testing
-/third_party/clang-format
-/third_party/catapult
-/third_party/cpplint
-/third_party/benchmark
-/third_party/binutils
-/third_party/googletest
-/third_party/gpuweb-cts
-/third_party/llvm-build
-/third_party/protobuf
-/third_party/vulkan-deps
-/tools/clang
-/tools/bin
-
-/build*/
-/test.wgsl
diff --git a/.gn b/.gn
index 2bc6b1c..3860440 100644
--- a/.gn
+++ b/.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors
+# Copyright 2018 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,4 +17,33 @@
 # Use Python3 to run scripts. On Windows this will use python.exe or python.bat
 script_executable = "python3"
 
-check_targets = [ "//*" ]
+default_args = {
+  clang_use_chrome_plugins = false
+
+  # Override the mac version so standalone Dawn compiles with at least 10.11
+  # which allows us to not skip the -Wunguarded-availability warning and get
+  # proper warnings for use of APIs that are 10.12 and above (even if
+  # Chromium is still on 10.10).
+  mac_deployment_target = "10.11.0"
+  mac_min_system_version = "10.11.0"
+
+  angle_enable_abseil = false
+  angle_standalone = false
+  angle_build_all = false
+  angle_has_rapidjson = false
+  angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+  angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
+  angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
+  angle_vulkan_validation_layers_dir =
+      "//third_party/vulkan-deps/vulkan-validation-layers/src"
+
+  vma_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+}
+
+check_targets = [
+  # Everything in BUILD.gn
+  "//:*",
+
+  # Everything in third_party/BUILD.gn
+  "//third_party/:*",
+]
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
index f584277..9d72ed9 100644
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -10,46 +10,42 @@
     // ${cwd}: the current working directory of the spawned process
     "version": "2.0.0",
     "tasks": [
+        // Invokes ninja in the 'out/active' directory, which is created with
+        // the 'gn gen' task (see below).
         {
-            "label": "make",
+            "label": "build",
             "group": {
                 "kind": "build",
                 "isDefault": true
             },
             "type": "shell",
-            "osx": {
-                "command": "sh",
-                "args": [
-                    "-c",
-                    "cmake --build . && echo Done"
-                ],
-                "options": {
-                    "cwd": "${workspaceRoot}/build",
-                },
-            },
             "linux": {
                 "command": "sh",
                 "args": [
                     "-c",
-                    "cmake --build . && echo Done"
-                ],
-                "options": {
-                    "cwd": "${workspaceRoot}/build",
-                },
-            },
-            "windows": {
-                // Invokes ninja in the 'out/active' directory, which is created
-                // with the 'generate' task (see below).
-                "command": "/C",
-                "args": [
                     "ninja && echo Done"
                 ],
+            },
+            "osx": {
+                "command": "sh",
+                "args": [
+                    "-c",
+                    "ninja && echo Done"
+                ],
+            },
+            "windows": {
+                "command": "/C",
+                "args": [
+                    "ninja && echo Done",
+                ],
                 "options": {
-                    "cwd": "${workspaceRoot}/out/active",
                     "shell": {
                         "executable": "cmd"
-                    }
-                },
+                    },
+                }
+            },
+            "options": {
+                "cwd": "${workspaceRoot}/out/active",
             },
             "presentation": {
                 "echo": false,
@@ -72,36 +68,28 @@
                 }
             }
         },
+        // Generates a GN build directory at 'out/<build-type>' with the
+        // is_debug argument set to to true iff the build-type is Debug.
+        // A symbolic link to this build directory is created at 'out/active'
+        // which is used to track the active build directory.
         {
-            "label": "configure",
+            "label": "gn gen",
             "type": "shell",
-            "osx": {
-                "command": "cmake",
-                "args": [
-                    "..",
-                    "-GNinja",
-                    "-DCMAKE_BUILD_TYPE=${input:buildType}",
-                ],
-                "options": {
-                    "cwd": "${workspaceRoot}/build"
-                },
-            },
             "linux": {
-                "command": "cmake",
+                "command": "sh",
                 "args": [
-                    "..",
-                    "-GNinja",
-                    "-DCMAKE_BUILD_TYPE=${input:buildType}",
+                    "-c",
+                    "gn gen 'out/${input:buildType}' --args=is_debug=$(if [ '${input:buildType}' = 'Debug' ]; then echo 'true'; else echo 'false'; fi) && (rm -fr out/active || true) && ln -s ${input:buildType} out/active",
                 ],
-                "options": {
-                    "cwd": "${workspaceRoot}/build"
-                },
+            },
+            "osx": {
+                "command": "sh",
+                "args": [
+                    "-c",
+                    "gn gen 'out/${input:buildType}' --args=is_debug=$(if [ '${input:buildType}' = 'Debug' ]; then echo 'true'; else echo 'false'; fi) && (rm -fr out/active || true) && ln -s ${input:buildType} out/active",
+                ],
             },
             "windows": {
-                // Generates a GN build directory at 'out/<build-type>' with the
-                // is_debug argument set to true iff the build-type is Debug.
-                // A symbolic link to this build directory is created at 'out/active'
-                // which is used to track the active build directory.
                 "command": "/C",
                 "args": [
                     "(IF \"${input:buildType}\" == \"Debug\" ( gn gen \"out\\${input:buildType}\" --args=is_debug=true ) ELSE ( gn gen \"out\\${input:buildType}\" --args=is_debug=false )) && (IF EXIST \"out\\active\" rmdir \"out\\active\" /q /s) && (mklink /j \"out\\active\" \"out\\${input:buildType}\")",
@@ -109,13 +97,52 @@
                 "options": {
                     "shell": {
                         "executable": "cmd"
-                    }
-                },
+                    },
+                }
+            },
+            "options": {
+                "cwd": "${workspaceRoot}"
             },
             "problemMatcher": [],
         },
+        // Rebases the current branch on to origin/main and then calls
+        // `gclient sync`.
         {
-            "label": "Push branch for review",
+            "label": "sync",
+            "type": "shell",
+            "linux": {
+                "command": "sh",
+                "args": [
+                    "-c",
+                    "git fetch origin && git rebase origin/main && gclient sync && echo Done"
+                ],
+            },
+            "osx": {
+                "command": "sh",
+                "args": [
+                    "-c",
+                    "git fetch origin && git rebase origin/main && gclient sync && echo Done"
+                ],
+            },
+            "windows": {
+                "command": "/C",
+                "args": [
+                    "git fetch origin && git rebase origin/main && gclient sync && echo Done",
+                ],
+                "options": {
+                    "shell": {
+                        "executable": "cmd"
+                    },
+                }
+            },
+            "options": {
+                "cwd": "${workspaceRoot}"
+            },
+            "problemMatcher": [],
+        },
+        // Pushes the changes at HEAD to gerrit for review
+        {
+            "label": "push",
             "type": "shell",
             "command": "git",
             "args": [
@@ -136,8 +163,6 @@
             "options": [
                 "Debug",
                 "Release",
-                "MinSizeRel",
-                "RelWithDebInfo",
             ],
             "default": "Debug",
             "description": "The type of build",
diff --git a/AUTHORS b/AUTHORS
index a66d09e..bded374 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,8 +1,7 @@
-# This is the list of the Tint authors for copyright purposes.
+# This is the list of Dawn & Tint authors for copyright purposes.
 #
 # This does not necessarily list everyone who has contributed code, since in
 # some cases, their employer may be the copyright holder.  To see the full list
 # of contributors, see the revision history in source control.
-
 Google LLC
 Vasyl Teliman
diff --git a/AUTHORS.dawn b/AUTHORS.dawn
new file mode 100644
index 0000000..32a6c3c
--- /dev/null
+++ b/AUTHORS.dawn
@@ -0,0 +1,6 @@
+# This is the list of Dawn authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder.  To see the full list
+# of contributors, see the revision history in source control.
+Google Inc.
diff --git a/AUTHORS.tint b/AUTHORS.tint
new file mode 100644
index 0000000..a66d09e
--- /dev/null
+++ b/AUTHORS.tint
@@ -0,0 +1,8 @@
+# This is the list of the Tint authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder.  To see the full list
+# of contributors, see the revision history in source control.
+
+Google LLC
+Vasyl Teliman
diff --git a/BUILD.gn b/BUILD.gn
index f73c1da..c33776d 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors
+# Copyright 2018 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,13 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import("scripts/dawn_overrides_with_defaults.gni")
+
+group("all") {
+  testonly = true
+  deps = [
+    "src/dawn/fuzzers",
+    "src/dawn/native:webgpu_dawn",
+    "src/dawn/tests",
+    "src/fuzzers/dawn:dawn_fuzzers",
+    "src/tint/fuzzers",
+    "src/tint:libtint",
+    "test/tint:tint_unittests",
+  ]
+  if (dawn_standalone) {
+    deps += [
+      "samples/dawn:samples",
+      "src/tint/cmd:tint",
+    ]
+  }
+}
+
 # This target is built when no specific target is specified on the command line.
 group("default") {
   testonly = true
-  deps = [
-    "src/tint:libtint",
-    "src/tint/cmd:tint",
-    "src/tint/fuzzers",
-    "test/tint:tint_unittests",
-  ]
+  deps = [ ":all" ]
 }
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 17a7ec5..f125476 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors.
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,22 @@
 
 cmake_minimum_required(VERSION 3.10.2)
 
-project(tint)
+# When upgrading to CMake 3.11 we can remove DAWN_DUMMY_FILE because source-less add_library
+# becomes available.
+# When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in
+# case any of the generator files changes. We should also remove the CACHE "" FORCE stuff to
+# override options in third_party dependencies. We can also add the HOMEPAGE_URL
+# entry to the project `HOMEPAGE_URL "https://dawn.googlesource.com/dawn"`
+
+project(
+    Dawn
+    DESCRIPTION "Dawn, a WebGPU implementation"
+    LANGUAGES C CXX
+)
 enable_testing()
+
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 set(CMAKE_CXX_STANDARD 17)
@@ -26,20 +40,17 @@
   set(CMAKE_BUILD_TYPE "Debug")
 endif()
 
-# TINT_IS_SUBPROJECT is 1 if added via add_subdirectory() from another project.
-get_directory_property(TINT_IS_SUBPROJECT PARENT_DIRECTORY)
-if(TINT_IS_SUBPROJECT)
-  set(TINT_IS_SUBPROJECT 1)
+set(DAWN_BUILD_GEN_DIR "${Dawn_BINARY_DIR}/gen")
+set(DAWN_GENERATOR_DIR "${Dawn_SOURCE_DIR}/generator")
+set(DAWN_SRC_DIR "${Dawn_SOURCE_DIR}/src")
+set(DAWN_INCLUDE_DIR "${Dawn_SOURCE_DIR}/include")
+set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates")
 
-  # If tint is used as a subproject, default to disabling the building of
-  # documentation and tests. These are unlikely to be desirable, but can be
-  # enabled.
-  set(TINT_BUILD_DOCS_DEFAULT OFF)
-  set(TINT_BUILD_TESTS_DEFAULT OFF)
-else()
-  set(TINT_BUILD_DOCS_DEFAULT ON)
-  set(TINT_BUILD_TESTS_DEFAULT ON)
-endif()
+set(DAWN_DUMMY_FILE "${DAWN_SRC_DIR}/Dummy.cpp")
+
+################################################################################
+# Configuration options
+################################################################################
 
 # option_if_not_defined(name description default)
 # Behaves like:
@@ -65,7 +76,159 @@
     endif()
 endfunction()
 
-set_if_not_defined(TINT_THIRD_PARTY_DIR "${tint_SOURCE_DIR}/third_party" "Directory in which to find third-party dependencies.")
+# Default values for the backend-enabling options
+set(ENABLE_D3D12 OFF)
+set(ENABLE_METAL OFF)
+set(ENABLE_OPENGLES OFF)
+set(ENABLE_DESKTOP_GL OFF)
+set(ENABLE_VULKAN OFF)
+set(USE_X11 OFF)
+set(BUILD_SAMPLES OFF)
+if (WIN32)
+    set(ENABLE_D3D12 ON)
+    if (NOT WINDOWS_STORE)
+        # Enable Vulkan in win32 compilation only
+        # since UWP only supports d3d
+        set(ENABLE_VULKAN ON)
+    endif()
+elseif(APPLE)
+    set(ENABLE_METAL ON)
+elseif(ANDROID)
+    set(ENABLE_VULKAN ON)
+    set(ENABLE_OPENGLES ON)
+elseif(UNIX)
+    set(ENABLE_OPENGLES ON)
+    set(ENABLE_DESKTOP_GL ON)
+    set(ENABLE_VULKAN ON)
+    set(USE_X11 ON)
+endif()
+
+# GLFW is not supported in UWP
+if ((WIN32 AND NOT WINDOWS_STORE) OR UNIX AND NOT ANDROID)
+    set(DAWN_SUPPORTS_GLFW_FOR_WINDOWING ON)
+endif()
+
+# Current examples are depend on GLFW
+if (DAWN_SUPPORTS_GLFW_FOR_WINDOWING)
+    set(BUILD_SAMPLES ON)
+endif()
+
+option_if_not_defined(DAWN_ENABLE_D3D12 "Enable compilation of the D3D12 backend" ${ENABLE_D3D12})
+option_if_not_defined(DAWN_ENABLE_METAL "Enable compilation of the Metal backend" ${ENABLE_METAL})
+option_if_not_defined(DAWN_ENABLE_NULL "Enable compilation of the Null backend" ON)
+option_if_not_defined(DAWN_ENABLE_DESKTOP_GL "Enable compilation of the OpenGL backend" ${ENABLE_DESKTOP_GL})
+option_if_not_defined(DAWN_ENABLE_OPENGLES "Enable compilation of the OpenGL ES backend" ${ENABLE_OPENGLES})
+option_if_not_defined(DAWN_ENABLE_VULKAN "Enable compilation of the Vulkan backend" ${ENABLE_VULKAN})
+option_if_not_defined(DAWN_ALWAYS_ASSERT "Enable assertions on all build types" OFF)
+option_if_not_defined(DAWN_USE_X11 "Enable support for X11 surface" ${USE_X11})
+
+option_if_not_defined(DAWN_BUILD_SAMPLES "Enables building Dawn's samples" ${BUILD_SAMPLES})
+option_if_not_defined(DAWN_BUILD_NODE_BINDINGS "Enables building Dawn's NodeJS bindings" OFF)
+
+option_if_not_defined(DAWN_ENABLE_PIC "Build with Position-Independent-Code enabled" OFF)
+
+set_if_not_defined(DAWN_THIRD_PARTY_DIR "${Dawn_SOURCE_DIR}/third_party" "Directory in which to find third-party dependencies.")
+
+# Recommended setting for compability with future abseil releases.
+set(ABSL_PROPAGATE_CXX_STD ON)
+
+set_if_not_defined(DAWN_ABSEIL_DIR "${DAWN_THIRD_PARTY_DIR}/abseil-cpp" "Directory in which to find Abseil")
+set_if_not_defined(DAWN_GLFW_DIR "${DAWN_THIRD_PARTY_DIR}/glfw" "Directory in which to find GLFW")
+set_if_not_defined(DAWN_JINJA2_DIR "${DAWN_THIRD_PARTY_DIR}/jinja2" "Directory in which to find Jinja2")
+set_if_not_defined(DAWN_SPIRV_HEADERS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-headers/src" "Directory in which to find SPIRV-Headers")
+set_if_not_defined(DAWN_SPIRV_TOOLS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-tools/src" "Directory in which to find SPIRV-Tools")
+set_if_not_defined(DAWN_TINT_DIR "${Dawn_SOURCE_DIR}" "Directory in which to find Tint")
+set_if_not_defined(DAWN_VULKAN_HEADERS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/vulkan-headers/src" "Directory in which to find Vulkan-Headers")
+
+# Dependencies for DAWN_BUILD_NODE_BINDINGS
+set_if_not_defined(NODE_ADDON_API_DIR "${DAWN_THIRD_PARTY_DIR}/node-addon-api" "Directory in which to find node-addon-api")
+set_if_not_defined(NODE_API_HEADERS_DIR "${DAWN_THIRD_PARTY_DIR}/node-api-headers" "Directory in which to find node-api-headers")
+set_if_not_defined(WEBGPU_IDL_PATH "${DAWN_THIRD_PARTY_DIR}/gpuweb/webgpu.idl" "Path to the webgpu.idl definition file")
+set_if_not_defined(GO_EXECUTABLE "go" "Golang executable for running the IDL generator")
+
+# Much of the backend code is shared among desktop OpenGL and OpenGL ES
+if (${DAWN_ENABLE_DESKTOP_GL} OR ${DAWN_ENABLE_OPENGLES})
+    set(DAWN_ENABLE_OPENGL ON)
+endif()
+
+if(DAWN_ENABLE_PIC)
+    set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif()
+
+################################################################################
+# Dawn's public and internal "configs"
+################################################################################
+
+# The public config contains only the include paths for the Dawn headers.
+add_library(dawn_public_config INTERFACE)
+target_include_directories(dawn_public_config INTERFACE
+    "${DAWN_INCLUDE_DIR}"
+    "${DAWN_BUILD_GEN_DIR}/include"
+)
+
+# The internal config conatins additional path but includes the dawn_public_config include paths
+add_library(dawn_internal_config INTERFACE)
+target_include_directories(dawn_internal_config INTERFACE
+    "${DAWN_SRC_DIR}"
+    "${DAWN_BUILD_GEN_DIR}/src"
+)
+target_link_libraries(dawn_internal_config INTERFACE dawn_public_config)
+
+# Compile definitions for the internal config
+if (DAWN_ALWAYS_ASSERT OR $<CONFIG:Debug>)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_ASSERTS")
+endif()
+if (DAWN_ENABLE_D3D12)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_D3D12")
+endif()
+if (DAWN_ENABLE_METAL)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_METAL")
+endif()
+if (DAWN_ENABLE_NULL)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_NULL")
+endif()
+if (DAWN_ENABLE_DESKTOP_GL)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_DESKTOP_GL")
+endif()
+if (DAWN_ENABLE_OPENGLES)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_OPENGLES")
+endif()
+if (DAWN_ENABLE_OPENGL)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_OPENGL")
+endif()
+if (DAWN_ENABLE_VULKAN)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_VULKAN")
+endif()
+if (DAWN_USE_X11)
+    target_compile_definitions(dawn_internal_config INTERFACE "DAWN_USE_X11")
+endif()
+if (WIN32)
+    target_compile_definitions(dawn_internal_config INTERFACE "NOMINMAX" "WIN32_LEAN_AND_MEAN")
+endif()
+
+set(CMAKE_CXX_STANDARD "17")
+
+################################################################################
+# Tint
+################################################################################
+
+# TINT_IS_SUBPROJECT is 1 if added via add_subdirectory() from another project.
+get_directory_property(TINT_IS_SUBPROJECT PARENT_DIRECTORY)
+if(TINT_IS_SUBPROJECT)
+  set(TINT_IS_SUBPROJECT 1)
+
+  # If tint is used as a subproject, default to disabling the building of
+  # documentation and tests. These are unlikely to be desirable, but can be
+  # enabled.
+  set(TINT_BUILD_DOCS_DEFAULT OFF)
+  set(TINT_BUILD_TESTS_DEFAULT OFF)
+else()
+  set(TINT_BUILD_DOCS_DEFAULT ON)
+  set(TINT_BUILD_TESTS_DEFAULT ON)
+endif()
+
+# Forcing building docs off right now, since currently this will try to build docs for both Tint & Dawn, and Dawn isn't annotated yet.
+set(TINT_BUILD_DOCS_DEFAULT OFF)
 
 option_if_not_defined(TINT_BUILD_SAMPLES "Build samples" ON)
 option_if_not_defined(TINT_BUILD_DOCS "Build documentation" ${TINT_BUILD_DOCS_DEFAULT})
@@ -200,7 +363,7 @@
 endif()
 
 if (${TINT_BUILD_SPV_READER})
-  include_directories("${TINT_THIRD_PARTY_DIR}/vulkan-deps/spirv-tools/src/include")
+  include_directories("${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-tools/include")
 endif()
 
 if((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
@@ -264,7 +427,7 @@
 
   if (${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
     target_include_directories(${TARGET} PUBLIC
-        "${TINT_THIRD_PARTY_DIR}/vulkan-deps/spirv-headers/src/include")
+        "${DAWN_THIRD_PARTY_DIR}/spirv-headers/include")
   endif()
 
   target_compile_definitions(${TARGET} PUBLIC -DTINT_BUILD_SPV_READER=$<BOOL:${TINT_BUILD_SPV_READER}>)
@@ -388,8 +551,25 @@
   endif()
 endfunction()
 
+################################################################################
+# Run on all subdirectories
+################################################################################
+
 add_subdirectory(third_party)
 add_subdirectory(src/tint)
+add_subdirectory(generator)
+add_subdirectory(src/dawn)
+
+################################################################################
+# Samples
+################################################################################
+
+if (DAWN_BUILD_SAMPLES)
+    #TODO(dawn:269): Add this once implementation-based swapchains are removed.
+    #add_subdirectory(src/utils)
+    add_subdirectory(samples/dawn)
+endif()
+
 if (TINT_BUILD_SAMPLES)
   add_subdirectory(src/tint/cmd)
 endif()
diff --git a/DEPS b/DEPS
index 7bf1906..0c13f63 100644
--- a/DEPS
+++ b/DEPS
@@ -8,100 +8,257 @@
 
 vars = {
   'chromium_git': 'https://chromium.googlesource.com',
+  'dawn_git': 'https://dawn.googlesource.com',
+  'github_git': 'https://github.com',
+  'swiftshader_git': 'https://swiftshader.googlesource.com',
 
-  'tint_gn_revision': 'git_revision:281ba2c91861b10fec7407c4b6172ec3d4661243',
+  'dawn_standalone': True,
+  'dawn_node': False, # Also fetches dependencies required for building NodeJS bindings.
+  'dawn_cmake_version': 'version:3.13.5',
+  'dawn_cmake_win32_sha1': 'b106d66bcdc8a71ea2cdf5446091327bfdb1bcd7',
+  'dawn_gn_version': 'git_revision:bd99dbf98cbdefe18a4128189665c5761263bcfb',
+  'dawn_go_version': 'version:1.16',
 
-  # We don't use location metadata in our test isolates.
+  'node_darwin_arm64_sha': '31859fc1fa0994a95f44f09c367d6ff63607cfde',
+  'node_darwin_x64_sha': '16dfd094763b71988933a31735f9dea966f9abd6',
+  'node_linux_x64_sha': 'ab9544e24e752d3d17f335fb7b2055062e582d11',
+  'node_win_x64_sha': '5ef847033c517c499f56f9d136d159b663bab717',
+
+  # GN variable required by //testing that will be output in the gclient_args.gni
   'generate_location_tags': False,
 }
 
 deps = {
-  'third_party/gpuweb-cts': {
-    'url': '{chromium_git}/external/github.com/gpuweb/cts@b0291fd966b55a5efc496772555b94842bde1085',
-  },
-
-  'third_party/vulkan-deps': {
-    'url': '{chromium_git}/vulkan-deps@20efc30b0c6fe3c9bbd4f8ed6335593ee51391b0',
-  },
-
   # Dependencies required to use GN/Clang in standalone
   'build': {
-    'url': '{chromium_git}/chromium/src/build@555c8b467c21e2c4b22d00e87e3faa0431df9ac2',
+    'url': '{chromium_git}/chromium/src/build@c7876b5a44308b94074287939244bc562007de69',
+    'condition': 'dawn_standalone',
   },
-
   'buildtools': {
-    'url': '{chromium_git}/chromium/src/buildtools@f78b4b9f33bd8ef9944d5ce643daff1c31880189',
+    'url': '{chromium_git}/chromium/src/buildtools@e1471b21ee9c6765ee95e9db0c76fe997ccad35c',
+    'condition': 'dawn_standalone',
   },
-
-  'tools/clang': {
-    'url': '{chromium_git}/chromium/src/tools/clang@8b7330592cb85ba09505a6be7bacabd0ad6160a3',
-  },
-
   'buildtools/clang_format/script': {
-    'url': '{chromium_git}/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@2271e89c145a5e27d6c110b6a1113c057a8301a3',
+    'url': '{chromium_git}/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@99803d74e35962f63a775f29477882afd4d57d94',
+    'condition': 'dawn_standalone',
   },
-
   'buildtools/linux64': {
     'packages': [{
       'package': 'gn/gn/linux-amd64',
-      'version': Var('tint_gn_revision'),
+      'version': Var('dawn_gn_version'),
     }],
     'dep_type': 'cipd',
-    'condition': 'host_os == "linux"',
+    'condition': 'dawn_standalone and host_os == "linux"',
   },
   'buildtools/mac': {
     'packages': [{
       'package': 'gn/gn/mac-${{arch}}',
-      'version': Var('tint_gn_revision'),
+      'version': Var('dawn_gn_version'),
     }],
     'dep_type': 'cipd',
-    'condition': 'host_os == "mac"',
+    'condition': 'dawn_standalone and host_os == "mac"',
   },
   'buildtools/win': {
     'packages': [{
       'package': 'gn/gn/windows-amd64',
-      'version': Var('tint_gn_revision'),
+      'version': Var('dawn_gn_version'),
     }],
     'dep_type': 'cipd',
-    'condition': 'host_os == "win"',
+    'condition': 'dawn_standalone and host_os == "win"',
   },
 
   'buildtools/third_party/libc++/trunk': {
     'url': '{chromium_git}/external/github.com/llvm/llvm-project/libcxx.git@79a2e924d96e2fc1e4b937c42efd08898fa472d7',
+    'condition': 'dawn_standalone',
   },
 
   'buildtools/third_party/libc++abi/trunk': {
-    'url': '{chromium_git}/external/github.com/llvm/llvm-project/libcxxabi.git@2715a6c0de8dac4c7674934a6b3d30ba0c685271',
+    'url': '{chromium_git}/external/github.com/llvm/llvm-project/libcxxabi.git@edde7bbc4049ae4a32257d9f16451312c763c601',
+    'condition': 'dawn_standalone',
   },
 
-  # Dependencies required for testing
+  'tools/clang': {
+    'url': '{chromium_git}/chromium/src/tools/clang@df9b14e26c163dd8e2c0ab081e2689f038ae7141',
+    'condition': 'dawn_standalone',
+  },
+  'tools/clang/dsymutil': {
+    'packages': [{
+      'package': 'chromium/llvm-build-tools/dsymutil',
+      'version': 'M56jPzDv1620Rnm__jTMYS62Zi8rxHVq7yw0qeBFEgkC',
+    }],
+    'condition': 'dawn_standalone and (checkout_mac or checkout_ios)',
+    'dep_type': 'cipd',
+  },
+
+  # Testing, GTest and GMock
   'testing': {
     'url': '{chromium_git}/chromium/src/testing@d485ae97b7900c1fb7edfbe2901ae5adcb120865',
+    'condition': 'dawn_standalone',
   },
-
+  'third_party/googletest': {
+    'url': '{chromium_git}/external/github.com/google/googletest@6b74da4757a549563d7c37c8fae3e704662a043b',
+    'condition': 'dawn_standalone',
+  },
+  # This is a dependency of //testing
   'third_party/catapult': {
     'url': '{chromium_git}/catapult.git@fa35beefb3429605035f98211ddb8750dee6a13d',
+    'condition': 'dawn_standalone',
   },
 
+  # Jinja2 and MarkupSafe for the code generator
+  'third_party/jinja2': {
+    'url': '{chromium_git}/chromium/src/third_party/jinja2@ee69aa00ee8536f61db6a451f3858745cf587de6',
+    'condition': 'dawn_standalone',
+  },
+  'third_party/markupsafe': {
+    'url': '{chromium_git}/chromium/src/third_party/markupsafe@0944e71f4b2cb9a871bcbe353f95e889b64a611a',
+    'condition': 'dawn_standalone',
+  },
+
+  # GLFW for tests and samples
+  'third_party/glfw': {
+    'url': '{chromium_git}/external/github.com/glfw/glfw@94773111300fee0453844a4c9407af7e880b4df8',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/vulkan_memory_allocator': {
+    'url': '{chromium_git}/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator@5e49f57a6e71a026a54eb42e366de09a4142d24e',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/angle': {
+    'url': '{chromium_git}/angle/angle@152616eedcfded69cab516e093efd3a98190fa5b',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/swiftshader': {
+    'url': '{swiftshader_git}/SwiftShader@9c16e141823e93c2d6ad05b94165cc18ffda6ffe',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/vulkan-deps': {
+    'url': '{chromium_git}/vulkan-deps@746dd371204b5b582e0ed0365909fefa4b0ef0fa',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/zlib': {
+    'url': '{chromium_git}/chromium/src/third_party/zlib@c29ee8c9c3824ca013479bf8115035527967fe02',
+    'condition': 'dawn_standalone',
+  },
+
+  'third_party/abseil-cpp': {
+    'url': '{chromium_git}/chromium/src/third_party/abseil-cpp@789af048b388657987c59d4da406859034fe310f',
+    'condition': 'dawn_standalone',
+  },
+
+  # WebGPU CTS - not used directly by Dawn, only transitively by Chromium.
+  'third_party/webgpu-cts': {
+    'url': '{chromium_git}/external/github.com/gpuweb/cts@87e74a93e0c046b30a798667f19a449fc99ddb5d',
+    'condition': 'build_with_chromium',
+  },
+
+  # Dependencies required to build / run Dawn NodeJS bindings
+  'third_party/node-api-headers': {
+    'url': '{github_git}/nodejs/node-api-headers.git@d68505e4055ecb630e14c26c32e5c2c65e179bba',
+    'condition': 'dawn_node',
+  },
+  'third_party/node-addon-api': {
+    'url': '{github_git}/nodejs/node-addon-api.git@4a3de56c3e4ed0031635a2f642b27efeeed00add',
+    'condition': 'dawn_node',
+  },
+  'third_party/gpuweb': {
+    'url': '{github_git}/gpuweb/gpuweb.git@881403b5fda2d9ac9ffc5daa24e34738205bf155',
+    'condition': 'dawn_node',
+  },
+  'third_party/gpuweb-cts': {
+    'url': '{chromium_git}/external/github.com/gpuweb/cts@b0291fd966b55a5efc496772555b94842bde1085',
+    'condition': 'dawn_standalone',
+  },
+
+  'tools/golang': {
+    'condition': 'dawn_node',
+    'packages': [{
+      'package': 'infra/3pp/tools/go/${{platform}}',
+      'version': Var('dawn_go_version'),
+    }],
+    'dep_type': 'cipd',
+  },
+
+  'tools/cmake': {
+    'condition': 'dawn_node and (host_os == "mac" or host_os == "linux")',
+    'packages': [{
+      'package': 'infra/3pp/tools/cmake/${{platform}}',
+      'version': Var('dawn_cmake_version'),
+    }],
+    'dep_type': 'cipd',
+  },
+
+  # Misc dependencies inherited from Tint
   'third_party/benchmark': {
     'url': '{chromium_git}/external/github.com/google/benchmark.git@e991355c02b93fe17713efe04cbc2e278e00fdbd',
+    'condition': 'dawn_standalone',
   },
-
-  'third_party/googletest': {
-    'url': '{chromium_git}/external/github.com/google/googletest.git@6b74da4757a549563d7c37c8fae3e704662a043b',
-  },
-
   'third_party/protobuf': {
     'url': '{chromium_git}/external/github.com/protocolbuffers/protobuf.git@fde7cf7358ec7cd69e8db9be4f1fa6a5c431386a',
+    'condition': 'dawn_standalone',
   },
 }
 
 hooks = [
+  # Pull the compilers and system libraries for hermetic builds
+  {
+    'name': 'sysroot_x86',
+    'pattern': '.',
+    'condition': 'dawn_standalone and checkout_linux and (checkout_x86 or checkout_x64)',
+    'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py',
+               '--arch=x86'],
+  },
+  {
+    'name': 'sysroot_x64',
+    'pattern': '.',
+    'condition': 'dawn_standalone and checkout_linux and checkout_x64',
+    'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py',
+               '--arch=x64'],
+  },
+  {
+    # Update the Mac toolchain if possible, this makes builders use "hermetic XCode" which is
+    # is more consistent (only changes when rolling build/) and is cached.
+    'name': 'mac_toolchain',
+    'pattern': '.',
+    'condition': 'dawn_standalone and checkout_mac',
+    'action': ['python3', 'build/mac_toolchain.py'],
+  },
+  {
+    # Update the Windows toolchain if necessary. Must run before 'clang' below.
+    'name': 'win_toolchain',
+    'pattern': '.',
+    'condition': 'dawn_standalone and checkout_win',
+    'action': ['python3', 'build/vs_toolchain.py', 'update', '--force'],
+  },
+  {
+    # Note: On Win, this should run after win_toolchain, as it may use it.
+    'name': 'clang',
+    'pattern': '.',
+    'action': ['python3', 'tools/clang/scripts/update.py'],
+    'condition': 'dawn_standalone',
+  },
+  {
+    # Pull rc binaries using checked-in hashes.
+    'name': 'rc_win',
+    'pattern': '.',
+    'condition': 'dawn_standalone and checkout_win and host_os == "win"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--no_auth',
+                '--bucket', 'chromium-browser-clang/rc',
+                '-s', 'build/toolchain/win/rc/win/rc.exe.sha1',
+    ],
+  },
   # Pull clang-format binaries using checked-in hashes.
   {
     'name': 'clang_format_win',
     'pattern': '.',
-    'condition': 'host_os == "win"',
+    'condition': 'dawn_standalone and host_os == "win"',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=win32',
@@ -113,7 +270,7 @@
   {
     'name': 'clang_format_mac',
     'pattern': '.',
-    'condition': 'host_os == "mac"',
+    'condition': 'dawn_standalone and host_os == "mac"',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=darwin',
@@ -125,7 +282,7 @@
   {
     'name': 'clang_format_linux',
     'pattern': '.',
-    'condition': 'host_os == "linux"',
+    'condition': 'dawn_standalone and host_os == "linux"',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=linux*',
@@ -134,7 +291,6 @@
                 '-s', 'buildtools/linux64/clang-format.sha1',
     ],
   },
-
   # Pull the compilers and system libraries for hermetic builds
   {
     'name': 'sysroot_x86',
@@ -186,15 +342,90 @@
   {
     'name': 'lastchange',
     'pattern': '.',
+    'condition': 'dawn_standalone',
     'action': ['python3', 'build/util/lastchange.py',
                '-o', 'build/util/LASTCHANGE'],
   },
+  # TODO(https://crbug.com/1180257): Use CIPD for CMake on Windows.
+  {
+    'name': 'cmake_win32',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "win"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--platform=win32',
+                '--no_auth',
+                '--bucket', 'chromium-tools',
+                Var('dawn_cmake_win32_sha1'),
+                '-o', 'tools/cmake-win32.zip'
+    ],
+  },
+  {
+    'name': 'cmake_win32_extract',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "win"',
+    'action': [ 'python3',
+                'scripts/extract.py',
+                'tools/cmake-win32.zip',
+                'tools/cmake-win32/',
+    ],
+  },
+
+  # Node binaries, when dawn_node is enabled
+  {
+    'name': 'node_linux64',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "linux"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--extract',
+                '--no_auth',
+                '--bucket', 'chromium-nodejs/16.13.0',
+                Var('node_linux_x64_sha'),
+                '-o', 'third_party/node/node-linux-x64.tar.gz',
+    ],
+  },
+  {
+    'name': 'node_mac',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "mac"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--extract',
+                '--no_auth',
+                '--bucket', 'chromium-nodejs/16.13.0',
+                Var('node_darwin_x64_sha'),
+                '-o', 'third_party/node/node-darwin-x64.tar.gz',
+    ],
+  },
+  {
+    'name': 'node_mac_arm64',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "mac"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--extract',
+                '--no_auth',
+                '--bucket', 'chromium-nodejs/16.13.0',
+                Var('node_darwin_arm64_sha'),
+                '-o', 'third_party/node/node-darwin-arm64.tar.gz',
+    ],
+  },
+  {
+    'name': 'node_win',
+    'pattern': '.',
+    'condition': 'dawn_node and host_os == "win"',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--no_auth',
+                '--bucket', 'chromium-nodejs/16.13.0',
+                Var('node_win_x64_sha'),
+                '-o', 'third_party/node/node.exe',
+    ],
+  },
+
 ]
 
 recursedeps = [
-  # buildtools provides clang_format, libc++, and libc++abi
-  'buildtools',
-  # vulkan-deps provides spirv-headers, spirv-tools & gslang
-  # It also provides other Vulkan tools that Tint doesn't use
   'third_party/vulkan-deps',
 ]
diff --git a/DIR_METADATA b/DIR_METADATA
new file mode 100644
index 0000000..0ca8187
--- /dev/null
+++ b/DIR_METADATA
@@ -0,0 +1,3 @@
+monorail {
+  component: "Internals>GPU>Dawn"
+}
diff --git a/LICENSE b/LICENSE
index d645695..14b77bd 100644
--- a/LICENSE
+++ b/LICENSE
@@ -3,9 +3,9 @@
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
 
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
-   1. Definitions.
+    1. Definitions.
 
       "License" shall mean the terms and conditions for use, reproduction,
       and distribution as defined by Sections 1 through 9 of this document.
@@ -64,14 +64,14 @@
       on behalf of whom a Contribution has been received by Licensor and
       subsequently incorporated within the Work.
 
-   2. Grant of Copyright License. Subject to the terms and conditions of
+    2. Grant of Copyright License. Subject to the terms and conditions of
       this License, each Contributor hereby grants to You a perpetual,
       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
       copyright license to reproduce, prepare Derivative Works of,
       publicly display, publicly perform, sublicense, and distribute the
       Work and such Derivative Works in Source or Object form.
 
-   3. Grant of Patent License. Subject to the terms and conditions of
+    3. Grant of Patent License. Subject to the terms and conditions of
       this License, each Contributor hereby grants to You a perpetual,
       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
       (except as stated in this section) patent license to make, have made,
@@ -87,7 +87,7 @@
       granted to You under this License for that Work shall terminate
       as of the date such litigation is filed.
 
-   4. Redistribution. You may reproduce and distribute copies of the
+    4. Redistribution. You may reproduce and distribute copies of the
       Work or Derivative Works thereof in any medium, with or without
       modifications, and in Source or Object form, provided that You
       meet the following conditions:
@@ -128,7 +128,7 @@
       reproduction, and distribution of the Work otherwise complies with
       the conditions stated in this License.
 
-   5. Submission of Contributions. Unless You explicitly state otherwise,
+    5. Submission of Contributions. Unless You explicitly state otherwise,
       any Contribution intentionally submitted for inclusion in the Work
       by You to the Licensor shall be under the terms and conditions of
       this License, without any additional terms or conditions.
@@ -136,12 +136,12 @@
       the terms of any separate license agreement you may have executed
       with Licensor regarding such Contributions.
 
-   6. Trademarks. This License does not grant permission to use the trade
+    6. Trademarks. This License does not grant permission to use the trade
       names, trademarks, service marks, or product names of the Licensor,
       except as required for reasonable and customary use in describing the
       origin of the Work and reproducing the content of the NOTICE file.
 
-   7. Disclaimer of Warranty. Unless required by applicable law or
+    7. Disclaimer of Warranty. Unless required by applicable law or
       agreed to in writing, Licensor provides the Work (and each
       Contributor provides its Contributions) on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
@@ -151,7 +151,7 @@
       appropriateness of using or redistributing the Work and assume any
       risks associated with Your exercise of permissions under this License.
 
-   8. Limitation of Liability. In no event and under no legal theory,
+    8. Limitation of Liability. In no event and under no legal theory,
       whether in tort (including negligence), contract, or otherwise,
       unless required by applicable law (such as deliberate and grossly
       negligent acts) or agreed to in writing, shall any Contributor be
@@ -163,7 +163,7 @@
       other commercial damages or losses), even if such Contributor
       has been advised of the possibility of such damages.
 
-   9. Accepting Warranty or Additional Liability. While redistributing
+    9. Accepting Warranty or Additional Liability. While redistributing
       the Work or Derivative Works thereof, You may choose to offer,
       and charge a fee for, acceptance of support, warranty, indemnity,
       or other liability obligations and/or rights consistent with this
@@ -174,9 +174,9 @@
       incurred by, or claims asserted against, such Contributor by reason
       of your accepting any such warranty or additional liability.
 
-   END OF TERMS AND CONDITIONS
+    END OF TERMS AND CONDITIONS
 
-   APPENDIX: How to apply the Apache License to your work.
+    APPENDIX: How to apply the Apache License to your work.
 
       To apply the Apache License to your work, attach the following
       boilerplate notice, with the fields enclosed by brackets "[]"
@@ -187,16 +187,49 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+    Copyright [yyyy] [name of copyright owner]
 
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
 
        http://www.apache.org/licenses/LICENSE-2.0
 
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+----------
+The following license is exclusively used by the template generated header files.
+
+BSD 3-Clause License
+
+Copyright (c) 2019, "WebGPU native" developers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/OWNERS b/OWNERS
index 18239af..a7cef5e 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,3 +1,16 @@
+cwallez@chromium.org
+enga@chromium.org
+jiawei.shao@intel.com
+
+# Backup reviewers if needed.
+bclayton@google.com
+kainino@chromium.org
+
+per-file dawn.json=kainino@chromium.org
+per-file DEPS=*
+per-file README.md=file://docs/dawn/OWNERS
+
+# Tint specific OWNERS
 amaiorano@google.com
 bclayton@chromium.org
 bclayton@google.com
diff --git a/OWNERS.dawn b/OWNERS.dawn
new file mode 100644
index 0000000..1856f30
--- /dev/null
+++ b/OWNERS.dawn
@@ -0,0 +1,11 @@
+cwallez@chromium.org
+enga@chromium.org
+jiawei.shao@intel.com
+
+# Backup reviewers if needed.
+bclayton@google.com
+kainino@chromium.org
+
+per-file dawn.json=kainino@chromium.org
+per-file DEPS=*
+per-file README.md=file://docs/dawn/OWNERS
diff --git a/OWNERS.tint b/OWNERS.tint
new file mode 100644
index 0000000..18239af
--- /dev/null
+++ b/OWNERS.tint
@@ -0,0 +1,7 @@
+amaiorano@google.com
+bclayton@chromium.org
+bclayton@google.com
+cwallez@chromium.org
+dneto@google.com
+jrprice@google.com
+rharrison@chromium.org
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
old mode 100755
new mode 100644
index 97623c1..968a27c
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,42 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Presubmit script for Tint.
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into depot_tools.
-"""
 
 import re
 
 USE_PYTHON3 = True
 
-
-def _LicenseHeader(input_api):
-    """Returns the license header regexp."""
-    # Accept any year number from 2019 to the current year
-    current_year = int(input_api.time.strftime('%Y'))
-    allowed_years = (str(s) for s in reversed(xrange(2019, current_year + 1)))
-    years_re = '(' + '|'.join(allowed_years) + ')'
-    license_header = (
-        r'.*? Copyright( \(c\))? %(year)s The Tint [Aa]uthors\n '
-        r'.*?\n'
-        r'.*? Licensed under the Apache License, Version 2.0 (the "License");\n'
-        r'.*? you may not use this file except in compliance with the License.\n'
-        r'.*? You may obtain a copy of the License at\n'
-        r'.*?\n'
-        r'.*?     http://www.apache.org/licenses/LICENSE-2.0\n'
-        r'.*?\n'
-        r'.*? Unless required by applicable law or agreed to in writing, software\n'
-        r'.*? distributed under the License is distributed on an "AS IS" BASIS,\n'
-        r'.*? WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
-        r'.*? See the License for the specific language governing permissions and\n'
-        r'.*? limitations under the License.\n') % {
-            'year': years_re,
-        }
-    return license_header
-
-
-REGEXES = [
+NONINCLUSIVE_REGEXES = [
     r"(?i)black[-_]?list",
     r"(?i)white[-_]?list",
     r"(?i)gr[ea]y[-_]?list",
@@ -97,18 +67,19 @@
     r"(?i)red[-_]?line",
 ]
 
-REGEX_LIST = []
-for reg in REGEXES:
-    REGEX_LIST.append(re.compile(reg))
+NONINCLUSIVE_REGEX_LIST = []
+for reg in NONINCLUSIVE_REGEXES:
+    NONINCLUSIVE_REGEX_LIST.append(re.compile(reg))
 
-def CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
+
+def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
     """Checks the files for non-inclusive language."""
 
     matches = []
     for f in input_api.AffectedFiles(include_deletes=False,
                                      file_filter=source_file_filter):
         for line_num, line in f.ChangedContents():
-            for reg in REGEX_LIST:
+            for reg in NONINCLUSIVE_REGEX_LIST:
                 match = reg.search(line)
                 if match:
                     matches.append(
@@ -124,44 +95,53 @@
     return []
 
 
-def CheckChange(input_api, output_api):
+def _NonInclusiveFileFilter(file):
+    filter_list = [
+        "PRESUBMIT.py",  # Non-inclusive language check data
+        "docs/tint/spirv-input-output-variables.md",  # External URL
+        "test/tint/samples/compute_boids.wgsl ",  # External URL
+    ]
+    return file in filter_list
+
+
+def _DoCommonChecks(input_api, output_api):
     results = []
-
-    results += input_api.canned_checks.CheckChangeHasDescription(
-        input_api, output_api)
-    results += input_api.canned_checks.CheckPatchFormatted(input_api,
-                                                           output_api,
-                                                           check_python=True)
-    results += input_api.canned_checks.CheckGNFormatted(input_api, output_api)
-    results += input_api.canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol(
-        input_api, output_api)
-    results += input_api.canned_checks.CheckChangeHasNoTabs(
-        input_api, output_api)
-    results += input_api.canned_checks.CheckChangeTodoHasOwner(
-        input_api, output_api)
-    results += input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
-        input_api, output_api)
-    results += input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)
-    results += input_api.canned_checks.CheckChangeLintsClean(input_api,
-                                                             output_api,
-                                                             lint_filters="")
-
-    def NonInclusiveFileFilter(file):
-        filter_list = [
-            "docs/tint/spirv-input-output-variables.md",  # External URL
-            "test/tint/samples/compute_boids.wgsl ",  # External URL
-        ]
-        return file in filter_list
-
-    results += CheckNonInclusiveLanguage(input_api, output_api,
-                                         NonInclusiveFileFilter)
-
+    results.extend(
+        input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckPatchFormatted(input_api,
+                                                    output_api,
+                                                    check_python=True))
+    results.extend(
+        input_api.canned_checks.CheckChangeHasDescription(
+            input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckGNFormatted(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol(
+            input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangeHasNoTabs(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangeTodoHasOwner(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
+            input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckDoNotSubmit(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangeLintsClean(input_api,
+                                                      output_api,
+                                                      lint_filters=""))
+    results.extend(
+        _CheckNonInclusiveLanguage(input_api, output_api,
+                                   _NonInclusiveFileFilter))
     return results
 
 
 def CheckChangeOnUpload(input_api, output_api):
-    return CheckChange(input_api, output_api)
+    return _DoCommonChecks(input_api, output_api)
 
 
 def CheckChangeOnCommit(input_api, output_api):
-    return CheckChange(input_api, output_api)
+    return _DoCommonChecks(input_api, output_api)
diff --git a/PRESUBMIT.py.dawn b/PRESUBMIT.py.dawn
new file mode 100644
index 0000000..899e0e2
--- /dev/null
+++ b/PRESUBMIT.py.dawn
@@ -0,0 +1,38 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import platform
+import subprocess
+
+USE_PYTHON3 = True
+
+
+def _DoCommonChecks(input_api, output_api):
+    results = []
+    results.extend(
+        input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckPatchFormatted(input_api,
+                                                    output_api,
+                                                    check_python=True))
+    return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+    return _DoCommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+    return _DoCommonChecks(input_api, output_api)
diff --git a/PRESUBMIT.py.tint b/PRESUBMIT.py.tint
new file mode 100755
index 0000000..97623c1
--- /dev/null
+++ b/PRESUBMIT.py.tint
@@ -0,0 +1,167 @@
+# Copyright 2020 The Tint Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Presubmit script for Tint.
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into depot_tools.
+"""
+
+import re
+
+USE_PYTHON3 = True
+
+
+def _LicenseHeader(input_api):
+    """Returns the license header regexp."""
+    # Accept any year number from 2019 to the current year
+    current_year = int(input_api.time.strftime('%Y'))
+    allowed_years = (str(s) for s in reversed(xrange(2019, current_year + 1)))
+    years_re = '(' + '|'.join(allowed_years) + ')'
+    license_header = (
+        r'.*? Copyright( \(c\))? %(year)s The Tint [Aa]uthors\n '
+        r'.*?\n'
+        r'.*? Licensed under the Apache License, Version 2.0 (the "License");\n'
+        r'.*? you may not use this file except in compliance with the License.\n'
+        r'.*? You may obtain a copy of the License at\n'
+        r'.*?\n'
+        r'.*?     http://www.apache.org/licenses/LICENSE-2.0\n'
+        r'.*?\n'
+        r'.*? Unless required by applicable law or agreed to in writing, software\n'
+        r'.*? distributed under the License is distributed on an "AS IS" BASIS,\n'
+        r'.*? WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
+        r'.*? See the License for the specific language governing permissions and\n'
+        r'.*? limitations under the License.\n') % {
+            'year': years_re,
+        }
+    return license_header
+
+
+REGEXES = [
+    r"(?i)black[-_]?list",
+    r"(?i)white[-_]?list",
+    r"(?i)gr[ea]y[-_]?list",
+    r"(?i)(first class citizen)",
+    r"(?i)black[-_]?hat",
+    r"(?i)white[-_]?hat",
+    r"(?i)gr[ea]y[-_]?hat",
+    r"(?i)master",
+    r"(?i)slave",
+    r"(?i)\bhim\b",
+    r"(?i)\bhis\b",
+    r"(?i)\bshe\b",
+    r"(?i)\bher\b",
+    r"(?i)\bguys\b",
+    r"(?i)\bhers\b",
+    r"(?i)\bman\b",
+    r"(?i)\bwoman\b",
+    r"(?i)\she\s",
+    r"(?i)\she$",
+    r"(?i)^he\s",
+    r"(?i)^he$",
+    r"(?i)\she['|\u2019]d\s",
+    r"(?i)\she['|\u2019]d$",
+    r"(?i)^he['|\u2019]d\s",
+    r"(?i)^he['|\u2019]d$",
+    r"(?i)\she['|\u2019]s\s",
+    r"(?i)\she['|\u2019]s$",
+    r"(?i)^he['|\u2019]s\s",
+    r"(?i)^he['|\u2019]s$",
+    r"(?i)\she['|\u2019]ll\s",
+    r"(?i)\she['|\u2019]ll$",
+    r"(?i)^he['|\u2019]ll\s",
+    r"(?i)^he['|\u2019]ll$",
+    r"(?i)grandfather",
+    r"(?i)\bmitm\b",
+    r"(?i)\bcrazy\b",
+    r"(?i)\binsane\b",
+    r"(?i)\bblind\sto\b",
+    r"(?i)\bflying\sblind\b",
+    r"(?i)\bblind\seye\b",
+    r"(?i)\bcripple\b",
+    r"(?i)\bcrippled\b",
+    r"(?i)\bdumb\b",
+    r"(?i)\bdummy\b",
+    r"(?i)\bparanoid\b",
+    r"(?i)\bsane\b",
+    r"(?i)\bsanity\b",
+    r"(?i)red[-_]?line",
+]
+
+REGEX_LIST = []
+for reg in REGEXES:
+    REGEX_LIST.append(re.compile(reg))
+
+def CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
+    """Checks the files for non-inclusive language."""
+
+    matches = []
+    for f in input_api.AffectedFiles(include_deletes=False,
+                                     file_filter=source_file_filter):
+        for line_num, line in f.ChangedContents():
+            for reg in REGEX_LIST:
+                match = reg.search(line)
+                if match:
+                    matches.append(
+                        "{} ({}): found non-inclusive language: {}".format(
+                            f.LocalPath(), line_num, match.group(0)))
+
+    if len(matches):
+        return [
+            output_api.PresubmitPromptWarning('Non-inclusive language found:',
+                                              items=matches)
+        ]
+
+    return []
+
+
+def CheckChange(input_api, output_api):
+    results = []
+
+    results += input_api.canned_checks.CheckChangeHasDescription(
+        input_api, output_api)
+    results += input_api.canned_checks.CheckPatchFormatted(input_api,
+                                                           output_api,
+                                                           check_python=True)
+    results += input_api.canned_checks.CheckGNFormatted(input_api, output_api)
+    results += input_api.canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol(
+        input_api, output_api)
+    results += input_api.canned_checks.CheckChangeHasNoTabs(
+        input_api, output_api)
+    results += input_api.canned_checks.CheckChangeTodoHasOwner(
+        input_api, output_api)
+    results += input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
+        input_api, output_api)
+    results += input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)
+    results += input_api.canned_checks.CheckChangeLintsClean(input_api,
+                                                             output_api,
+                                                             lint_filters="")
+
+    def NonInclusiveFileFilter(file):
+        filter_list = [
+            "docs/tint/spirv-input-output-variables.md",  # External URL
+            "test/tint/samples/compute_boids.wgsl ",  # External URL
+        ]
+        return file in filter_list
+
+    results += CheckNonInclusiveLanguage(input_api, output_api,
+                                         NonInclusiveFileFilter)
+
+    return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+    return CheckChange(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+    return CheckChange(input_api, output_api)
diff --git a/README.chromium b/README.chromium
new file mode 100644
index 0000000..e14cc8b
--- /dev/null
+++ b/README.chromium
@@ -0,0 +1,12 @@
+Name: Dawn
+Short Name: dawn
+URL: https://dawn.googlesource.com/dawn
+License: Apache 2.0
+License File: LICENSE
+Security Critical: yes
+
+Description:
+Dawn is an implementation of the WebGPU standard exposed through a C/C++
+interface. It provides implementations on top of native graphics APIs like
+D3D12, Metal and Vulkan, as well as a client-server implementation to remote
+WebGPU outside sandboxed context like Chromium's render processes.
diff --git a/README.md b/README.md
index fbe6cfb..a430410 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,56 @@
+![Dawn's logo: a sun rising behind a stylized mountain inspired by the WebGPU logo. The text "Dawn" is written below it.](docs/imgs/dawn_logo.png "Dawn's logo")
+
+# Dawn, a WebGPU implementation
+
+Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
+More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
+Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
+
+Dawn provides several WebGPU building blocks:
+ - **WebGPU C/C++ headers** that applications and other building blocks use.
+   - The `webgpu.h` version that Dawn implements.
+   - A C++ wrapper for the `webgpu.h`.
+ - **A "native" implementation of WebGPU** using platforms' GPU APIs:
+   - **D3D12** on Windows 10
+   - **Metal** on macOS and iOS
+   - **Vulkan** on Windows, Linux, ChromeOS, Android and Fuchsia
+   - OpenGL as best effort where available
+ - **A client-server implementation of WebGPU** for applications that are in a sandbox without access to native drivers
+
+Helpful links:
+
+ - [Dawn's bug tracker](https://bugs.chromium.org/p/dawn/issues/entry) if you find issues with Dawn.
+ - [Dawn's mailing list](https://groups.google.com/forum/#!members/dawn-graphics) for other discussions related to Dawn.
+ - [Dawn's source code](https://dawn.googlesource.com/dawn)
+ - [Dawn's Matrix chatroom](https://matrix.to/#/#webgpu-dawn:matrix.org) for live discussion around contributing or using Dawn.
+ - [WebGPU's Matrix chatroom](https://matrix.to/#/#WebGPU:matrix.org)
+
+## Documentation table of content
+
+Developer documentation:
+
+ - [Dawn overview](docs/dawn/overview.md)
+ - [Building Dawn](docs/dawn/building.md)
+ - [Contributing to Dawn](docs/dawn/contributing.md)
+ - [Testing Dawn](docs/dawn/testing.md)
+ - [Debugging Dawn](docs/dawn/debugging.md)
+ - [Dawn's infrastructure](docs/dawn/infra.md)
+ - [Dawn errors](docs/dawn/errors.md)
+
+User documentation: (TODO, figure out what overlaps with the webgpu.h docs)
+
+## Status
+
+(TODO)
+
+## License
+
+Apache 2.0 Public License, please see [LICENSE](/LICENSE).
+
+## Disclaimer
+
+This is not an officially supported Google product.
+
 # Tint
 
 Tint is a compiler for the WebGPU Shader Language (WGSL).
diff --git a/README.md.dawn b/README.md.dawn
new file mode 100644
index 0000000..1871388
--- /dev/null
+++ b/README.md.dawn
@@ -0,0 +1,52 @@
+![Dawn's logo: a sun rising behind a stylized mountain inspired by the WebGPU logo. The text "Dawn" is written below it.](docs/imgs/dawn_logo.png "Dawn's logo")
+
+# Dawn, a WebGPU implementation
+
+Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
+More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
+Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
+
+Dawn provides several WebGPU building blocks:
+ - **WebGPU C/C++ headers** that applications and other building blocks use.
+   - The `webgpu.h` version that Dawn implements.
+   - A C++ wrapper for the `webgpu.h`.
+ - **A "native" implementation of WebGPU** using platforms' GPU APIs:
+   - **D3D12** on Windows 10
+   - **Metal** on macOS and iOS
+   - **Vulkan** on Windows, Linux, ChromeOS, Android and Fuchsia
+   - OpenGL as best effort where available
+ - **A client-server implementation of WebGPU** for applications that are in a sandbox without access to native drivers
+
+Helpful links:
+
+ - [Dawn's bug tracker](https://bugs.chromium.org/p/dawn/issues/entry) if you find issues with Dawn.
+ - [Dawn's mailing list](https://groups.google.com/forum/#!members/dawn-graphics) for other discussions related to Dawn.
+ - [Dawn's source code](https://dawn.googlesource.com/dawn)
+ - [Dawn's Matrix chatroom](https://matrix.to/#/#webgpu-dawn:matrix.org) for live discussion around contributing or using Dawn.
+ - [WebGPU's Matrix chatroom](https://matrix.to/#/#WebGPU:matrix.org)
+
+## Documentation table of content
+
+Developer documentation:
+
+ - [Dawn overview](docs/dawn/overview.md)
+ - [Building Dawn](docs/dawn/building.md)
+ - [Contributing to Dawn](docs/dawn/contributing.md)
+ - [Testing Dawn](docs/dawn/testing.md)
+ - [Debugging Dawn](docs/dawn/debugging.md)
+ - [Dawn's infrastructure](docs/dawn/infra.md)
+ - [Dawn errors](docs/dawn/errors.md)
+
+User documentation: (TODO, figure out what overlaps with the webgpu.h docs)
+
+## Status
+
+(TODO)
+
+## License
+
+Apache 2.0 Public License, please see [LICENSE](/LICENSE).
+
+## Disclaimer
+
+This is not an officially supported Google product.
diff --git a/README.md.tint b/README.md.tint
new file mode 100644
index 0000000..fbe6cfb
--- /dev/null
+++ b/README.md.tint
@@ -0,0 +1,106 @@
+# Tint
+
+Tint is a compiler for the WebGPU Shader Language (WGSL).
+
+This is not an officially supported Google product.
+
+## Requirements
+ * Git
+ * CMake (3.10.2 or later)
+ * Ninja (or other build tool)
+ * Python, for fetching dependencies
+ * [depot_tools] in your path
+
+## Build options
+ * `TINT_BUILD_SPV_READER` : enable the SPIR-V input reader (off by default)
+ * `TINT_BUILD_WGSL_READER` : enable the WGSL input reader (on by default)
+ * `TINT_BUILD_SPV_WRITER` : enable the SPIR-V output writer (on by default)
+ * `TINT_BUILD_WGSL_WRITER` : enable the WGSL output writer (on by default)
+ * `TINT_BUILD_FUZZERS` : enable building fuzzzers (off by default)
+
+## Building
+Tint uses Chromium dependency management so you need to install [depot_tools]
+and add it to your PATH.
+
+[depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+### Getting source & dependencies
+
+```sh
+# Clone the repo as "tint"
+git clone https://dawn.googlesource.com/tint tint
+cd tint
+
+# Bootstrap the gclient configuration
+cp standalone.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+### Compiling using CMake + Ninja
+```sh
+mkdir -p out/Debug
+cd out/Debug
+cmake -GNinja ../..
+ninja # or autoninja
+```
+
+### Compiling using CMake + make
+```sh
+mkdir -p out/Debug
+cd out/Debug
+cmake ../..
+make # -j N for N-way parallel build
+```
+
+### Compiling using gn + ninja
+```sh
+mkdir -p out/Debug
+gn gen out/Debug
+autoninja -C out/Debug
+```
+
+### Fuzzers on MacOS
+If you are attempting fuzz, using `TINT_BUILD_FUZZERS=ON`, the version of llvm
+in the XCode SDK does not have the needed libfuzzer functionality included.
+
+The build error that you will see from using the XCode SDK will look something
+like this:
+```
+ld: file not found:/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/11.0.0/lib/darwin/libclang_rt.fuzzer_osx.a
+```
+
+The solution to this problem is to use a full version llvm, like what you would
+get via homebrew, `brew install llvm`, and use something like `CC=<path to full
+clang> cmake ..` to setup a build using that toolchain.
+
+### Checking [chromium-style] issues in CMake builds
+The gn based work flow uses the Chromium toolchain for building in anticipation
+of integration of Tint into Chromium based projects. This toolchain has
+additional plugins for checking for style issues, which are marked with
+[chromium-style] in log messages. This means that this toolchain is more strict
+then the default clang toolchain.
+
+In the future we will have a CQ that will build this work flow and flag issues
+automatically. Until that is in place, to avoid causing breakages you can run
+the [chromium-style] checks using the CMake based work flows. This requires
+setting `CC` to the version of clang checked out by `gclient sync` and setting
+the `TINT_CHECK_CHROMIUM_STYLE` to `ON`.
+
+```sh
+mkdir -p out/style
+cd out/style
+cmake ../..
+CC=../../third_party/llvm-build/Release+Asserts/bin/clang cmake -DTINT_CHECK_CHROMIUM_STYLE=ON ../../ # add -GNinja for ninja builds
+```
+
+## Issues
+Please file any issues or feature requests at
+https://bugs.chromium.org/p/tint/issues/entry
+
+## Contributing
+Please see the CONTRIBUTING and CODE_OF_CONDUCT files on how to contribute to
+Tint.
+
+Tint has a process for supporting [experimental extensions](docs/tint/experimental_extensions.md).
diff --git a/build_overrides/angle.gni b/build_overrides/angle.gni
new file mode 100644
index 0000000..85d4d95
--- /dev/null
+++ b/build_overrides/angle.gni
@@ -0,0 +1,26 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Override for angle_root
+angle_root = "//third_party/angle"
+
+# True if ANGLE can access build/, testing/ and other Chrome folders.
+angle_has_build = true
+
+# Paths to ANGLE dependencies in Dawn
+angle_glslang_dir = "//third_party/vulkan-deps/glslang/src"
+angle_spirv_cross_dir = "//third_party/vulkan-deps/spirv-cross/src"
+angle_spirv_headers_dir = "//third_party/vulkan-deps/spirv-headers/src"
+angle_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
+angle_vulkan_memory_allocator_dir = "//third_party/vulkan_memory_allocator"
diff --git a/build_overrides/build.gni b/build_overrides/build.gni
index 11ce9f4..8717867 100644
--- a/build_overrides/build.gni
+++ b/build_overrides/build.gni
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors.
+# Copyright 2022 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,10 +13,10 @@
 # limitations under the License.
 
 declare_args() {
-  # Tell Tint and dependencies to not do Chromium-specific things
+  # Tell Dawn and dependencies to not do Chromium-specific things
   build_with_chromium = false
 
-  # In standalone Tint builds, don't try to use the hermetic install of Xcode
+  # In standalone Dawn builds, don't try to use the hermetic install of Xcode
   # that Chromium uses
   use_system_xcode = ""
 
diff --git a/build_overrides/dawn.gni b/build_overrides/dawn.gni
new file mode 100644
index 0000000..87e1ded
--- /dev/null
+++ b/build_overrides/dawn.gni
@@ -0,0 +1,39 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# These are variables that are overridable by projects that include Dawn.
+# The values in this file are the defaults for when we are building from
+# Dawn's repository.
+
+# Whether we are building from Dawn's repository.
+# MUST be unset in other projects (will default to false).
+dawn_standalone = true
+
+# True if Dawn can access build/, testing/ and other Chrome folders.
+dawn_has_build = true
+
+# Defaults for these are set again in dawn_overrides_with_defaults.gni so that
+# users of Dawn don't have to set dirs if they happen to use the same as Dawn.
+
+# The paths to Dawn's dependencies
+dawn_abseil_dir = "//third_party/abseil-cpp"
+dawn_angle_dir = "//third_party/angle"
+dawn_jinja2_dir = "//third_party/jinja2"
+dawn_glfw_dir = "//third_party/glfw"
+dawn_googletest_dir = "//third_party/googletest"
+dawn_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
+dawn_swiftshader_dir = "//third_party/swiftshader"
+dawn_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
+dawn_vulkan_validation_layers_dir =
+    "//third_party/vulkan-deps/vulkan-validation-layers/src"
diff --git a/build_overrides/glslang.gni b/build_overrides/glslang.gni
index 80a30c9..69d968d 100644
--- a/build_overrides/glslang.gni
+++ b/build_overrides/glslang.gni
@@ -1,4 +1,4 @@
-# Copyright 2021 The Dawn Authors
+# Copyright 2018 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/build_overrides/spirv_tools.gni b/build_overrides/spirv_tools.gni
index 13bffc5..48e7b11 100644
--- a/build_overrides/spirv_tools.gni
+++ b/build_overrides/spirv_tools.gni
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors
+# Copyright 2018 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# We are building inside Tint
+# We are building inside Dawn
 spirv_tools_standalone = false
 
-# Paths to SPIRV-Tools dependencies in Tint
+# Paths to SPIRV-Tools dependencies in Dawn
 spirv_tools_googletest_dir = "//third_party/googletest"
 spirv_tools_spirv_headers_dir = "//third_party/vulkan-deps/spirv-headers/src"
diff --git a/build_overrides/swiftshader.gni b/build_overrides/swiftshader.gni
new file mode 100644
index 0000000..dc3579b
--- /dev/null
+++ b/build_overrides/swiftshader.gni
@@ -0,0 +1,23 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We are building SwiftShader inside Dawn
+swiftshader_standalone = false
+
+# Path to SwiftShader
+swiftshader_dir = "//third_party/swiftshader"
+
+# Forward to ozone_platform_x11 when inside Dawn's repository
+import("../scripts/dawn_features.gni")
+ozone_platform_x11 = dawn_use_x11
diff --git a/build_overrides/tint.gni b/build_overrides/tint.gni
index fdcc866..8349998 100644
--- a/build_overrides/tint.gni
+++ b/build_overrides/tint.gni
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors.
+# Copyright 2020 The Dawn Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,4 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This file contains Tint-related overrides.
+tint_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
+tint_spirv_headers_dir = "//third_party/vulkan-deps/spirv-headers/src"
+
+tint_build_spv_reader = true
+tint_build_spv_writer = true
+tint_build_wgsl_reader = true
+tint_build_wgsl_writer = true
diff --git a/build_overrides/vulkan_common.gni b/build_overrides/vulkan_common.gni
new file mode 100644
index 0000000..9a883e7
--- /dev/null
+++ b/build_overrides/vulkan_common.gni
@@ -0,0 +1,19 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+
+# Subdirectories for generated files
+vulkan_data_subdir = "vulkandata"
+vulkan_gen_subdir = ""
diff --git a/build_overrides/vulkan_headers.gni b/build_overrides/vulkan_headers.gni
new file mode 100644
index 0000000..4c0047a
--- /dev/null
+++ b/build_overrides/vulkan_headers.gni
@@ -0,0 +1,17 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Fake the vulkan_use_x11 when inside Dawn's repository
+import("../scripts/dawn_features.gni")
+vulkan_use_x11 = dawn_use_x11
diff --git a/build_overrides/vulkan_loader.gni b/build_overrides/vulkan_loader.gni
new file mode 100644
index 0000000..6f6eaf0
--- /dev/null
+++ b/build_overrides/vulkan_loader.gni
@@ -0,0 +1,17 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/vulkan_common.gni")
+
+vulkan_loader_shared = !is_mac
diff --git a/build_overrides/vulkan_tools.gni b/build_overrides/vulkan_tools.gni
new file mode 100644
index 0000000..7bd4d99
--- /dev/null
+++ b/build_overrides/vulkan_tools.gni
@@ -0,0 +1,15 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/vulkan_common.gni")
diff --git a/build_overrides/vulkan_validation_layers.gni b/build_overrides/vulkan_validation_layers.gni
new file mode 100644
index 0000000..9463e66
--- /dev/null
+++ b/build_overrides/vulkan_validation_layers.gni
@@ -0,0 +1,25 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/vulkan_common.gni")
+
+# These are variables that are overridable by projects that include Dawn.
+# The values in this file are the defaults for when we are building from
+# Dawn's repository.
+vvl_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
+vvl_glslang_dir = "//third_party/vulkan-deps/glslang/src"
+
+# Forward to ozone_platform_x11 when inside Dawn's repository
+import("../scripts/dawn_features.gni")
+ozone_platform_x11 = dawn_use_x11
diff --git a/codereview.settings b/codereview.settings
new file mode 100644
index 0000000..10cc2bf
--- /dev/null
+++ b/codereview.settings
@@ -0,0 +1,5 @@
+# This file is used by git cl to get repository specific information.
+GERRIT_HOST: True
+CODE_REVIEW_SERVER: https://dawn-review.googlesource.com
+GERRIT_SQUASH_UPLOADS: False
+TRYSERVER_GERRIT_URL: https://dawn-review.googlesource.com
diff --git a/dawn.json b/dawn.json
new file mode 100644
index 0000000..3578858
--- /dev/null
+++ b/dawn.json
@@ -0,0 +1,2848 @@
+{
+    "_comment": [
+        "Copyright 2017 The Dawn Authors",
+        "",
+        "Licensed under the Apache License, Version 2.0 (the \"License\");",
+        "you may not use this file except in compliance with the License.",
+        "You may obtain a copy of the License at",
+        "",
+        "    http://www.apache.org/licenses/LICENSE-2.0",
+        "",
+        "Unless required by applicable law or agreed to in writing, software",
+        "distributed under the License is distributed on an \"AS IS\" BASIS,",
+        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+        "See the License for the specific language governing permissions and",
+        "limitations under the License."
+    ],
+
+    "_doc": "See docs/dawn/codegen.md",
+
+    "_metadata": {
+        "api": "WebGPU",
+        "c_prefix": "WGPU",
+        "namespace": "wgpu",
+        "proc_table_prefix": "Dawn",
+        "native_namespace": "dawn native",
+        "copyright_year": "2019"
+    },
+
+    "create instance": {
+        "category": "function",
+        "returns": "instance",
+        "args": [
+            {"name": "descriptor", "type": "instance descriptor", "annotation": "const*", "optional": true}
+        ]
+    },
+    "proc": {
+        "category": "function pointer",
+        "returns": "void",
+        "args": []
+    },
+    "get proc address": {
+        "category": "function",
+        "returns": "proc",
+        "args": [
+            {"name": "device", "type": "device"},
+            {"name": "proc name", "type": "char", "annotation": "const*"}
+        ]
+    },
+
+    "request adapter options": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "compatible surface", "type": "surface", "optional": true},
+            {"name": "power preference", "type": "power preference", "default": "undefined"},
+            {"name": "force fallback adapter", "type": "bool", "default": "false"}
+        ]
+    },
+    "request adapter status": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "unavailable"},
+            {"value": 2, "name": "error"},
+            {"value": 3, "name": "unknown"}
+        ]
+    },
+    "request adapter callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "request adapter status"},
+            {"name": "adapter", "type": "adapter"},
+            {"name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "adapter": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "get limits",
+                "returns": "bool",
+                "args": [
+                    {"name": "limits", "type": "supported limits", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "get properties",
+                "args": [
+                    {"name": "properties", "type": "adapter properties", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "has feature",
+                "returns": "bool",
+                "args": [
+                    {"name": "feature", "type": "feature name"}
+                ]
+            },
+            {
+                "name": "enumerate features",
+                "returns": "size_t",
+                "args": [
+                    {"name": "features", "type": "feature name", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "request device",
+                "args": [
+                    {"name": "descriptor", "type": "device descriptor", "annotation": "const*"},
+                    {"name": "callback", "type": "request device callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "create device",
+                "tags": ["dawn"],
+                "returns": "device",
+                "args": [
+                    {"name": "descriptor", "type": "device descriptor", "annotation": "const*", "optional": "true"}
+                ]
+            }
+        ]
+    },
+    "adapter properties": {
+        "category": "structure",
+        "extensible": "out",
+        "members": [
+            {"name": "vendor ID", "type": "uint32_t"},
+            {"name": "device ID", "type": "uint32_t"},
+            {"name": "name", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "driver description", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "adapter type", "type": "adapter type"},
+            {"name": "backend type", "type": "backend type"}
+        ]
+    },
+    "adapter type": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "discrete GPU"},
+            {"value": 1, "name": "integrated GPU"},
+            {"value": 2, "name": "CPU"},
+            {"value": 3, "name": "unknown"}
+        ]
+    },
+    "device descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "required features count", "type": "uint32_t", "default": 0},
+            {"name": "required features", "type": "feature name", "annotation": "const*", "length": "required features count", "default": "nullptr"},
+            {"name": "required limits", "type": "required limits", "annotation": "const*", "optional": true},
+            {"name": "default queue", "type": "queue descriptor", "tags": ["upstream"]}
+        ]
+    },
+    "dawn toggles device descriptor": {
+        "tags": ["dawn", "native"],
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "force enabled toggles count", "type": "uint32_t", "default": 0},
+            {"name": "force enabled toggles", "type": "char", "annotation": "const*const*", "length": "force enabled toggles count"},
+            {"name": "force disabled toggles count", "type": "uint32_t", "default": 0},
+            {"name": "force disabled toggles", "type": "char", "annotation": "const*const*", "length": "force disabled toggles count"}
+        ]
+    },
+    "dawn cache device descriptor" : {
+        "tags": ["dawn", "native"],
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "isolation key", "type": "char", "annotation": "const*", "length": "strlen", "default": "\"\""}
+        ]
+    },
+    "address mode": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "repeat"},
+            {"value": 1, "name": "mirror repeat"},
+            {"value": 2, "name": "clamp to edge"}
+        ]
+    },
+    "backend type": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "null"},
+            {"value": 1, "name": "WebGPU"},
+            {"value": 2, "name": "D3D11"},
+            {"value": 3, "name": "D3D12"},
+            {"value": 4, "name": "metal"},
+            {"value": 5, "name": "vulkan"},
+            {"value": 6, "name": "openGL"},
+            {"value": 7, "name": "openGLES"}
+        ]
+    },
+    "bind group": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "bind group entry": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "binding", "type": "uint32_t"},
+            {"name": "buffer", "type": "buffer", "optional": true},
+            {"name": "offset", "type": "uint64_t", "default": "0"},
+            {"name": "size", "type": "uint64_t"},
+            {"name": "sampler", "type": "sampler", "optional": true},
+            {"name": "texture view", "type": "texture view", "optional": true}
+        ]
+    },
+    "bind group descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "layout", "type": "bind group layout"},
+            {"name": "entry count", "type": "uint32_t"},
+            {"name": "entries", "type": "bind group entry", "annotation": "const*", "length": "entry count"}
+        ]
+    },
+    "bind group layout": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+
+    "buffer binding type": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined", "valid": false},
+            {"value": 1, "name": "uniform"},
+            {"value": 2, "name": "storage"},
+            {"value": 3, "name": "read only storage"}
+        ]
+    },
+    "buffer binding layout": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "type", "type": "buffer binding type", "default": "undefined"},
+            {"name": "has dynamic offset", "type": "bool", "default": "false"},
+            {"name": "min binding size", "type": "uint64_t", "default": "0"}
+        ]
+    },
+
+    "sampler binding type": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined", "valid": false},
+            {"value": 1, "name": "filtering"},
+            {"value": 2, "name": "non filtering"},
+            {"value": 3, "name": "comparison"}
+        ]
+    },
+    "sampler binding layout": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "type", "type": "sampler binding type", "default": "undefined"}
+        ]
+    },
+
+    "texture sample type": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined", "valid": false},
+            {"value": 1, "name": "float"},
+            {"value": 2, "name": "unfilterable float"},
+            {"value": 3, "name": "depth"},
+            {"value": 4, "name": "sint"},
+            {"value": 5, "name": "uint"}
+        ]
+    },
+    "texture binding layout": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "sample type", "type": "texture sample type", "default": "undefined"},
+            {"name": "view dimension", "type": "texture view dimension", "default": "undefined"},
+            {"name": "multisampled", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "external texture binding entry": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "external texture", "type": "external texture"}
+        ]
+    },
+
+    "external texture binding layout": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": []
+    },
+
+    "storage texture access": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined", "valid": false},
+            {"value": 1, "name": "write only"}
+        ]
+    },
+    "storage texture binding layout": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "access", "type": "storage texture access", "default": "undefined"},
+            {"name": "format", "type": "texture format", "default": "undefined"},
+            {"name": "view dimension", "type": "texture view dimension", "default": "undefined"}
+        ]
+    },
+
+    "bind group layout entry": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "binding", "type": "uint32_t"},
+            {"name": "visibility", "type": "shader stage"},
+            {"name": "buffer", "type": "buffer binding layout"},
+            {"name": "sampler", "type": "sampler binding layout"},
+            {"name": "texture", "type": "texture binding layout"},
+            {"name": "storage texture", "type": "storage texture binding layout"}
+        ]
+    },
+    "bind group layout descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "entry count", "type": "uint32_t"},
+            {"name": "entries", "type": "bind group layout entry", "annotation": "const*", "length": "entry count"}
+        ]
+    },
+    "blend component": {
+        "category": "structure",
+        "extensible": false,
+        "members": [
+            {"name": "operation", "type": "blend operation", "default": "add"},
+            {"name": "src factor", "type": "blend factor", "default": "one"},
+            {"name": "dst factor", "type": "blend factor", "default": "zero"}
+        ]
+    },
+    "blend factor": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "zero"},
+            {"value": 1, "name": "one"},
+            {"value": 2, "name": "src"},
+            {"value": 3, "name": "one minus src"},
+            {"value": 4, "name": "src alpha"},
+            {"value": 5, "name": "one minus src alpha"},
+            {"value": 6, "name": "dst"},
+            {"value": 7, "name": "one minus dst"},
+            {"value": 8, "name": "dst alpha"},
+            {"value": 9, "name": "one minus dst alpha"},
+            {"value": 10, "name": "src alpha saturated"},
+            {"value": 11, "name": "constant"},
+            {"value": 12, "name": "one minus constant"}
+        ]
+    },
+    "blend operation": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "add"},
+            {"value": 1, "name": "subtract"},
+            {"value": 2, "name": "reverse subtract"},
+            {"value": 3, "name": "min"},
+            {"value": 4, "name": "max"}
+        ]
+    },
+    "bool": {
+        "category": "native"
+    },
+    "buffer": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "map async",
+                "args": [
+                    {"name": "mode", "type": "map mode"},
+                    {"name": "offset", "type": "size_t"},
+                    {"name": "size", "type": "size_t"},
+                    {"name": "callback", "type": "buffer map callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "get mapped range",
+                "returns": "void *",
+                "args": [
+                    {"name": "offset", "type": "size_t", "default": 0},
+                    {"name": "size", "type": "size_t", "default": 0}
+                ]
+            },
+            {
+                "name": "get const mapped range",
+                "returns": "void const *",
+                "args": [
+                    {"name": "offset", "type": "size_t", "default": 0},
+                    {"name": "size", "type": "size_t", "default": 0}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "unmap"
+            },
+            {
+                "name": "destroy"
+            }
+        ]
+    },
+    "buffer descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "usage", "type": "buffer usage"},
+            {"name": "size", "type": "uint64_t"},
+            {"name": "mapped at creation", "type": "bool", "default": "false"}
+        ]
+    },
+    "buffer map callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "buffer map async status"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "buffer map async status": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "error"},
+            {"value": 2, "name": "unknown"},
+            {"value": 3, "name": "device lost"},
+            {"value": 4, "name": "destroyed before callback"},
+            {"value": 5, "name": "unmapped before callback"}
+        ]
+    },
+    "buffer usage": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "map read"},
+            {"value": 2, "name": "map write"},
+            {"value": 4, "name": "copy src"},
+            {"value": 8, "name": "copy dst"},
+            {"value": 16, "name": "index"},
+            {"value": 32, "name": "vertex"},
+            {"value": 64, "name": "uniform"},
+            {"value": 128, "name": "storage"},
+            {"value": 256, "name": "indirect"},
+            {"value": 512, "name": "query resolve"}
+        ]
+    },
+    "char": {
+        "category": "native"
+    },
+    "color": {
+        "category": "structure",
+        "members": [
+            {"name": "r", "type": "double"},
+            {"name": "g", "type": "double"},
+            {"name": "b", "type": "double"},
+            {"name": "a", "type": "double"}
+        ]
+    },
+    "color write mask": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "red"},
+            {"value": 2, "name": "green"},
+            {"value": 4, "name": "blue"},
+            {"value": 8, "name": "alpha"},
+            {"value": 15, "name": "all"}
+        ]
+    },
+    "constant entry": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "key", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "value", "type": "double"}
+        ]
+    },
+    "command buffer": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "command buffer descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+        ]
+    },
+    "command encoder": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "finish",
+                "returns": "command buffer",
+                "args": [
+                    {"name": "descriptor", "type": "command buffer descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "begin compute pass",
+                "returns": "compute pass encoder",
+                "args": [
+                    {"name": "descriptor", "type": "compute pass descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "begin render pass",
+                "returns": "render pass encoder",
+                "args": [
+                    {"name": "descriptor", "type": "render pass descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "copy buffer to buffer",
+                "args": [
+                    {"name": "source", "type": "buffer"},
+                    {"name": "source offset", "type": "uint64_t"},
+                    {"name": "destination", "type": "buffer"},
+                    {"name": "destination offset", "type": "uint64_t"},
+                    {"name": "size", "type": "uint64_t"}
+                ]
+            },
+            {
+                "name": "copy buffer to texture",
+                "args": [
+                    {"name": "source", "type": "image copy buffer", "annotation": "const*"},
+                    {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "copy size", "type": "extent 3D", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "copy texture to buffer",
+                "args": [
+                    {"name": "source", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "destination", "type": "image copy buffer", "annotation": "const*"},
+                    {"name": "copy size", "type": "extent 3D", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "copy texture to texture",
+                "args": [
+                    {"name": "source", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "copy size", "type": "extent 3D", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "copy texture to texture internal",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "source", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "copy size", "type": "extent 3D", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "clear buffer",
+                "args": [
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "offset", "type": "uint64_t", "default": 0},
+                    {"name": "size", "type": "uint64_t", "default": "WGPU_WHOLE_SIZE"}
+                ]
+            },
+            {
+                "name": "inject validation error",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "message", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "insert debug marker",
+                "args": [
+                    {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "pop debug group",
+                "args": []
+            },
+            {
+                "name": "push debug group",
+                "args": [
+                    {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "resolve query set",
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "first query", "type": "uint32_t"},
+                    {"name": "query count", "type": "uint32_t"},
+                    {"name": "destination", "type": "buffer"},
+                    {"name": "destination offset", "type": "uint64_t"}
+                ]
+            },
+            {
+                "name": "write buffer",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "buffer offset", "type": "uint64_t"},
+                    {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "size"},
+                    {"name": "size", "type": "uint64_t"}
+                ]
+            },
+            {
+                "name": "write timestamp",
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "command encoder descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+        ]
+    },
+    "compare function": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined", "valid": false},
+            {"value": 1, "name": "never"},
+            {"value": 2, "name": "less"},
+            {"value": 3, "name": "less equal"},
+            {"value": 4, "name": "greater"},
+            {"value": 5, "name": "greater equal"},
+            {"value": 6, "name": "equal"},
+            {"value": 7, "name": "not equal"},
+            {"value": 8, "name": "always"}
+        ]
+    },
+    "compilation info": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "message count", "type": "uint32_t"},
+            {"name": "messages", "type": "compilation message", "annotation": "const*", "length": "message count"}
+        ]
+    },
+    "compilation info callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "compilation info request status"},
+            {"name": "compilation info", "type": "compilation info", "annotation": "const*"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "compilation info request status": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "error"},
+            {"value": 2, "name": "device lost"},
+            {"value": 3, "name": "unknown"}
+        ]
+    },
+    "compilation message": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "type", "type": "compilation message type"},
+            {"name": "line num", "type": "uint64_t"},
+            {"name": "line pos", "type": "uint64_t"},
+            {"name": "offset", "type": "uint64_t"},
+            {"name": "length", "type": "uint64_t"}
+        ]
+    },
+    "compilation message type": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "error"},
+            {"value": 1, "name": "warning"},
+            {"value": 2, "name": "info"}
+        ]
+    },
+    "compute pass descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "timestamp write count", "type": "uint32_t", "default": 0},
+            {"name": "timestamp writes", "type": "compute pass timestamp write", "annotation": "const*", "length": "timestamp write count"}
+        ]
+    },
+    "compute pass encoder": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "insert debug marker",
+                "args": [
+                    {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "pop debug group",
+                "args": []
+            },
+            {
+                "name": "push debug group",
+                "args": [
+                    {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "set pipeline",
+                "args": [
+                    {"name": "pipeline", "type": "compute pipeline"}
+                ]
+            },
+            {
+                "name": "set bind group",
+                "args": [
+                    {"name": "group index", "type": "uint32_t"},
+                    {"name": "group", "type": "bind group"},
+                    {"name": "dynamic offset count", "type": "uint32_t", "default": "0"},
+                    {"name": "dynamic offsets", "type": "uint32_t", "annotation": "const*", "length": "dynamic offset count", "default": "nullptr"}
+                ]
+            },
+            {
+                "name": "write timestamp",
+                "tags": ["emscripten", "dawn"],
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "begin pipeline statistics query",
+                "tags": ["upstream", "emscripten"],
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "dispatch",
+                "args": [
+                    {"name": "workgroupCountX", "type": "uint32_t"},
+                    {"name": "workgroupCountY", "type": "uint32_t", "default": "1"},
+                    {"name": "workgroupCountZ", "type": "uint32_t", "default": "1"}
+                ]
+            },
+            {
+                "name": "dispatch indirect",
+                "args": [
+                  {"name": "indirect buffer", "type": "buffer"},
+                  {"name": "indirect offset", "type": "uint64_t"}
+                ]
+            },
+            {
+                "name": "end"
+            },
+            {
+                "name": "end pass",
+                "tags": ["deprecated"]
+            },
+            {
+                "name": "end pipeline statistics query",
+                "tags": ["upstream", "emscripten"]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "compute pass timestamp location": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "beginning"},
+            {"value": 1, "name": "end"}
+        ]
+    },
+    "compute pass timestamp write": {
+        "category": "structure",
+        "members": [
+            {"name": "query set", "type": "query set"},
+            {"name": "query index", "type": "uint32_t"},
+            {"name": "location", "type": "compute pass timestamp location"}
+        ]
+    },
+    "compute pipeline": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "get bind group layout",
+                "returns": "bind group layout",
+                "args": [
+                    {"name": "group index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "compute pipeline descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "layout", "type": "pipeline layout", "optional": true},
+            {"name": "compute", "type": "programmable stage descriptor"}
+        ]
+    },
+    "alpha mode": {
+        "category": "enum",
+        "tags": ["dawn"],
+        "values": [
+            {"value": 0, "name": "premultiplied"},
+            {"value": 1, "name": "unpremultiplied"}
+        ]
+    },
+    "copy texture for browser options": {
+        "category": "structure",
+        "extensible": "in",
+        "tags": ["dawn"],
+        "_TODO": "support number as length input",
+        "members": [
+            {"name": "flip y", "type": "bool", "default": "false"},
+            {"name": "needs color space conversion", "type": "bool", "default": "false"},
+            {"name": "src alpha mode", "type": "alpha mode", "default": "unpremultiplied"},
+            {"name": "src transfer function parameters", "type": "float", "annotation": "const*",
+                     "length": 7, "optional": true},
+            {"name": "conversion matrix", "type": "float", "annotation": "const*",
+                     "length": 9, "optional": true},
+            {"name": "dst transfer function parameters", "type": "float", "annotation": "const*",
+                     "length": 7, "optional": true},
+            {"name": "dst alpha mode", "type": "alpha mode", "default": "unpremultiplied"}
+        ]
+    },
+    "create compute pipeline async callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "create pipeline async status"},
+            {"name": "pipeline", "type": "compute pipeline"},
+            {"name": "message", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "create pipeline async status": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "error"},
+            {"value": 2, "name": "device lost"},
+            {"value": 3, "name": "device destroyed"},
+            {"value": 4, "name": "unknown"}
+        ]
+    },
+    "create render pipeline async callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "create pipeline async status"},
+            {"name": "pipeline", "type": "render pipeline"},
+            {"name": "message", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "cull mode": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "front"},
+            {"value": 2, "name": "back"}
+        ]
+    },
+    "device": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "create bind group",
+                "returns": "bind group",
+                "args": [
+                    {"name": "descriptor", "type": "bind group descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create bind group layout",
+                "returns": "bind group layout",
+                "args": [
+                    {"name": "descriptor", "type": "bind group layout descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create buffer",
+                "returns": "buffer",
+                "args": [
+                    {"name": "descriptor", "type": "buffer descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create error buffer",
+                "returns": "buffer",
+                "tags": ["dawn"]
+            },
+            {
+                "name": "create command encoder",
+                "returns": "command encoder",
+                "args": [
+                    {"name": "descriptor", "type": "command encoder descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "create compute pipeline",
+                "returns": "compute pipeline",
+                "args": [
+                    {"name": "descriptor", "type": "compute pipeline descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create compute pipeline async",
+                "returns": "void",
+                "args": [
+                    {"name": "descriptor", "type": "compute pipeline descriptor", "annotation": "const*"},
+                    {"name": "callback", "type": "create compute pipeline async callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "create external texture",
+                "returns": "external texture",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "external texture descriptor", "type": "external texture descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create pipeline layout",
+                "returns": "pipeline layout",
+                "args": [
+                    {"name": "descriptor", "type": "pipeline layout descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create query set",
+                "returns": "query set",
+                "args": [
+                    {"name": "descriptor", "type": "query set descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create render pipeline async",
+                "returns": "void",
+                "args": [
+                    {"name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"},
+                    {"name": "callback", "type": "create render pipeline async callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "create render bundle encoder",
+                "returns": "render bundle encoder",
+                "args": [
+                    {"name": "descriptor", "type": "render bundle encoder descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create render pipeline",
+                "returns": "render pipeline",
+                "args": [
+                    {"name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create sampler",
+                "returns": "sampler",
+                "args": [
+                    {"name": "descriptor", "type": "sampler descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "create shader module",
+                "returns": "shader module",
+                "args": [
+                    {"name": "descriptor", "type": "shader module descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create swap chain",
+                "returns": "swap chain",
+                "args": [
+                    {"name": "surface", "type": "surface", "optional": true},
+                    {"name": "descriptor", "type": "swap chain descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "create texture",
+                "returns": "texture",
+                "args": [
+                    {"name": "descriptor", "type": "texture descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "destroy"
+            },
+            {
+                "name": "get limits",
+                "returns": "bool",
+                "args": [
+                    {"name": "limits", "type": "supported limits", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "has feature",
+                "returns": "bool",
+                "args": [
+                    {"name": "feature", "type": "feature name"}
+                ]
+            },
+            {
+                "name": "enumerate features",
+                "returns": "size_t",
+                "args": [
+                    {"name": "features", "type": "feature name", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "get queue",
+                "returns": "queue"
+            },
+            {
+                "name": "inject error",
+                "args": [
+                    {"name": "type", "type": "error type"},
+                    {"name": "message", "type": "char", "annotation": "const*", "length": "strlen"}
+                ],
+                "tags": ["dawn"]
+            },
+            {
+                "name": "lose for testing",
+                "tags": ["dawn"]
+            },
+            {
+                "name": "tick",
+                "tags": ["dawn"]
+            },
+            {
+                "name": "set uncaptured error callback",
+                "args": [
+                    {"name": "callback", "type": "error callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "set logging callback",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "callback", "type": "logging callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "set device lost callback",
+                "args": [
+                    {"name": "callback", "type": "device lost callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "push error scope",
+                "args": [
+                    {"name": "filter", "type": "error filter"}
+                ]
+            },
+            {
+                "name": "pop error scope",
+                "returns": "bool",
+                "args": [
+                    {"name": "callback", "type": "error callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            }
+        ]
+    },
+    "device lost callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "reason", "type": "device lost reason"},
+            {"name": "message", "type": "char", "annotation": "const*"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "device lost reason": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "destroyed"}
+        ]
+    },
+    "device properties": {
+        "category": "structure",
+        "extensible": false,
+        "tags": ["dawn"],
+        "members": [
+            {"name": "device ID", "type": "uint32_t"},
+            {"name": "vendor ID", "type": "uint32_t"},
+            {"name": "adapter type", "type": "adapter type"},
+            {"name": "texture compression BC", "type": "bool", "default": "false"},
+            {"name": "texture compression ETC2", "type": "bool", "default": "false"},
+            {"name": "texture compression ASTC", "type": "bool", "default": "false"},
+            {"name": "shader float16", "type": "bool", "default": "false"},
+            {"name": "pipeline statistics query", "type": "bool", "default": "false"},
+            {"name": "timestamp query", "type": "bool", "default": "false"},
+            {"name": "multi planar formats", "type": "bool", "default": "false"},
+            {"name": "depth clamping", "type": "bool", "default": "false"},
+            {"name": "depth24 unorm stencil8", "type": "bool", "default": "false"},
+            {"name": "depth32 float stencil8", "type": "bool", "default": "false"},
+            {"name": "invalid feature", "type": "bool", "default": "false"},
+            {"name": "dawn internal usages", "type": "bool", "default": "false"},
+            {"name": "dawn native", "type": "bool", "default": "false"},
+            {"name": "limits", "type": "supported limits"}
+        ]
+    },
+    "double": {
+        "category": "native"
+    },
+    "error callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "type", "type": "error type"},
+            {"name": "message", "type": "char", "annotation": "const*"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "limits": {
+        "category": "structure",
+        "members": [
+            {"name": "max texture dimension 1D", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max texture dimension 2D", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max texture dimension 3D", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max texture array layers", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max bind groups", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max dynamic uniform buffers per pipeline layout", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max dynamic storage buffers per pipeline layout", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max sampled textures per shader stage", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max samplers per shader stage", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max storage buffers per shader stage", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max storage textures per shader stage", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max uniform buffers per shader stage", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max uniform buffer binding size", "type": "uint64_t", "default": "WGPU_LIMIT_U64_UNDEFINED"},
+            {"name": "max storage buffer binding size", "type": "uint64_t", "default": "WGPU_LIMIT_U64_UNDEFINED"},
+            {"name": "min uniform buffer offset alignment", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "min storage buffer offset alignment", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max vertex buffers", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max vertex attributes", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max vertex buffer array stride", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max inter stage shader components", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute workgroup storage size", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute invocations per workgroup", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute workgroup size x", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute workgroup size y", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute workgroup size z", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"},
+            {"name": "max compute workgroups per dimension", "type": "uint32_t", "default": "WGPU_LIMIT_U32_UNDEFINED"}
+        ]
+    },
+    "required limits": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "limits", "type": "limits"}
+        ]
+    },
+    "supported limits": {
+        "category": "structure",
+        "extensible": "out",
+        "members": [
+            {"name": "limits", "type": "limits"}
+        ]
+    },
+    "logging callback": {
+        "category": "function pointer",
+        "tags": ["dawn"],
+        "args": [
+            {"name": "type", "type": "logging type"},
+            {"name": "message", "type": "char", "annotation": "const*"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "error filter": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "validation"},
+            {"value": 1, "name": "out of memory"}
+        ]
+    },
+    "error type": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "no error"},
+            {"value": 1, "name": "validation"},
+            {"value": 2, "name": "out of memory"},
+            {"value": 3, "name": "unknown"},
+            {"value": 4, "name": "device lost"}
+        ]
+    },
+    "logging type": {
+        "category": "enum",
+        "tags": ["dawn"],
+        "values": [
+            {"value": 0, "name": "verbose"},
+            {"value": 1, "name": "info"},
+            {"value": 2, "name": "warning"},
+            {"value": 3, "name": "error"}
+        ]
+    },
+    "extent 3D": {
+        "category": "structure",
+        "members": [
+            {"name": "width", "type": "uint32_t"},
+            {"name": "height", "type": "uint32_t", "default": 1},
+            {"name": "depth or array layers", "type": "uint32_t", "default": 1}
+        ]
+    },
+    "external texture": {
+        "category": "object",
+        "tags": ["dawn"],
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "destroy",
+                "returns": "void"
+            }
+        ]
+    },
+    "external texture descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "plane 0", "type": "texture view"},
+            {"name": "plane 1", "type": "texture view", "optional": true},
+            {"name": "color space", "type": "predefined color space", "default": "srgb"}
+        ]
+    },
+    "feature name": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "depth clip control", "tags": ["upstream", "emscripten"]},
+            {"value": 2, "name": "depth24 unorm stencil8"},
+            {"value": 3, "name": "depth32 float stencil8"},
+            {"value": 4, "name": "timestamp query"},
+            {"value": 5, "name": "pipeline statistics query"},
+            {"value": 6, "name": "texture compression BC"},
+            {"value": 7, "name": "texture compression ETC2"},
+            {"value": 8, "name": "texture compression ASTC"},
+            {"value": 9, "name": "indirect first instance"},
+            {"value": 1000, "name": "depth clamping", "tags": ["emscripten", "dawn"]},
+            {"value": 1001, "name": "dawn shader float 16", "tags": ["dawn"]},
+            {"value": 1002, "name": "dawn internal usages", "tags": ["dawn"]},
+            {"value": 1003, "name": "dawn multi planar formats", "tags": ["dawn"]},
+            {"value": 1004, "name": "dawn native", "tags": ["dawn", "native"]}
+        ]
+    },
+    "filter mode": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "nearest"},
+            {"value": 1, "name": "linear"}
+        ]
+    },
+    "float": {
+        "category": "native"
+    },
+    "front face": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "CCW"},
+            {"value": 1, "name": "CW"}
+        ]
+    },
+    "image copy buffer": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "layout", "type": "texture data layout"},
+            {"name": "buffer", "type": "buffer"}
+        ]
+    },
+    "image copy texture": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "texture", "type": "texture"},
+            {"name": "mip level", "type": "uint32_t", "default": "0"},
+            {"name": "origin", "type": "origin 3D"},
+            {"name": "aspect", "type": "texture aspect", "default": "all"}
+        ]
+    },
+    "index format": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "uint16"},
+            {"value": 2, "name": "uint32"}
+        ]
+    },
+    "instance": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "create surface",
+                "returns": "surface",
+                "args": [
+                    {"name": "descriptor", "type": "surface descriptor", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "process events",
+                "tags": ["upstream", "emscripten"]
+            },
+            {
+                "name": "request adapter",
+                "args": [
+                    {"name": "options", "type": "request adapter options", "annotation": "const*"},
+                    {"name": "callback", "type": "request adapter callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            }
+        ]
+    },
+    "instance descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": []
+    },
+    "dawn instance descriptor": {
+        "tags": ["dawn", "native"],
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "additional runtime search paths count", "type": "uint32_t", "default": 0},
+            {"name": "additional runtime search paths", "type": "char", "annotation": "const*const*", "length": "additional runtime search paths count"}
+        ]
+    },
+    "vertex attribute": {
+        "category": "structure",
+        "extensible": false,
+        "members": [
+            {"name": "format", "type": "vertex format"},
+            {"name": "offset", "type": "uint64_t"},
+            {"name": "shader location", "type": "uint32_t"}
+        ]
+    },
+    "vertex buffer layout": {
+        "category": "structure",
+        "extensible": false,
+        "members": [
+            {"name": "array stride", "type": "uint64_t"},
+            {"name": "step mode", "type": "vertex step mode", "default": "vertex"},
+            {"name": "attribute count", "type": "uint32_t"},
+            {"name": "attributes", "type": "vertex attribute", "annotation": "const*", "length": "attribute count"}
+        ]
+    },
+    "vertex step mode": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "vertex"},
+            {"value": 1, "name": "instance"}
+        ]
+    },
+    "load op": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "clear"},
+            {"value": 2, "name": "load"}
+        ]
+    },
+    "map mode": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "read"},
+            {"value": 2, "name": "write"}
+        ]
+    },
+    "mipmap filter mode": {
+        "category": "enum",
+        "tags": ["upstream"],
+        "values": [
+            {"value": 0, "name": "nearest"},
+            {"value": 1, "name": "linear"}
+        ]
+    },
+    "store op": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "store"},
+            {"value": 2, "name": "discard"}
+        ]
+    },
+    "origin 3D": {
+        "category": "structure",
+        "members": [
+            {"name": "x", "type": "uint32_t", "default": "0"},
+            {"name": "y", "type": "uint32_t", "default": "0"},
+            {"name": "z", "type": "uint32_t", "default": "0"}
+        ]
+    },
+    "pipeline layout": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "pipeline layout descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "bind group layout count", "type": "uint32_t"},
+            {"name": "bind group layouts", "type": "bind group layout", "annotation": "const*", "length": "bind group layout count"}
+        ]
+    },
+    "pipeline statistic name": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "vertex shader invocations"},
+            {"value": 1, "name": "clipper invocations"},
+            {"value": 2, "name": "clipper primitives out"},
+            {"value": 3, "name": "fragment shader invocations"},
+            {"value": 4, "name": "compute shader invocations"}
+        ]
+    },
+    "power preference": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "low power"},
+            {"value": 2, "name": "high performance"}
+        ]
+    },
+    "predefined color space": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "jsrepr": "undefined"},
+            {"value": 1, "name": "srgb"}
+        ]
+    },
+    "present mode": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "immediate"},
+            {"value": 1, "name": "mailbox"},
+            {"value": 2, "name": "fifo"}
+        ]
+    },
+    "programmable stage descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "module", "type": "shader module"},
+            {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "constant count", "type": "uint32_t", "default": 0},
+            {"name": "constants", "type": "constant entry", "annotation": "const*", "length": "constant count"}
+        ]
+    },
+    "primitive topology": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "point list"},
+            {"value": 1, "name": "line list"},
+            {"value": 2, "name": "line strip"},
+            {"value": 3, "name": "triangle list"},
+            {"value": 4, "name": "triangle strip"}
+        ]
+    },
+    "query set": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "destroy"
+            }
+        ]
+    },
+    "query set descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "type", "type": "query type"},
+            {"name": "count", "type": "uint32_t"},
+            {"name": "pipeline statistics", "type": "pipeline statistic name", "annotation": "const*", "length": "pipeline statistics count"},
+            {"name": "pipeline statistics count", "type": "uint32_t", "default": "0"}
+        ]
+    },
+    "query type": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "occlusion"},
+            {"value": 1, "name": "pipeline statistics"},
+            {"value": 2, "name": "timestamp"}
+        ]
+    },
+    "queue": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "submit",
+                "args": [
+                    {"name": "command count", "type": "uint32_t"},
+                    {"name": "commands", "type": "command buffer", "annotation": "const*", "length": "command count"}
+                ]
+            },
+            {
+                "name": "on submitted work done",
+                "tags": ["dawn", "emscripten"],
+                "args": [
+                    {"name": "signal value", "type": "uint64_t"},
+                    {"name": "callback", "type": "queue work done callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "on submitted work done",
+                "tags": ["upstream"],
+                "args": [
+                    {"name": "callback", "type": "queue work done callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "write buffer",
+                "args": [
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "buffer offset", "type": "uint64_t"},
+                    {"name": "data", "type": "void", "annotation": "const*", "length": "size"},
+                    {"name": "size", "type": "size_t"}
+                ]
+            },
+            {
+                "name": "write texture",
+                "args": [
+                    {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "data", "type": "void", "annotation": "const*", "length": "data size"},
+                    {"name": "data size", "type": "size_t"},
+                    {"name": "data layout", "type": "texture data layout", "annotation": "const*"},
+                    {"name": "write size", "type": "extent 3D", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "copy texture for browser",
+                "extensible": "in",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "source", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+                    {"name": "copy size", "type": "extent 3D", "annotation": "const*"},
+                    {"name": "options", "type": "copy texture for browser options", "annotation": "const*"}
+                ]
+            }
+        ]
+    },
+    "queue descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "tags": ["upstream"],
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+        ]
+    },
+    "queue work done callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "queue work done status"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+    "queue work done status": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "error"},
+            {"value": 2, "name": "unknown"},
+            {"value": 3, "name": "device lost"}
+        ]
+    },
+
+    "render bundle": {
+        "category": "object"
+    },
+
+    "render bundle encoder": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set pipeline",
+                "args": [
+                    {"name": "pipeline", "type": "render pipeline"}
+                ]
+            },
+            {
+                "name": "set bind group",
+                "args": [
+                    {"name": "group index", "type": "uint32_t"},
+                    {"name": "group", "type": "bind group"},
+                    {"name": "dynamic offset count", "type": "uint32_t", "default": "0"},
+                    {"name": "dynamic offsets", "type": "uint32_t", "annotation": "const*", "length": "dynamic offset count", "default": "nullptr"}
+                ]
+            },
+            {
+                "name": "draw",
+                "args": [
+                    {"name": "vertex count", "type": "uint32_t"},
+                    {"name": "instance count", "type": "uint32_t", "default": "1"},
+                    {"name": "first vertex", "type": "uint32_t", "default": "0"},
+                    {"name": "first instance", "type": "uint32_t",  "default": "0"}
+                ]
+            },
+            {
+                "name": "draw indexed",
+                "args": [
+                    {"name": "index count", "type": "uint32_t"},
+                    {"name": "instance count", "type": "uint32_t", "default": "1"},
+                    {"name": "first index", "type": "uint32_t", "default": "0"},
+                    {"name": "base vertex", "type": "int32_t", "default": "0"},
+                    {"name": "first instance", "type": "uint32_t", "default": "0"}
+                ]
+            },
+            {
+              "name": "draw indirect",
+              "args": [
+                    {"name": "indirect buffer", "type": "buffer"},
+                    {"name": "indirect offset", "type": "uint64_t"}
+              ]
+            },
+            {
+              "name": "draw indexed indirect",
+              "args": [
+                    {"name": "indirect buffer", "type": "buffer"},
+                    {"name": "indirect offset", "type": "uint64_t"}
+              ]
+            },
+            {
+                "name": "insert debug marker",
+                "args": [
+                    {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "pop debug group",
+                "args": []
+            },
+            {
+                "name": "push debug group",
+                "args": [
+                    {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "set vertex buffer",
+                "args": [
+                    {"name": "slot", "type": "uint32_t"},
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "offset", "type": "uint64_t", "default": "0"},
+                    {"name": "size", "type": "uint64_t", "default": "WGPU_WHOLE_SIZE"}
+                ]
+            },
+            {
+                "name": "set index buffer",
+                "args": [
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "format", "type": "index format"},
+                    {"name": "offset", "type": "uint64_t", "default": "0"},
+                    {"name": "size", "type": "uint64_t", "default": "WGPU_WHOLE_SIZE"}
+                ]
+            },
+            {
+                "name": "finish",
+                "returns": "render bundle",
+                "args": [
+                    {"name": "descriptor", "type": "render bundle descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+
+    "render bundle descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+        ]
+    },
+
+    "render bundle encoder descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "color formats count", "type": "uint32_t"},
+            {"name": "color formats", "type": "texture format", "annotation": "const*", "length": "color formats count"},
+            {"name": "depth stencil format", "type": "texture format", "default": "undefined"},
+            {"name": "sample count", "type": "uint32_t", "default": "1"},
+            {"name": "depth read only", "type": "bool", "default": "false"},
+            {"name": "stencil read only", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "render pass color attachment": {
+        "category": "structure",
+        "members": [
+            {"name": "view", "type": "texture view", "optional": true},
+            {"name": "resolve target", "type": "texture view", "optional": true},
+            {"name": "load op", "type": "load op"},
+            {"name": "store op", "type": "store op"},
+            {"name": "clear color", "type": "color", "default": "{ NAN, NAN, NAN, NAN }", "tags": ["deprecated"]},
+            {"name": "clear value", "type": "color"}
+        ]
+    },
+
+    "render pass depth stencil attachment": {
+        "category": "structure",
+        "members": [
+            {"name": "view", "type": "texture view"},
+            {"name": "depth load op", "type": "load op", "default": "undefined"},
+            {"name": "depth store op", "type": "store op", "default": "undefined"},
+            {"name": "clear depth", "type": "float", "default": "NAN", "tags": ["deprecated"]},
+            {"name": "depth clear value", "type": "float", "default": "0"},
+            {"name": "depth read only", "type": "bool", "default": "false"},
+            {"name": "stencil load op", "type": "load op", "default": "undefined"},
+            {"name": "stencil store op", "type": "store op", "default": "undefined"},
+            {"name": "clear stencil", "type": "uint32_t", "default": "0", "tags": ["deprecated"]},
+            {"name": "stencil clear value", "type": "uint32_t", "default": "0"},
+            {"name": "stencil read only", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "render pass descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "color attachment count", "type": "uint32_t"},
+            {"name": "color attachments", "type": "render pass color attachment", "annotation": "const*", "length": "color attachment count"},
+            {"name": "depth stencil attachment", "type": "render pass depth stencil attachment", "annotation": "const*", "optional": true},
+            {"name": "occlusion query set", "type": "query set", "optional": true},
+            {"name": "timestamp write count", "type": "uint32_t", "default": 0},
+            {"name": "timestamp writes", "type": "render pass timestamp write", "annotation": "const*", "length": "timestamp write count"}
+        ]
+    },
+    "render pass encoder": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set pipeline",
+                "args": [
+                    {"name": "pipeline", "type": "render pipeline"}
+                ]
+            },
+            {
+                "name": "set bind group",
+                "args": [
+                    {"name": "group index", "type": "uint32_t"},
+                    {"name": "group", "type": "bind group"},
+                    {"name": "dynamic offset count", "type": "uint32_t", "default": "0"},
+                    {"name": "dynamic offsets", "type": "uint32_t", "annotation": "const*", "length": "dynamic offset count", "default": "nullptr"}
+                ]
+            },
+            {
+                "name": "draw",
+                "args": [
+                    {"name": "vertex count", "type": "uint32_t"},
+                    {"name": "instance count", "type": "uint32_t", "default": "1"},
+                    {"name": "first vertex", "type": "uint32_t", "default": "0"},
+                    {"name": "first instance", "type": "uint32_t",  "default": "0"}
+                ]
+            },
+            {
+                "name": "draw indexed",
+                "args": [
+                    {"name": "index count", "type": "uint32_t"},
+                    {"name": "instance count", "type": "uint32_t", "default": "1"},
+                    {"name": "first index", "type": "uint32_t", "default": "0"},
+                    {"name": "base vertex", "type": "int32_t", "default": "0"},
+                    {"name": "first instance", "type": "uint32_t", "default": "0"}
+                ]
+            },
+            {
+              "name": "draw indirect",
+              "args": [
+                    {"name": "indirect buffer", "type": "buffer"},
+                    {"name": "indirect offset", "type": "uint64_t"}
+              ]
+            },
+            {
+              "name": "draw indexed indirect",
+              "args": [
+                    {"name": "indirect buffer", "type": "buffer"},
+                    {"name": "indirect offset", "type": "uint64_t"}
+              ]
+            },
+            {
+              "name": "execute bundles",
+              "args": [
+                  {"name": "bundles count", "type": "uint32_t"},
+                  {"name": "bundles", "type": "render bundle", "annotation": "const*", "length": "bundles count"}
+              ]
+            },
+            {
+                "name": "insert debug marker",
+                "args": [
+                    {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "pop debug group",
+                "args": []
+            },
+            {
+                "name": "push debug group",
+                "args": [
+                    {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "set stencil reference",
+                "args": [
+                    {"name": "reference", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "set blend constant",
+                "args": [
+                    {"name": "color", "type": "color", "annotation": "const*"}
+                ]
+            },
+            {
+                "name": "set viewport",
+                "args": [
+                    {"name": "x", "type": "float"},
+                    {"name": "y", "type": "float"},
+                    {"name": "width", "type": "float"},
+                    {"name": "height", "type": "float"},
+                    {"name": "min depth", "type": "float"},
+                    {"name": "max depth", "type": "float"}
+                ]
+            },
+            {
+                "name": "set scissor rect",
+                "args": [
+                    {"name": "x", "type": "uint32_t"},
+                    {"name": "y", "type": "uint32_t"},
+                    {"name": "width", "type": "uint32_t"},
+                    {"name": "height", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "set vertex buffer",
+                "args": [
+                    {"name": "slot", "type": "uint32_t"},
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "offset", "type": "uint64_t", "default": "0"},
+                    {"name": "size", "type": "uint64_t", "default": "WGPU_WHOLE_SIZE"}
+                ]
+            },
+            {
+                "name": "set index buffer",
+                "args": [
+                    {"name": "buffer", "type": "buffer"},
+                    {"name": "format", "type": "index format"},
+                    {"name": "offset", "type": "uint64_t", "default": "0"},
+                    {"name": "size", "type": "uint64_t", "default": "WGPU_WHOLE_SIZE"}
+                ]
+            },
+            {
+                "name": "begin occlusion query",
+                "args": [
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "begin pipeline statistics query",
+                "tags": ["upstream", "emscripten"],
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "end occlusion query"
+            },
+            {
+                "name": "write timestamp",
+                "tags": ["emscripten", "dawn"],
+                "args": [
+                    {"name": "query set", "type": "query set"},
+                    {"name": "query index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "end"
+            },
+            {
+                "name": "end pass",
+                "tags": ["deprecated"]
+            },
+            {
+                "name": "end pipeline statistics query",
+                "tags": ["upstream", "emscripten"]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "render pass timestamp location": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "beginning"},
+            {"value": 1, "name": "end"}
+        ]
+    },
+    "render pass timestamp write": {
+        "category": "structure",
+        "members": [
+            {"name": "query set", "type": "query set"},
+            {"name": "query index", "type": "uint32_t"},
+            {"name": "location", "type": "render pass timestamp location"}
+        ]
+    },
+    "render pipeline": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "get bind group layout",
+                "returns": "bind group layout",
+                "args": [
+                    {"name": "group index", "type": "uint32_t"}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+
+        ]
+    },
+
+    "request device callback": {
+        "category": "function pointer",
+        "args": [
+            {"name": "status", "type": "request device status"},
+            {"name": "device", "type": "device"},
+            {"name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
+
+    "request device status": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "success"},
+            {"value": 1, "name": "error"},
+            {"value": 2, "name": "unknown"}
+        ]
+    },
+
+    "vertex state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "module", "type": "shader module"},
+            {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "constant count", "type": "uint32_t", "default": 0},
+            {"name": "constants", "type": "constant entry", "annotation": "const*", "length": "constant count"},
+            {"name": "buffer count", "type": "uint32_t", "default": 0},
+            {"name": "buffers", "type": "vertex buffer layout", "annotation": "const*", "length": "buffer count"}
+        ]
+    },
+
+    "primitive state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "topology", "type": "primitive topology", "default": "triangle list"},
+            {"name": "strip index format", "type": "index format", "default": "undefined"},
+            {"name": "front face", "type": "front face", "default": "CCW"},
+            {"name": "cull mode", "type": "cull mode", "default": "none"}
+        ]
+    },
+
+    "primitive depth clamping state": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn", "emscripten"],
+        "members": [
+            {"name": "clamp depth", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "primitive depth clip control": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["upstream", "emscripten"],
+        "members": [
+            {"name": "unclipped depth", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "depth stencil state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "format", "type": "texture format"},
+            {"name": "depth write enabled", "type": "bool", "default": "false"},
+            {"name": "depth compare", "type": "compare function", "default": "always"},
+            {"name": "stencil front", "type": "stencil face state"},
+            {"name": "stencil back", "type": "stencil face state"},
+            {"name": "stencil read mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+            {"name": "stencil write mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+            {"name": "depth bias", "type": "int32_t", "default": "0"},
+            {"name": "depth bias slope scale", "type": "float", "default": "0.0f"},
+            {"name": "depth bias clamp", "type": "float", "default": "0.0f"}
+        ]
+    },
+
+    "multisample state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "count", "type": "uint32_t", "default": "1"},
+            {"name": "mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+            {"name": "alpha to coverage enabled", "type": "bool", "default": "false"}
+        ]
+    },
+
+    "fragment state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "module", "type": "shader module"},
+            {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "constant count", "type": "uint32_t", "default": 0},
+            {"name": "constants", "type": "constant entry", "annotation": "const*", "length": "constant count"},
+            {"name": "target count", "type": "uint32_t"},
+            {"name": "targets", "type": "color target state", "annotation": "const*", "length": "target count"}
+        ]
+    },
+    "color target state": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "format", "type": "texture format"},
+            {"name": "blend", "type": "blend state", "annotation": "const*", "optional": true},
+            {"name": "write mask", "type": "color write mask", "default": "all"}
+        ]
+    },
+    "blend state": {
+        "category": "structure",
+        "extensible": false,
+        "members": [
+            {"name": "color", "type": "blend component"},
+            {"name": "alpha", "type": "blend component"}
+        ]
+    },
+
+    "render pipeline descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "layout", "type": "pipeline layout", "optional": true},
+            {"name": "vertex", "type": "vertex state"},
+            {"name": "primitive", "type": "primitive state"},
+            {"name": "depth stencil", "type": "depth stencil state", "annotation": "const*", "optional": true},
+            {"name": "multisample", "type": "multisample state"},
+            {"name": "fragment", "type": "fragment state", "annotation": "const*", "optional": true}
+        ]
+    },
+
+    "sampler": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "sampler descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "address mode u", "type": "address mode", "default": "clamp to edge"},
+            {"name": "address mode v", "type": "address mode", "default": "clamp to edge"},
+            {"name": "address mode w", "type": "address mode", "default": "clamp to edge"},
+            {"name": "mag filter", "type": "filter mode", "default": "nearest"},
+            {"name": "min filter", "type": "filter mode", "default": "nearest"},
+            {"name": "mipmap filter", "type": "filter mode", "default": "nearest", "tags": ["dawn", "emscripten"]},
+            {"name": "mipmap filter", "type": "mipmap filter mode", "default": "nearest", "tags": ["upstream"]},
+            {"name": "lod min clamp", "type": "float", "default": "0.0f"},
+            {"name": "lod max clamp", "type": "float", "default": "1000.0f"},
+            {"name": "compare", "type": "compare function", "default": "undefined"},
+            {"name": "max anisotropy", "type": "uint16_t", "default": "1"}
+        ]
+    },
+    "shader module": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "get compilation info",
+                "args": [
+                    {"name": "callback", "type": "compilation info callback"},
+                    {"name": "userdata", "type": "void", "annotation": "*"}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "shader module descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "hint count", "type": "uint32_t", "default": 0, "tags": ["upstream"]},
+            {"name": "hints", "type": "shader module compilation hint", "annotation": "const*", "length": "hint count", "tags": ["upstream"]}
+        ]
+    },
+    "shader module compilation hint": {
+        "category": "structure",
+        "extensible": "in",
+        "tags": ["upstream"],
+        "members": [
+            {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+            {"name": "layout", "type": "pipeline layout"}
+        ]
+    },
+    "shader module SPIRV descriptor": {
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "code size", "type": "uint32_t"},
+            {"name": "code", "type": "uint32_t", "annotation": "const*", "length": "code size"}
+        ]
+    },
+    "shader module WGSL descriptor": {
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "source", "type": "char", "annotation": "const*", "length": "strlen", "tags": ["dawn", "emscripten"]},
+            {"name": "code", "type": "char", "annotation": "const*", "length": "strlen", "tags": ["upstream"]}
+        ]
+    },
+    "shader stage": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "vertex"},
+            {"value": 2, "name": "fragment"},
+            {"value": 4, "name": "compute"}
+        ]
+    },
+    "stencil operation": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "keep"},
+            {"value": 1, "name": "zero"},
+            {"value": 2, "name": "replace"},
+            {"value": 3, "name": "invert"},
+            {"value": 4, "name": "increment clamp"},
+            {"value": 5, "name": "decrement clamp"},
+            {"value": 6, "name": "increment wrap"},
+            {"value": 7, "name": "decrement wrap"}
+        ]
+    },
+    "stencil face state": {
+        "category": "structure",
+        "extensible": false,
+        "members": [
+            {"name": "compare", "type": "compare function", "default": "always"},
+            {"name": "fail op", "type": "stencil operation", "default": "keep"},
+            {"name": "depth fail op", "type": "stencil operation", "default": "keep"},
+            {"name": "pass op", "type": "stencil operation", "default": "keep"}
+        ]
+    },
+    "surface": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "get preferred format",
+                "returns": "texture format",
+                "tags": ["upstream", "emscripten"],
+                "args": [
+                    {"name": "adapter", "type": "adapter"}
+                ]
+            }
+        ]
+    },
+    "surface descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+        ]
+    },
+    "surface descriptor from android native window": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["native"],
+        "members": [
+            {"name": "window", "type": "void", "annotation": "*"}
+        ]
+    },
+    "surface descriptor from canvas HTML selector": {
+        "category": "structure",
+        "chained": "in",
+        "members": [
+            {"name": "selector", "type": "char", "annotation": "const*", "length": "strlen"}
+        ]
+    },
+    "surface descriptor from metal layer": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["native"],
+        "members": [
+            {"name": "layer", "type": "void", "annotation": "*"}
+        ]
+    },
+    "surface descriptor from windows HWND": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["native"],
+        "members": [
+            {"name": "hinstance", "type": "void", "annotation": "*"},
+            {"name": "hwnd", "type": "void", "annotation": "*"}
+        ]
+    },
+    "surface descriptor from xcb window": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["upstream"],
+        "members": [
+            {"name": "connection", "type": "void", "annotation": "*"},
+            {"name": "window", "type": "uint32_t"}
+        ]
+    },
+    "surface descriptor from xlib window": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["native"],
+        "members": [
+            {"name": "display", "type": "void", "annotation": "*"},
+            {"name": "window", "type": "uint32_t"}
+        ]
+    },
+    "surface descriptor from wayland surface": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["native"],
+        "members": [
+            {"name": "display", "type": "void", "annotation": "*"},
+            {"name": "surface", "type": "void", "annotation": "*"}
+        ]
+    },
+    "surface descriptor from windows core window": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "core window", "type": "void", "annotation": "*"}
+        ]
+    },
+    "surface descriptor from windows swap chain panel": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "swap chain panel", "type": "void", "annotation": "*"}
+        ]
+    },
+    "swap chain": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "configure",
+                "tags": ["dawn"],
+                "args": [
+                    {"name": "format", "type": "texture format"},
+                    {"name": "allowed usage", "type": "texture usage"},
+                    {"name": "width", "type": "uint32_t"},
+                    {"name": "height", "type": "uint32_t"}
+                ]
+            },
+            {"name": "get current texture view", "returns": "texture view"},
+            {"name": "present"}
+        ]
+    },
+    "swap chain descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "usage", "type": "texture usage"},
+            {"name": "format", "type": "texture format"},
+            {"name": "width", "type": "uint32_t"},
+            {"name": "height", "type": "uint32_t"},
+            {"name": "present mode", "type": "present mode"},
+            {"name": "implementation", "type": "uint64_t", "default": 0, "tags": ["deprecated"]}
+        ]
+    },
+    "s type": {
+        "category": "enum",
+        "emscripten_no_enum_table": true,
+        "values": [
+            {"value": 0, "name": "invalid", "valid": false},
+            {"value": 1, "name": "surface descriptor from metal layer", "tags": ["native"]},
+            {"value": 2, "name": "surface descriptor from windows HWND", "tags": ["native"]},
+            {"value": 3, "name": "surface descriptor from xlib window", "tags": ["native"]},
+            {"value": 4, "name": "surface descriptor from canvas HTML selector"},
+            {"value": 5, "name": "shader module SPIRV descriptor"},
+            {"value": 6, "name": "shader module WGSL descriptor"},
+            {"value": 7, "name": "primitive depth clip control", "tags": ["upstream", "emscripten"]},
+            {"value": 8, "name": "surface descriptor from wayland surface", "tags": ["native"]},
+            {"value": 9, "name": "surface descriptor from android native window", "tags": ["native"]},
+            {"value": 10, "name": "surface descriptor from xcb window", "tags": ["upstream"]},
+            {"value": 11, "name": "surface descriptor from windows core window", "tags": ["dawn"]},
+            {"value": 12, "name": "external texture binding entry", "tags": ["dawn"]},
+            {"value": 13, "name": "external texture binding layout", "tags": ["dawn"]},
+            {"value": 14, "name": "surface descriptor from windows swap chain panel", "tags": ["dawn"]},
+            {"value": 1000, "name": "dawn texture internal usage descriptor", "tags": ["dawn"]},
+            {"value": 1001, "name": "primitive depth clamping state", "tags": ["dawn", "emscripten"]},
+            {"value": 1002, "name": "dawn toggles device descriptor", "tags": ["dawn", "native"]},
+            {"value": 1003, "name": "dawn encoder internal usage descriptor", "tags": ["dawn"]},
+            {"value": 1004, "name": "dawn instance descriptor", "tags": ["dawn", "native"]},
+            {"value": 1005, "name": "dawn cache device descriptor", "tags": ["dawn", "native"]}
+        ]
+    },
+    "texture": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "create view",
+                "returns": "texture view",
+                "args": [
+                    {"name": "descriptor", "type": "texture view descriptor", "annotation": "const*", "optional": true}
+                ]
+            },
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            },
+            {
+                "name": "destroy"
+            }
+        ]
+    },
+    "texture aspect": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "all"},
+            {"value": 1, "name": "stencil only"},
+            {"value": 2, "name": "depth only"},
+            {"value": 3, "name": "plane 0 only", "tags": ["dawn"]},
+            {"value": 4, "name": "plane 1 only", "tags": ["dawn"]}
+        ]
+    },
+    "texture component type": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "float"},
+            {"value": 1, "name": "sint"},
+            {"value": 2, "name": "uint"},
+            {"value": 3, "name": "depth comparison"}
+        ]
+    },
+    "texture data layout": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "offset", "type": "uint64_t", "default": 0},
+            {"name": "bytes per row", "type": "uint32_t", "default": "WGPU_COPY_STRIDE_UNDEFINED"},
+            {"name": "rows per image", "type": "uint32_t", "default": "WGPU_COPY_STRIDE_UNDEFINED"}
+        ]
+    },
+    "texture descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "usage", "type": "texture usage"},
+            {"name": "dimension", "type": "texture dimension", "default": "2D"},
+            {"name": "size", "type": "extent 3D"},
+            {"name": "format", "type": "texture format"},
+            {"name": "mip level count", "type": "uint32_t", "default": 1},
+            {"name": "sample count", "type": "uint32_t", "default": 1},
+            {"name": "view format count", "type": "uint32_t", "default": 0},
+            {"name": "view formats", "type": "texture format", "annotation": "const*", "length": "view format count"}
+        ]
+    },
+    "texture dimension": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "1D"},
+            {"value": 1, "name": "2D"},
+            {"value": 2, "name": "3D"}
+        ]
+    },
+    "texture format": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "valid": false, "jsrepr": "undefined"},
+
+            {"value": 1, "name": "R8 unorm"},
+            {"value": 2, "name": "R8 snorm"},
+            {"value": 3, "name": "R8 uint"},
+            {"value": 4, "name": "R8 sint"},
+
+            {"value": 5, "name": "R16 uint"},
+            {"value": 6, "name": "R16 sint"},
+            {"value": 7, "name": "R16 float"},
+            {"value": 8, "name": "RG8 unorm"},
+            {"value": 9, "name": "RG8 snorm"},
+            {"value": 10, "name": "RG8 uint"},
+            {"value": 11, "name": "RG8 sint"},
+
+            {"value": 12, "name": "R32 float"},
+            {"value": 13, "name": "R32 uint"},
+            {"value": 14, "name": "R32 sint"},
+            {"value": 15, "name": "RG16 uint"},
+            {"value": 16, "name": "RG16 sint"},
+            {"value": 17, "name": "RG16 float"},
+            {"value": 18, "name": "RGBA8 unorm"},
+            {"value": 19, "name": "RGBA8 unorm srgb"},
+            {"value": 20, "name": "RGBA8 snorm"},
+            {"value": 21, "name": "RGBA8 uint"},
+            {"value": 22, "name": "RGBA8 sint"},
+            {"value": 23, "name": "BGRA8 unorm"},
+            {"value": 24, "name": "BGRA8 unorm srgb"},
+            {"value": 25, "name": "RGB10 A2 unorm"},
+            {"value": 26, "name": "RG11 B10 ufloat"},
+            {"value": 27, "name": "RGB9 E5 ufloat"},
+
+            {"value": 28, "name": "RG32 float"},
+            {"value": 29, "name": "RG32 uint"},
+            {"value": 30, "name": "RG32 sint"},
+            {"value": 31, "name": "RGBA16 uint"},
+            {"value": 32, "name": "RGBA16 sint"},
+            {"value": 33, "name": "RGBA16 float"},
+
+            {"value": 34, "name": "RGBA32 float"},
+            {"value": 35, "name": "RGBA32 uint"},
+            {"value": 36, "name": "RGBA32 sint"},
+
+            {"value": 37, "name": "stencil8"},
+            {"value": 38, "name": "depth16 unorm"},
+            {"value": 39, "name": "depth24 plus"},
+            {"value": 40, "name": "depth24 plus stencil8"},
+            {"value": 41, "name": "depth24 unorm stencil8"},
+            {"value": 42, "name": "depth32 float"},
+            {"value": 43, "name": "depth32 float stencil8"},
+
+            {"value": 44, "name": "BC1 RGBA unorm",         "jsrepr": "'bc1-rgba-unorm'"},
+            {"value": 45, "name": "BC1 RGBA unorm srgb",    "jsrepr": "'bc1-rgba-unorm-srgb'"},
+            {"value": 46, "name": "BC2 RGBA unorm",         "jsrepr": "'bc2-rgba-unorm'"},
+            {"value": 47, "name": "BC2 RGBA unorm srgb",    "jsrepr": "'bc2-rgba-unorm-srgb'"},
+            {"value": 48, "name": "BC3 RGBA unorm",         "jsrepr": "'bc3-rgba-unorm'"},
+            {"value": 49, "name": "BC3 RGBA unorm srgb",    "jsrepr": "'bc3-rgba-unorm-srgb'"},
+            {"value": 50, "name": "BC4 R unorm",            "jsrepr": "'bc4-r-unorm'"},
+            {"value": 51, "name": "BC4 R snorm",            "jsrepr": "'bc4-r-snorm'"},
+            {"value": 52, "name": "BC5 RG unorm",           "jsrepr": "'bc5-rg-unorm'"},
+            {"value": 53, "name": "BC5 RG snorm",           "jsrepr": "'bc5-rg-snorm'"},
+            {"value": 54, "name": "BC6H RGB ufloat",        "jsrepr": "'bc6h-rgb-ufloat'"},
+            {"value": 55, "name": "BC6H RGB float",         "jsrepr": "'bc6h-rgb-float'"},
+            {"value": 56, "name": "BC7 RGBA unorm",         "jsrepr": "'bc7-rgba-unorm'"},
+            {"value": 57, "name": "BC7 RGBA unorm srgb",    "jsrepr": "'bc7-rgba-unorm-srgb'"},
+
+            {"value": 58, "name": "ETC2 RGB8 unorm",        "jsrepr": "'etc2-rgb8unorm'"},
+            {"value": 59, "name": "ETC2 RGB8 unorm srgb",   "jsrepr": "'etc2-rgb8unorm-srgb'"},
+            {"value": 60, "name": "ETC2 RGB8A1 unorm",      "jsrepr": "'etc2-rgb8a1unorm'"},
+            {"value": 61, "name": "ETC2 RGB8A1 unorm srgb", "jsrepr": "'etc2-rgb8a1unorm-srgb'"},
+            {"value": 62, "name": "ETC2 RGBA8 unorm",       "jsrepr": "'etc2-rgba8unorm'"},
+            {"value": 63, "name": "ETC2 RGBA8 unorm srgb",  "jsrepr": "'etc2-rgba8unorm-srgb'"},
+            {"value": 64, "name": "EAC R11 unorm",          "jsrepr": "'eac-r11unorm'"},
+            {"value": 65, "name": "EAC R11 snorm",          "jsrepr": "'eac-r11snorm'"},
+            {"value": 66, "name": "EAC RG11 unorm",         "jsrepr": "'eac-rg11unorm'"},
+            {"value": 67, "name": "EAC RG11 snorm",         "jsrepr": "'eac-rg11snorm'"},
+
+            {"value": 68, "name": "ASTC 4x4 unorm",         "jsrepr": "'astc-4x4-unorm'"},
+            {"value": 69, "name": "ASTC 4x4 unorm srgb",    "jsrepr": "'astc-4x4-unorm-srgb'"},
+            {"value": 70, "name": "ASTC 5x4 unorm",         "jsrepr": "'astc-5x4-unorm'"},
+            {"value": 71, "name": "ASTC 5x4 unorm srgb",    "jsrepr": "'astc-5x4-unorm-srgb'"},
+            {"value": 72, "name": "ASTC 5x5 unorm",         "jsrepr": "'astc-5x5-unorm'"},
+            {"value": 73, "name": "ASTC 5x5 unorm srgb",    "jsrepr": "'astc-5x5-unorm-srgb'"},
+            {"value": 74, "name": "ASTC 6x5 unorm",         "jsrepr": "'astc-6x5-unorm'"},
+            {"value": 75, "name": "ASTC 6x5 unorm srgb",    "jsrepr": "'astc-6x5-unorm-srgb'"},
+            {"value": 76, "name": "ASTC 6x6 unorm",         "jsrepr": "'astc-6x6-unorm'"},
+            {"value": 77, "name": "ASTC 6x6 unorm srgb",    "jsrepr": "'astc-6x6-unorm-srgb'"},
+            {"value": 78, "name": "ASTC 8x5 unorm",         "jsrepr": "'astc-8x5-unorm'"},
+            {"value": 79, "name": "ASTC 8x5 unorm srgb",    "jsrepr": "'astc-8x5-unorm-srgb'"},
+            {"value": 80, "name": "ASTC 8x6 unorm",         "jsrepr": "'astc-8x6-unorm'"},
+            {"value": 81, "name": "ASTC 8x6 unorm srgb",    "jsrepr": "'astc-8x6-unorm-srgb'"},
+            {"value": 82, "name": "ASTC 8x8 unorm",         "jsrepr": "'astc-8x8-unorm'"},
+            {"value": 83, "name": "ASTC 8x8 unorm srgb",    "jsrepr": "'astc-8x8-unorm-srgb'"},
+            {"value": 84, "name": "ASTC 10x5 unorm",        "jsrepr": "'astc-10x5-unorm'"},
+            {"value": 85, "name": "ASTC 10x5 unorm srgb",   "jsrepr": "'astc-10x5-unorm-srgb'"},
+            {"value": 86, "name": "ASTC 10x6 unorm",        "jsrepr": "'astc-10x6-unorm'"},
+            {"value": 87, "name": "ASTC 10x6 unorm srgb",   "jsrepr": "'astc-10x6-unorm-srgb'"},
+            {"value": 88, "name": "ASTC 10x8 unorm",        "jsrepr": "'astc-10x8-unorm'"},
+            {"value": 89, "name": "ASTC 10x8 unorm srgb",   "jsrepr": "'astc-10x8-unorm-srgb'"},
+            {"value": 90, "name": "ASTC 10x10 unorm",       "jsrepr": "'astc-10x10-unorm'"},
+            {"value": 91, "name": "ASTC 10x10 unorm srgb",  "jsrepr": "'astc-10x10-unorm-srgb'"},
+            {"value": 92, "name": "ASTC 12x10 unorm",       "jsrepr": "'astc-12x10-unorm'"},
+            {"value": 93, "name": "ASTC 12x10 unorm srgb",  "jsrepr": "'astc-12x10-unorm-srgb'"},
+            {"value": 94, "name": "ASTC 12x12 unorm",       "jsrepr": "'astc-12x12-unorm'"},
+            {"value": 95, "name": "ASTC 12x12 unorm srgb",  "jsrepr": "'astc-12x12-unorm-srgb'"},
+
+            {"value": 96, "name": "R8 BG8 Biplanar 420 unorm", "tags": ["dawn"]}
+        ]
+    },
+    "texture usage": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "copy src"},
+            {"value": 2, "name": "copy dst"},
+            {"value": 4, "name": "texture binding"},
+            {"value": 8, "name": "storage binding"},
+            {"value": 16, "name": "render attachment"},
+            {"value": 32, "name": "present", "tags": ["dawn"]}
+        ]
+    },
+    "texture view descriptor": {
+        "category": "structure",
+        "extensible": "in",
+        "members": [
+            {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+            {"name": "format", "type": "texture format", "default": "undefined"},
+            {"name": "dimension", "type": "texture view dimension", "default": "undefined"},
+            {"name": "base mip level", "type": "uint32_t", "default": "0"},
+            {"name": "mip level count", "type": "uint32_t", "default": "WGPU_MIP_LEVEL_COUNT_UNDEFINED"},
+            {"name": "base array layer", "type": "uint32_t", "default": "0"},
+            {"name": "array layer count", "type": "uint32_t", "default": "WGPU_ARRAY_LAYER_COUNT_UNDEFINED"},
+            {"name": "aspect", "type": "texture aspect", "default": "all"}
+        ]
+    },
+    "texture view": {
+        "category": "object",
+        "methods": [
+            {
+                "name": "set label",
+                "returns": "void",
+                "tags": ["dawn"],
+                "_TODO": "needs an upstream equivalent",
+                "args": [
+                    {"name": "label", "type": "char", "annotation": "const*", "length": "strlen"}
+                ]
+            }
+        ]
+    },
+    "texture view dimension": {
+        "category": "enum",
+        "values": [
+            {"value": 0, "name": "undefined", "valid": false, "jsrepr": "undefined"},
+            {"value": 1, "name": "1D"},
+            {"value": 2, "name": "2D"},
+            {"value": 3, "name": "2D array"},
+            {"value": 4, "name": "cube"},
+            {"value": 5, "name": "cube array"},
+            {"value": 6, "name": "3D"}
+        ]
+    },
+    "vertex format": {
+        "category": "enum",
+        "values": [
+            {"value": 0,  "name": "undefined", "valid": false, "jsrepr": "undefined"},
+            {"value": 1,  "name": "uint8x2"},
+            {"value": 2,  "name": "uint8x4"},
+            {"value": 3,  "name": "sint8x2"},
+            {"value": 4,  "name": "sint8x4"},
+            {"value": 5,  "name": "unorm8x2"},
+            {"value": 6,  "name": "unorm8x4"},
+            {"value": 7,  "name": "snorm8x2"},
+            {"value": 8,  "name": "snorm8x4"},
+            {"value": 9,  "name": "uint16x2"},
+            {"value": 10, "name": "uint16x4"},
+            {"value": 11, "name": "sint16x2"},
+            {"value": 12, "name": "sint16x4"},
+            {"value": 13, "name": "unorm16x2"},
+            {"value": 14, "name": "unorm16x4"},
+            {"value": 15, "name": "snorm16x2"},
+            {"value": 16, "name": "snorm16x4"},
+            {"value": 17, "name": "float16x2"},
+            {"value": 18, "name": "float16x4"},
+            {"value": 19, "name": "float32"},
+            {"value": 20, "name": "float32x2"},
+            {"value": 21, "name": "float32x3"},
+            {"value": 22, "name": "float32x4"},
+            {"value": 23, "name": "uint32"},
+            {"value": 24, "name": "uint32x2"},
+            {"value": 25, "name": "uint32x3"},
+            {"value": 26, "name": "uint32x4"},
+            {"value": 27, "name": "sint32"},
+            {"value": 28, "name": "sint32x2"},
+            {"value": 29, "name": "sint32x3"},
+            {"value": 30, "name": "sint32x4"}
+        ]
+    },
+    "whole size" : {
+        "category": "constant",
+        "type": "uint64_t",
+        "value":  "(0xffffffffffffffffULL)"
+    },
+    "whole map size" : {
+        "category": "constant",
+        "type": "size_t",
+        "value":  "SIZE_MAX"
+    },
+    "stride undefined" : {
+        "category": "constant",
+        "tags": ["deprecated"],
+        "_TODO": "crbug.com/dawn/520: Remove WGPU_STRIDE_UNDEFINED in favor of WGPU_COPY_STRIDE_UNDEFINED.",
+        "type": "uint32_t",
+        "value":  "(0xffffffffUL)"
+    },
+    "copy stride undefined" : {
+        "category": "constant",
+        "type": "uint32_t",
+        "value":  "(0xffffffffUL)"
+    },
+    "limit u32 undefined" : {
+        "category": "constant",
+        "type": "uint32_t",
+        "value":  "(0xffffffffUL)"
+    },
+    "limit u64 undefined" : {
+        "category": "constant",
+        "type": "uint64_t",
+        "value":  "(0xffffffffffffffffULL)"
+    },
+    "array layer count undefined" : {
+        "category": "constant",
+        "type": "uint32_t",
+        "value":  "(0xffffffffUL)"
+    },
+    "mip level count undefined" : {
+        "category": "constant",
+        "type": "uint32_t",
+        "value":  "(0xffffffffUL)"
+    },
+    "ObjectType": {
+      "_comment": "Only used for the wire",
+      "category": "native"
+    },
+    "ObjectId": {
+      "_comment": "Only used for the wire",
+      "category": "native"
+    },
+    "ObjectHandle": {
+      "_comment": "Only used for the wire",
+      "category": "native"
+    },
+    "void": {
+        "category": "native"
+    },
+    "void *": {
+        "category": "native"
+    },
+    "void const *": {
+        "category": "native"
+    },
+    "int32_t": {
+        "category": "native"
+    },
+    "size_t": {
+        "category": "native"
+    },
+    "uint16_t": {
+        "category": "native"
+    },
+    "uint32_t": {
+        "category": "native"
+    },
+    "uint64_t": {
+        "category": "native"
+    },
+    "uint8_t": {
+        "category": "native"
+    },
+    "dawn texture internal usage descriptor": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "internal usage", "type": "texture usage", "default": "none"}
+        ]
+    },
+    "dawn encoder internal usage descriptor": {
+        "category": "structure",
+        "chained": "in",
+        "tags": ["dawn"],
+        "members": [
+            {"name": "use internal usages", "type": "bool", "default": "false"}
+        ]
+    }
+}
diff --git a/dawn_wire.json b/dawn_wire.json
new file mode 100644
index 0000000..2e2318e
--- /dev/null
+++ b/dawn_wire.json
@@ -0,0 +1,233 @@
+{
+    "_comment": [
+        "Copyright 2019 The Dawn Authors",
+        "",
+        "Licensed under the Apache License, Version 2.0 (the \"License\");",
+        "you may not use this file except in compliance with the License.",
+        "You may obtain a copy of the License at",
+        "",
+        "    http://www.apache.org/licenses/LICENSE-2.0",
+        "",
+        "Unless required by applicable law or agreed to in writing, software",
+        "distributed under the License is distributed on an \"AS IS\" BASIS,",
+        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+        "See the License for the specific language governing permissions and",
+        "limitations under the License."
+    ],
+
+    "_doc": "See docs/dawn/codegen.md",
+
+    "commands": {
+        "buffer map async": [
+            { "name": "buffer id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "mode", "type": "map mode" },
+            { "name": "offset", "type": "uint64_t"},
+            { "name": "size", "type": "uint64_t"}
+        ],
+        "buffer update mapped data": [
+            { "name": "buffer id", "type": "ObjectId" },
+            { "name": "write data update info length", "type": "uint64_t" },
+            { "name": "write data update info", "type": "uint8_t", "annotation": "const*", "length": "write data update info length", "skip_serialize": true},
+            { "name": "offset", "type": "uint64_t"},
+            { "name": "size", "type": "uint64_t"}
+        ],
+        "device create buffer": [
+            { "name": "device id", "type": "ObjectId" },
+            { "name": "descriptor", "type": "buffer descriptor", "annotation": "const*" },
+            { "name": "result", "type": "ObjectHandle", "handle_type": "buffer" },
+            { "name": "read handle create info length", "type": "uint64_t" },
+            { "name": "read handle create info", "type": "uint8_t", "annotation": "const*", "length": "read handle create info length", "skip_serialize": true},
+            { "name": "write handle create info length", "type": "uint64_t" },
+            { "name": "write handle create info", "type": "uint8_t", "annotation": "const*", "length": "write handle create info length", "skip_serialize": true}
+        ],
+        "device create compute pipeline async": [
+            { "name": "device id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "pipeline object handle", "type": "ObjectHandle", "handle_type": "compute pipeline"},
+            { "name": "descriptor", "type": "compute pipeline descriptor", "annotation": "const*"}
+        ],
+        "device create render pipeline async": [
+            { "name": "device id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "pipeline object handle", "type": "ObjectHandle", "handle_type": "render pipeline"},
+            { "name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"}
+        ],
+        "device pop error scope": [
+            { "name": "device id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" }
+        ],
+        "destroy object": [
+            { "name": "object type", "type": "ObjectType" },
+            { "name": "object id", "type": "ObjectId" }
+        ],
+        "queue on submitted work done": [
+            { "name": "queue id", "type": "ObjectId" },
+            { "name": "signal value", "type": "uint64_t" },
+            { "name": "request serial", "type": "uint64_t" }
+        ],
+        "queue write buffer": [
+            {"name": "queue id", "type": "ObjectId" },
+            {"name": "buffer id", "type": "ObjectId" },
+            {"name": "buffer offset", "type": "uint64_t"},
+            {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "size", "wire_is_data_only": true},
+            {"name": "size", "type": "uint64_t"}
+        ],
+        "queue write texture": [
+            {"name": "queue id", "type": "ObjectId" },
+            {"name": "destination", "type": "image copy texture", "annotation": "const*"},
+            {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "data size", "wire_is_data_only": true},
+            {"name": "data size", "type": "uint64_t"},
+            {"name": "data layout", "type": "texture data layout", "annotation": "const*"},
+            {"name": "writeSize", "type": "extent 3D", "annotation": "const*"}
+        ],
+        "shader module get compilation info": [
+            { "name": "shader module id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" }
+        ],
+        "instance request adapter": [
+            { "name": "instance id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "adapter object handle", "type": "ObjectHandle", "handle_type": "adapter"},
+            { "name": "options", "type": "request adapter options", "annotation": "const*" }
+        ],
+        "adapter request device": [
+            { "name": "adapter id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "device object handle", "type": "ObjectHandle", "handle_type": "device"},
+            { "name": "descriptor", "type": "device descriptor", "annotation": "const*" }
+        ]
+    },
+    "return commands": {
+        "buffer map async callback": [
+            { "name": "buffer", "type": "ObjectHandle", "handle_type": "buffer" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "uint32_t" },
+            { "name": "read data update info length", "type": "uint64_t" },
+            { "name": "read data update info", "type": "uint8_t", "annotation": "const*", "length": "read data update info length", "skip_serialize": true }
+        ],
+        "device create compute pipeline async callback": [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "create pipeline async status" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "device create render pipeline async callback": [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "create pipeline async status" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "device uncaptured error callback": [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "type", "type": "error type"},
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "device logging callback": [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "type", "type": "logging type"},
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "device lost callback" : [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "reason", "type": "device lost reason" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "device pop error scope callback": [
+            { "name": "device", "type": "ObjectHandle", "handle_type": "device" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "type", "type": "error type" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+        ],
+        "queue work done callback": [
+            { "name": "queue", "type": "ObjectHandle", "handle_type": "queue" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "queue work done status" }
+        ],
+        "shader module get compilation info callback": [
+            { "name": "shader module", "type": "ObjectHandle", "handle_type": "shader module" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "compilation info request status" },
+            { "name": "info", "type": "compilation info", "annotation": "const*", "optional": true }
+        ],
+        "instance request adapter callback": [
+            { "name": "instance", "type": "ObjectHandle", "handle_type": "instance" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "request adapter status" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true },
+            { "name": "properties", "type": "adapter properties", "annotation": "const*", "optional": "true" },
+            { "name": "limits", "type": "supported limits", "annotation": "const*", "optional": "true" },
+            { "name": "features count", "type": "uint32_t"},
+            { "name": "features", "type": "feature name", "annotation": "const*", "length": "features count"}
+        ],
+        "adapter request device callback": [
+            { "name": "adapter", "type": "ObjectHandle", "handle_type": "adapter" },
+            { "name": "request serial", "type": "uint64_t" },
+            { "name": "status", "type": "request device status" },
+            { "name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true },
+            { "name": "limits", "type": "supported limits", "annotation": "const*", "optional": "true" },
+            { "name": "features count", "type": "uint32_t"},
+            { "name": "features", "type": "feature name", "annotation": "const*", "length": "features count"}
+        ]
+    },
+    "special items": {
+        "client_side_structures": [
+            "SurfaceDescriptorFromMetalLayer",
+            "SurfaceDescriptorFromWindowsHWND",
+            "SurfaceDescriptorFromXlibWindow",
+            "SurfaceDescriptorFromWindowsCoreWindow",
+            "SurfaceDescriptorFromWindowsSwapChainPanel",
+            "SurfaceDescriptorFromAndroidNativeWindow"
+        ],
+        "client_side_commands": [
+            "AdapterCreateDevice",
+            "AdapterGetProperties",
+            "AdapterGetLimits",
+            "AdapterHasFeature",
+            "AdapterEnumerateFeatures",
+            "AdapterRequestDevice",
+            "BufferMapAsync",
+            "BufferGetConstMappedRange",
+            "BufferGetMappedRange",
+            "DeviceCreateBuffer",
+            "DeviceCreateComputePipelineAsync",
+            "DeviceCreateRenderPipelineAsync",
+            "DeviceGetLimits",
+            "DeviceHasFeature",
+            "DeviceEnumerateFeatures",
+            "DevicePopErrorScope",
+            "DeviceSetDeviceLostCallback",
+            "DeviceSetUncapturedErrorCallback",
+            "DeviceSetLoggingCallback",
+            "InstanceRequestAdapter",
+            "ShaderModuleGetCompilationInfo",
+            "QueueOnSubmittedWorkDone",
+            "QueueWriteBuffer",
+            "QueueWriteTexture"
+        ],
+        "client_handwritten_commands": [
+            "BufferDestroy",
+            "BufferUnmap",
+            "DeviceCreateErrorBuffer",
+            "DeviceGetQueue",
+            "DeviceInjectError"
+        ],
+        "client_special_objects": [
+            "Adapter",
+            "Buffer",
+            "Device",
+            "Instance",
+            "Queue",
+            "ShaderModule"
+        ],
+        "server_custom_pre_handler_commands": [
+            "BufferDestroy",
+            "BufferUnmap"
+        ],
+        "server_handwritten_commands": [
+            "QueueSignal"
+        ],
+        "server_reverse_lookup_objects": [
+        ]
+    }
+}
diff --git a/docs/dawn/OWNERS b/docs/dawn/OWNERS
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/docs/dawn/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/docs/dawn/buffer_mapping.md b/docs/dawn/buffer_mapping.md
new file mode 100644
index 0000000..0663f68
--- /dev/null
+++ b/docs/dawn/buffer_mapping.md
@@ -0,0 +1,3 @@
+- Buffer mapping dawn wire memory transfer interface design
+    - https://docs.google.com/document/d/1JOhCpmJ_JyNZJtX6MVbSgxjtG1TdKORdeOGYtSfVYjk/edit?usp=sharing&resourcekey=0-1bFi47mR1jkBLdRFxcTVig
+- TODO: make a md doc targeted at code walkthrough for contributors
diff --git a/docs/dawn/building.md b/docs/dawn/building.md
new file mode 100644
index 0000000..230c222
--- /dev/null
+++ b/docs/dawn/building.md
@@ -0,0 +1,46 @@
+# Building Dawn
+
+## System requirements
+
+- Linux
+  - The `pkg-config` command:
+    ```sh
+    # Install pkg-config on Ubuntu
+    sudo apt-get install pkg-config
+    ```
+
+- Mac
+  - [Xcode](https://developer.apple.com/xcode/) 12.2+.
+  - The macOS 11.0 SDK. Run `xcode-select` to check whether you have it.
+    ```sh
+    ls `xcode-select -p`/Platforms/MacOSX.platform/Developer/SDKs
+    ```
+
+## Install `depot_tools`
+
+Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
+
+[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+## Get the code
+
+```sh
+# Clone the repo as "dawn"
+git clone https://dawn.googlesource.com/dawn dawn && cd dawn
+
+# Bootstrap the gclient configuration
+cp scripts/standalone.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+## Build Dawn
+
+Then generate build files using `gn args out/Debug` or `gn args out/Release`.
+A text editor will appear asking build options, the most common option is `is_debug=true/false`; otherwise `gn args out/Release --list` shows all the possible options.
+
+On macOS you'll want to add the `use_system_xcode=true` in most cases. (and if you're a googler please get XCode from go/xcode).
+
+Then use `ninja -C out/Release` to build dawn and for example `./out/Release/dawn_end2end_tests` to run the tests.
+
diff --git a/docs/dawn/codegen.md b/docs/dawn/codegen.md
new file mode 100644
index 0000000..b62c449
--- /dev/null
+++ b/docs/dawn/codegen.md
@@ -0,0 +1,112 @@
+# Dawn's code generators.
+
+Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. They start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation that's then used by some [Jinja2](https://jinja.palletsprojects.com/) templates to generate the code. This is similar to the model/view separation in Web development.
+
+Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/master:out/Debug/gen/third_party/dawn/src/).
+
+## Dawn "JSON API" generators
+
+Most of the code generation is done from [`dawn.json`](../dawn.json) which is a JSON description of the WebGPU API with extra annotation used by some of the generators. The code for all the "Dawn JSON" generators is in [`dawn_json_generator.py`](../generator/dawn_json_generator.py) (with templates in the regular template dir).
+
+At this time it is used to generate:
+
+ - the Dawn, Emscripten, and upstream webgpu-native `webgpu.h` C header
+ - the Dawn and Emscripten `webgpu_cpp.cpp/h` C++ wrapper over the C header
+ - libraries that implements `webgpu.h` by calling in a static or `thread_local` proc table
+ - other parts of the [Emscripten](https://emscripten.org/) WebGPU implementation
+ - a GMock version of the API with its proc table for testing
+ - validation helper functions for dawn_native
+ - the definition of dawn_native's proc table
+ - dawn_native's internal version of the webgpu.h types
+ - utilities for working with dawn_native's chained structs
+ - a lot of dawn_wire parts, see below
+
+Internally `dawn.json` is a dictionary from the "canonical name" of things to their definition. The "canonical name" is a space-separated (mostly) lower-case version of the name that's parsed into a `Name` Python object. Then that name can be turned into various casings with `.CamelCase()` `.SNAKE_CASE()`, etc. When `dawn.json` things reference each other, it is always via these "canonical names".
+
+The `"_metadata"` key in the JSON file is used by flexible templates for generating various Web Standard API that contains following metadata:
+
+ - `"api"` a string, the name of the Web API
+ - `"namespace"` a string, the namespace of C++ wrapper
+ - `"c_prefix"` (optional) a string, the prefix of C function and data type, it will default to upper-case of `"namespace"` if it's not provided.
+ - `"proc_table_prefix"` a string, the prefix of proc table.
+ - `"impl_dir"` a string, the directory of API implementation
+ - `"native_namespace"` a string, the namespace of native implementation
+ - `"copyright_year"` (optional) a string, templates will use the year of copyright.
+
+The basic schema is that every entry is a thing with a `"category"` key what determines the sub-schema to apply to that thing. Categories and their sub-shema are defined below. Several parts of the schema use the concept of "record" which is a list of "record members" which are a combination of a type, a name and other metadata. For example the list of arguments of a function is a record. The list of structure members is a record. This combined concept is useful for the dawn_wire generator to generate code for structure and function calls in a very similar way.
+
+Most items and sub-items can include a list of `"tags"`, which, if specified, conditionally includes the item if any of its tags appears in the `enabled_tags` configuration passed to `parse_json`. This is used to include and exclude various items for Dawn, Emscripten, or upstream header variants. Tags are applied in the "parse_json" step ([rather than later](https://docs.google.com/document/d/1fBniVOxx3-hQbxHMugEPcQsaXaKBZYVO8yG9iXJp-fU/edit?usp=sharing)): this has the benefit of automatically catching when, for a particular tag configuration, an included item references an excluded item.
+
+A **record** is a list of **record members**, each of which is a dictionary with the following schema:
+ - `"name"` a string
+ - `"type"` a string, the name of the base type for this member
+ - `"annotation"` a string, default to "value". Define the C annotation to apply to the base type. Allowed annotations are `"value"` (the default), `"*"`, `"const*"`
+ - `"length"` (default to 1 if not set), a string. Defines length of the array pointed to for pointer arguments. If not set the length is implicitly 1 (so not an array), but otherwise it can be set to the name of another member in the same record that will contain the length of the array (this is heavily used in the `fooCount` `foos` pattern in the API). As a special case `"strlen"` can be used for `const char*` record members to denote that the length should be determined with `strlen`.
+ - `"optional"` (default to false) a boolean that says whether this member is optional. Member records can be optional if they are pointers (otherwise dawn_wire will always try to dereference them), objects (otherwise dawn_wire will always try to encode their ID and crash), or if they have a `"default"` key. Optional pointers and objects will always default to `nullptr`.
+ - `"default"` (optional) a number or string. If set the record member will use that value as default value. Depending on the member's category it can be a number, a string containing a number, or the name of an enum/bitmask value.
+ - `"wire_is_data_only"` (default to false) a boolean that says whether it is safe to directly return a pointer of this member that is pointing to a piece of memory in the transfer buffer into dawn_wire. To prevent TOCTOU attacks, by default in dawn_wire we must ensure every single value returned to dawn_native a copy of what's in the wire, so `"wire_is_data_only"` is set to true only when the member is data-only and don't impact control flow.
+
+**`"native"`**, doesn't have any other key. This is used to define native types that can be referenced by name in other things.
+
+**`"typedef"`** (usually only used for gradual deprecations):
+ - `"type"`: the name of the things this is a typedef for.
+
+**`"enum"`** an `uint32_t`-based enum value.
+ - `"values"` an array of enum values. Each value is a dictionary containing:
+   - `"name"` a string
+   - `"value"` a number that can be decimal or hexadecimal
+   - `"jsrepr"` (optional) a string to allow overriding how this value map to Javascript for the Emscripten bits
+   - `"valid"` (defaults to true) a boolean that controls whether the dawn_native validation utilities will consider this enum value valid.
+ - `"emscripten_no_enum_table"` (optional) if true, skips generating an enum table in `library_webgpu_enum_tables.js`
+
+**`"bitmask"`** an `uint32_t`-based bitmask. It is similar to **`"enum"`** but can be output differently.
+
+**`"function pointer"`** defines a function pointer type that can be used by other things.
+ - `"returns"` a string that's the name of the return type
+ - `"args"` a **record**, so an array of **record members**
+
+**`"structure"`**
+ - `"members"` a **record**, so an array of **record members**
+ - `"extensible"` (defaults to false) a boolean defining if this is an "extensible" WebGPU structure (i.e. has `nextInChain`). "descriptor" structures should usually have this set to true.
+ - `"chained"` (defaults to false) a boolean defining if this is a structure that can be "chained" in a WebGPU structure (i.e. has `nextInChain` and `sType`)
+
+**`"object"`**
+ - `**methods**` an array of methods for this object. Note that "release" and "reference" don't need to be specified. Each method is a dictionary containing:
+   - `"name"` a string
+   - `"return_type"` (default to no return type) a string that's the name of the return type.
+   - `"arguments"` a **record**, so an array of **record members**
+
+**`"constant"`**
+ - `"type"`: a string, the name of the base data type
+ - `"value"`: a string, the value is defined with preprocessor macro
+
+**`"function"`** declares a function that not belongs to any class.
+ - `"returns"` a string that's the name of the return type
+ - `"args"` a **record**, so an array of **record members**
+
+## Dawn "wire" generators
+
+The generator for the pieces of dawn_wire need additional data which is found in [`dawn_wire_json`](../dawn_wire.json). Examples of pieces that are generated are:
+
+ - `WireCmd.cpp/.h` the most important piece: the meat of the serialization / deserialization code for WebGPU structures and commands
+ - `ServerHandlers/Doers.cpp` that does the complete handling of all regular WebGPU methods in the server
+ - `ApiProcs.cpp` that implements the complete handling of all regular WebGPU methods in the client
+
+Most of the WebGPU methods can be handled automatically by the wire client/server but some of them need custom handling (for example because they handle callbacks or need client-side state tracking). `dawn_wire.json` defines which methods need special handling, and extra wire commands that can be used by that special handling (and will get `WireCmd` support).
+
+The schema of `dawn_wire.json` is a dictionary with the following keys:
+ - `"commands"` an array of **records** defining extra client->server commands that can be used in special-cased code path.
+   - Each **record member** can have an extra `"skip_serialize"` key that's a boolean that default to false and makes `WireCmd` skip it on its on-wire format.
+ - `"return commands"` like `"commands"` but in revers, an array of **records** defining extra server->client commands
+ - `"special items"` a dictionary containing various lists of methods or object that require special handling in places in the dawn_wire autogenerated files
+   - `"client_side_structures"`: a list of structure that we shouldn't generate serialization/deserialization code for because they are client-side only
+   - `"client_handwritten_commands"`: a list of methods that are written manually and won't be automatically generated in the client
+   - `"client_side_commands"`: a list of methods that won't be automatically generated in the server. Gets added to `"client_handwritten_commands"`
+   - `"client_special_objects"`: a list of objects that need special manual state-tracking in the client and won't be autogenerated
+   - `"server_custom_pre_handler_commands"`: a list of methods that will run custom "pre-handlers" before calling the autogenerated handlers in the server
+   - `"server_handwrittten_commands"`: a list of methods that are written manually and won't be automatically generated in the server.
+   - `server_reverse_object_lookup_objects`: a list of objects for which the server will maintain an object -> ID mapping.
+
+## OpenGL loader generator
+
+The code to load OpenGL entrypoints from a `GetProcAddress` function is generated from [`gl.xml`](../third_party/khronos/gl.xml) and the [list of extensions](../src/dawn/native/opengl/supported_extensions.json) it supports.
diff --git a/docs/dawn/contributing.md b/docs/dawn/contributing.md
new file mode 100644
index 0000000..6fabb37
--- /dev/null
+++ b/docs/dawn/contributing.md
@@ -0,0 +1,122 @@
+# How to contribute to Dawn
+
+First off, we'd love to get your contributions to Dawn!
+
+Everything helps other folks using Dawn and WebGPU: from small fixes and documentation
+improvements to larger features and optimizations.
+Please read on to learn about the contribution process.
+
+## One time setup
+
+### Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution.
+This simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different Google project), you probably don't need to do
+it again.
+
+### Gerrit setup
+
+Dawn's contributions are submitted and reviewed on [Dawn's Gerrit](https://dawn-review.googlesource.com).
+
+Gerrit works a bit differently than Github (if that's what you're used to):
+there are no forks. Instead everyone works on the same repository. Gerrit has
+magic branches for various purpose:
+
+ - `refs/for/<branch>` (most commonly `refs/for/main`) is a branch that anyone
+can push to that will create or update code reviews (called CLs for ChangeList)
+for the commits pushed.
+ - `refs/changes/00/<change number>/<patchset>` is a branch that corresponds to
+the commits that were pushed for codereview for "change number" at a certain
+"patchset" (a new patchset is created each time you push to a CL).
+
+#### Gerrit's .gitcookies
+
+To push commits to Gerrit your `git` command needs to be authenticated. This is
+done with `.gitcookies` that will make `git` send authentication information
+when connecting to the remote. To get the `.gitcookies`, log-in to [Dawn's Gerrit](https://dawn-review.googlesource.com)
+and browse to the [new-password](https://dawn.googlesource.com/new-password)
+page that will give you shell/cmd commands to run to update `.gitcookie`.
+
+#### Set up the commit-msg hook
+
+Gerrit associates commits to CLs based on a `Change-Id:` tag in the commit
+message. Each push with commits with a `Change-Id:` will update the
+corresponding CL.
+
+To add the `commit-msg` hook that will automatically add a `Change-Id:` to your
+commit messages, run the following command:
+
+```
+f=`git rev-parse --git-dir`/hooks/commit-msg ; mkdir -p $(dirname $f) ; curl -Lo $f https://gerrit-review.googlesource.com/tools/hooks/commit-msg ; chmod +x $f
+```
+
+Gerrit helpfully reminds you of that command if you forgot to set up the hook
+before pushing commits.
+
+## The code review process
+
+All submissions, including submissions by project members, require review.
+
+### Discuss the change if needed
+
+Some changes are inherently risky, because they have long-term or architectural
+consequences, contain a lot of unknowns or other reasons. When that's the case
+it is better to discuss it on the [Dawn Matrix Channel](https://matrix.to/#/#webgpu-dawn:matrix.org)
+or the [Dawn mailing-list](https://groups.google.com/g/dawn-graphics/members).
+
+### Pushing changes to code review
+
+Before pushing changes to code review, it is better to run `git cl presubmit`
+that will check the formatting of files and other small things.
+
+Pushing commits is done with `git push origin HEAD:refs/for/main`. Which means
+push to `origin` (i.e. Gerrit) the currently checkout out commit to the
+`refs/for/main` magic branch that creates or updates CLs.
+
+In the terminal you will see a URL where code review for this CL will happen.
+CLs start in the "Work In Progress" state. To start the code review proper,
+click on "Start Review", add reviewers and click "Send and start review". If
+you are unsure which reviewers to use, pick one of the reviewers in the
+[OWNERS file](../OWNERS) who will review or triage the CL.
+
+When code review asks for changes in the commits, you can amend them any way
+you want (small fixup commit and `git rebase -i` are crowd favorites) and run
+the same `git push origin HEAD:refs/for/main` command.
+
+### Tracking issues
+
+We usually like to have commits associated with issues in [Dawn's issue tracker](https://bugs.chromium.org/p/dawn/issues/list)
+so that commits for the issue can all be found on the same page. This is done
+by adding a `Bug: dawn:<issue number>` tag at the end of the commit message. It
+is also possible to reference Chromium or Tint issues with
+`Bug: tint:<issue number>` or `Bug: chromium:<issue number>`.
+
+Some small fixes (like typo fixes, or some one-off maintenance) don't need a
+tracking issue. When that's the case, it's good practice to call it out by
+adding a `Bug: None` tag.
+
+It is possible to make issues fixed automatically when the CL is merged by
+adding a `Fixed: <project>:<issue number>` tag in the commit message.
+
+### Iterating on code review
+
+Dawn follows the general [Google code review guidelines](https://google.github.io/eng-practices/review/).
+Most Dawn changes need reviews from two Dawn committers. Reviewers will set the
+"Code Review" CR+1 or CR+2 label once the change looks good to them (although
+it could still have comments that need to be addressed first). When addressing
+comments, please mark them as "Done" if you just address them, or start a
+discussion until they are resolved.
+
+Once you are granted rights (you can ask on your first contribution), you can
+add the "Commit Queue" CQ+1 label to run the automated tests for Dawn. Once the
+CL has CR+2 you can then add the CQ+2 label to run the automated tests and
+submit the commit if they pass.
+
+The "Auto Submit" AS+1 label can be used to make Gerrit automatically set the
+CQ+2 label once the CR+2 label is added.
diff --git a/docs/dawn/debug_markers.md b/docs/dawn/debug_markers.md
new file mode 100644
index 0000000..d7fd266
--- /dev/null
+++ b/docs/dawn/debug_markers.md
@@ -0,0 +1,50 @@
+# Debug Markers
+
+Dawn provides debug tooling integration for each backend.
+
+Debugging markers are exposed through this API:
+```
+partial GPUProgrammablePassEncoder {
+    void pushDebugGroup(const char * markerLabel);
+    void popDebugGroup();
+    void insertDebugMarker(const char * markerLabel);
+};
+```
+
+These APIs will result in silent no-ops if they are used without setting up
+the execution environment properly. Each backend has a specific process
+for setting up this environment.
+
+## D3D12
+
+Debug markers on D3D12 are implemented with the [PIX Event Runtime](https://blogs.msdn.microsoft.com/pix/winpixeventruntime/).
+
+To enable marker functionality, you must:
+1. Click the download link on https://www.nuget.org/packages/WinPixEventRuntime
+2. Rename the .nupkg file to a .zip extension, then extract its contents.
+3. Copy `bin\WinPixEventRuntime.dll` into the same directory as `libdawn_native.dll`.
+4. Launch your application.
+
+You may now call the debug marker APIs mentioned above and see them from your GPU debugging tool. When using your tool, it is supported to both launch your application with the debugger attached, or attach the debugger while your application is running.
+
+D3D12 debug markers have been tested with [Microsoft PIX](https://devblogs.microsoft.com/pix/) and [Intel Graphics Frame Analyzer](https://software.intel.com/en-us/gpa/graphics-frame-analyzer).
+
+Unfortunately, PIX's UI does does not lend itself to capturing single frame applications like tests. You must enable capture from within your application. To do this in Dawn tests, pass the --begin-capture-on-startup flag to dawn_end2end_tests.exe.
+
+## Vulkan
+
+Debug markers on Vulkan are implemented with [VK_EXT_debug_utils](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_EXT_debug_utils.html).
+
+To enable marker functionality, you must launch your application from your debugging tool. Attaching to an already running application is not supported.
+
+Vulkan markers have been tested with [RenderDoc](https://renderdoc.org/).
+
+## Metal
+
+Debug markers on Metal are used with the XCode debugger.
+
+To enable marker functionality, you must launch your application from XCode and use [GPU Frame Capture](https://developer.apple.com/documentation/metal/tools_profiling_and_debugging/metal_gpu_capture).
+
+## OpenGL
+
+Debug markers on OpenGL are not implemented and will result in a silent no-op. This is due to low adoption of the GL_EXT_debug_marker extension in Linux device drivers.
diff --git a/docs/dawn/debugging.md b/docs/dawn/debugging.md
new file mode 100644
index 0000000..740d0b2
--- /dev/null
+++ b/docs/dawn/debugging.md
@@ -0,0 +1,3 @@
+# Debugging Dawn
+
+(TODO)
diff --git a/docs/dawn/device_facilities.md b/docs/dawn/device_facilities.md
new file mode 100644
index 0000000..b625558
--- /dev/null
+++ b/docs/dawn/device_facilities.md
@@ -0,0 +1,106 @@
+# Devices
+
+In Dawn the `Device` is a "god object" that contains a lot of facilities useful for the whole object graph that descends from it.
+There a number of facilities common to all backends that live in the frontend and backend-specific facilities.
+Example of frontend facilities are the management of content-less object caches, or the toggle management.
+Example of backend facilities are GPU memory allocators or the backing API function pointer table.
+
+## Frontend facilities
+
+### Error Handling
+
+Dawn (dawn_native) uses the [Error.h](../src/dawn/native/Error.h) error handling to robustly handle errors.
+With `DAWN_TRY` errors bubble up all the way to, and are "consumed" by the entry-point that was called by the application.
+Error consumption uses `Device::ConsumeError` that expose them via the WebGPU "error scopes" and can also influence the device lifecycle by notifying of a device loss, or triggering a device loss..
+
+See [Error.h](../src/dawn/native/Error.h) for more information about using errors.
+
+### Device Lifecycle
+
+The device lifecycle is a bit more complicated than other objects in Dawn for multiple reasons:
+
+ - The device initialization creates facilities in both the backend and the frontend, which can fail.
+   When a device fails to initialize, it should still be possible to destroy it without crashing.
+ - Execution of commands on the GPU must be finished before the device can be destroyed (because there's noone to "DeleteWhenUnused" the device).
+ - On creation a device might want to run some GPU commands (like initializing zero-buffers), which must be completed before it is destroyed.
+ - A device can become "disconnected" when a TDR or hot-unplug happens.
+   In this case, destruction of the device doesn't need to wait on GPU commands to finish because they just disappeared.
+
+There is a state machine `State` defined in [Device.h](../src/dawn/native/Device.h) that controls all of the above.
+The most common state is `Alive` when there are potentially GPU commands executing.
+
+Initialization of a device looks like the following:
+
+ - `DeviceBase::DeviceBase` is called and does mostly nothing except setting `State` to `BeingCreated` (and initial toggles).
+ - `backend::Device::Initialize` creates things like the underlying device and other stuff that doesn't run GPU commands.
+ - It then calls `DeviceBase::Initialize` that enables the `DeviceBase` facilities and sets the `State` to `Alive`.
+ - Optionally, `backend::Device::Initialize` can now enqueue GPU commands for its initialization.
+ - The device is ready to be used by the application!
+
+While it is `Alive` the device can notify it has been disconnected by the backend, in which case it jumps directly to the `Disconnected` state.
+Internal errors, or a call to `LoseForTesting` can also disconnect the device, but in the underlying API commands are still running, so the frontend will finish all commands (with `WaitForIdleForDesctruction`) and prevent any new commands to be enqueued (by setting state to `BeingDisconnected`).
+After this the device is set in the `Disconnected` state.
+If an `Alive` device is destroyed, then a similar flow to `LoseForTesting happens`.
+
+All this ensures that during destruction or forceful disconnect of the device, it properly gets to the `Disconnected` state with no commands executing on the GPU.
+After disconnecting, frontend will call `backend::Device::DestroyImpl` so that it can properly free driver objects.
+
+### Toggles
+
+Toggles are booleans that control code paths inside of Dawn, like lazy-clearing resources or using D3D12 render passes.
+They aren't just booleans close to the code path they control, because embedders of Dawn like Chromium want to be able to surface what toggles are used by a device (like in about:gpu).
+
+Toogles are to be used for any optional code path in Dawn, including:
+
+ - Workarounds for driver bugs.
+ - Disabling select parts of the validation or robustness.
+ - Enabling limitations that help with testing.
+ - Using more advanced or optional backend API features.
+
+Toggles can be queried using `DeviceBase::IsToggleEnabled`:
+```
+bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+```
+
+Toggles are defined in a table in [Toggles.cpp](../src/dawn/native/Toggles.cpp) that also includes their name and description.
+The name can be used to force enabling of a toggle or, at the contrary, force the disabling of a toogle.
+This is particularly useful in tests so that the two sides of a code path can be tested (for example using D3D12 render passes and not).
+
+Here's an example of a test that is run in the D3D12 backend both with the D3D12 render passes forcibly disabled, and in the default configuration.
+```
+DAWN_INSTANTIATE_TEST(RenderPassTest,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_render_pass"}));
+// The {} is the list of force enabled toggles, {"..."} the force disabled ones.
+```
+
+The initialization order of toggles looks as follows:
+
+ - The toggles overrides from the device descriptor are applied.
+ - The frontend device default toggles are applied (unless already overriden).
+ - The backend device default toggles are applied (unless already overriden) using `DeviceBase::SetToggle`
+ - The backend device can ignore overriden toggles if it can't support them by using `DeviceBase::ForceSetToggle`
+
+Forcing toggles should only be done when there is no "safe" option for the toggle.
+This is to avoid crashes during testing when the tests try to use both sides of a toggle.
+For toggles that are safe to enable, like workarounds, the tests can run against the base configuration and with the toggle enabled.
+For toggles that are safe to disable, like using more advanced backing API features, the tests can run against the base configuation and with the toggle disabled.
+
+### Immutable object caches
+
+A number of WebGPU objects are immutable once created, and can be expensive to create, like pipelines.
+`DeviceBase` contains caches for these objects so that they are free to create the second time.
+This is also useful to be able to compare objects by pointers like `BindGroupLayouts` since two BGLs would be equal iff they are the same object.
+
+### Format Tables
+
+The frontend has a `Format` structure that represent all the information that are known about a particular WebGPU format for this Device based on the enabled features.
+Formats are precomputed at device initialization and can be queried from a WebGPU format either assuming the format is a valid enum, or in a safe manner that doesn't do this assumption.
+A reference to these formats can be stored persistently as they have the same lifetime as the `Device`.
+
+Formats also have an "index" so that backends can create parallel tables for internal informations about formats, like what they translate to in the backing API.
+
+### Object factory
+
+Like WebGPU's device object, `DeviceBase` is an factory with methods to create all kinds of other WebGPU objects.
+WebGPU has some objects that aren't created from the device, like the texture view, but in Dawn these creations also go through `DeviceBase` so that there is a single factory for each backend.
diff --git a/docs/dawn/errors.md b/docs/dawn/errors.md
new file mode 100644
index 0000000..9f60ba7
--- /dev/null
+++ b/docs/dawn/errors.md
@@ -0,0 +1,118 @@
+# Dawn Errors
+
+Dawn produces errors for several reasons. The most common is validation errors, indicating that a
+given descriptor, configuration, state, or action is not valid according to the WebGPU spec. Errors
+can also be produced during exceptional circumstances such as the system running out of GPU memory
+or the device being lost.
+
+The messages attached to these errors will frequently be one of the primary tools developers use to
+debug problems their applications, so it is important that the messages Dawn returns are useful.
+
+Following the guidelines in document will help ensure that Dawn's errors are clear, informative, and
+consistent.
+
+## Returning Errors
+
+Since errors are expected to be an exceptional case, it's important that code that produces an error
+doesn't adversely impact the performance of the error-free path. The best way to ensure that is to
+make sure that all errors are returned from within an `if` statement that uses the `DAWN_UNLIKELY()`
+macro to indicate that the expression is not expected to evaluate to true. For example:
+
+```C++
+if (DAWN_UNLIKELY(offset > buffer.size)) {
+  return DAWN_VALIDATION_ERROR("Offset (%u) is larger than the size (%u) of %s."
+    offset, buffer.size, buffer);
+}
+```
+
+To simplify producing validation errors, it's strongly suggested that the `DAWN_INVALID_IF()` macro
+is used, which will wrap the expression in the `DAWN_UNLIKELY()` macro for you:
+
+```C++
+// This is equivalent to the previous example.
+DAWN_INVALID_IF(offset > buffer.size, "Offset (%u) is larger than the size (%u) of %s."
+    offset, buffer.size, buffer);
+```
+
+// TODO: Cover `MaybeError`, `ResultOrError<T>`, `DAWN_TRY(_ASSIGN)`, `DAWN_TRY_CONTEXT`, etc...
+
+## Error message formatting
+
+Errors returned from `DAWN_INVALID_IF()` or `DAWN_VALIDATION_ERROR()` should follow these guidelines:
+
+**Write error messages as complete sentences. (First word capitalized, ends with a period, etc.)**
+ * Example: `Command encoding has already finished.`
+ * Instead of: `encoder finished`
+
+**Error messages should be in the present tense.**
+ * Example: `Buffer is not large enough...`
+ * Instead of: `Buffer was not large enough...`
+
+**When possible any values mentioned should be immediately followed in parentheses by the given value.**
+ * Example: `("Array stride (%u) is not...", stride)`
+ * Output: `Array stride (16) is not...`
+
+**When possible any object or descriptors should be represented by the object formatted as a string.**
+ * Example: `("The %s size (%s) is...", buffer, buffer.size)`
+ * Output: `The [Buffer] size (512) is...` or `The [Buffer "Label"] size (512) is...`
+
+**Enum and bitmask values should be formatted as strings rather than integers or hex values.**
+ * Example: `("The %s format (%s) is...", texture, texture.format)`
+ * Output: `The [Texture "Label"] format (TextureFormat::RGBA8Unorm) is...`
+
+**When possible state both the given value and the expected value or limit.**
+ * Example: `("Offset (%u) is larger than the size (%u) of %s.", offset, buffer.size, buffer)`
+ * Output: `Offset (256) is larger than the size (144) of [Buffer "Label"].`
+
+**State errors in terms of what failed, rather than how to satisfy the rule.**
+ * Example: `Binding size (3) is less than the minimum binding size (32).`
+ * Instead of: `Binding size (3) must not be less than the minimum binding size (32).`
+
+**Don't repeat information given in context.**
+ * See next section for details
+
+## Error Context
+
+When calling functions that perform validation consider if calling `DAWN_TRY_CONTEXT()` rather than
+`DAWN_TRY()` is appropriate. Context messages, when provided, will be appended to any validation
+errors as a type of human readable "callstack". An error with context messages appears will be
+formatted as:
+
+```
+<Primary error message.>
+ - While <context message lvl 2>
+ - While <context message lvl 1>
+ - While <context message lvl 0>
+```
+
+For example, if a validation error occurs while validating the creation of a BindGroup, the message
+may be:
+
+```
+Binding size (256) is larger than the size (80) of [Buffer "View Matrix"].
+ - While validating entries[1] as a Buffer
+ - While validating [BindGroupDescriptor "Frame Bind Group"] against [BindGroupLayout]
+ - While calling CreateBindGroup
+```
+
+// TODO: Guidelines about when to include context
+
+## Context message formatting
+
+Context messages should follow these guidelines:
+
+**Begin with the action being taken, starting with a lower case. `- While ` will be appended by Dawn.**
+ * Example: `("validating primitive state")`
+ * Output: `- While validating primitive state`
+
+**When looping through arrays, indicate the array name and index.**
+ * Example: `("validating buffers[%u]", i)`
+ * Output: `- While validating buffers[2]`
+
+**Indicate which descriptors or objects are being examined in as high-level a context as possible.**
+ * Example: `("validating % against %", descriptor, descriptor->layout)`
+ * Output: `- While validating [BindGroupDescriptor "Label"] against [BindGroupLayout]`
+
+**When possible, indicate the function call being made as the top-level context, as well as the parameters passed.**
+ * Example: `("calling %s.CreatePipelineLayout(%s).", this, descriptor)`
+ * Output: `- While calling [Device].CreatePipelineLayout([PipelineLayoutDescriptor]).`
diff --git a/docs/dawn/external_resources.md b/docs/dawn/external_resources.md
new file mode 100644
index 0000000..91f7a60
--- /dev/null
+++ b/docs/dawn/external_resources.md
@@ -0,0 +1,6 @@
+# Dawn's external resources links
+
+## Design Docs
+
+- Buffer mapping dawn wire memory transfer interface design
+    - https://docs.google.com/document/d/1JOhCpmJ_JyNZJtX6MVbSgxjtG1TdKORdeOGYtSfVYjk/edit?usp=sharing&resourcekey=0-1bFi47mR1jkBLdRFxcTVig
\ No newline at end of file
diff --git a/docs/dawn/features/dawn_internal_usages.md b/docs/dawn/features/dawn_internal_usages.md
new file mode 100644
index 0000000..521a3ac
--- /dev/null
+++ b/docs/dawn/features/dawn_internal_usages.md
@@ -0,0 +1,38 @@
+# Dawn Internal Usages
+
+The `dawn-internal-usages` feature allows adding additional usage which affects how a texture is allocated, but does not affect normal frontend validation.
+
+Adds `WGPUDawnTextureInternalUsageDescriptor` for specifying additional internal usages to create a texture with.
+
+Example Usage:
+```
+wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+
+wgpu::TextureDescriptor desc = {};
+// set properties of desc.
+desc.nextInChain = &internalDesc;
+
+device.CreateTexture(&desc);
+```
+
+Adds `WGPUDawnEncoderInternalUsageDescriptor` which may be chained on `WGPUCommandEncoderDescriptor`. Setting `WGPUDawnEncoderInternalUsageDescriptor::useInternalUsages` to `true` means that internal resource usages will be visible during validation. ex.) A texture that has `WGPUTextureUsage_CopySrc` in `WGPUDawnEncoderInternalUsageDescriptor::internalUsage`, but not in `WGPUTextureDescriptor::usage` may be used as the source of a copy command.
+
+
+Example Usage:
+```
+wgpu::DawnEncoderInternalUsageDescriptor internalEncoderDesc = { true };
+wgpu::CommandEncoderDescriptor encoderDesc = {};
+encoderDesc.nextInChain = &internalEncoderDesc;
+
+wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&encoderDesc);
+
+// This will be valid
+wgpu::ImageCopyTexture src = {};
+src.texture = texture;
+encoder.CopyTextureToBuffer(&src, ...);
+```
+
+One use case for this is so that Chromium can use an internal copyTextureToTexture command to implement copies from a WebGPU texture-backed canvas to other Web platform primitives when the swapchain texture was not explicitly created with CopySrc usage in Javascript.
+
+Note: copyTextureToTextureInternal will be removed in favor of `WGPUDawnEncoderInternalUsageDescriptor`.
diff --git a/docs/dawn/features/dawn_native.md b/docs/dawn/features/dawn_native.md
new file mode 100644
index 0000000..0b4664b
--- /dev/null
+++ b/docs/dawn/features/dawn_native.md
@@ -0,0 +1,15 @@
+# Dawn Native
+
+The `dawn-native` feature enables additional functionality that is supported only
+when the WebGPU implementation is `dawn_native`.
+
+Additional functionality:
+ - `wgpu::DawnTogglesDeviceDescriptor` may be chained on `wgpu::DeviceDescriptor` on device creation to enable Dawn-specific toggles on the device.
+
+ - `wgpu::DawnCacheDeviceDescriptor` may be chained on `wgpu::DeviceDescriptor` on device creation to enable cache options such as isolation keys.
+
+ - Synchronous `adapter.CreateDevice(const wgpu::DeviceDescriptor*)` may be called.
+
+Notes:
+ - Enabling this feature in the `wgpu::DeviceDescriptor` does nothing, but
+its presence in the Adapter's set of supported features means that the additional functionality is supported.
diff --git a/docs/dawn/fuzzing.md b/docs/dawn/fuzzing.md
new file mode 100644
index 0000000..8521901
--- /dev/null
+++ b/docs/dawn/fuzzing.md
@@ -0,0 +1,18 @@
+# Fuzzing Dawn
+
+## `dawn_wire_server_and_frontend_fuzzer`
+
+The `dawn_wire_server_and_frontend_fuzzer` sets up Dawn using the Null backend, and passes inputs to the wire server. This fuzzes the `dawn_wire` deserialization, as well as Dawn's frontend validation.
+
+## `dawn_wire_server_and_vulkan_backend_fuzzer`
+
+The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_frontend_fuzzer` but it runs using a Vulkan CPU backend such as Swiftshader. This fuzzer supports error injection by using the first bytes of the fuzzing input as a Vulkan call index for which to mock a failure.
+
+## Automatic Seed Corpus Generation
+
+Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as a example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.
+
+Dawn has a CI builder [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64) which runs on a periodic schedule. This bot runs the `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can manually be done by running: `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace for each test, where the traces are prepended with `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error. If the fuzzer doesn't support error injection it will skip the header. [cron-linux-clang-rel-x64] then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories.
+Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/master:recipes/recipes/dawn.py] recipe for specific details.
+
+Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes.
\ No newline at end of file
diff --git a/docs/dawn/infra.md b/docs/dawn/infra.md
new file mode 100644
index 0000000..605d9ca
--- /dev/null
+++ b/docs/dawn/infra.md
@@ -0,0 +1,92 @@
+# Dawn's Continuous Testing Infrastructure
+
+Dawn uses Chromium's continuous integration (CI) infrastructure to continually run tests on changes to Dawn and provide a way for developers to run tests against their changes before submitting. CI bots continually build and run tests for every new change, and Try bots build and run developers' pending changes before submission. Dawn uses two different build recipes. There is a Dawn build recipe which checks out Dawn standalone, compiles, and runs the `dawn_unittests`. And, there is the Chromium build recipe which checks out Dawn inside a Chromium checkout. Inside a Chromium checkout, there is more infrastructure available for triggering `dawn_end2end_tests` that run on real GPU hardware, and we are able to run Chromium integration tests as well as tests for WebGPU.
+
+ - [Dawn CI Builders](https://ci.chromium.org/p/dawn/g/ci/builders)
+ - [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders)
+ - [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console)
+
+For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/master/docs/gpu/gpu_testing_bot_details.md).
+
+## Dawn CI/Try Builders
+Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders:
+  - [dawn/try/presubmit](https://ci.chromium.org/p/dawn/builders/try/presubmit)
+  - [dawn/try/linux-clang-dbg-x64](https://ci.chromium.org/p/dawn/builders/try/linux-clang-dbg-x64)
+  - [dawn/try/linux-clang-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/linux-clang-dbg-x86)
+  - [dawn/try/linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/try/linux-clang-rel-x64)
+  - [dawn/try/mac-dbg](https://ci.chromium.org/p/dawn/builders/try/mac-dbg)
+  - [dawn/try/mac-rel](https://ci.chromium.org/p/dawn/builders/try/mac-rel)
+  - [dawn/try/win-clang-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/win-clang-dbg-x86)
+  - [dawn/try/win-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/try/win-clang-rel-x64)
+  - [dawn/try/win-msvc-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/win-msvc-dbg-x86)
+  - [dawn/try/win-msvc-rel-x64](https://ci.chromium.org/p/dawn/builders/try/win-msvc-rel-x64)
+
+There are additional `chromium/try` builders, but those are described later in this document.
+
+These bots are defined in both buckets luci.dawn.ci and luci.dawn.try, though their ACL permissions differ. luci.dawn.ci bots will be scheduled regularly based on [[dawn]//infra/config/global/luci-scheduler.cfg](../infra/config/global/luci-scheduler.cfg). luci.dawn.try bots will be triggered on the CQ based on [[dawn]//infra/config/global/commit-queue.cfg](../infra/config/global/commit-queue.cfg).
+
+One particular note is `buckets.swarming.builder_defaults.recipe.name: "dawn"` which specifies these use the [`dawn.py`](https://source.chromium.org/search/?q=file:recipes/dawn.py) build recipe.
+
+Build status for both CI and Try builders can be seen at this [console](https://ci.chromium.org/p/dawn) which is generated from [[dawn]//infra/config/global/luci-milo.cfg](../infra/config/global/luci-milo.cfg).
+
+## Dawn Build Recipe
+The [`dawn.py`](https://cs.chromium.org/search/?q=file:recipes/dawn.py) build recipe is simple and intended only for testing compilation and unit tests. It does the following:
+  1. Checks out Dawn standalone and dependencies
+  2. Builds based on the `builder_mixins.recipe.properties` coming from the builder config in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg).
+  3. Runs the `dawn_unittests` on that same bot.
+
+## Dawn Chromium-Based CI Waterfall Bots
+The [`chromium.dawn`](https://ci.chromium.org/p/chromium/g/chromium.dawn/console) waterfall consists of the bots specified in the `chromium.dawn` section of [[chromium/src]//testing/buildbot/waterfalls.pyl](https://source.chromium.org/search/?q=file:waterfalls.pyl%20chromium.dawn). Bots named "Builder" are responsible for building top-of-tree Dawn, whereas bots named "DEPS Builder" are responsible for building Chromium's DEPS version of Dawn.
+
+The other bots, such as "Dawn Linux x64 DEPS Release (Intel HD 630)" receive the build products from the Builders and are responsible for running tests. The Tester configuration may specify `mixins` from [[chromium/src]//testing/buildbot/mixins.pyl](https://source.chromium.org/search/?q=file:buildbot/mixins.pyl) which help specify bot test dimensions like OS version and GPU vendor. The Tester configuration also specifies `test_suites` from [[chromium/src]//testing/buildbot/test_suites.pyl](https://source.chromium.org/search/?q=file:buildbot/test_suites.pyl%20dawn_end2end_tests) which declare the tests are arguments passed to tests that should be run on the bot.
+
+The Builder and Tester bots are additionally configured at [[chromium/tools/build]//scripts/slave/recipe_modules/chromium_tests/chromium_dawn.py](https://source.chromium.org/search?q=file:chromium_dawn.py) which defines the bot specs for the builders and testers. Some things to note:
+ - The Tester bots set `parent_buildername` to be their respective Builder bot.
+ - The non DEPS bots use the `dawn_top_of_tree` config.
+ - The bots apply the `mb` config which references [[chromium]//tools/mb/mb_config.pyl](https://source.chromium.org/search?q=file:mb_config.pyl%20%22Dawn%20Linux%20x64%20Builder%22) and [[chromium]//tools/mb/mb_config_buckets.pyl](https://source.chromium.org/search?q=file:mb_config_buckets.pyl%20%22Dawn%20Linux%20x64%20Builder%22). Various mixins there specify build dimensions like debug, release, gn args, x86, x64, etc.
+
+Finally, builds on these waterfall bots are automatically scheduled based on the configuration in [[chromium/src]//infra/config/buckets/ci.star](https://source.chromium.org/search?q=file:ci.star%20%22Dawn%20Linux%20x64%20Builder%22). Note that the Tester bots are `triggered_by` the Builder bots.
+
+## Dawn Chromium-Based Tryjobs
+[[dawn]//infra/config/global/commit-queue.cfg](../infra/config/global/commit-queue.cfg) declares additional tryjob builders which are defined in the Chromium workspace. The reason for this separation is that jobs sent to these bots rely on the Chromium infrastructure for doing builds and triggering jobs on bots with GPU hardware in swarming.
+
+At the time of writing, the bots for Dawn CLs are:
+  - [chromium/try/linux-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/linux-dawn-rel)
+  - [chromium/try/mac-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/mac-dawn-rel)
+  - [chromium/try/win-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/win-dawn-rel)
+
+And for Chromium CLs:
+  - [chromium/try/dawn-linux-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-linux-x64-deps-rel)
+  - [chromium/try/dawn-mac-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-mac-x64-deps-rel)
+  - [chromium/try/dawn-win10-x86-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-win10-x86-deps-rel)
+  - [chromium/try/dawn-win10-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-win10-x64-deps-rel)
+
+ The configuration for these bots is generated from [[chromium]//infra/config/buckets/try.star](https://source.chromium.org/search/?q=file:try.star%20linux-dawn-rel) which uses the [`chromium_dawn_builder`](https://source.chromium.org/search/?q=%22def%20chromium_dawn_builder%22) function which sets the `mastername` to `tryserver.chromium.dawn`.
+
+[[chromium/tools/build]//scripts/slave/recipe_modules/chromium_tests/trybots.py](https://source.chromium.org/search/?q=file:trybots.py%20tryserver.chromium.dawn) specifies `tryserver.chromium.dawn` bots as mirroring bots from the `chromium.dawn` waterfall. Example:
+```
+'dawn-linux-x64-deps-rel': {
+    'bot_ids': [
+        {
+            'mastername': 'chromium.dawn',
+            'buildername': 'Dawn Linux x64 DEPS Builder',
+            'tester': 'Dawn Linux x64 DEPS Release (Intel HD 630)',
+        },
+        {
+            'mastername': 'chromium.dawn',
+            'buildername': 'Dawn Linux x64 DEPS Builder',
+            'tester': 'Dawn Linux x64 DEPS Release (NVIDIA)',
+        },
+    ],
+},
+```
+
+Using the [[chromium/tools/build]//scripts/slave/recipes/chromium_trybot.py](https://source.chromium.org/search/?q=file:chromium_trybot.py) recipe, these trybots will cherry-pick a CL and run the same tests as the CI waterfall bots. The trybots also pick up some build mixins from [[chromium]//tools/mb/mb_config.pyl](https://source.chromium.org/search?q=file:mb_config.pyl%20dawn-linux-x64-deps-rel).
+
+## Bot Allocation
+
+Bots are physically allocated based on the configuration in [[chromium/infradata/config]//configs/chromium-swarm/starlark/bots/dawn.star](https://chrome-internal.googlesource.com/infradata/config/+/refs/heads/master/configs/chromium-swarm/starlark/bots/dawn.star) (Google only).
+
+`dawn/try` bots are using builderless configurations which means they use builderless GCEs shared with Chromium bots and don't need explicit allocation.
+
+`chromium/try` bots are still explicitly allocated with a number of GCE instances and lifetime of the build cache. All of the GCE bots should eventually be migrated to builderless (crbug.com/dawn/328). Mac bots such as `dawn-mac-x64-deps-rel`, `mac-dawn-rel`, `Dawn Mac x64 Builder`, and `Dawn Mac x64 DEPS Builder` point to specific ranges of machines that have been reserved by the infrastructure team.
diff --git a/docs/dawn/overview.md b/docs/dawn/overview.md
new file mode 100644
index 0000000..acb3848
--- /dev/null
+++ b/docs/dawn/overview.md
@@ -0,0 +1,54 @@
+# Dawn repository overview
+
+This repository contains the implementation of Dawn, which is itself composed of two main libraries (dawn_native and dawn_wire), along with support libraries, tests, and samples. Dawn makes heavy use of code-generation based on the `dawn.json` file that describes the native WebGPU API. It is used to generate the API headers, C++ wrapper, parts of the client-server implementation, and more!
+
+## Directory structure
+
+- [`dawn.json`](../dawn.json): contains a description of the native WebGPU in JSON form. It is the data model that's used by the code generators.
+- [`dawn_wire.json`](../dawn_wire.json): contains additional information used to generate `dawn_wire` files, such as commands in addition to regular WebGPU commands.
+- [`examples`](../examples): a small collection of samples using the native WebGPU API. They were mostly used when bringing up Dawn for the first time, and to test the `WGPUSwapChain` object.
+- [`generator`](../generator): directory containg the code generators and their templates. Generators are based on Jinja2 and parse data-models from JSON files.
+    - [`dawn_json_generator.py`](../generator/dawn_json_generator.py): the main code generator that outputs the WebGPU headers, C++ wrapper, client-server implementation, etc.
+    - [`templates`](../generator/templates): Jinja2 templates for the generator, with subdirectories for groups of templates that are all used in the same library.
+- [`infra`](../infra): configuration file for the commit-queue infrastructure.
+- [`scripts`](../scripts): contains a grab-bag of files that are used for building Dawn, in testing, etc.
+- [`src`](../src):
+  - [`dawn`](../src/dawn): root directory for Dawn code
+      - [`common`](../src/dawn/common): helper code that is allowed to be used by Dawn's core libraries, `dawn_native` and `dawn_wire`. Also allowed for use in all other Dawn targets.
+      - [`fuzzers`](../src/dawn/fuzzers): various fuzzers for Dawn that are running in [Clusterfuzz](https://google.github.io/clusterfuzz/).
+      - [`native`](../src/dawn/native): code for the implementation of WebGPU on top of graphics APIs. Files in this folder are the "frontend" while subdirectories are "backends".
+         - `<backend>`: code for the implementation of the backend on a specific graphics API, for example `d3d12`, `metal` or `vulkan`.
+      - [`tests`](../src/dawn/tests):
+        - [`end2end`](../src/dawn/tests/end2end): tests for the execution of the WebGPU API and require a GPU to run.
+        - [`perf_tests`](../src/dawn/tests/perf_tests): benchmarks for various aspects of Dawn.
+        - [`unittests`](../src/dawn/tests/unittests): code unittests of internal classes, but also by extension WebGPU API tests that don't require a GPU to run.
+          - [`validation`](../src/dawn/tests/unittests/validation): WebGPU validation tests not using the GPU (frontend tests)
+        - [`white_box`](../src/dawn/tests/white_box): tests using the GPU that need to access the internals of `dawn_native` or `dawn_wire`.
+      - [`wire`](../src/dawn/wire): code for an implementation of WebGPU as a client-server architecture.
+      - [`utils`](../src/dawn/utils): helper code to use Dawn used by tests and samples but disallowed for `dawn_native` and `dawn_wire`.
+      - [`platform`](../src/dawn/platform): definition of interfaces for dependency injection in `dawn_native` or `dawn_wire`.
+  - [`include`](../src/include): public headers with subdirectories for each library. Note that some headers are auto-generated and not present directly in the directory.
+- [`third_party`](../third_party): directory where dependencies live as well as their buildfiles.
+
+## Dawn Native (`dawn_native`)
+
+The largest library in Dawn is `dawn_native` which implements the WebGPU API by translating to native graphics APIs such as D3D12, Metal or Vulkan. It is composed of a frontend that does all the state-tracking and validation, and backends that do the actual translation to the native graphics APIs.
+
+`dawn_native` hosts the [spirv-val](https://github.com/KhronosGroup/SPIRV-Tools) for validation of SPIR-V shaders and uses [Tint](https://dawn.googlesource.com/tint/) shader translator to convert WGSL shaders to an equivalent shader for use in the native graphics API (HLSL for D3D12, MSL for Metal or Vulkan SPIR-V for Vulkan).
+
+## Dawn Wire (`dawn_wire`)
+
+A second library that implements both a client that takes WebGPU commands and serializes them into a buffer, and a server that deserializes commands from a buffer, validates they are well-formed and calls the relevant WebGPU commands. Some server to client communication also happens so the API's callbacks work properly.
+
+Note that `dawn_wire` is meant to do as little state-tracking as possible so that the client can be lean and defer most of the heavy processing to the server side where the server calls into `dawn_native`.
+
+## Dawn Proc (`dawn_proc`)
+
+Normally libraries implementing `webgpu.h` should implement function like `wgpuDeviceCreateBuffer` but instead `dawn_native` and `dawn_wire` implement the `dawnProcTable` which is a structure containing all the WebGPU functions Dawn implements. Then a `dawn_proc` library contains a static version of this `dawnProcTable` and for example forwards `wgpuDeviceCreateBuffer` to the `procTable.deviceCreateBuffer` function pointer. This is useful in two ways:
+
+ - It allows deciding at runtime whether to use `dawn_native` and `dawn_wire`, which is useful to test boths paths with the same binary in our infrastructure.
+ - It avoids applications that know they will only use Dawn to query all entrypoints at once instead of using `wgpuGetProcAddress` repeatedly.
+
+## Code generation
+
+When the WebGPU API evolves, a lot of places in Dawn have to be updated, so to reduce efforts, Dawn relies heavily on code generation for things like headers, proc tables and de/serialization. For more information, see [codegen.md](codegen.md).
diff --git a/docs/dawn/testing.md b/docs/dawn/testing.md
new file mode 100644
index 0000000..749736b
--- /dev/null
+++ b/docs/dawn/testing.md
@@ -0,0 +1,69 @@
+# Testing Dawn
+
+(TODO)
+
+## Dawn Perf Tests
+
+For benchmarking with `dawn_perf_tests`, it's best to build inside a Chromium checkout using the following GN args:
+```
+is_official_build = true  # Enables highest optimization level, using LTO on some platforms
+use_dawn = true           # Required to build Dawn
+use_cfi_icall=false       # Required because Dawn dynamically loads function pointers, and we don't sanitize them yet.
+```
+
+A Chromium checkout is required for the highest optimization flags. It is possible to build and run `dawn_perf_tests` from a standalone Dawn checkout as well, only using GN arg `is_debug=false`. For more information on building, please see [building.md](./building.md).
+
+### Terminology
+
+ - Iteration: The unit of work being measured. It could be a frame, a draw call, a data upload, a computation, etc. `dawn_perf_tests` metrics are reported as time per iteration.
+ - Step: A group of Iterations run together. The number of `iterationsPerStep` is provided to the constructor of `DawnPerfTestBase`.
+ - Trial: A group of Steps run consecutively. `kNumTrials` are run for each test. A Step in a Trial is run repetitively for approximately `kCalibrationRunTimeSeconds`. Metrics are accumlated per-trial and reported as the total time divided by `numSteps * iterationsPerStep`. `maxStepsInFlight` is passed to the `DawnPerfTestsBase` constructor to limit the number of Steps pipelined.
+
+(See [`//src/dawn/tests/perf_tests/DawnPerfTest.h`](https://cs.chromium.org/chromium/src/third_party/dawn/src/dawn/tests/perf_tests/DawnPerfTest.h) for the values of the constants).
+
+### Metrics
+
+`dawn_perf_tests` measures the following metrics:
+ - `wall_time`: The time per iteration, including time waiting for the GPU between Steps in a Trial.
+ - `cpu_time`: The time per iteration, not including time waiting for the GPU between Steps in a Trial.
+ - `validation_time`: The time for CommandBuffer / RenderBundle validation.
+ - `recording_time`: The time to convert Dawn commands to native commands.
+
+Metrics are reported according to the format specified at
+[[chromium]//build/scripts/slave/performance_log_processor.py](https://cs.chromium.org/chromium/build/scripts/slave/performance_log_processor.py)
+
+### Dumping Trace Files
+
+The test harness supports a `--trace-file=path/to/trace.json` argument where Dawn trace events can be dumped. The traces can be viewed in Chrome's `about://tracing` viewer.
+
+### Test Runner
+
+[`//scripts/perf_test_runner.py`](https://cs.chromium.org/chromium/src/third_party/dawn/scripts/perf_test_runner.py) may be run to continuously run a test and report mean times and variances.
+
+Currently the script looks in the `out/Release` build directory and measures the `wall_time` metric (hardcoded into the script). These should eventually become arguments.
+
+Example usage:
+
+```
+scripts/perf_test_runner.py DrawCallPerf.Run/Vulkan__e_skip_validation
+```
+
+### Tests
+
+**BufferUploadPerf**
+
+Tests repetitively uploading data to the GPU using either `WriteBuffer` or `CreateBuffer` with `mappedAtCreation = true`.
+
+**DrawCallPerf**
+
+DrawCallPerf tests drawing a simple triangle with many ways of encoding commands,
+binding, and uploading data to the GPU. The rationale for this is the following:
+  - Static/Multiple/Dynamic vertex buffers: Tests switching buffer bindings. This has
+    a state tracking cost as well as a GPU driver cost.
+  - Static/Multiple/Dynamic bind groups: Same rationale as vertex buffers
+  - Static/Dynamic pipelines: In addition to a change to GPU state, changing the pipeline
+    layout incurs additional state tracking costs in Dawn.
+  - With/Without render bundles: All of the above can have lower validation costs if
+    precomputed in a render bundle.
+  - Static/Dynamic data: Updating data for each draw is a common use case. It also tests
+    the efficiency of resource transitions.
diff --git a/docs/imgs/README.md b/docs/imgs/README.md
new file mode 100644
index 0000000..60f42f4
--- /dev/null
+++ b/docs/imgs/README.md
@@ -0,0 +1 @@
+Dawn's logo and derivatives found in this folder are under the [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) license.
diff --git a/docs/imgs/dawn_logo.png b/docs/imgs/dawn_logo.png
new file mode 100644
index 0000000..a7d40cb
--- /dev/null
+++ b/docs/imgs/dawn_logo.png
Binary files differ
diff --git a/docs/imgs/dawn_logo.svg b/docs/imgs/dawn_logo.svg
new file mode 100644
index 0000000..034aad2
--- /dev/null
+++ b/docs/imgs/dawn_logo.svg
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_logo.svg"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="1.6830615"
+     inkscape:cx="187.15894"
+     inkscape:cy="384.41852"
+     inkscape:window-width="3840"
+     inkscape:window-height="2066"
+     inkscape:window-x="-11"
+     inkscape:window-y="-11"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg47" />
+  <circle
+     cx="309"
+     cy="214"
+     r="121"
+     fill="#FDE293"
+     id="circle33" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 126L670.255 543L188.745 543L429.5 126Z"
+     fill="#005A9C"
+     id="path35" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 543L550 335L309 335L429.5 543Z"
+     fill="#0066B0"
+     id="path37" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M308.5 335L369 439L248 439L308.5 335Z"
+     fill="#0086E8"
+     id="path39" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M308.5 543L369 439L248 439L308.5 543Z"
+     fill="#0093FF"
+     id="path41" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M217.5 335L338 543L97 543L217.5 335Z"
+     fill="#0076CC"
+     id="path43" />
+  <path
+     d="M210.52 682V578.896H241.624C252.376 578.896 261.64 581.104 269.416 585.52C277.192 589.84 283.192 595.888 287.416 603.664C291.64 611.44 293.752 620.368 293.752 630.448C293.752 640.528 291.64 649.456 287.416 657.232C283.192 664.912 277.192 670.96 269.416 675.376C261.64 679.792 252.376 682 241.624 682H210.52ZM222.76 670.336H241.624C249.688 670.336 256.696 668.8 262.648 665.728C268.6 662.56 273.208 658 276.472 652.048C279.736 646.096 281.368 638.896 281.368 630.448C281.368 622 279.736 614.8 276.472 608.848C273.208 602.896 268.6 598.384 262.648 595.312C256.696 592.144 249.688 590.56 241.624 590.56H222.76V670.336ZM332.794 684.304C327.322 684.304 322.522 683.248 318.394 681.136C314.266 679.024 311.002 676.144 308.602 672.496C306.298 668.752 305.146 664.528 305.146 659.824C305.146 654.448 306.538 649.936 309.322 646.288C312.106 642.544 315.85 639.76 320.554 637.936C325.258 636.016 330.442 635.056 336.106 635.056C341.002 635.056 345.322 635.584 349.066 636.64C352.81 637.696 355.498 638.752 357.13 639.808V635.344C357.13 629.776 355.162 625.36 351.226 622.096C347.29 618.832 342.49 617.2 336.826 617.2C332.794 617.2 329.002 618.112 325.45 619.936C321.994 621.664 319.258 624.112 317.242 627.28L308.026 620.368C310.906 616.048 314.842 612.64 319.834 610.144C324.922 607.552 330.586 606.256 336.826 606.256C346.81 606.256 354.634 608.896 360.298 614.176C365.962 619.456 368.794 626.56 368.794 635.488V682H357.13V671.488H356.554C354.634 674.752 351.61 677.728 347.482 680.416C343.354 683.008 338.458 684.304 332.794 684.304ZM333.946 673.504C338.17 673.504 342.01 672.448 345.466 670.336C349.018 668.224 351.85 665.392 353.962 661.84C356.074 658.288 357.13 654.4 357.13 650.176C354.922 648.64 352.138 647.392 348.778 646.432C345.514 645.472 341.914 644.992 337.978 644.992C330.97 644.992 325.834 646.432 322.57 649.312C319.306 652.192 317.674 655.744 317.674 659.968C317.674 664 319.21 667.264 322.282 669.76C325.354 672.256 329.242 673.504 333.946 673.504ZM399.968 682L376.352 608.56H389.024L406.448 666.592H406.592L425.168 608.56H437.696L456.272 666.448H456.416L473.84 608.56H486.224L462.464 682H450.08L431.072 623.392L412.208 682H399.968ZM496.353 682V608.56H508.017V619.36H508.593C510.513 615.808 513.633 612.736 517.953 610.144C522.369 607.552 527.169 606.256 532.353 606.256C541.377 606.256 548.145 608.896 552.657 614.176C557.265 619.36 559.569 626.272 559.569 634.912V682H547.329V636.784C547.329 629.68 545.601 624.688 542.145 621.808C538.785 618.832 534.417 617.344 529.041 617.344C525.009 617.344 521.457 618.496 518.385 620.8C515.313 623.008 512.913 625.888 511.185 629.44C509.457 632.992 508.593 636.736 508.593 640.672V682H496.353Z"
+     fill="black"
+     id="path45" />
+</svg>
diff --git a/docs/imgs/dawn_logo_black.png b/docs/imgs/dawn_logo_black.png
new file mode 100644
index 0000000..3c74794
--- /dev/null
+++ b/docs/imgs/dawn_logo_black.png
Binary files differ
diff --git a/docs/imgs/dawn_logo_black.svg b/docs/imgs/dawn_logo_black.svg
new file mode 100644
index 0000000..627bf3b
--- /dev/null
+++ b/docs/imgs/dawn_logo_black.svg
@@ -0,0 +1,87 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_logo_black.svg"
+   inkscape:version="1.1 (c4e8f9e, 2021-05-24)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="0.96102398"
+     inkscape:cx="255.45668"
+     inkscape:cy="371.99904"
+     inkscape:window-width="1296"
+     inkscape:window-height="997"
+     inkscape:window-x="0"
+     inkscape:window-y="25"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="svg47"
+     showguides="false" />
+  <path
+     id="circle33"
+     style="fill:#808080"
+     d="m 306.7315,89 a 121,121 0 0 0 -121,121 121,121 0 0 0 120.8418,120.98438 L 411.40142,149.41797 A 121,121 0 0 0 306.7315,89 Z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 543L550 335L309 335L429.5 543Z"
+     fill="#0066B0"
+     id="path37"
+     style="fill:#000000" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 429.5,124 550.29135,332.50291 H 308.70865 Z"
+     fill="#0066b0"
+     id="path37-7"
+     style="fill:#000000;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     id="path37-7-5"
+     style="fill:#000000;fill-opacity:1;stroke-width:0.489674"
+     d="m 306.64855,336.55438 -44.67773,77.12109 14.32812,24.73242 h 89.35547 z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 552,337 672.79135,545.50291 H 431.20865 Z"
+     fill="#0066b0"
+     id="path37-7-9"
+     style="fill:#000000;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 321.32659,515.63497 43.72296,-75.16014 h -87.44593 z"
+     fill="#0093ff"
+     id="path41"
+     style="stroke-width:0.722694;fill:#000000" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 214.90584,337 120.5,208 H 94.405844 Z"
+     fill="#0076cc"
+     id="path43"
+     style="fill:#000000" />
+  <path
+     d="M 208.52,682 V 578.896 h 31.104 c 10.752,0 20.016,2.208 27.792,6.624 7.776,4.32 13.776,10.368 18,18.144 4.224,7.776 6.336,16.704 6.336,26.784 0,10.08 -2.112,19.008 -6.336,26.784 -4.224,7.68 -10.224,13.728 -18,18.144 -7.776,4.416 -17.04,6.624 -27.792,6.624 z m 12.24,-11.664 h 18.864 c 8.064,0 15.072,-1.536 21.024,-4.608 5.952,-3.168 10.56,-7.728 13.824,-13.68 3.264,-5.952 4.896,-13.152 4.896,-21.6 0,-8.448 -1.632,-15.648 -4.896,-21.6 -3.264,-5.952 -7.872,-10.464 -13.824,-13.536 -5.952,-3.168 -12.96,-4.752 -21.024,-4.752 H 220.76 Z m 110.034,13.968 c -5.472,0 -10.272,-1.056 -14.4,-3.168 -4.128,-2.112 -7.392,-4.992 -9.792,-8.64 -2.304,-3.744 -3.456,-7.968 -3.456,-12.672 0,-5.376 1.392,-9.888 4.176,-13.536 2.784,-3.744 6.528,-6.528 11.232,-8.352 4.704,-1.92 9.888,-2.88 15.552,-2.88 4.896,0 9.216,0.528 12.96,1.584 3.744,1.056 6.432,2.112 8.064,3.168 v -4.464 c 0,-5.568 -1.968,-9.984 -5.904,-13.248 -3.936,-3.264 -8.736,-4.896 -14.4,-4.896 -4.032,0 -7.824,0.912 -11.376,2.736 -3.456,1.728 -6.192,4.176 -8.208,7.344 l -9.216,-6.912 c 2.88,-4.32 6.816,-7.728 11.808,-10.224 5.088,-2.592 10.752,-3.888 16.992,-3.888 9.984,0 17.808,2.64 23.472,7.92 5.664,5.28 8.496,12.384 8.496,21.312 V 682 H 355.13 v -10.512 h -0.576 c -1.92,3.264 -4.944,6.24 -9.072,8.928 -4.128,2.592 -9.024,3.888 -14.688,3.888 z m 1.152,-10.8 c 4.224,0 8.064,-1.056 11.52,-3.168 3.552,-2.112 6.384,-4.944 8.496,-8.496 2.112,-3.552 3.168,-7.44 3.168,-11.664 -2.208,-1.536 -4.992,-2.784 -8.352,-3.744 -3.264,-0.96 -6.864,-1.44 -10.8,-1.44 -7.008,0 -12.144,1.44 -15.408,4.32 -3.264,2.88 -4.896,6.432 -4.896,10.656 0,4.032 1.536,7.296 4.608,9.792 3.072,2.496 6.96,3.744 11.664,3.744 z M 397.968,682 374.352,608.56 h 12.672 l 17.424,58.032 h 0.144 l 18.576,-58.032 h 12.528 l 18.576,57.888 h 0.144 L 471.84,608.56 h 12.384 L 460.464,682 H 448.08 L 429.072,623.392 410.208,682 Z m 96.385,0 v -73.44 h 11.664 v 10.8 h 0.576 c 1.92,-3.552 5.04,-6.624 9.36,-9.216 4.416,-2.592 9.216,-3.888 14.4,-3.888 9.024,0 15.792,2.64 20.304,7.92 4.608,5.184 6.912,12.096 6.912,20.736 V 682 h -12.24 v -45.216 c 0,-7.104 -1.728,-12.096 -5.184,-14.976 -3.36,-2.976 -7.728,-4.464 -13.104,-4.464 -4.032,0 -7.584,1.152 -10.656,3.456 -3.072,2.208 -5.472,5.088 -7.2,8.64 -1.728,3.552 -2.592,7.296 -2.592,11.232 V 682 Z"
+     fill="#000000"
+     id="path45" />
+  <path
+     id="path22856"
+     style="fill:#000000;fill-opacity:1"
+     d="M 367.29708,441 322.58028,517.86914 338.29708,545 h 89.5 z" />
+</svg>
diff --git a/docs/imgs/dawn_logo_black_notext.png b/docs/imgs/dawn_logo_black_notext.png
new file mode 100644
index 0000000..11d5416
--- /dev/null
+++ b/docs/imgs/dawn_logo_black_notext.png
Binary files differ
diff --git a/docs/imgs/dawn_logo_black_notext.svg b/docs/imgs/dawn_logo_black_notext.svg
new file mode 100644
index 0000000..d3804fd
--- /dev/null
+++ b/docs/imgs/dawn_logo_black_notext.svg
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_logo_black_notext.svg"
+   inkscape:version="1.1 (c4e8f9e, 2021-05-24)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="0.96102398"
+     inkscape:cx="255.45668"
+     inkscape:cy="371.99904"
+     inkscape:window-width="1296"
+     inkscape:window-height="997"
+     inkscape:window-x="0"
+     inkscape:window-y="25"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="svg47"
+     showguides="false" />
+  <path
+     id="circle33"
+     style="fill:#808080"
+     d="m 306.7315,89 a 121,121 0 0 0 -121,121 121,121 0 0 0 120.8418,120.98438 L 411.40142,149.41797 A 121,121 0 0 0 306.7315,89 Z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 543L550 335L309 335L429.5 543Z"
+     fill="#0066B0"
+     id="path37"
+     style="fill:#000000" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 429.5,124 550.29135,332.50291 H 308.70865 Z"
+     fill="#0066b0"
+     id="path37-7"
+     style="fill:#000000;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     id="path37-7-5"
+     style="fill:#000000;fill-opacity:1;stroke-width:0.489674"
+     d="m 306.64855,336.55438 -44.67773,77.12109 14.32812,24.73242 h 89.35547 z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 552,337 672.79135,545.50291 H 431.20865 Z"
+     fill="#0066b0"
+     id="path37-7-9"
+     style="fill:#000000;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 321.32659,515.63497 43.72296,-75.16014 h -87.44593 z"
+     fill="#0093ff"
+     id="path41"
+     style="stroke-width:0.722694;fill:#000000" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 214.90584,337 120.5,208 H 94.405844 Z"
+     fill="#0076cc"
+     id="path43"
+     style="fill:#000000" />
+  <path
+     id="path22856"
+     style="fill:#000000;fill-opacity:1"
+     d="M 367.29708,441 322.58028,517.86914 338.29708,545 h 89.5 z" />
+</svg>
diff --git a/docs/imgs/dawn_logo_notext.png b/docs/imgs/dawn_logo_notext.png
new file mode 100644
index 0000000..f9732e5
--- /dev/null
+++ b/docs/imgs/dawn_logo_notext.png
Binary files differ
diff --git a/docs/imgs/dawn_logo_notext.svg b/docs/imgs/dawn_logo_notext.svg
new file mode 100644
index 0000000..890619b
--- /dev/null
+++ b/docs/imgs/dawn_logo_notext.svg
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_notext_logo.svg"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="1.6830615"
+     inkscape:cx="187.15894"
+     inkscape:cy="384.41852"
+     inkscape:window-width="3840"
+     inkscape:window-height="2066"
+     inkscape:window-x="-11"
+     inkscape:window-y="-11"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg47" />
+  <circle
+     cx="304.81589"
+     cy="248.44208"
+     r="141.44208"
+     fill="#fde293"
+     id="circle33"
+     style="stroke-width:1.16894" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 445.67349,145.57511 727.10233,633.02428 H 164.24466 Z"
+     fill="#005a9c"
+     id="path35"
+     style="stroke-width:1.16894" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 445.67349,633.02428 586.5311,389.88417 H 304.81588 Z"
+     fill="#0066b0"
+     id="path37"
+     style="stroke-width:1.16894" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 304.23141,389.88417 70.72104,121.57005 H 233.51037 Z"
+     fill="#0086e8"
+     id="path39"
+     style="stroke-width:1.16894" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 304.23141,633.02428 374.95245,511.45422 H 233.51037 Z"
+     fill="#0093ff"
+     id="path41"
+     style="stroke-width:1.16894" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 197.85761,389.88417 338.71522,633.02428 H 57 Z"
+     fill="#0076cc"
+     id="path43"
+     style="stroke-width:1.16894" />
+</svg>
diff --git a/docs/imgs/dawn_logo_white.png b/docs/imgs/dawn_logo_white.png
new file mode 100644
index 0000000..8d835c4
--- /dev/null
+++ b/docs/imgs/dawn_logo_white.png
Binary files differ
diff --git a/docs/imgs/dawn_logo_white.svg b/docs/imgs/dawn_logo_white.svg
new file mode 100644
index 0000000..0f9d00c
--- /dev/null
+++ b/docs/imgs/dawn_logo_white.svg
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_white.svg"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="2.3802083"
+     inkscape:cx="255.01969"
+     inkscape:cy="371.81619"
+     inkscape:window-width="3840"
+     inkscape:window-height="2066"
+     inkscape:window-x="-11"
+     inkscape:window-y="-11"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg47"
+     showguides="false" />
+  <path
+     id="circle33"
+     style="fill:#ececec"
+     d="m 306.7315,89 a 121,121 0 0 0 -121,121 121,121 0 0 0 120.8418,120.98438 L 411.40142,149.41797 A 121,121 0 0 0 306.7315,89 Z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 543L550 335L309 335L429.5 543Z"
+     fill="#0066B0"
+     id="path37"
+     style="fill:#ffffff" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 429.5,124 550.29135,332.50291 H 308.70865 Z"
+     fill="#0066b0"
+     id="path37-7"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     id="path37-7-5"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:0.489674"
+     d="m 306.64855,336.55438 -44.67773,77.12109 14.32812,24.73242 h 89.35547 z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 552,337 672.79135,545.50291 H 431.20865 Z"
+     fill="#0066b0"
+     id="path37-7-9"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 321.32659,515.63497 43.72296,-75.16014 h -87.44593 z"
+     fill="#0093ff"
+     id="path41"
+     style="stroke-width:0.722694;fill:#ffffff" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 214.90584,337 120.5,208 H 94.405844 Z"
+     fill="#0076cc"
+     id="path43"
+     style="fill:#ffffff" />
+  <path
+     d="M 208.52,682 V 578.896 h 31.104 c 10.752,0 20.016,2.208 27.792,6.624 7.776,4.32 13.776,10.368 18,18.144 4.224,7.776 6.336,16.704 6.336,26.784 0,10.08 -2.112,19.008 -6.336,26.784 -4.224,7.68 -10.224,13.728 -18,18.144 -7.776,4.416 -17.04,6.624 -27.792,6.624 z m 12.24,-11.664 h 18.864 c 8.064,0 15.072,-1.536 21.024,-4.608 5.952,-3.168 10.56,-7.728 13.824,-13.68 3.264,-5.952 4.896,-13.152 4.896,-21.6 0,-8.448 -1.632,-15.648 -4.896,-21.6 -3.264,-5.952 -7.872,-10.464 -13.824,-13.536 -5.952,-3.168 -12.96,-4.752 -21.024,-4.752 H 220.76 Z m 110.034,13.968 c -5.472,0 -10.272,-1.056 -14.4,-3.168 -4.128,-2.112 -7.392,-4.992 -9.792,-8.64 -2.304,-3.744 -3.456,-7.968 -3.456,-12.672 0,-5.376 1.392,-9.888 4.176,-13.536 2.784,-3.744 6.528,-6.528 11.232,-8.352 4.704,-1.92 9.888,-2.88 15.552,-2.88 4.896,0 9.216,0.528 12.96,1.584 3.744,1.056 6.432,2.112 8.064,3.168 v -4.464 c 0,-5.568 -1.968,-9.984 -5.904,-13.248 -3.936,-3.264 -8.736,-4.896 -14.4,-4.896 -4.032,0 -7.824,0.912 -11.376,2.736 -3.456,1.728 -6.192,4.176 -8.208,7.344 l -9.216,-6.912 c 2.88,-4.32 6.816,-7.728 11.808,-10.224 5.088,-2.592 10.752,-3.888 16.992,-3.888 9.984,0 17.808,2.64 23.472,7.92 5.664,5.28 8.496,12.384 8.496,21.312 V 682 H 355.13 v -10.512 h -0.576 c -1.92,3.264 -4.944,6.24 -9.072,8.928 -4.128,2.592 -9.024,3.888 -14.688,3.888 z m 1.152,-10.8 c 4.224,0 8.064,-1.056 11.52,-3.168 3.552,-2.112 6.384,-4.944 8.496,-8.496 2.112,-3.552 3.168,-7.44 3.168,-11.664 -2.208,-1.536 -4.992,-2.784 -8.352,-3.744 -3.264,-0.96 -6.864,-1.44 -10.8,-1.44 -7.008,0 -12.144,1.44 -15.408,4.32 -3.264,2.88 -4.896,6.432 -4.896,10.656 0,4.032 1.536,7.296 4.608,9.792 3.072,2.496 6.96,3.744 11.664,3.744 z M 397.968,682 374.352,608.56 h 12.672 l 17.424,58.032 h 0.144 l 18.576,-58.032 h 12.528 l 18.576,57.888 h 0.144 L 471.84,608.56 h 12.384 L 460.464,682 H 448.08 L 429.072,623.392 410.208,682 Z m 96.385,0 v -73.44 h 11.664 v 10.8 h 0.576 c 1.92,-3.552 5.04,-6.624 9.36,-9.216 4.416,-2.592 9.216,-3.888 14.4,-3.888 9.024,0 15.792,2.64 20.304,7.92 4.608,5.184 6.912,12.096 6.912,20.736 V 682 h -12.24 v -45.216 c 0,-7.104 -1.728,-12.096 -5.184,-14.976 -3.36,-2.976 -7.728,-4.464 -13.104,-4.464 -4.032,0 -7.584,1.152 -10.656,3.456 -3.072,2.208 -5.472,5.088 -7.2,8.64 -1.728,3.552 -2.592,7.296 -2.592,11.232 V 682 Z"
+     fill="#000000"
+     id="path45"
+     style="fill:#ffffff" />
+  <path
+     id="path22856"
+     style="fill:#ffffff;fill-opacity:1"
+     d="M 367.29708,441 322.58028,517.86914 338.29708,545 h 89.5 z" />
+</svg>
diff --git a/docs/imgs/dawn_logo_white_notext.png b/docs/imgs/dawn_logo_white_notext.png
new file mode 100644
index 0000000..e6fe2b5
--- /dev/null
+++ b/docs/imgs/dawn_logo_white_notext.png
Binary files differ
diff --git a/docs/imgs/dawn_logo_white_notext.svg b/docs/imgs/dawn_logo_white_notext.svg
new file mode 100644
index 0000000..fba1f26
--- /dev/null
+++ b/docs/imgs/dawn_logo_white_notext.svg
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="768"
+   height="768"
+   viewBox="0 0 768 768"
+   fill="none"
+   version="1.1"
+   id="svg47"
+   sodipodi:docname="dawn_logo_white_notext.svg"
+   inkscape:version="1.1 (c4e8f9e, 2021-05-24)"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <defs
+     id="defs51" />
+  <sodipodi:namedview
+     id="namedview49"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0"
+     inkscape:pagecheckerboard="true"
+     showgrid="false"
+     inkscape:zoom="1.1009789"
+     inkscape:cx="254.77327"
+     inkscape:cy="371.94172"
+     inkscape:window-width="1296"
+     inkscape:window-height="997"
+     inkscape:window-x="0"
+     inkscape:window-y="25"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="svg47"
+     showguides="false" />
+  <path
+     id="circle33"
+     style="fill:#ececec"
+     d="m 306.7315,89 a 121,121 0 0 0 -121,121 121,121 0 0 0 120.8418,120.98438 L 411.40142,149.41797 A 121,121 0 0 0 306.7315,89 Z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M429.5 543L550 335L309 335L429.5 543Z"
+     fill="#0066B0"
+     id="path37"
+     style="fill:#ffffff" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 429.5,124 550.29135,332.50291 H 308.70865 Z"
+     fill="#0066b0"
+     id="path37-7"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     id="path37-7-5"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:0.489674"
+     d="m 306.64855,336.55438 -44.67773,77.12109 14.32812,24.73242 h 89.35547 z" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="M 552,337 672.79135,545.50291 H 431.20865 Z"
+     fill="#0066b0"
+     id="path37-7-9"
+     style="fill:#ffffff;fill-opacity:1;stroke-width:1.00242" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 321.32659,515.63497 43.72296,-75.16014 h -87.44593 z"
+     fill="#0093ff"
+     id="path41"
+     style="stroke-width:0.722694;fill:#ffffff" />
+  <path
+     fill-rule="evenodd"
+     clip-rule="evenodd"
+     d="m 214.90584,337 120.5,208 H 94.405844 Z"
+     fill="#0076cc"
+     id="path43"
+     style="fill:#ffffff" />
+  <path
+     id="path22856"
+     style="fill:#ffffff;fill-opacity:1"
+     d="M 367.29708,441 322.58028,517.86914 338.29708,545 h 89.5 z" />
+</svg>
diff --git a/generator/BUILD.gn b/generator/BUILD.gn
new file mode 100644
index 0000000..a1d954a
--- /dev/null
+++ b/generator/BUILD.gn
@@ -0,0 +1,63 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../scripts/dawn_overrides_with_defaults.gni")
+import("dawn_generator.gni")
+
+# The list of directories in which to check for stale autogenerated files.
+# It should include the list of all directories in which we ever generated
+# files but we can't just put dawn_gen_root because there are more than
+# autogenerated sources there.
+_stale_dirs = [
+  "dawn",
+  "dawn/native",
+  "dawn/wire",
+  "mock",
+  "src",
+]
+
+_allowed_output_dirs_file =
+    "${dawn_gen_root}/removed_stale_autogen_files.allowed_output_dirs"
+write_file(_allowed_output_dirs_file, dawn_allowed_gen_output_dirs)
+
+_stale_dirs_file = "${dawn_gen_root}/removed_stale_autogen_files.stale_dirs"
+write_file(_stale_dirs_file, _stale_dirs)
+
+_stamp_file = "${dawn_gen_root}/removed_stale_autogen_files.stamp"
+
+# An action that removes autogenerated files that aren't in allowed directories
+# see dawn_generator.gni for more details.
+action("remove_stale_autogen_files") {
+  script = "remove_files.py"
+  args = [
+    "--root-dir",
+    rebase_path(dawn_gen_root, root_build_dir),
+    "--allowed-output-dirs-file",
+    rebase_path(_allowed_output_dirs_file, root_build_dir),
+    "--stale-dirs-file",
+    rebase_path(_stale_dirs_file, root_build_dir),
+    "--stamp",
+    rebase_path(_stamp_file, root_build_dir),
+  ]
+
+  # Have the "list of file" inputs as a dependency so that the action reruns
+  # as soon as they change.
+  inputs = [
+    _allowed_output_dirs_file,
+    _stale_dirs_file,
+  ]
+
+  # Output a stamp file so we don't re-run this action on every build.
+  outputs = [ _stamp_file ]
+}
diff --git a/generator/CMakeLists.txt b/generator/CMakeLists.txt
new file mode 100644
index 0000000..a2d6784
--- /dev/null
+++ b/generator/CMakeLists.txt
@@ -0,0 +1,116 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+find_package(PythonInterp REQUIRED)
+message(STATUS "Dawn: using python at ${PYTHON_EXECUTABLE}")
+
+# Check for Jinja2
+if (NOT DAWN_JINJA2_DIR)
+    message(STATUS "Dawn: Using system jinja2")
+    execute_process(
+        COMMAND ${PYTHON_EXECUTABLE} -c "import jinja2"
+        RESULT_VARIABLE RET
+    )
+    if (NOT RET EQUAL 0)
+        message(FATAL_ERROR "Dawn: Missing dependencies for code generation, please ensure you have python-jinja2 installed.")
+    endif()
+else()
+    message(STATUS "Dawn: Using jinja2 at ${DAWN_JINJA2_DIR}")
+endif()
+
+# Function to invoke a generator_lib.py generator.
+#  - SCRIPT is the name of the script to call
+#  - ARGS are the extra arguments to pass to the script in addition to the base generator_lib.py arguments
+#  - PRINT_NAME is the name to use when outputting status or errors
+#  - RESULT_VARIABLE will be modified to contain the list of files generated by this generator
+function(DawnGenerator)
+    set(oneValueArgs SCRIPT RESULT_VARIABLE PRINT_NAME)
+    set(multiValueArgs ARGS)
+    cmake_parse_arguments(G "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+    # Build the set of args common to all invocation of that generator.
+    set(BASE_ARGS
+        ${PYTHON_EXECUTABLE}
+        ${G_SCRIPT}
+        --template-dir
+        "${DAWN_TEMPLATE_DIR}"
+        --root-dir
+        "${Dawn_SOURCE_DIR}"
+        --output-dir
+        "${DAWN_BUILD_GEN_DIR}"
+        ${G_ARGS}
+    )
+    if (DAWN_JINJA2_DIR)
+        list(APPEND BASE_ARGS --jinja2-path ${DAWN_JINJA2_DIR})
+    endif()
+
+    # Call the generator to get the list of its dependencies.
+    execute_process(
+        COMMAND ${BASE_ARGS} --print-cmake-dependencies
+        OUTPUT_VARIABLE DEPENDENCIES
+        RESULT_VARIABLE RET
+    )
+    if (NOT RET EQUAL 0)
+        message(FATAL_ERROR "Dawn: Failed to get the dependencies for ${G_PRINT_NAME}. Base args are '${BASE_ARGS}'.")
+    endif()
+
+    # Ask CMake to re-run if any of the dependencies changed as it might modify the build graph.
+    if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
+        set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${DEPENDENCIES})
+    endif()
+
+    # Call the generator to get the list of its outputs.
+    execute_process(
+        COMMAND ${BASE_ARGS} --print-cmake-outputs
+        OUTPUT_VARIABLE OUTPUTS
+        RESULT_VARIABLE RET
+    )
+    if (NOT RET EQUAL 0)
+        message(FATAL_ERROR "Dawn: Failed to get the outputs for ${G_PRINT_NAME}. Base args are '${BASE_ARGS}'.")
+    endif()
+
+    # Add the custom command that calls the generator.
+    add_custom_command(
+        COMMAND ${BASE_ARGS}
+        DEPENDS ${DEPENDENCIES}
+        OUTPUT ${OUTPUTS}
+        COMMENT "Dawn: Generating files for ${G_PRINT_NAME}."
+    )
+
+    # Return the list of outputs.
+    set(${G_RESULT_VARIABLE} ${OUTPUTS} PARENT_SCOPE)
+endfunction()
+
+# Helper function to call dawn_generator.py:
+#  - TARGET is the generator target to build
+#  - PRINT_NAME and RESULT_VARIABLE are like for DawnGenerator
+function(DawnJSONGenerator)
+    set(oneValueArgs TARGET RESULT_VARIABLE)
+    cmake_parse_arguments(G "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+    DawnGenerator(
+        SCRIPT "${Dawn_SOURCE_DIR}/generator/dawn_json_generator.py"
+        ARGS --dawn-json
+             "${Dawn_SOURCE_DIR}/dawn.json"
+             --wire-json
+             "${Dawn_SOURCE_DIR}/dawn_wire.json"
+             --targets
+             ${G_TARGET}
+        RESULT_VARIABLE RET
+        ${G_UNPARSED_ARGUMENTS}
+    )
+
+    # Forward the result up one more scope
+    set(${G_RESULT_VARIABLE} ${RET} PARENT_SCOPE)
+endfunction()
diff --git a/generator/dawn_generator.gni b/generator/dawn_generator.gni
new file mode 100644
index 0000000..28c5301
--- /dev/null
+++ b/generator/dawn_generator.gni
@@ -0,0 +1,121 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../scripts/dawn_overrides_with_defaults.gni")
+import("generator_lib.gni")
+
+# Dawn used to put autogenerated files in a lot of different places. When we
+# started to move them around, some compilation issues arised because some
+# stale include files stayed in the build directory and were picked up.
+# To counter this, now Dawn does the following:
+#
+#  1. The generated output file directory structure has to match the structure
+#    of the source tree, starting at dawn_gen_root (gen/ or
+#    gen/third_party/dawn depending on where we are).
+#  2. include and dawn_gen_root/include has to match the structure of
+#    the source tree too.
+#  3. Dawn files must use include relative to src/ or include such as
+#    "dawn/dawn.h" or "dawn/native/backend/BackendStuff.h".
+#
+# The allowed list below ensure 1). Include directory rules for Dawn ensure 3)
+# and 2) is something we need to enforce in code review.
+#
+# However GN's toolchains automatically add some include directories for us
+# which breaks 3) slightly. To avoid stale headers in for example
+# dawn_gen_root/src/dawn/dawn/ to be picked up (instead of
+# dawn_gen_root/src/dawn), we have a special action that removes files in
+# disallowed gen directories.
+
+dawn_allowed_gen_output_dirs = [
+  "src/dawn/",
+  "src/dawn/common/",
+  "src/dawn/native/",
+  "src/dawn/native/opengl/",
+  "src/dawn/wire/client/",
+  "src/dawn/wire/server/",
+  "src/dawn/wire/",
+  "include/dawn/",
+  "emscripten-bits/",
+  "webgpu-headers/",
+]
+
+# Template to help invoking Dawn code generators based on generator_lib
+#
+#   dawn_generator("my_target_gen") {
+#     # The script and generator specific arguments
+#     script = [ "my_awesome_generator.py" ]
+#     args = [
+#       "--be-awesome",
+#       "yes"
+#     ]
+#
+#     # The list of expected outputs, generation fails if there's a mismatch
+#     outputs = [
+#       "MyAwesomeTarget.cpp",
+#       "MyAwesomeTarget.h",
+#     ]
+#   }
+#
+# Using the generated files is done like so:
+#
+#   shared_library("my_target") {
+#     deps = [ ":my_target_gen "]
+#     sources = get_target_outputs(":my_target_gen")
+#   }
+#
+template("dawn_generator") {
+  generator_lib_action(target_name) {
+    forward_variables_from(invoker, "*")
+
+    # Set arguments required to find the python libraries for the generator
+    generator_lib_dir = "${dawn_root}/generator"
+    jinja2_path = dawn_jinja2_dir
+
+    # Force Dawn's autogenerated file structure to mirror exactly the source
+    # tree but start at ${dawn_gen_root} instead of ${dawn_root}
+    allowed_output_dirs = dawn_allowed_gen_output_dirs
+    custom_gen_dir = dawn_gen_root
+
+    # Make sure that we delete stale autogenerated file in directories that are
+    # no longer used by code generation to avoid include conflicts.
+    deps = [ "${dawn_root}/generator:remove_stale_autogen_files" ]
+  }
+}
+
+# Helper generator for calling the generator from dawn.json
+#
+#   dawn_json_generator("my_target_gen") {
+#     # Which generator target to output
+#     target = "my_target"
+#
+#     # Also supports `outputs` and `custom_gen_dir` like dawn_generator.
+#   }
+template("dawn_json_generator") {
+  dawn_generator(target_name) {
+    script = "${dawn_root}/generator/dawn_json_generator.py"
+
+    # The base arguments for the generator: from this dawn.json, generate this
+    # target using templates in this directory.
+    args = [
+      "--dawn-json",
+      rebase_path("${dawn_root}/dawn.json", root_build_dir),
+      "--wire-json",
+      rebase_path("${dawn_root}/dawn_wire.json", root_build_dir),
+      "--targets",
+      invoker.target,
+    ]
+
+    forward_variables_from(invoker, "*", [ "target" ])
+  }
+}
diff --git a/generator/dawn_json_generator.py b/generator/dawn_json_generator.py
new file mode 100644
index 0000000..29c1be9
--- /dev/null
+++ b/generator/dawn_json_generator.py
@@ -0,0 +1,1031 @@
+#!/usr/bin/env python3
+# Copyright 2017 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, os, sys
+from collections import namedtuple
+
+from generator_lib import Generator, run_generator, FileRender
+
+############################################################
+# OBJECT MODEL
+############################################################
+
+
+class Metadata:
+    def __init__(self, metadata):
+        self.api = metadata['api']
+        self.namespace = metadata['namespace']
+        self.c_prefix = metadata.get('c_prefix', self.namespace.upper())
+        self.proc_table_prefix = metadata['proc_table_prefix']
+        self.impl_dir = metadata.get('impl_dir', '')
+        self.native_namespace = metadata['native_namespace']
+        self.copyright_year = metadata.get('copyright_year', None)
+
+
+class Name:
+    def __init__(self, name, native=False):
+        self.native = native
+        self.name = name
+        if native:
+            self.chunks = [name]
+        else:
+            self.chunks = name.split(' ')
+
+    def get(self):
+        return self.name
+
+    def CamelChunk(self, chunk):
+        return chunk[0].upper() + chunk[1:]
+
+    def canonical_case(self):
+        return (' '.join(self.chunks)).lower()
+
+    def concatcase(self):
+        return ''.join(self.chunks)
+
+    def camelCase(self):
+        return self.chunks[0] + ''.join(
+            [self.CamelChunk(chunk) for chunk in self.chunks[1:]])
+
+    def CamelCase(self):
+        return ''.join([self.CamelChunk(chunk) for chunk in self.chunks])
+
+    def SNAKE_CASE(self):
+        return '_'.join([chunk.upper() for chunk in self.chunks])
+
+    def snake_case(self):
+        return '_'.join(self.chunks)
+
+    def namespace_case(self):
+        return '::'.join(self.chunks)
+
+    def Dirs(self):
+        return '/'.join(self.chunks)
+
+    def js_enum_case(self):
+        result = self.chunks[0].lower()
+        for chunk in self.chunks[1:]:
+            if not result[-1].isdigit():
+                result += '-'
+            result += chunk.lower()
+        return result
+
+
+def concat_names(*names):
+    return ' '.join([name.canonical_case() for name in names])
+
+
+class Type:
+    def __init__(self, name, json_data, native=False):
+        self.json_data = json_data
+        self.dict_name = name
+        self.name = Name(name, native=native)
+        self.category = json_data['category']
+        self.is_wire_transparent = False
+
+
+EnumValue = namedtuple('EnumValue', ['name', 'value', 'valid', 'json_data'])
+
+
+class EnumType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        Type.__init__(self, name, json_data)
+
+        self.values = []
+        self.contiguousFromZero = True
+        lastValue = -1
+        for m in self.json_data['values']:
+            if not is_enabled(m):
+                continue
+            value = m['value']
+            if value != lastValue + 1:
+                self.contiguousFromZero = False
+            lastValue = value
+            self.values.append(
+                EnumValue(Name(m['name']), value, m.get('valid', True), m))
+
+        # Assert that all values are unique in enums
+        all_values = set()
+        for value in self.values:
+            if value.value in all_values:
+                raise Exception("Duplicate value {} in enum {}".format(
+                    value.value, name))
+            all_values.add(value.value)
+        self.is_wire_transparent = True
+
+
+BitmaskValue = namedtuple('BitmaskValue', ['name', 'value', 'json_data'])
+
+
+class BitmaskType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        Type.__init__(self, name, json_data)
+        self.values = [
+            BitmaskValue(Name(m['name']), m['value'], m)
+            for m in self.json_data['values'] if is_enabled(m)
+        ]
+        self.full_mask = 0
+        for value in self.values:
+            self.full_mask = self.full_mask | value.value
+        self.is_wire_transparent = True
+
+
+class FunctionPointerType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        Type.__init__(self, name, json_data)
+        self.return_type = None
+        self.arguments = []
+
+
+class TypedefType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        Type.__init__(self, name, json_data)
+        self.type = None
+
+
+class NativeType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        Type.__init__(self, name, json_data, native=True)
+        self.is_wire_transparent = True
+
+
+# Methods and structures are both "records", so record members correspond to
+# method arguments or structure members.
+class RecordMember:
+    def __init__(self,
+                 name,
+                 typ,
+                 annotation,
+                 json_data,
+                 optional=False,
+                 is_return_value=False,
+                 default_value=None,
+                 skip_serialize=False):
+        self.name = name
+        self.type = typ
+        self.annotation = annotation
+        self.json_data = json_data
+        self.length = None
+        self.optional = optional
+        self.is_return_value = is_return_value
+        self.handle_type = None
+        self.default_value = default_value
+        self.skip_serialize = skip_serialize
+
+    def set_handle_type(self, handle_type):
+        assert self.type.dict_name == "ObjectHandle"
+        self.handle_type = handle_type
+
+
+Method = namedtuple('Method',
+                    ['name', 'return_type', 'arguments', 'json_data'])
+
+
+class ObjectType(Type):
+    def __init__(self, is_enabled, name, json_data):
+        json_data_override = {'methods': []}
+        if 'methods' in json_data:
+            json_data_override['methods'] = [
+                m for m in json_data['methods'] if is_enabled(m)
+            ]
+        Type.__init__(self, name, dict(json_data, **json_data_override))
+
+
+class Record:
+    def __init__(self, name):
+        self.name = Name(name)
+        self.members = []
+        self.may_have_dawn_object = False
+
+    def update_metadata(self):
+        def may_have_dawn_object(member):
+            if isinstance(member.type, ObjectType):
+                return True
+            elif isinstance(member.type, StructureType):
+                return member.type.may_have_dawn_object
+            else:
+                return False
+
+        self.may_have_dawn_object = any(
+            may_have_dawn_object(member) for member in self.members)
+
+        # Set may_have_dawn_object to true if the type is chained or
+        # extensible. Chained structs may contain a Dawn object.
+        if isinstance(self, StructureType):
+            self.may_have_dawn_object = (self.may_have_dawn_object
+                                         or self.chained or self.extensible)
+
+
+class StructureType(Record, Type):
+    def __init__(self, is_enabled, name, json_data):
+        Record.__init__(self, name)
+        json_data_override = {}
+        if 'members' in json_data:
+            json_data_override['members'] = [
+                m for m in json_data['members'] if is_enabled(m)
+            ]
+        Type.__init__(self, name, dict(json_data, **json_data_override))
+        self.chained = json_data.get("chained", None)
+        self.extensible = json_data.get("extensible", None)
+        if self.chained:
+            assert (self.chained == "in" or self.chained == "out")
+        if self.extensible:
+            assert (self.extensible == "in" or self.extensible == "out")
+        # Chained structs inherit from wgpu::ChainedStruct, which has
+        # nextInChain, so setting both extensible and chained would result in
+        # two nextInChain members.
+        assert not (self.extensible and self.chained)
+
+    def update_metadata(self):
+        Record.update_metadata(self)
+
+        if self.may_have_dawn_object:
+            self.is_wire_transparent = False
+            return
+
+        assert not (self.chained or self.extensible)
+
+        def get_is_wire_transparent(member):
+            return member.type.is_wire_transparent and member.annotation == 'value'
+
+        self.is_wire_transparent = all(
+            get_is_wire_transparent(m) for m in self.members)
+
+    @property
+    def output(self):
+        return self.chained == "out" or self.extensible == "out"
+
+
+class ConstantDefinition():
+    def __init__(self, is_enabled, name, json_data):
+        self.type = None
+        self.value = json_data['value']
+        self.json_data = json_data
+        self.name = Name(name)
+
+
+class FunctionDeclaration():
+    def __init__(self, is_enabled, name, json_data):
+        self.return_type = None
+        self.arguments = []
+        self.json_data = json_data
+        self.name = Name(name)
+
+
+class Command(Record):
+    def __init__(self, name, members=None):
+        Record.__init__(self, name)
+        self.members = members or []
+        self.derived_object = None
+        self.derived_method = None
+
+
+def linked_record_members(json_data, types):
+    members = []
+    members_by_name = {}
+    for m in json_data:
+        member = RecordMember(Name(m['name']),
+                              types[m['type']],
+                              m.get('annotation', 'value'),
+                              m,
+                              optional=m.get('optional', False),
+                              is_return_value=m.get('is_return_value', False),
+                              default_value=m.get('default', None),
+                              skip_serialize=m.get('skip_serialize', False))
+        handle_type = m.get('handle_type')
+        if handle_type:
+            member.set_handle_type(types[handle_type])
+        members.append(member)
+        members_by_name[member.name.canonical_case()] = member
+
+    for (member, m) in zip(members, json_data):
+        if member.annotation != 'value':
+            if not 'length' in m:
+                if member.type.category != 'object':
+                    member.length = "constant"
+                    member.constant_length = 1
+                else:
+                    assert False
+            elif m['length'] == 'strlen':
+                member.length = 'strlen'
+            elif isinstance(m['length'], int):
+                assert m['length'] > 0
+                member.length = "constant"
+                member.constant_length = m['length']
+            else:
+                member.length = members_by_name[m['length']]
+
+    return members
+
+
+############################################################
+# PARSE
+############################################################
+
+
+def link_object(obj, types):
+    def make_method(json_data):
+        arguments = linked_record_members(json_data.get('args', []), types)
+        return Method(Name(json_data['name']),
+                      types[json_data.get('returns',
+                                          'void')], arguments, json_data)
+
+    obj.methods = [make_method(m) for m in obj.json_data.get('methods', [])]
+    obj.methods.sort(key=lambda method: method.name.canonical_case())
+
+
+def link_structure(struct, types):
+    struct.members = linked_record_members(struct.json_data['members'], types)
+
+
+def link_function_pointer(function_pointer, types):
+    link_function(function_pointer, types)
+
+
+def link_typedef(typedef, types):
+    typedef.type = types[typedef.json_data['type']]
+
+
+def link_constant(constant, types):
+    constant.type = types[constant.json_data['type']]
+    assert constant.type.name.native
+
+
+def link_function(function, types):
+    function.return_type = types[function.json_data.get('returns', 'void')]
+    function.arguments = linked_record_members(function.json_data['args'],
+                                               types)
+
+
+# Sort structures so that if struct A has struct B as a member, then B is
+# listed before A.
+#
+# This is a form of topological sort where we try to keep the order reasonably
+# similar to the original order (though the sort isn't technically stable).
+#
+# It works by computing for each struct type what is the depth of its DAG of
+# dependents, then re-sorting based on that depth using Python's stable sort.
+# This makes a toposort because if A depends on B then its depth will be bigger
+# than B's. It is also nice because all nodes with the same depth are kept in
+# the input order.
+def topo_sort_structure(structs):
+    for struct in structs:
+        struct.visited = False
+        struct.subdag_depth = 0
+
+    def compute_depth(struct):
+        if struct.visited:
+            return struct.subdag_depth
+
+        max_dependent_depth = 0
+        for member in struct.members:
+            if member.type.category == 'structure':
+                max_dependent_depth = max(max_dependent_depth,
+                                          compute_depth(member.type) + 1)
+
+        struct.subdag_depth = max_dependent_depth
+        struct.visited = True
+        return struct.subdag_depth
+
+    for struct in structs:
+        compute_depth(struct)
+
+    result = sorted(structs, key=lambda struct: struct.subdag_depth)
+
+    for struct in structs:
+        del struct.visited
+        del struct.subdag_depth
+
+    return result
+
+
+def parse_json(json, enabled_tags, disabled_tags=None):
+    is_enabled = lambda json_data: item_is_enabled(
+        enabled_tags, json_data) and not item_is_disabled(
+            disabled_tags, json_data)
+    category_to_parser = {
+        'bitmask': BitmaskType,
+        'enum': EnumType,
+        'native': NativeType,
+        'function pointer': FunctionPointerType,
+        'object': ObjectType,
+        'structure': StructureType,
+        'typedef': TypedefType,
+        'constant': ConstantDefinition,
+        'function': FunctionDeclaration
+    }
+
+    types = {}
+
+    by_category = {}
+    for name in category_to_parser.keys():
+        by_category[name] = []
+
+    for (name, json_data) in json.items():
+        if name[0] == '_' or not is_enabled(json_data):
+            continue
+        category = json_data['category']
+        parsed = category_to_parser[category](is_enabled, name, json_data)
+        by_category[category].append(parsed)
+        types[name] = parsed
+
+    for obj in by_category['object']:
+        link_object(obj, types)
+
+    for struct in by_category['structure']:
+        link_structure(struct, types)
+
+    for function_pointer in by_category['function pointer']:
+        link_function_pointer(function_pointer, types)
+
+    for typedef in by_category['typedef']:
+        link_typedef(typedef, types)
+
+    for constant in by_category['constant']:
+        link_constant(constant, types)
+
+    for function in by_category['function']:
+        link_function(function, types)
+
+    for category in by_category.keys():
+        by_category[category] = sorted(
+            by_category[category], key=lambda typ: typ.name.canonical_case())
+
+    by_category['structure'] = topo_sort_structure(by_category['structure'])
+
+    for struct in by_category['structure']:
+        struct.update_metadata()
+
+    api_params = {
+        'types': types,
+        'by_category': by_category,
+        'enabled_tags': enabled_tags,
+        'disabled_tags': disabled_tags,
+    }
+    return {
+        'metadata': Metadata(json['_metadata']),
+        'types': types,
+        'by_category': by_category,
+        'enabled_tags': enabled_tags,
+        'disabled_tags': disabled_tags,
+        'c_methods': lambda typ: c_methods(api_params, typ),
+        'c_methods_sorted_by_name': get_c_methods_sorted_by_name(api_params),
+    }
+
+
+############################################################
+# WIRE STUFF
+############################################################
+
+
+# Create wire commands from api methods
+def compute_wire_params(api_params, wire_json):
+    wire_params = api_params.copy()
+    types = wire_params['types']
+
+    commands = []
+    return_commands = []
+
+    wire_json['special items']['client_handwritten_commands'] += wire_json[
+        'special items']['client_side_commands']
+
+    # Generate commands from object methods
+    for api_object in wire_params['by_category']['object']:
+        for method in api_object.methods:
+            command_name = concat_names(api_object.name, method.name)
+            command_suffix = Name(command_name).CamelCase()
+
+            # Only object return values or void are supported.
+            # Other methods must be handwritten.
+            is_object = method.return_type.category == 'object'
+            is_void = method.return_type.name.canonical_case() == 'void'
+            if not (is_object or is_void):
+                assert command_suffix in (
+                    wire_json['special items']['client_handwritten_commands'])
+                continue
+
+            if command_suffix in (
+                    wire_json['special items']['client_side_commands']):
+                continue
+
+            # Create object method commands by prepending "self"
+            members = [
+                RecordMember(Name('self'), types[api_object.dict_name],
+                             'value', {})
+            ]
+            members += method.arguments
+
+            # Client->Server commands that return an object return the
+            # result object handle
+            if method.return_type.category == 'object':
+                result = RecordMember(Name('result'),
+                                      types['ObjectHandle'],
+                                      'value', {},
+                                      is_return_value=True)
+                result.set_handle_type(method.return_type)
+                members.append(result)
+
+            command = Command(command_name, members)
+            command.derived_object = api_object
+            command.derived_method = method
+            commands.append(command)
+
+    for (name, json_data) in wire_json['commands'].items():
+        commands.append(Command(name, linked_record_members(json_data, types)))
+
+    for (name, json_data) in wire_json['return commands'].items():
+        return_commands.append(
+            Command(name, linked_record_members(json_data, types)))
+
+    wire_params['cmd_records'] = {
+        'command': commands,
+        'return command': return_commands
+    }
+
+    for commands in wire_params['cmd_records'].values():
+        for command in commands:
+            command.update_metadata()
+        commands.sort(key=lambda c: c.name.canonical_case())
+
+    wire_params.update(wire_json.get('special items', {}))
+
+    return wire_params
+
+
+#############################################################
+# Generator
+#############################################################
+
+
+def as_varName(*names):
+    return names[0].camelCase() + ''.join(
+        [name.CamelCase() for name in names[1:]])
+
+
+def as_cType(c_prefix, name):
+    if name.native:
+        return name.concatcase()
+    else:
+        return c_prefix + name.CamelCase()
+
+
+def as_cppType(name):
+    if name.native:
+        return name.concatcase()
+    else:
+        return name.CamelCase()
+
+
+def as_jsEnumValue(value):
+    if 'jsrepr' in value.json_data: return value.json_data['jsrepr']
+    return "'" + value.name.js_enum_case() + "'"
+
+
+def convert_cType_to_cppType(typ, annotation, arg, indent=0):
+    if typ.category == 'native':
+        return arg
+    if annotation == 'value':
+        if typ.category == 'object':
+            return '{}::Acquire({})'.format(as_cppType(typ.name), arg)
+        elif typ.category == 'structure':
+            converted_members = [
+                convert_cType_to_cppType(
+                    member.type, member.annotation,
+                    '{}.{}'.format(arg, as_varName(member.name)), indent + 1)
+                for member in typ.members
+            ]
+
+            converted_members = [(' ' * 4) + m for m in converted_members]
+            converted_members = ',\n'.join(converted_members)
+
+            return as_cppType(typ.name) + ' {\n' + converted_members + '\n}'
+        elif typ.category == 'function pointer':
+            return 'reinterpret_cast<{}>({})'.format(as_cppType(typ.name), arg)
+        else:
+            return 'static_cast<{}>({})'.format(as_cppType(typ.name), arg)
+    else:
+        return 'reinterpret_cast<{} {}>({})'.format(as_cppType(typ.name),
+                                                    annotation, arg)
+
+
+def decorate(name, typ, arg):
+    if arg.annotation == 'value':
+        return typ + ' ' + name
+    elif arg.annotation == '*':
+        return typ + ' * ' + name
+    elif arg.annotation == 'const*':
+        return typ + ' const * ' + name
+    elif arg.annotation == 'const*const*':
+        return 'const ' + typ + '* const * ' + name
+    else:
+        assert False
+
+
+def annotated(typ, arg):
+    name = as_varName(arg.name)
+    return decorate(name, typ, arg)
+
+
+def item_is_enabled(enabled_tags, json_data):
+    tags = json_data.get('tags')
+    if tags is None: return True
+    return any(tag in enabled_tags for tag in tags)
+
+
+def item_is_disabled(disabled_tags, json_data):
+    if disabled_tags is None: return False
+    tags = json_data.get('tags')
+    if tags is None: return False
+
+    return any(tag in disabled_tags for tag in tags)
+
+
+def as_cppEnum(value_name):
+    assert not value_name.native
+    if value_name.concatcase()[0].isdigit():
+        return "e" + value_name.CamelCase()
+    return value_name.CamelCase()
+
+
+def as_MethodSuffix(type_name, method_name):
+    assert not type_name.native and not method_name.native
+    return type_name.CamelCase() + method_name.CamelCase()
+
+
+def as_frontendType(metadata, typ):
+    if typ.category == 'object':
+        return typ.name.CamelCase() + 'Base*'
+    elif typ.category in ['bitmask', 'enum']:
+        return metadata.namespace + '::' + typ.name.CamelCase()
+    elif typ.category == 'structure':
+        return as_cppType(typ.name)
+    else:
+        return as_cType(metadata.c_prefix, typ.name)
+
+
+def as_wireType(metadata, typ):
+    if typ.category == 'object':
+        return typ.name.CamelCase() + '*'
+    elif typ.category in ['bitmask', 'enum', 'structure']:
+        return metadata.c_prefix + typ.name.CamelCase()
+    else:
+        return as_cppType(typ.name)
+
+
+def as_formatType(typ):
+    # Unsigned integral types
+    if typ.json_data['type'] in ['bool', 'uint32_t', 'uint64_t']:
+        return 'u'
+
+    # Defaults everything else to strings.
+    return 's'
+
+
+def c_methods(params, typ):
+    return typ.methods + [
+        x for x in [
+            Method(Name('reference'), params['types']['void'], [],
+                   {'tags': ['dawn', 'emscripten']}),
+            Method(Name('release'), params['types']['void'], [],
+                   {'tags': ['dawn', 'emscripten']}),
+        ] if item_is_enabled(params['enabled_tags'], x.json_data)
+        and not item_is_disabled(params['disabled_tags'], x.json_data)
+    ]
+
+
+def get_c_methods_sorted_by_name(api_params):
+    unsorted = [(as_MethodSuffix(typ.name, method.name), typ, method) \
+            for typ in api_params['by_category']['object'] \
+            for method in c_methods(api_params, typ) ]
+    return [(typ, method) for (_, typ, method) in sorted(unsorted)]
+
+
+def has_callback_arguments(method):
+    return any(arg.type.category == 'function pointer' for arg in method.arguments)
+
+
+def make_base_render_params(metadata):
+    c_prefix = metadata.c_prefix
+
+    def as_cTypeEnumSpecialCase(typ):
+        if typ.category == 'bitmask':
+            return as_cType(c_prefix, typ.name) + 'Flags'
+        return as_cType(c_prefix, typ.name)
+
+    def as_cEnum(type_name, value_name):
+        assert not type_name.native and not value_name.native
+        return c_prefix + type_name.CamelCase() + '_' + value_name.CamelCase()
+
+    def as_cMethod(type_name, method_name):
+        c_method = c_prefix.lower()
+        if type_name != None:
+            assert not type_name.native
+            c_method += type_name.CamelCase()
+        assert not method_name.native
+        c_method += method_name.CamelCase()
+        return c_method
+
+    def as_cProc(type_name, method_name):
+        c_proc = c_prefix + 'Proc'
+        if type_name != None:
+            assert not type_name.native
+            c_proc += type_name.CamelCase()
+        assert not method_name.native
+        c_proc += method_name.CamelCase()
+        return c_proc
+
+    return {
+            'Name': lambda name: Name(name),
+            'as_annotated_cType': \
+                lambda arg: annotated(as_cTypeEnumSpecialCase(arg.type), arg),
+            'as_annotated_cppType': \
+                lambda arg: annotated(as_cppType(arg.type.name), arg),
+            'as_cEnum': as_cEnum,
+            'as_cppEnum': as_cppEnum,
+            'as_cMethod': as_cMethod,
+            'as_MethodSuffix': as_MethodSuffix,
+            'as_cProc': as_cProc,
+            'as_cType': lambda name: as_cType(c_prefix, name),
+            'as_cppType': as_cppType,
+            'as_jsEnumValue': as_jsEnumValue,
+            'convert_cType_to_cppType': convert_cType_to_cppType,
+            'as_varName': as_varName,
+            'decorate': decorate,
+            'as_formatType': as_formatType
+        }
+
+
+class MultiGeneratorFromDawnJSON(Generator):
+    def get_description(self):
+        return 'Generates code for various target from Dawn.json.'
+
+    def add_commandline_arguments(self, parser):
+        allowed_targets = [
+            'dawn_headers', 'cpp_headers', 'cpp', 'proc', 'mock_api', 'wire',
+            'native_utils'
+        ]
+
+        parser.add_argument('--dawn-json',
+                            required=True,
+                            type=str,
+                            help='The DAWN JSON definition to use.')
+        parser.add_argument('--wire-json',
+                            default=None,
+                            type=str,
+                            help='The DAWN WIRE JSON definition to use.')
+        parser.add_argument(
+            '--targets',
+            required=True,
+            type=str,
+            help=
+            'Comma-separated subset of targets to output. Available targets: '
+            + ', '.join(allowed_targets))
+    def get_file_renders(self, args):
+        with open(args.dawn_json) as f:
+            loaded_json = json.loads(f.read())
+
+        targets = args.targets.split(',')
+
+        wire_json = None
+        if args.wire_json:
+            with open(args.wire_json) as f:
+                wire_json = json.loads(f.read())
+
+        renders = []
+
+        params_dawn = parse_json(loaded_json,
+                                 enabled_tags=['dawn', 'native', 'deprecated'])
+        metadata = params_dawn['metadata']
+        RENDER_PARAMS_BASE = make_base_render_params(metadata)
+
+        api = metadata.api.lower()
+        prefix = metadata.proc_table_prefix.lower()
+        if 'headers' in targets:
+            renders.append(
+                FileRender('api.h', 'include/dawn/' + api + '.h',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+            renders.append(
+                FileRender('dawn_proc_table.h',
+                           'include/dawn/' + prefix + '_proc_table.h',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+        if 'cpp_headers' in targets:
+            renders.append(
+                FileRender('api_cpp.h', 'include/dawn/' + api + '_cpp.h',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+            renders.append(
+                FileRender('api_cpp_print.h',
+                           'include/dawn/' + api + '_cpp_print.h',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+        if 'proc' in targets:
+            renders.append(
+                FileRender('dawn_proc.c', 'src/dawn/' + prefix + '_proc.c',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+            renders.append(
+                FileRender('dawn_thread_dispatch_proc.cpp',
+                           'src/dawn/' + prefix + '_thread_dispatch_proc.cpp',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+        if 'webgpu_dawn_native_proc' in targets:
+            renders.append(
+                FileRender('dawn/native/api_dawn_native_proc.cpp',
+                           'src/dawn/native/webgpu_dawn_native_proc.cpp',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+        if 'cpp' in targets:
+            renders.append(
+                FileRender('api_cpp.cpp', 'src/dawn/' + api + '_cpp.cpp',
+                           [RENDER_PARAMS_BASE, params_dawn]))
+
+        if 'webgpu_headers' in targets:
+            params_upstream = parse_json(loaded_json,
+                                         enabled_tags=['upstream', 'native'],
+                                         disabled_tags=['dawn'])
+            renders.append(
+                FileRender('api.h', 'webgpu-headers/' + api + '.h',
+                           [RENDER_PARAMS_BASE, params_upstream]))
+
+        if 'emscripten_bits' in targets:
+            params_emscripten = parse_json(loaded_json,
+                                           enabled_tags=['emscripten'])
+            renders.append(
+                FileRender('api.h', 'emscripten-bits/' + api + '.h',
+                           [RENDER_PARAMS_BASE, params_emscripten]))
+            renders.append(
+                FileRender('api_cpp.h', 'emscripten-bits/' + api + '_cpp.h',
+                           [RENDER_PARAMS_BASE, params_emscripten]))
+            renders.append(
+                FileRender('api_cpp.cpp', 'emscripten-bits/' + api + '_cpp.cpp',
+                           [RENDER_PARAMS_BASE, params_emscripten]))
+            renders.append(
+                FileRender('api_struct_info.json',
+                           'emscripten-bits/' + api + '_struct_info.json',
+                           [RENDER_PARAMS_BASE, params_emscripten]))
+            renders.append(
+                FileRender('library_api_enum_tables.js',
+                           'emscripten-bits/library_' + api + '_enum_tables.js',
+                           [RENDER_PARAMS_BASE, params_emscripten]))
+
+        if 'mock_api' in targets:
+            mock_params = [
+                RENDER_PARAMS_BASE, params_dawn, {
+                    'has_callback_arguments': has_callback_arguments
+                }
+            ]
+            renders.append(
+                FileRender('mock_api.h', 'src/dawn/mock_' + api + '.h',
+                           mock_params))
+            renders.append(
+                FileRender('mock_api.cpp', 'src/dawn/mock_' + api + '.cpp',
+                           mock_params))
+
+        if 'native_utils' in targets:
+            frontend_params = [
+                RENDER_PARAMS_BASE,
+                params_dawn,
+                {
+                    # TODO: as_frontendType and co. take a Type, not a Name :(
+                    'as_frontendType': lambda typ: as_frontendType(metadata, typ),
+                    'as_annotated_frontendType': \
+                        lambda arg: annotated(as_frontendType(metadata, arg.type), arg),
+                }
+            ]
+
+            impl_dir = metadata.impl_dir + '/' if metadata.impl_dir else ''
+            native_dir = impl_dir + Name(metadata.native_namespace).Dirs()
+            namespace = metadata.namespace
+            renders.append(
+                FileRender('dawn/native/ValidationUtils.h',
+                           'src/' + native_dir + '/ValidationUtils_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/ValidationUtils.cpp',
+                           'src/' + native_dir + '/ValidationUtils_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/dawn_platform.h',
+                           'src/' + native_dir + '/' + prefix + '_platform_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/api_structs.h',
+                           'src/' + native_dir + '/' + namespace + '_structs_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/api_structs.cpp',
+                           'src/' + native_dir + '/' + namespace + '_structs_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/ProcTable.cpp',
+                           'src/' + native_dir + '/ProcTable.cpp', frontend_params))
+            renders.append(
+                FileRender('dawn/native/ChainUtils.h',
+                           'src/' + native_dir + '/ChainUtils_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/ChainUtils.cpp',
+                           'src/' + native_dir + '/ChainUtils_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/api_absl_format.h',
+                           'src/' + native_dir + '/' + api + '_absl_format_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/api_absl_format.cpp',
+                           'src/' + native_dir + '/' + api + '_absl_format_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/ObjectType.h',
+                           'src/' + native_dir + '/ObjectType_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn/native/ObjectType.cpp',
+                           'src/' + native_dir + '/ObjectType_autogen.cpp',
+                           frontend_params))
+
+        if 'wire' in targets:
+            params_dawn_wire = parse_json(loaded_json,
+                                          enabled_tags=['dawn', 'deprecated'],
+                                          disabled_tags=['native'])
+            additional_params = compute_wire_params(params_dawn_wire,
+                                                    wire_json)
+
+            wire_params = [
+                RENDER_PARAMS_BASE, params_dawn_wire, {
+                    'as_wireType': lambda type : as_wireType(metadata, type),
+                    'as_annotated_wireType': \
+                        lambda arg: annotated(as_wireType(metadata, arg.type), arg),
+                }, additional_params
+            ]
+            renders.append(
+                FileRender('dawn/wire/ObjectType.h',
+                           'src/dawn/wire/ObjectType_autogen.h', wire_params))
+            renders.append(
+                FileRender('dawn/wire/WireCmd.h',
+                           'src/dawn/wire/WireCmd_autogen.h', wire_params))
+            renders.append(
+                FileRender('dawn/wire/WireCmd.cpp',
+                           'src/dawn/wire/WireCmd_autogen.cpp', wire_params))
+            renders.append(
+                FileRender('dawn/wire/client/ApiObjects.h',
+                           'src/dawn/wire/client/ApiObjects_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn/wire/client/ApiProcs.cpp',
+                           'src/dawn/wire/client/ApiProcs_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender('dawn/wire/client/ClientBase.h',
+                           'src/dawn/wire/client/ClientBase_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn/wire/client/ClientHandlers.cpp',
+                           'src/dawn/wire/client/ClientHandlers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender(
+                    'dawn/wire/client/ClientPrototypes.inc',
+                    'src/dawn/wire/client/ClientPrototypes_autogen.inc',
+                    wire_params))
+            renders.append(
+                FileRender('dawn/wire/server/ServerBase.h',
+                           'src/dawn/wire/server/ServerBase_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn/wire/server/ServerDoers.cpp',
+                           'src/dawn/wire/server/ServerDoers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender('dawn/wire/server/ServerHandlers.cpp',
+                           'src/dawn/wire/server/ServerHandlers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender(
+                    'dawn/wire/server/ServerPrototypes.inc',
+                    'src/dawn/wire/server/ServerPrototypes_autogen.inc',
+                    wire_params))
+
+        return renders
+
+    def get_dependencies(self, args):
+        deps = [os.path.abspath(args.dawn_json)]
+        if args.wire_json != None:
+            deps += [os.path.abspath(args.wire_json)]
+        return deps
+
+
+if __name__ == '__main__':
+    sys.exit(run_generator(MultiGeneratorFromDawnJSON()))
diff --git a/generator/dawn_version_generator.py b/generator/dawn_version_generator.py
new file mode 100644
index 0000000..1907e88
--- /dev/null
+++ b/generator/dawn_version_generator.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, subprocess, sys
+
+from generator_lib import Generator, run_generator, FileRender
+
+
+def get_git():
+    return 'git.bat' if sys.platform == 'win32' else 'git'
+
+
+def get_gitHash(dawnDir):
+    result = subprocess.run([get_git(), 'rev-parse', 'HEAD'],
+                            stdout=subprocess.PIPE,
+                            cwd=dawnDir)
+    if result.returncode == 0:
+        return result.stdout.decode('utf-8').strip()
+    # No hash was available (possibly) because the directory was not a git checkout. Dawn should
+    # explicitly handle its absenece and disable features relying on the hash, i.e. caching.
+    return ''
+
+
+def get_gitHead(dawnDir):
+    return os.path.join(dawnDir, '.git', 'HEAD')
+
+
+def gitExists(dawnDir):
+    return os.path.exists(get_gitHead(dawnDir))
+
+
+def unpackGitRef(packed, resolved):
+    with open(packed) as fin:
+        refs = fin.read().strip().split('\n')
+
+    # Strip comments
+    refs = [ref.split(' ') for ref in refs if ref.strip()[0] != '#']
+
+    # Parse results which are in the format [<gitHash>, <refFile>] from previous step.
+    refs = [gitHash for (gitHash, refFile) in refs if refFile == resolved]
+    if len(refs) == 1:
+        with open(resolved, 'w') as fout:
+            fout.write(refs[0] + '\n')
+        return True
+    return False
+
+
+def get_gitResolvedHead(dawnDir):
+    result = subprocess.run(
+        [get_git(), 'rev-parse', '--symbolic-full-name', 'HEAD'],
+        stdout=subprocess.PIPE,
+        cwd=dawnDir)
+    if result.returncode != 0:
+        raise Exception('Failed to execute git rev-parse to resolve git head.')
+
+    resolved = os.path.join(dawnDir, '.git',
+                            result.stdout.decode('utf-8').strip())
+
+    # Check a packed-refs file exists. If so, we need to potentially unpack and include it as a dep.
+    packed = os.path.join(dawnDir, '.git', 'packed-refs')
+    if os.path.exists(packed) and unpackGitRef(packed, resolved):
+        return [packed, resolved]
+
+    if not os.path.exists(resolved):
+        raise Exception('Unable to resolve git HEAD hash file:', path)
+    return [resolved]
+
+
+def compute_params(args):
+    return {
+        'get_gitHash': lambda: get_gitHash(os.path.abspath(args.dawn_dir)),
+    }
+
+
+class DawnVersionGenerator(Generator):
+    def get_description(self):
+        return 'Generates version dependent Dawn code. Currently regenerated dependent on git hash.'
+
+    def add_commandline_arguments(self, parser):
+        parser.add_argument('--dawn-dir',
+                            required=True,
+                            type=str,
+                            help='The Dawn root directory path to use')
+
+    def get_dependencies(self, args):
+        dawnDir = os.path.abspath(args.dawn_dir)
+        if gitExists(dawnDir):
+            return [get_gitHead(dawnDir)] + get_gitResolvedHead(dawnDir)
+        return []
+
+    def get_file_renders(self, args):
+        params = compute_params(args)
+
+        return [
+            FileRender('dawn/common/Version.h',
+                       'src/dawn/common/Version_autogen.h', [params]),
+        ]
+
+
+if __name__ == '__main__':
+    sys.exit(run_generator(DawnVersionGenerator()))
diff --git a/generator/extract_json.py b/generator/extract_json.py
new file mode 100644
index 0000000..67114bf
--- /dev/null
+++ b/generator/extract_json.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os, sys, json
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print("Usage: extract_json.py JSON DIR")
+        sys.exit(1)
+
+    with open(sys.argv[1]) as f:
+        files = json.loads(f.read())
+
+    output_dir = sys.argv[2]
+
+    for (name, content) in files.items():
+        output_file = output_dir + os.path.sep + name
+
+        # Create the output directory if needed.
+        directory = os.path.dirname(output_file)
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
+        # Skip writing to the file if it already has the correct content.
+        try:
+            with open(output_file, 'r') as outfile:
+                if outfile.read() == content:
+                    continue
+        except (OSError, EnvironmentError):
+            pass
+
+        with open(output_file, 'w') as outfile:
+            outfile.write(content)
diff --git a/generator/generator_lib.gni b/generator/generator_lib.gni
new file mode 100644
index 0000000..8b9e04c
--- /dev/null
+++ b/generator/generator_lib.gni
@@ -0,0 +1,164 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Template to help invoking code generators based on generator_lib.py
+# Internal use only, this should only be called from templates implementing
+# generator-specific actions.
+#
+# Variables:
+#   script: Path to generator script.
+#
+#   args: List of extra command-line arguments passed to the generator.
+#
+#   outputs: List of expected outputs, generation will fail if there is a
+#     mistmatch.
+#
+#   deps: additional deps for the code generation targets.
+#
+#   generator_lib_dir: directory where generator_lib.py is located.
+#
+#   custom_gen_dir: Optional custom target gen dir. Defaults to $target_gen_dir
+#     but allows output files to not depend on the location of the BUILD.gn
+#     that generates them.
+#
+#   template_dir: Optional template root directory. Defaults to
+#     "${generator_lib_dir}/templates".
+#
+#   jinja2_path: Optional Jinja2 installation path.
+#
+#   allowed_output_dirs: Optional list of directories that are the only
+#     directories in which files of `outputs` are allowed to be (and not
+#     in children directories). Generation will fail if an output isn't
+#     in a directory in the list.
+#
+#   root_dir: Optional root source dir for Python dependencies
+#     computation. Defaults to "${generator_lib_dir}/..". Any dependency
+#     outside of this directory is considered a system file and will be
+#     omitted.
+#
+template("generator_lib_action") {
+  _generator_args = []
+  if (defined(invoker.args)) {
+    _generator_args += invoker.args
+  }
+
+  assert(defined(invoker.generator_lib_dir),
+         "generator_lib_dir must be defined before calling this action!")
+
+  _template_dir = "${invoker.generator_lib_dir}/templates"
+  if (defined(invoker.template_dir)) {
+    _template_dir = invoker.template_dir
+  }
+  _generator_args += [
+    "--template-dir",
+    rebase_path(_template_dir),
+  ]
+
+  if (defined(invoker.root_dir)) {
+    _generator_args += [
+      "--root-dir",
+      rebase_path(_root_dir, root_build_dir),
+    ]
+  }
+
+  if (defined(invoker.jinja2_path)) {
+    _generator_args += [
+      "--jinja2-path",
+      rebase_path(invoker.jinja2_path),
+    ]
+  }
+
+  # Chooses either the default gen_dir or the custom one required by the
+  # invoker. This allows moving the definition of code generators in different
+  # BUILD.gn files without changing the location of generated file. Without
+  # this generated headers could cause issues when old headers aren't removed.
+  _gen_dir = target_gen_dir
+  if (defined(invoker.custom_gen_dir)) {
+    _gen_dir = invoker.custom_gen_dir
+  }
+
+  # For build parallelism GN wants to know the exact inputs and outputs of
+  # action targets like we use for our code generator. We avoid asking the
+  # generator about its inputs by using the "depfile" feature of GN/Ninja.
+  #
+  # A ninja limitation is that the depfile is a subset of Makefile that can
+  # contain a single target, so we output a single "JSON-tarball" instead.
+  _json_tarball = "${_gen_dir}/${target_name}.json_tarball"
+  _json_tarball_target = "${target_name}__json_tarball"
+  _json_tarball_depfile = "${_json_tarball}.d"
+
+  _generator_args += [
+    "--output-json-tarball",
+    rebase_path(_json_tarball, root_build_dir),
+    "--depfile",
+    rebase_path(_json_tarball_depfile, root_build_dir),
+  ]
+
+  # After the JSON tarball is created we need an action target to extract it
+  # with a list of its outputs. The invoker provided a list of expected
+  # outputs. To make sure the list is in sync between the generator and the
+  # build files, we write it to a file and ask the generator to assert it is
+  # correct.
+  _expected_outputs_file = "${_gen_dir}/${target_name}.expected_outputs"
+  write_file(_expected_outputs_file, invoker.outputs)
+
+  _generator_args += [
+    "--expected-outputs-file",
+    rebase_path(_expected_outputs_file, root_build_dir),
+  ]
+
+  # Check that all of the outputs are in a directory that's allowed. This is
+  # useful to keep the list of directories in sink with other parts of the
+  # build.
+  if (defined(invoker.allowed_output_dirs)) {
+    _allowed_output_dirs_file = "${_gen_dir}/${target_name}.allowed_output_dirs"
+    write_file(_allowed_output_dirs_file, invoker.allowed_output_dirs)
+
+    _generator_args += [
+      "--allowed-output-dirs-file",
+      rebase_path(_allowed_output_dirs_file, root_build_dir),
+    ]
+  }
+
+  # The code generator invocation that will write the JSON tarball, check the
+  # outputs are what's expected and write a depfile for Ninja.
+  action(_json_tarball_target) {
+    script = invoker.script
+    outputs = [ _json_tarball ]
+    depfile = _json_tarball_depfile
+    args = _generator_args
+    if (defined(invoker.deps)) {
+      deps = invoker.deps
+    }
+  }
+
+  # Extract the JSON tarball into the gen_dir
+  action(target_name) {
+    script = "${invoker.generator_lib_dir}/extract_json.py"
+    args = [
+      rebase_path(_json_tarball, root_build_dir),
+      rebase_path(_gen_dir, root_build_dir),
+    ]
+
+    deps = [ ":${_json_tarball_target}" ]
+    inputs = [ _json_tarball ]
+
+    # The expected output list is relative to the gen_dir but action
+    # target outputs are from the root dir so we need to rebase them.
+    outputs = []
+    foreach(source, invoker.outputs) {
+      outputs += [ "${_gen_dir}/${source}" ]
+    }
+  }
+}
diff --git a/generator/generator_lib.py b/generator/generator_lib.py
new file mode 100644
index 0000000..11b3ed2
--- /dev/null
+++ b/generator/generator_lib.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module to create generators that render multiple Jinja2 templates for GN.
+
+A helper module that can be used to create generator scripts (clients)
+that expand one or more Jinja2 templates, without outputs usable from
+GN and Ninja build-based systems. See generator_lib.gni as well.
+
+Clients should create a Generator sub-class, then call run_generator()
+with a proper derived class instance.
+
+Clients specify a list of FileRender operations, each one of them will
+output a file into a temporary output directory through Jinja2 expansion.
+All temporary output files are then grouped and written to into a single JSON
+file, that acts as a convenient single GN output target. Use extract_json.py
+to extract the output files from the JSON tarball in another GN action.
+
+--depfile can be used to specify an output Ninja dependency file for the
+JSON tarball, to ensure it is regenerated any time one of its dependencies
+changes.
+
+Finally, --expected-output-files can be used to check the list of generated
+output files.
+"""
+
+import argparse, json, os, re, sys
+from collections import namedtuple
+
+# A FileRender represents a single Jinja2 template render operation:
+#
+#   template: Jinja2 template name, relative to --template-dir path.
+#
+#   output: Output file path, relative to temporary output directory.
+#
+#   params_dicts: iterable of (name:string -> value:string) dictionaries.
+#       All of them will be merged before being sent as Jinja2 template
+#       expansion parameters.
+#
+# Example:
+#   FileRender('api.c', 'src/project_api.c', [{'PROJECT_VERSION': '1.0.0'}])
+#
+FileRender = namedtuple('FileRender', ['template', 'output', 'params_dicts'])
+
+
+# The interface that must be implemented by generators.
+class Generator:
+    def get_description(self):
+        """Return generator description for --help."""
+        return ""
+
+    def add_commandline_arguments(self, parser):
+        """Add generator-specific argparse arguments."""
+        pass
+
+    def get_file_renders(self, args):
+        """Return the list of FileRender objects to process."""
+        return []
+
+    def get_dependencies(self, args):
+        """Return a list of extra input dependencies."""
+        return []
+
+
+# Allow custom Jinja2 installation path through an additional python
+# path from the arguments if present. This isn't done through the regular
+# argparse because PreprocessingLoader uses jinja2 in the global scope before
+# "main" gets to run.
+#
+# NOTE: If this argument appears several times, this only uses the first
+#       value, while argparse would typically keep the last one!
+kJinja2Path = '--jinja2-path'
+try:
+    jinja2_path_argv_index = sys.argv.index(kJinja2Path)
+    # Add parent path for the import to succeed.
+    path = os.path.join(sys.argv[jinja2_path_argv_index + 1], os.pardir)
+    sys.path.insert(1, path)
+except ValueError:
+    # --jinja2-path isn't passed, ignore the exception and just import Jinja2
+    # assuming it already is in the Python PATH.
+    pass
+
+import jinja2
+
+
+# A custom Jinja2 template loader that removes the extra indentation
+# of the template blocks so that the output is correctly indented
+class _PreprocessingLoader(jinja2.BaseLoader):
+    def __init__(self, path):
+        self.path = path
+
+    def get_source(self, environment, template):
+        path = os.path.join(self.path, template)
+        if not os.path.exists(path):
+            raise jinja2.TemplateNotFound(template)
+        mtime = os.path.getmtime(path)
+        with open(path) as f:
+            source = self.preprocess(f.read())
+        return source, path, lambda: mtime == os.path.getmtime(path)
+
+    blockstart = re.compile('{%-?\s*(if|elif|else|for|block|macro)[^}]*%}')
+    blockend = re.compile('{%-?\s*(end(if|for|block|macro)|elif|else)[^}]*%}')
+
+    def preprocess(self, source):
+        lines = source.split('\n')
+
+        # Compute the current indentation level of the template blocks and
+        # remove their indentation
+        result = []
+        indentation_level = 0
+
+        # Filter lines that are pure comments. line_comment_prefix is not
+        # enough because it removes the comment but doesn't completely remove
+        # the line, resulting in more verbose output.
+        lines = filter(lambda line: not line.strip().startswith('//*'), lines)
+
+        # Remove indentation templates have for the Jinja control flow.
+        for line in lines:
+            # The capture in the regex adds one element per block start or end,
+            # so we divide by two. There is also an extra line chunk
+            # corresponding to the line end, so we subtract it.
+            numends = (len(self.blockend.split(line)) - 1) // 2
+            indentation_level -= numends
+
+            result.append(self.remove_indentation(line, indentation_level))
+
+            numstarts = (len(self.blockstart.split(line)) - 1) // 2
+            indentation_level += numstarts
+
+        return '\n'.join(result) + '\n'
+
+    def remove_indentation(self, line, n):
+        for _ in range(n):
+            if line.startswith(' '):
+                line = line[4:]
+            elif line.startswith('\t'):
+                line = line[1:]
+            else:
+                assert line.strip() == ''
+        return line
+
+
+_FileOutput = namedtuple('FileOutput', ['name', 'content'])
+
+
+def _do_renders(renders, template_dir):
+    loader = _PreprocessingLoader(template_dir)
+    env = jinja2.Environment(extensions=['jinja2.ext.do'],
+                             loader=loader,
+                             lstrip_blocks=True,
+                             trim_blocks=True,
+                             line_comment_prefix='//*')
+
+    def do_assert(expr):
+        assert expr
+        return ''
+
+    def debug(text):
+        print(text)
+
+    base_params = {
+        'enumerate': enumerate,
+        'format': format,
+        'len': len,
+        'debug': debug,
+        'assert': do_assert,
+    }
+
+    outputs = []
+    for render in renders:
+        params = {}
+        params.update(base_params)
+        for param_dict in render.params_dicts:
+            params.update(param_dict)
+        content = env.get_template(render.template).render(**params)
+        outputs.append(_FileOutput(render.output, content))
+
+    return outputs
+
+
+# Compute the list of imported, non-system Python modules.
+# It assumes that any path outside of the root directory is system.
+def _compute_python_dependencies(root_dir=None):
+    if not root_dir:
+        # Assume this script is under generator/ by default.
+        root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
+    root_dir = os.path.abspath(root_dir)
+
+    module_paths = (module.__file__ for module in sys.modules.values()
+                    if module and hasattr(module, '__file__'))
+
+    paths = set()
+    for path in module_paths:
+        # Builtin/namespaced modules may return None for the file path.
+        if not path:
+            continue
+
+        path = os.path.abspath(path)
+
+        if not path.startswith(root_dir):
+            continue
+
+        if (path.endswith('.pyc')
+                or (path.endswith('c') and not os.path.splitext(path)[1])):
+            path = path[:-1]
+
+        paths.add(path)
+
+    return paths
+
+
+def run_generator(generator):
+    parser = argparse.ArgumentParser(
+        description=generator.get_description(),
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+
+    generator.add_commandline_arguments(parser)
+    parser.add_argument('--template-dir',
+                        default='templates',
+                        type=str,
+                        help='Directory with template files.')
+    parser.add_argument(
+        kJinja2Path,
+        default=None,
+        type=str,
+        help='Additional python path to set before loading Jinja2')
+    parser.add_argument(
+        '--output-json-tarball',
+        default=None,
+        type=str,
+        help=('Name of the "JSON tarball" to create (tar is too annoying '
+              'to use in python).'))
+    parser.add_argument(
+        '--depfile',
+        default=None,
+        type=str,
+        help='Name of the Ninja depfile to create for the JSON tarball')
+    parser.add_argument(
+        '--expected-outputs-file',
+        default=None,
+        type=str,
+        help="File to compare outputs with and fail if it doesn't match")
+    parser.add_argument(
+        '--root-dir',
+        default=None,
+        type=str,
+        help=('Optional source root directory for Python dependency '
+              'computations'))
+    parser.add_argument(
+        '--allowed-output-dirs-file',
+        default=None,
+        type=str,
+        help=("File containing a list of allowed directories where files "
+              "can be output."))
+    parser.add_argument(
+        '--print-cmake-dependencies',
+        default=False,
+        action="store_true",
+        help=("Prints a semi-colon separated list of dependencies to "
+              "stdout and exits."))
+    parser.add_argument(
+        '--print-cmake-outputs',
+        default=False,
+        action="store_true",
+        help=("Prints a semi-colon separated list of outputs to "
+              "stdout and exits."))
+    parser.add_argument('--output-dir',
+                        default=None,
+                        type=str,
+                        help='Directory where to output generate files.')
+
+    args = parser.parse_args()
+
+    renders = generator.get_file_renders(args)
+
+    # Output a list of all dependencies for CMake or the tarball for GN/Ninja.
+    if args.depfile != None or args.print_cmake_dependencies:
+        dependencies = generator.get_dependencies(args)
+        dependencies += [
+            args.template_dir + os.path.sep + render.template
+            for render in renders
+        ]
+        dependencies += _compute_python_dependencies(args.root_dir)
+
+        if args.depfile != None:
+            with open(args.depfile, 'w') as f:
+                f.write(args.output_json_tarball + ": " +
+                        " ".join(dependencies))
+
+        if args.print_cmake_dependencies:
+            sys.stdout.write(";".join(dependencies))
+            return 0
+
+    # The caller wants to assert that the outputs are what it expects.
+    # Load the file and compare with our renders.
+    if args.expected_outputs_file != None:
+        with open(args.expected_outputs_file) as f:
+            expected = set([line.strip() for line in f.readlines()])
+
+        actual = {render.output for render in renders}
+
+        if actual != expected:
+            print("Wrong expected outputs, caller expected:\n    " +
+                  repr(sorted(expected)))
+            print("Actual output:\n    " + repr(sorted(actual)))
+            return 1
+
+    # Print the list of all the outputs for cmake.
+    if args.print_cmake_outputs:
+        sys.stdout.write(";".join([
+            os.path.join(args.output_dir, render.output) for render in renders
+        ]))
+        return 0
+
+    outputs = _do_renders(renders, args.template_dir)
+
+    # The caller wants to assert that the outputs are only in specific
+    # directories.
+    if args.allowed_output_dirs_file != None:
+        with open(args.allowed_output_dirs_file) as f:
+            allowed_dirs = set([line.strip() for line in f.readlines()])
+
+        for directory in allowed_dirs:
+            if not directory.endswith('/'):
+                print('Allowed directory entry "{}" doesn\'t '
+                      'end with /'.format(directory))
+                return 1
+
+        def check_in_subdirectory(path, directory):
+            return path.startswith(
+                directory) and not '/' in path[len(directory):]
+
+        for render in renders:
+            if not any(
+                    check_in_subdirectory(render.output, directory)
+                    for directory in allowed_dirs):
+                print('Output file "{}" is not in the allowed directory '
+                      'list below:'.format(render.output))
+                for directory in sorted(allowed_dirs):
+                    print('    "{}"'.format(directory))
+                return 1
+
+    # Output the JSON tarball
+    if args.output_json_tarball != None:
+        json_root = {}
+        for output in outputs:
+            json_root[output.name] = output.content
+
+        with open(args.output_json_tarball, 'w') as f:
+            f.write(json.dumps(json_root))
+
+    # Output the files directly.
+    if args.output_dir != None:
+        for output in outputs:
+            output_path = os.path.join(args.output_dir, output.name)
+
+            directory = os.path.dirname(output_path)
+            if not os.path.exists(directory):
+                os.makedirs(directory)
+
+            with open(output_path, 'w') as outfile:
+                outfile.write(output.content)
diff --git a/generator/opengl_loader_generator.py b/generator/opengl_loader_generator.py
new file mode 100644
index 0000000..db253e2
--- /dev/null
+++ b/generator/opengl_loader_generator.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, json, sys
+from collections import namedtuple
+import xml.etree.ElementTree as etree
+
+from generator_lib import Generator, run_generator, FileRender
+
+
+class ProcName:
+    def __init__(self, gl_name, proc_name=None):
+        assert gl_name.startswith('gl')
+        if proc_name == None:
+            proc_name = gl_name[2:]
+
+        self.gl_name = gl_name
+        self.proc_name = proc_name
+
+    def glProcName(self):
+        return self.gl_name
+
+    def ProcName(self):
+        return self.proc_name
+
+    def PFNPROCNAME(self):
+        return 'PFN' + self.gl_name.upper() + 'PROC'
+
+    def __repr__(self):
+        return 'Proc("{}", "{}")'.format(self.gl_name, self.proc_name)
+
+
+ProcParam = namedtuple('ProcParam', ['name', 'type'])
+
+
+class Proc:
+    def __init__(self, element):
+        # Type declaration for return values and arguments all have the same
+        # (weird) format.
+        # <element>[A][<ptype>B</ptype>][C]<other stuff.../></element>
+        #
+        # Some examples are:
+        #   <proto>void <name>glFinish</name></proto>
+        #   <proto><ptype>GLenum</ptype><name>glFenceSync</name></proto>
+        #   <proto>const <ptype>GLubyte</ptype> *<name>glGetString</name></proto>
+        #
+        # This handles all the shapes found in gl.xml except for this one that
+        # has an array specifier after </name>:
+        #   <param><ptype>GLuint</ptype> <name>baseAndCount</name>[2]</param>
+        def parse_type_declaration(element):
+            result = ''
+            if element.text != None:
+                result += element.text
+            ptype = element.find('ptype')
+            if ptype != None:
+                result += ptype.text
+                if ptype.tail != None:
+                    result += ptype.tail
+            return result.strip()
+
+        proto = element.find('proto')
+
+        self.return_type = parse_type_declaration(proto)
+
+        self.params = []
+        for param in element.findall('./param'):
+            self.params.append(
+                ProcParam(
+                    param.find('name').text, parse_type_declaration(param)))
+
+        self.gl_name = proto.find('name').text
+        self.alias = None
+        if element.find('alias') != None:
+            self.alias = element.find('alias').attrib['name']
+
+    def glProcName(self):
+        return self.gl_name
+
+    def ProcName(self):
+        assert self.gl_name.startswith('gl')
+        return self.gl_name[2:]
+
+    def PFNGLPROCNAME(self):
+        return 'PFN' + self.gl_name.upper() + 'PROC'
+
+    def __repr__(self):
+        return 'Proc("{}")'.format(self.gl_name)
+
+
+EnumDefine = namedtuple('EnumDefine', ['name', 'value'])
+Version = namedtuple('Version', ['major', 'minor'])
+VersionBlock = namedtuple('VersionBlock', ['version', 'procs', 'enums'])
+HeaderBlock = namedtuple('HeaderBlock', ['description', 'procs', 'enums'])
+ExtensionBlock = namedtuple('ExtensionBlock',
+                            ['extension', 'procs', 'enums', 'supported_specs'])
+
+
+def parse_version(version):
+    return Version(*map(int, version.split('.')))
+
+
+def compute_params(root, supported_extensions):
+    # Parse all the commands and enums
+    all_procs = {}
+    for command in root.findall('''commands[@namespace='GL']/command'''):
+        proc = Proc(command)
+        assert proc.gl_name not in all_procs
+        all_procs[proc.gl_name] = proc
+
+    all_enums = {}
+    for enum in root.findall('''enums[@namespace='GL']/enum'''):
+        enum_name = enum.attrib['name']
+        # Special case an enum we'll never use that has different values in GL and GLES
+        if enum_name == 'GL_ACTIVE_PROGRAM_EXT':
+            continue
+
+        assert enum_name not in all_enums
+        all_enums[enum_name] = EnumDefine(enum_name, enum.attrib['value'])
+
+    # Get the list of all Desktop OpenGL function removed by the Core Profile.
+    core_removed_procs = set()
+    for proc in root.findall('''feature/remove[@profile='core']/command'''):
+        core_removed_procs.add(proc.attrib['name'])
+
+    # Get list of enums and procs per OpenGL ES/Desktop OpenGL version
+    def parse_version_blocks(api, removed_procs=set()):
+        blocks = []
+        for section in root.findall('''feature[@api='{}']'''.format(api)):
+            section_procs = []
+            for command in section.findall('./require/command'):
+                proc_name = command.attrib['name']
+                assert all_procs[proc_name].alias == None
+                if proc_name not in removed_procs:
+                    section_procs.append(all_procs[proc_name])
+
+            section_enums = []
+            for enum in section.findall('./require/enum'):
+                section_enums.append(all_enums[enum.attrib['name']])
+
+            blocks.append(
+                VersionBlock(parse_version(section.attrib['number']),
+                             section_procs, section_enums))
+
+        return blocks
+
+    gles_blocks = parse_version_blocks('gles2')
+    desktop_gl_blocks = parse_version_blocks('gl', core_removed_procs)
+
+    def parse_extension_block(extension):
+        section = root.find(
+            '''extensions/extension[@name='{}']'''.format(extension))
+        supported_specs = section.attrib['supported'].split('|')
+        section_procs = []
+        for command in section.findall('./require/command'):
+            proc_name = command.attrib['name']
+            assert all_procs[proc_name].alias == None
+            section_procs.append(all_procs[proc_name])
+
+        section_enums = []
+        for enum in section.findall('./require/enum'):
+            section_enums.append(all_enums[enum.attrib['name']])
+
+        return ExtensionBlock(extension, section_procs, section_enums,
+                              supported_specs)
+
+    extension_desktop_gl_blocks = []
+    extension_gles_blocks = []
+    for extension in supported_extensions:
+        extension_block = parse_extension_block(extension)
+        if 'gl' in extension_block.supported_specs:
+            extension_desktop_gl_blocks.append(extension_block)
+        if 'gles2' in extension_block.supported_specs:
+            extension_gles_blocks.append(extension_block)
+
+    # Compute the blocks for headers such that there is no duplicate definition
+    already_added_header_procs = set()
+    already_added_header_enums = set()
+    header_blocks = []
+
+    def add_header_block(description, block):
+        block_procs = []
+        for proc in block.procs:
+            if not proc.glProcName() in already_added_header_procs:
+                already_added_header_procs.add(proc.glProcName())
+                block_procs.append(proc)
+
+        block_enums = []
+        for enum in block.enums:
+            if not enum.name in already_added_header_enums:
+                already_added_header_enums.add(enum.name)
+                block_enums.append(enum)
+
+        if len(block_procs) > 0 or len(block_enums) > 0:
+            header_blocks.append(
+                HeaderBlock(description, block_procs, block_enums))
+
+    for block in gles_blocks:
+        add_header_block(
+            'OpenGL ES {}.{}'.format(block.version.major, block.version.minor),
+            block)
+
+    for block in desktop_gl_blocks:
+        add_header_block(
+            'Desktop OpenGL {}.{}'.format(block.version.major,
+                                          block.version.minor), block)
+
+    for block in extension_desktop_gl_blocks:
+        add_header_block(block.extension, block)
+
+    for block in extension_gles_blocks:
+        add_header_block(block.extension, block)
+
+    return {
+        'gles_blocks': gles_blocks,
+        'desktop_gl_blocks': desktop_gl_blocks,
+        'extension_desktop_gl_blocks': extension_desktop_gl_blocks,
+        'extension_gles_blocks': extension_gles_blocks,
+        'header_blocks': header_blocks,
+    }
+
+
+class OpenGLLoaderGenerator(Generator):
+    def get_description(self):
+        return 'Generates code to load OpenGL function pointers'
+
+    def add_commandline_arguments(self, parser):
+        parser.add_argument('--gl-xml',
+                            required=True,
+                            type=str,
+                            help='The Khronos gl.xml to use.')
+        parser.add_argument(
+            '--supported-extensions',
+            required=True,
+            type=str,
+            help=
+            'The JSON file that defines the OpenGL and GLES extensions to use.'
+        )
+
+    def get_file_renders(self, args):
+        supported_extensions = []
+        with open(args.supported_extensions) as f:
+            supported_extensions_json = json.loads(f.read())
+            supported_extensions = supported_extensions_json[
+                'supported_extensions']
+
+        params = compute_params(
+            etree.parse(args.gl_xml).getroot(), supported_extensions)
+
+        return [
+            FileRender(
+                'opengl/OpenGLFunctionsBase.cpp',
+                'src/dawn/native/opengl/OpenGLFunctionsBase_autogen.cpp',
+                [params]),
+            FileRender('opengl/OpenGLFunctionsBase.h',
+                       'src/dawn/native/opengl/OpenGLFunctionsBase_autogen.h',
+                       [params]),
+            FileRender('opengl/opengl_platform.h',
+                       'src/dawn/native/opengl/opengl_platform_autogen.h',
+                       [params]),
+        ]
+
+    def get_dependencies(self, args):
+        return [
+            os.path.abspath(args.gl_xml),
+            os.path.abspath(args.supported_extensions)
+        ]
+
+
+if __name__ == '__main__':
+    sys.exit(run_generator(OpenGLLoaderGenerator()))
diff --git a/generator/remove_files.py b/generator/remove_files.py
new file mode 100644
index 0000000..6ddf463
--- /dev/null
+++ b/generator/remove_files.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse, glob, os, sys
+
+
+def check_in_subdirectory(path, directory):
+    return path.startswith(directory) and not '/' in path[len(directory):]
+
+
+def check_is_allowed(path, allowed_dirs):
+    return any(
+        check_in_subdirectory(path, directory) for directory in allowed_dirs)
+
+
+def get_all_files_in_dir(find_directory):
+    result = []
+    for (directory, _, files) in os.walk(find_directory):
+        result += [os.path.join(directory, filename) for filename in files]
+    return result
+
+
+def run():
+    # Parse command line arguments
+    parser = argparse.ArgumentParser(
+        description="Removes stale autogenerated files from gen/ directories.")
+    parser.add_argument(
+        '--root-dir',
+        type=str,
+        help='The root directory, all other paths in files are relative to it.'
+    )
+    parser.add_argument(
+        '--allowed-output-dirs-file',
+        type=str,
+        help='The file containing a list of allowed directories')
+    parser.add_argument(
+        '--stale-dirs-file',
+        type=str,
+        help=
+        'The file containing a list of directories to check for stale files')
+    parser.add_argument('--stamp',
+                        type=str,
+                        help='A stamp written once this script completes')
+    args = parser.parse_args()
+
+    root_dir = args.root_dir
+    stamp_file = args.stamp
+
+    # Load the list of allowed and stale directories
+    with open(args.allowed_output_dirs_file) as f:
+        allowed_dirs = set(
+            [os.path.join(root_dir, line.strip()) for line in f.readlines()])
+
+    for directory in allowed_dirs:
+        if not directory.endswith('/'):
+            print('Allowed directory entry "{}" doesn\'t end with /'.format(
+                directory))
+            return 1
+
+    with open(args.stale_dirs_file) as f:
+        stale_dirs = set([line.strip() for line in f.readlines()])
+
+    # Remove all files in stale dirs that aren't in the allowed dirs.
+    for stale_dir in stale_dirs:
+        stale_dir = os.path.join(root_dir, stale_dir)
+
+        for candidate in get_all_files_in_dir(stale_dir):
+            if not check_is_allowed(candidate, allowed_dirs):
+                os.remove(candidate)
+
+    # Finished! Write the stamp file so ninja knows to not run this again.
+    with open(stamp_file, "w") as f:
+        f.write("")
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(run())
diff --git a/generator/templates/.clang-format b/generator/templates/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/generator/templates/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/generator/templates/BSD_LICENSE b/generator/templates/BSD_LICENSE
new file mode 100644
index 0000000..eaef87a
--- /dev/null
+++ b/generator/templates/BSD_LICENSE
@@ -0,0 +1,29 @@
+// BSD 3-Clause License
+//
+// Copyright (c) {{metadata.copyright_year}}, "{{metadata.api}} native" developers
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its
+//    contributors may be used to endorse or promote products derived from
+//    this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/generator/templates/api.h b/generator/templates/api.h
new file mode 100644
index 0000000..db9d94a
--- /dev/null
+++ b/generator/templates/api.h
@@ -0,0 +1,181 @@
+//* This template itself is part of the Dawn source and follows Dawn's license,
+//* which is Apache 2.0.
+//*
+//* The WebGPU native API is a joint project used by Google, Mozilla, and Apple.
+//* It was agreed to use a BSD 3-Clause license so that it is GPLv2-compatible.
+//*
+//* As a result, the template comments using //* at the top of the file are
+//* removed during generation such that the resulting file starts with the
+//* BSD 3-Clause comment, which is inside BSD_LICENSE as included below.
+//*
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+{% include 'BSD_LICENSE' %}
+{% if 'dawn' in enabled_tags %}
+    #ifdef __EMSCRIPTEN__
+    #error "Do not include this header. Emscripten already provides headers needed for {{metadata.api}}."
+    #endif
+{% endif %}
+#ifndef {{metadata.api.upper()}}_H_
+#define {{metadata.api.upper()}}_H_
+
+{% set c_prefix = metadata.c_prefix %}
+#if defined({{c_prefix}}_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined({{c_prefix}}_IMPLEMENTATION)
+#            define {{c_prefix}}_EXPORT __declspec(dllexport)
+#        else
+#            define {{c_prefix}}_EXPORT __declspec(dllimport)
+#        endif
+#    else  // defined(_WIN32)
+#        if defined({{c_prefix}}_IMPLEMENTATION)
+#            define {{c_prefix}}_EXPORT __attribute__((visibility("default")))
+#        else
+#            define {{c_prefix}}_EXPORT
+#        endif
+#    endif  // defined(_WIN32)
+#else       // defined({{c_prefix}}_SHARED_LIBRARY)
+#    define {{c_prefix}}_EXPORT
+#endif  // defined({{c_prefix}}_SHARED_LIBRARY)
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+{% for constant in by_category["constant"] %}
+    #define {{c_prefix}}_{{constant.name.SNAKE_CASE()}} {{constant.value}}
+{% endfor %}
+
+typedef uint32_t {{c_prefix}}Flags;
+
+{% for type in by_category["object"] %}
+    typedef struct {{as_cType(type.name)}}Impl* {{as_cType(type.name)}};
+{% endfor %}
+
+{% for type in by_category["enum"] + by_category["bitmask"] %}
+    typedef enum {{as_cType(type.name)}} {
+        {% for value in type.values %}
+            {{as_cEnum(type.name, value.name)}} = 0x{{format(value.value, "08X")}},
+        {% endfor %}
+        {{as_cEnum(type.name, Name("force32"))}} = 0x7FFFFFFF
+    } {{as_cType(type.name)}};
+    {% if type.category == "bitmask" %}
+        typedef {{c_prefix}}Flags {{as_cType(type.name)}}Flags;
+    {% endif %}
+
+{% endfor -%}
+
+typedef struct {{c_prefix}}ChainedStruct {
+    struct {{c_prefix}}ChainedStruct const * next;
+    {{c_prefix}}SType sType;
+} {{c_prefix}}ChainedStruct;
+
+typedef struct {{c_prefix}}ChainedStructOut {
+    struct {{c_prefix}}ChainedStructOut * next;
+    {{c_prefix}}SType sType;
+} {{c_prefix}}ChainedStructOut;
+
+{% for type in by_category["structure"] %}
+    typedef struct {{as_cType(type.name)}} {
+        {% set Out = "Out" if type.output else "" %}
+        {% set const = "const " if not type.output else "" %}
+        {% if type.extensible %}
+            {{c_prefix}}ChainedStruct{{Out}} {{const}}* nextInChain;
+        {% endif %}
+        {% if type.chained %}
+            {{c_prefix}}ChainedStruct{{Out}} chain;
+        {% endif %}
+        {% for member in type.members %}
+            {{as_annotated_cType(member)}};
+        {% endfor %}
+    } {{as_cType(type.name)}};
+
+{% endfor %}
+{% for typeDef in by_category["typedef"] %}
+    // {{as_cType(typeDef.name)}} is deprecated.
+    // Use {{as_cType(typeDef.type.name)}} instead.
+    typedef {{as_cType(typeDef.type.name)}} {{as_cType(typeDef.name)}};
+
+{% endfor %}
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+{% for type in by_category["function pointer"] %}
+    typedef {{as_cType(type.return_type.name)}} (*{{as_cType(type.name)}})(
+        {%- if type.arguments == [] -%}
+            void
+        {%- else -%}
+            {%- for arg in type.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        {%- endif -%}
+    );
+{% endfor %}
+
+#if !defined({{c_prefix}}_SKIP_PROCS)
+
+{% for function in by_category["function"] %}
+    typedef {{as_cType(function.return_type.name)}} (*{{as_cProc(None, function.name)}})(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        );
+{% endfor %}
+
+{% for type in by_category["object"] if len(c_methods(type)) > 0 %}
+    // Procs of {{type.name.CamelCase()}}
+    {% for method in c_methods(type) %}
+        typedef {{as_cType(method.return_type.name)}} (*{{as_cProc(type.name, method.name)}})(
+            {{-as_cType(type.name)}} {{as_varName(type.name)}}
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        );
+    {% endfor %}
+
+{% endfor %}
+#endif  // !defined({{c_prefix}}_SKIP_PROCS)
+
+#if !defined({{c_prefix}}_SKIP_DECLARATIONS)
+
+{% for function in by_category["function"] %}
+    {{c_prefix}}_EXPORT {{as_cType(function.return_type.name)}} {{as_cMethod(None, function.name)}}(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        );
+{% endfor %}
+
+{% for type in by_category["object"] if len(c_methods(type)) > 0 %}
+    // Methods of {{type.name.CamelCase()}}
+    {% for method in c_methods(type) %}
+        {{c_prefix}}_EXPORT {{as_cType(method.return_type.name)}} {{as_cMethod(type.name, method.name)}}(
+            {{-as_cType(type.name)}} {{as_varName(type.name)}}
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        );
+    {% endfor %}
+
+{% endfor %}
+#endif  // !defined({{c_prefix}}_SKIP_DECLARATIONS)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // {{metadata.api.upper()}}_H_
diff --git a/generator/templates/api_cpp.cpp b/generator/templates/api_cpp.cpp
new file mode 100644
index 0000000..d540e0b
--- /dev/null
+++ b/generator/templates/api_cpp.cpp
@@ -0,0 +1,175 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+{% set api = metadata.api.lower() %}
+{% if 'dawn' in enabled_tags %}
+    #include "dawn/{{api}}_cpp.h"
+{% else %}
+    #include "{{api}}/{{api}}_cpp.h"
+{% endif %}
+
+#ifdef __GNUC__
+// error: 'offsetof' within non-standard-layout type '{{metadata.namespace}}::XXX' is conditionally-supported
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+
+namespace {{metadata.namespace}} {
+    {% for type in by_category["enum"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) %}
+
+        // {{CppType}}
+
+        static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
+        static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
+
+        {% for value in type.values %}
+            static_assert(static_cast<uint32_t>({{CppType}}::{{as_cppEnum(value.name)}}) == {{as_cEnum(type.name, value.name)}}, "value mismatch for {{CppType}}::{{as_cppEnum(value.name)}}");
+        {% endfor %}
+    {% endfor -%}
+
+    {% for type in by_category["bitmask"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) + "Flags" %}
+
+        // {{CppType}}
+
+        static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
+        static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
+
+        {% for value in type.values %}
+            static_assert(static_cast<uint32_t>({{CppType}}::{{as_cppEnum(value.name)}}) == {{as_cEnum(type.name, value.name)}}, "value mismatch for {{CppType}}::{{as_cppEnum(value.name)}}");
+        {% endfor %}
+    {% endfor %}
+
+    // ChainedStruct
+
+    {% set c_prefix = metadata.c_prefix %}
+    static_assert(sizeof(ChainedStruct) == sizeof({{c_prefix}}ChainedStruct),
+            "sizeof mismatch for ChainedStruct");
+    static_assert(alignof(ChainedStruct) == alignof({{c_prefix}}ChainedStruct),
+            "alignof mismatch for ChainedStruct");
+    static_assert(offsetof(ChainedStruct, nextInChain) == offsetof({{c_prefix}}ChainedStruct, next),
+            "offsetof mismatch for ChainedStruct::nextInChain");
+    static_assert(offsetof(ChainedStruct, sType) == offsetof({{c_prefix}}ChainedStruct, sType),
+            "offsetof mismatch for ChainedStruct::sType");
+    {% for type in by_category["structure"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) %}
+
+        // {{CppType}}
+
+        static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
+        static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
+
+        {% if type.extensible %}
+            static_assert(offsetof({{CppType}}, nextInChain) == offsetof({{CType}}, nextInChain),
+                    "offsetof mismatch for {{CppType}}::nextInChain");
+        {% endif %}
+        {% for member in type.members %}
+            {% set memberName = member.name.camelCase() %}
+            static_assert(offsetof({{CppType}}, {{memberName}}) == offsetof({{CType}}, {{memberName}}),
+                    "offsetof mismatch for {{CppType}}::{{memberName}}");
+        {% endfor %}
+    {% endfor -%}
+
+    {%- macro render_c_actual_arg(arg) -%}
+        {%- if arg.annotation == "value" -%}
+            {%- if arg.type.category == "object" -%}
+                {{as_varName(arg.name)}}.Get()
+            {%- elif arg.type.category == "enum" or arg.type.category == "bitmask" -%}
+                static_cast<{{as_cType(arg.type.name)}}>({{as_varName(arg.name)}})
+            {%- elif arg.type.category in ["function pointer", "native"] -%}
+                {{as_varName(arg.name)}}
+            {%- else -%}
+                UNHANDLED
+            {%- endif -%}
+        {%- else -%}
+            reinterpret_cast<{{decorate("", as_cType(arg.type.name), arg)}}>({{as_varName(arg.name)}})
+        {%- endif -%}
+    {%- endmacro -%}
+
+    {% for type in by_category["object"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) %}
+
+        // {{CppType}}
+
+        static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
+        static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
+
+        {% macro render_cpp_method_declaration(type, method) -%}
+            {% set CppType = as_cppType(type.name) %}
+            {{as_cppType(method.return_type.name)}} {{CppType}}::{{method.name.CamelCase()}}(
+                {%- for arg in method.arguments -%}
+                    {%- if not loop.first %}, {% endif -%}
+                    {%- if arg.type.category == "object" and arg.annotation == "value" -%}
+                        {{as_cppType(arg.type.name)}} const& {{as_varName(arg.name)}}
+                    {%- else -%}
+                        {{as_annotated_cppType(arg)}}
+                    {%- endif -%}
+                {%- endfor -%}
+            ) const
+        {%- endmacro -%}
+
+        {%- macro render_cpp_to_c_method_call(type, method) -%}
+            {{as_cMethod(type.name, method.name)}}(Get()
+                {%- for arg in method.arguments -%},{{" "}}{{render_c_actual_arg(arg)}}
+                {%- endfor -%}
+            )
+        {%- endmacro -%}
+
+        {% for method in type.methods -%}
+            {{render_cpp_method_declaration(type, method)}} {
+                {% if method.return_type.name.concatcase() == "void" %}
+                    {{render_cpp_to_c_method_call(type, method)}};
+                {% else %}
+                    auto result = {{render_cpp_to_c_method_call(type, method)}};
+                    return {{convert_cType_to_cppType(method.return_type, 'value', 'result') | indent(8)}};
+                {% endif %}
+            }
+        {% endfor %}
+        void {{CppType}}::{{c_prefix}}Reference({{CType}} handle) {
+            if (handle != nullptr) {
+                {{as_cMethod(type.name, Name("reference"))}}(handle);
+            }
+        }
+        void {{CppType}}::{{c_prefix}}Release({{CType}} handle) {
+            if (handle != nullptr) {
+                {{as_cMethod(type.name, Name("release"))}}(handle);
+            }
+        }
+    {% endfor %}
+
+    // Function
+
+    {% for function in by_category["function"] %}
+        {%- macro render_function_call(function) -%}
+            {{as_cMethod(None, function.name)}}(
+                {%- for arg in function.arguments -%}
+                    {% if not loop.first %}, {% endif %}{{render_c_actual_arg(arg)}}
+                {%- endfor -%}
+            )
+        {%- endmacro -%}
+
+        {{as_cppType(function.return_type.name) | indent(4, true) }} {{as_cppType(function.name) }}(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cppType(arg)}}
+            {%- endfor -%}
+        ) {
+            auto result = {{render_function_call(function)}};
+            return {{convert_cType_to_cppType(function.return_type, 'value', 'result')}};
+        }
+    {% endfor %}
+
+}
diff --git a/generator/templates/api_cpp.h b/generator/templates/api_cpp.h
new file mode 100644
index 0000000..c3b21fb
--- /dev/null
+++ b/generator/templates/api_cpp.h
@@ -0,0 +1,260 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+{% set API = metadata.api.upper() %}
+{% set api = API.lower() %}
+{% if 'dawn' not in enabled_tags %}
+    #ifdef __EMSCRIPTEN__
+    #error "Do not include this header. Emscripten already provides headers needed for {{metadata.api}}."
+    #endif
+{% endif %}
+#ifndef {{API}}_CPP_H_
+#define {{API}}_CPP_H_
+
+#include "dawn/{{api}}.h"
+#include "dawn/EnumClassBitmasks.h"
+#include <cmath>
+
+namespace {{metadata.namespace}} {
+
+    {% set c_prefix = metadata.c_prefix %}
+    {% for constant in by_category["constant"] %}
+        {% set type = as_cppType(constant.type.name) %}
+        {% set value = c_prefix + "_" +  constant.name.SNAKE_CASE() %}
+        static constexpr {{type}} k{{as_cppType(constant.name)}} = {{ value }};
+    {% endfor %}
+
+    {% for type in by_category["enum"] %}
+        enum class {{as_cppType(type.name)}} : uint32_t {
+            {% for value in type.values %}
+                {{as_cppEnum(value.name)}} = 0x{{format(value.value, "08X")}},
+            {% endfor %}
+        };
+
+    {% endfor %}
+
+    {% for type in by_category["bitmask"] %}
+        enum class {{as_cppType(type.name)}} : uint32_t {
+            {% for value in type.values %}
+                {{as_cppEnum(value.name)}} = 0x{{format(value.value, "08X")}},
+            {% endfor %}
+        };
+
+    {% endfor %}
+
+    {% for type in by_category["function pointer"] %}
+        using {{as_cppType(type.name)}} = {{as_cType(type.name)}};
+    {% endfor %}
+
+    {% for type in by_category["object"] %}
+        class {{as_cppType(type.name)}};
+    {% endfor %}
+
+    {% for type in by_category["structure"] %}
+        struct {{as_cppType(type.name)}};
+    {% endfor %}
+
+    {% for typeDef in by_category["typedef"] %}
+        // {{as_cppType(typeDef.name)}} is deprecated.
+        // Use {{as_cppType(typeDef.type.name)}} instead.
+        using {{as_cppType(typeDef.name)}} = {{as_cppType(typeDef.type.name)}};
+
+    {% endfor %}
+    template<typename Derived, typename CType>
+    class ObjectBase {
+      public:
+        ObjectBase() = default;
+        ObjectBase(CType handle): mHandle(handle) {
+            if (mHandle) Derived::{{c_prefix}}Reference(mHandle);
+        }
+        ~ObjectBase() {
+            if (mHandle) Derived::{{c_prefix}}Release(mHandle);
+        }
+
+        ObjectBase(ObjectBase const& other)
+            : ObjectBase(other.Get()) {
+        }
+        Derived& operator=(ObjectBase const& other) {
+            if (&other != this) {
+                if (mHandle) Derived::{{c_prefix}}Release(mHandle);
+                mHandle = other.mHandle;
+                if (mHandle) Derived::{{c_prefix}}Reference(mHandle);
+            }
+
+            return static_cast<Derived&>(*this);
+        }
+
+        ObjectBase(ObjectBase&& other) {
+            mHandle = other.mHandle;
+            other.mHandle = 0;
+        }
+        Derived& operator=(ObjectBase&& other) {
+            if (&other != this) {
+                if (mHandle) Derived::{{c_prefix}}Release(mHandle);
+                mHandle = other.mHandle;
+                other.mHandle = 0;
+            }
+
+            return static_cast<Derived&>(*this);
+        }
+
+        ObjectBase(std::nullptr_t) {}
+        Derived& operator=(std::nullptr_t) {
+            if (mHandle != nullptr) {
+                Derived::{{c_prefix}}Release(mHandle);
+                mHandle = nullptr;
+            }
+            return static_cast<Derived&>(*this);
+        }
+
+        bool operator==(std::nullptr_t) const {
+            return mHandle == nullptr;
+        }
+        bool operator!=(std::nullptr_t) const {
+            return mHandle != nullptr;
+        }
+
+        explicit operator bool() const {
+            return mHandle != nullptr;
+        }
+        CType Get() const {
+            return mHandle;
+        }
+        CType Release() {
+            CType result = mHandle;
+            mHandle = 0;
+            return result;
+        }
+        static Derived Acquire(CType handle) {
+            Derived result;
+            result.mHandle = handle;
+            return result;
+        }
+
+      protected:
+        CType mHandle = nullptr;
+    };
+
+{% macro render_cpp_default_value(member, is_struct=True) -%}
+    {%- if member.annotation in ["*", "const*"] and member.optional or member.default_value == "nullptr" -%}
+        {{" "}}= nullptr
+    {%- elif member.type.category == "object" and member.optional and is_struct -%}
+        {{" "}}= nullptr
+    {%- elif member.type.category in ["enum", "bitmask"] and member.default_value != None -%}
+        {{" "}}= {{as_cppType(member.type.name)}}::{{as_cppEnum(Name(member.default_value))}}
+    {%- elif member.type.category == "native" and member.default_value != None -%}
+        {{" "}}= {{member.default_value}}
+    {%- elif member.default_value != None -%}
+        {{" "}}= {{member.default_value}}
+    {%- else -%}
+        {{assert(member.default_value == None)}}
+    {%- endif -%}
+{%- endmacro %}
+
+{% macro render_cpp_method_declaration(type, method) %}
+    {% set CppType = as_cppType(type.name) %}
+    {{as_cppType(method.return_type.name)}} {{method.name.CamelCase()}}(
+        {%- for arg in method.arguments -%}
+            {%- if not loop.first %}, {% endif -%}
+            {%- if arg.type.category == "object" and arg.annotation == "value" -%}
+                {{as_cppType(arg.type.name)}} const& {{as_varName(arg.name)}}
+            {%- else -%}
+                {{as_annotated_cppType(arg)}}
+            {%- endif -%}
+            {{render_cpp_default_value(arg, False)}}
+        {%- endfor -%}
+    ) const
+{%- endmacro %}
+
+    {% for type in by_category["object"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) %}
+        class {{CppType}} : public ObjectBase<{{CppType}}, {{CType}}> {
+          public:
+            using ObjectBase::ObjectBase;
+            using ObjectBase::operator=;
+
+            {% for method in type.methods %}
+                {{render_cpp_method_declaration(type, method)}};
+            {% endfor %}
+
+          private:
+            friend ObjectBase<{{CppType}}, {{CType}}>;
+            static void {{c_prefix}}Reference({{CType}} handle);
+            static void {{c_prefix}}Release({{CType}} handle);
+        };
+
+    {% endfor %}
+
+    {% for function in by_category["function"] %}
+        {{as_cppType(function.return_type.name)}} {{as_cppType(function.name)}}(
+            {%- for arg in function.arguments -%}
+                {%- if not loop.first %}, {% endif -%}
+                {{as_annotated_cppType(arg)}}{{render_cpp_default_value(arg, False)}}
+            {%- endfor -%}
+        );
+    {% endfor %}
+
+    struct ChainedStruct {
+        ChainedStruct const * nextInChain = nullptr;
+        SType sType = SType::Invalid;
+    };
+
+    struct ChainedStructOut {
+        ChainedStruct * nextInChain = nullptr;
+        SType sType = SType::Invalid;
+    };
+
+    {% for type in by_category["structure"] %}
+        {% set Out = "Out" if type.output else "" %}
+        {% set const = "const" if not type.output else "" %}
+        {% if type.chained %}
+            struct {{as_cppType(type.name)}} : ChainedStruct{{Out}} {
+                {{as_cppType(type.name)}}() {
+                    sType = SType::{{type.name.CamelCase()}};
+                }
+        {% else %}
+            struct {{as_cppType(type.name)}} {
+        {% endif %}
+            {% if type.extensible %}
+                ChainedStruct{{Out}} {{const}} * nextInChain = nullptr;
+            {% endif %}
+            {% for member in type.members %}
+                {% set member_declaration = as_annotated_cppType(member) + render_cpp_default_value(member) %}
+                {% if type.chained and loop.first %}
+                    //* Align the first member to ChainedStruct to match the C struct layout.
+                    alignas(ChainedStruct{{Out}}) {{member_declaration}};
+                {% else %}
+                    {{member_declaration}};
+                {% endif %}
+            {% endfor %}
+        };
+
+    {% endfor %}
+
+    // The operators of EnumClassBitmmasks in the dawn:: namespace need to be imported
+    // in the {{metadata.namespace}} namespace for Argument Dependent Lookup.
+    DAWN_IMPORT_BITMASK_OPERATORS
+}  // namespace {{metadata.namespace}}
+
+namespace dawn {
+    {% for type in by_category["bitmask"] %}
+        template<>
+        struct IsDawnBitmask<{{metadata.namespace}}::{{as_cppType(type.name)}}> {
+            static constexpr bool enable = true;
+        };
+
+    {% endfor %}
+} // namespace dawn
+
+#endif // {{API}}_CPP_H_
diff --git a/generator/templates/api_cpp_print.h b/generator/templates/api_cpp_print.h
new file mode 100644
index 0000000..040f29c
--- /dev/null
+++ b/generator/templates/api_cpp_print.h
@@ -0,0 +1,92 @@
+//* Copyright 2021 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set API = metadata.api.upper() %}
+{% set api = API.lower() %}
+#ifndef {{API}}_CPP_PRINT_H_
+#define {{API}}_CPP_PRINT_H_
+
+#include "dawn/{{api}}_cpp.h"
+
+#include <iomanip>
+#include <ios>
+#include <ostream>
+#include <type_traits>
+
+namespace {{metadata.namespace}} {
+
+  {% for type in by_category["enum"] %}
+      template <typename CharT, typename Traits>
+      std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& o, {{as_cppType(type.name)}} value) {
+          switch (value) {
+            {% for value in type.values %}
+              case {{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}:
+                o << "{{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}";
+                break;
+            {% endfor %}
+              default:
+                o << "{{as_cppType(type.name)}}::" << std::showbase << std::hex << std::setfill('0') << std::setw(4) << static_cast<typename std::underlying_type<{{as_cppType(type.name)}}>::type>(value);
+          }
+          return o;
+      }
+  {% endfor %}
+
+  {% for type in by_category["bitmask"] %}
+      template <typename CharT, typename Traits>
+      std::basic_ostream<CharT, Traits>& operator<<(std::basic_ostream<CharT, Traits>& o, {{as_cppType(type.name)}} value) {
+        o << "{{as_cppType(type.name)}}::";
+        if (!static_cast<bool>(value)) {
+          {% for value in type.values if value.value == 0 %}
+            // 0 is often explicitly declared as None.
+            o << "{{as_cppEnum(value.name)}}";
+          {% else %}
+            o << std::showbase << std::hex << std::setfill('0') << std::setw(4) << 0;
+          {% endfor %}
+          return o;
+        }
+
+        bool moreThanOneBit = !HasZeroOrOneBits(value);
+        if (moreThanOneBit) {
+          o << "(";
+        }
+
+        bool first = true;
+        {% for value in type.values if value.value != 0 %}
+          if (value & {{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}) {
+            if (!first) {
+              o << "|";
+            }
+            first = false;
+            o << "{{as_cppEnum(value.name)}}";
+            value &= ~{{as_cppType(type.name)}}::{{as_cppEnum(value.name)}};
+          }
+        {% endfor %}
+
+        if (static_cast<bool>(value)) {
+          if (!first) {
+            o << "|";
+          }
+          o << std::showbase << std::hex << std::setfill('0') << std::setw(4) << static_cast<typename std::underlying_type<{{as_cppType(type.name)}}>::type>(value);
+        }
+
+        if (moreThanOneBit) {
+          o << ")";
+        }
+        return o;
+      }
+  {% endfor %}
+
+}  // namespace {{metadata.namespace}}
+
+#endif // {{API}}_CPP_PRINT_H_
diff --git a/generator/templates/api_struct_info.json b/generator/templates/api_struct_info.json
new file mode 100644
index 0000000..04e56cf
--- /dev/null
+++ b/generator/templates/api_struct_info.json
@@ -0,0 +1,51 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+//* This generator is used to produce part of Emscripten's struct_info.json,
+//* which is a list of struct fields that it uses to generate field offset
+//* information for its own code generators.
+//* https://github.com/emscripten-core/emscripten/blob/master/src/struct_info.json
+//*
+    {
+        {% set api = metadata.api.lower() %}
+        "file": "{api}/{api}.h",
+        "defines": [],
+        "structs": {
+            "{{metadata.c_prefix}}ChainedStruct": [
+                "next",
+                "sType"
+            ],
+            {% for type in by_category["structure"] %}
+                "{{as_cType(type.name)}}": [
+                    {% if type.chained %}
+                        "chain"
+                    {%- elif type.extensible %}
+                        "nextInChain"
+                    {%- endif %}
+                    {% for member in type.members -%}
+                        {%- if (type.chained or type.extensible) or not loop.first -%}
+                            ,
+                        {% endif %}
+                        "{{as_varName(member.name)}}"
+                    {%- endfor %}
+
+                ]
+                {%- if not loop.last -%}
+                    ,
+                {% endif %}
+            {% endfor %}
+
+        }
+    }
diff --git a/generator/templates/dawn/common/Version.h b/generator/templates/dawn/common/Version.h
new file mode 100644
index 0000000..f9f67e7
--- /dev/null
+++ b/generator/templates/dawn/common/Version.h
@@ -0,0 +1,24 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_VERISON_AUTOGEN_H_
+#define COMMON_VERISON_AUTOGEN_H_
+
+namespace dawn {
+
+static constexpr char kGitHash[] = "{{get_gitHash()}}";
+
+} // namespace dawn
+
+#endif  // COMMON_VERISON_AUTOGEN_H_
diff --git a/generator/templates/dawn/native/ChainUtils.cpp b/generator/templates/dawn/native/ChainUtils.cpp
new file mode 100644
index 0000000..2973788
--- /dev/null
+++ b/generator/templates/dawn/native/ChainUtils.cpp
@@ -0,0 +1,66 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/ChainUtils_autogen.h"
+
+#include <unordered_set>
+
+namespace {{native_namespace}} {
+
+{% set namespace = metadata.namespace %}
+{% for value in types["s type"].values %}
+    {% if value.valid %}
+        void FindInChain(const ChainedStruct* chain, const {{as_cppEnum(value.name)}}** out) {
+            for (; chain; chain = chain->nextInChain) {
+                if (chain->sType == {{namespace}}::SType::{{as_cppEnum(value.name)}}) {
+                    *out = static_cast<const {{as_cppEnum(value.name)}}*>(chain);
+                    break;
+                }
+            }
+        }
+    {% endif %}
+{% endfor %}
+
+MaybeError ValidateSTypes(const ChainedStruct* chain,
+                          std::vector<std::vector<{{namespace}}::SType>> oneOfConstraints) {
+    std::unordered_set<{{namespace}}::SType> allSTypes;
+    for (; chain; chain = chain->nextInChain) {
+        if (allSTypes.find(chain->sType) != allSTypes.end()) {
+            return DAWN_VALIDATION_ERROR("Chain cannot have duplicate sTypes");
+        }
+        allSTypes.insert(chain->sType);
+    }
+    for (const auto& oneOfConstraint : oneOfConstraints) {
+        bool satisfied = false;
+        for ({{namespace}}::SType oneOfSType : oneOfConstraint) {
+            if (allSTypes.find(oneOfSType) != allSTypes.end()) {
+                if (satisfied) {
+                    return DAWN_VALIDATION_ERROR("Unsupported sType combination");
+                }
+                satisfied = true;
+                allSTypes.erase(oneOfSType);
+            }
+        }
+    }
+    if (!allSTypes.empty()) {
+        return DAWN_VALIDATION_ERROR("Unsupported sType");
+    }
+    return {};
+}
+
+}  // namespace {{native_namespace}}
diff --git a/generator/templates/dawn/native/ChainUtils.h b/generator/templates/dawn/native/ChainUtils.h
new file mode 100644
index 0000000..3377220
--- /dev/null
+++ b/generator/templates/dawn/native/ChainUtils.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set DIR = namespace_name.concatcase().upper() %}
+#ifndef {{DIR}}_CHAIN_UTILS_H_
+#define {{DIR}}_CHAIN_UTILS_H_
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+{% set prefix = metadata.proc_table_prefix.lower() %}
+#include "{{native_dir}}/{{prefix}}_platform.h"
+#include "{{native_dir}}/Error.h"
+
+namespace {{native_namespace}} {
+    {% for value in types["s type"].values %}
+        {% if value.valid %}
+            void FindInChain(const ChainedStruct* chain, const {{as_cppEnum(value.name)}}** out);
+        {% endif %}
+    {% endfor %}
+
+    // Verifies that |chain| only contains ChainedStructs of types enumerated in
+    // |oneOfConstraints| and contains no duplicate sTypes. Each vector in
+    // |oneOfConstraints| defines a set of sTypes that cannot coexist in the same chain.
+    // For example:
+    //   ValidateSTypes(chain, { { ShaderModuleSPIRVDescriptor, ShaderModuleWGSLDescriptor } }))
+    //   ValidateSTypes(chain, { { Extension1 }, { Extension2 } })
+    {% set namespace = metadata.namespace %}
+    MaybeError ValidateSTypes(const ChainedStruct* chain,
+                              std::vector<std::vector<{{namespace}}::SType>> oneOfConstraints);
+
+    template <typename T>
+    MaybeError ValidateSingleSTypeInner(const ChainedStruct* chain, T sType) {
+        DAWN_INVALID_IF(chain->sType != sType,
+            "Unsupported sType (%s). Expected (%s)", chain->sType, sType);
+        return {};
+    }
+
+    template <typename T, typename... Args>
+    MaybeError ValidateSingleSTypeInner(const ChainedStruct* chain, T sType, Args... sTypes) {
+        if (chain->sType == sType) {
+            return {};
+        }
+        return ValidateSingleSTypeInner(chain, sTypes...);
+    }
+
+    // Verifies that |chain| contains a single ChainedStruct of type |sType| or no ChainedStructs
+    // at all.
+    template <typename T>
+    MaybeError ValidateSingleSType(const ChainedStruct* chain, T sType) {
+        if (chain == nullptr) {
+            return {};
+        }
+        DAWN_INVALID_IF(chain->nextInChain != nullptr,
+            "Chain can only contain a single chained struct.");
+        return ValidateSingleSTypeInner(chain, sType);
+    }
+
+    // Verifies that |chain| contains a single ChainedStruct with a type enumerated in the
+    // parameter pack or no ChainedStructs at all.
+    template <typename T, typename... Args>
+    MaybeError ValidateSingleSType(const ChainedStruct* chain, T sType, Args... sTypes) {
+        if (chain == nullptr) {
+            return {};
+        }
+        DAWN_INVALID_IF(chain->nextInChain != nullptr,
+            "Chain can only contain a single chained struct.");
+        return ValidateSingleSTypeInner(chain, sType, sTypes...);
+    }
+
+}  // namespace {{native_namespace}}
+
+#endif  // {{DIR}}_CHAIN_UTILS_H_
diff --git a/generator/templates/dawn/native/ObjectType.cpp b/generator/templates/dawn/native/ObjectType.cpp
new file mode 100644
index 0000000..8fad3d4
--- /dev/null
+++ b/generator/templates/dawn/native/ObjectType.cpp
@@ -0,0 +1,34 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/ObjectType_autogen.h"
+
+namespace {{native_namespace}} {
+
+    const char* ObjectTypeAsString(ObjectType type) {
+        switch (type) {
+            {% for type in by_category["object"] %}
+                case ObjectType::{{type.name.CamelCase()}}:
+                    return "{{type.name.CamelCase()}}";
+            {% endfor %}
+            default:
+                UNREACHABLE();
+        }
+    }
+
+} // namespace {{native_namespace}}
diff --git a/generator/templates/dawn/native/ObjectType.h b/generator/templates/dawn/native/ObjectType.h
new file mode 100644
index 0000000..1d59b50
--- /dev/null
+++ b/generator/templates/dawn/native/ObjectType.h
@@ -0,0 +1,41 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set DIR = namespace_name.concatcase().upper() %}
+#ifndef {{DIR}}_OBJECTTPYE_AUTOGEN_H_
+#define {{DIR}}_OBJECTTPYE_AUTOGEN_H_
+
+#include "dawn/common/ityp_array.h"
+
+#include <cstdint>
+
+{% set native_namespace = namespace_name.namespace_case() %}
+namespace {{native_namespace}} {
+
+    enum class ObjectType : uint32_t {
+        {% for type in by_category["object"] %}
+            {{type.name.CamelCase()}},
+        {% endfor %}
+    };
+
+    template <typename T>
+    using PerObjectType = ityp::array<ObjectType, T, {{len(by_category["object"])}}>;
+
+    const char* ObjectTypeAsString(ObjectType type);
+
+} // namespace {{native_namespace}}
+
+
+#endif  // {{DIR}}_OBJECTTPYE_AUTOGEN_H_
diff --git a/generator/templates/dawn/native/ProcTable.cpp b/generator/templates/dawn/native/ProcTable.cpp
new file mode 100644
index 0000000..47ac2f5
--- /dev/null
+++ b/generator/templates/dawn/native/ProcTable.cpp
@@ -0,0 +1,159 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set Prefix = metadata.proc_table_prefix %}
+{% set prefix = Prefix.lower() %}
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/{{prefix}}_platform.h"
+#include "{{native_dir}}/{{Prefix}}Native.h"
+
+#include <algorithm>
+#include <vector>
+
+{% for type in by_category["object"] %}
+    {% if type.name.canonical_case() not in ["texture view"] %}
+        #include "{{native_dir}}/{{type.name.CamelCase()}}.h"
+    {% endif %}
+{% endfor %}
+
+namespace {{native_namespace}} {
+
+    {% for type in by_category["object"] %}
+        {% for method in c_methods(type) %}
+            {% set suffix = as_MethodSuffix(type.name, method.name) %}
+
+            {{as_cType(method.return_type.name)}} Native{{suffix}}(
+                {{-as_cType(type.name)}} cSelf
+                {%- for arg in method.arguments -%}
+                    , {{as_annotated_cType(arg)}}
+                {%- endfor -%}
+            ) {
+                //* Perform conversion between C types and frontend types
+                auto self = FromAPI(cSelf);
+
+                {% for arg in method.arguments %}
+                    {% set varName = as_varName(arg.name) %}
+                    {% if arg.type.category in ["enum", "bitmask"] and arg.annotation == "value" %}
+                        auto {{varName}}_ = static_cast<{{as_frontendType(arg.type)}}>({{varName}});
+                    {% elif arg.annotation != "value" or arg.type.category == "object" %}
+                        auto {{varName}}_ = reinterpret_cast<{{decorate("", as_frontendType(arg.type), arg)}}>({{varName}});
+                    {% else %}
+                        auto {{varName}}_ = {{as_varName(arg.name)}};
+                    {% endif %}
+                {%- endfor-%}
+
+                {% if method.return_type.name.canonical_case() != "void" %}
+                    auto result =
+                {%- endif %}
+                self->API{{method.name.CamelCase()}}(
+                    {%- for arg in method.arguments -%}
+                        {%- if not loop.first %}, {% endif -%}
+                        {{as_varName(arg.name)}}_
+                    {%- endfor -%}
+                );
+                {% if method.return_type.name.canonical_case() != "void" %}
+                    {% if method.return_type.category == "object" %}
+                        return ToAPI(result);
+                    {% else %}
+                        return result;
+                    {% endif %}
+                {% endif %}
+            }
+        {% endfor %}
+    {% endfor %}
+
+    namespace {
+
+        {% set c_prefix = metadata.c_prefix %}
+        struct ProcEntry {
+            {{c_prefix}}Proc proc;
+            const char* name;
+        };
+        static const ProcEntry sProcMap[] = {
+            {% for (type, method) in c_methods_sorted_by_name %}
+                { reinterpret_cast<{{c_prefix}}Proc>(Native{{as_MethodSuffix(type.name, method.name)}}), "{{as_cMethod(type.name, method.name)}}" },
+            {% endfor %}
+        };
+        static constexpr size_t sProcMapSize = sizeof(sProcMap) / sizeof(sProcMap[0]);
+
+    }  // anonymous namespace
+
+    {% for function in by_category["function"] %}
+        {{as_cType(function.return_type.name)}} Native{{as_cppType(function.name)}}(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        ) {
+            {% if function.name.canonical_case() == "get proc address" %}
+                if (procName == nullptr) {
+                    return nullptr;
+                }
+
+                const ProcEntry* entry = std::lower_bound(&sProcMap[0], &sProcMap[sProcMapSize], procName,
+                    [](const ProcEntry &a, const char *b) -> bool {
+                        return strcmp(a.name, b) < 0;
+                    }
+                );
+
+                if (entry != &sProcMap[sProcMapSize] && strcmp(entry->name, procName) == 0) {
+                    return entry->proc;
+                }
+
+                // Special case the free-standing functions of the API.
+                // TODO(dawn:1238) Checking string one by one is slow, it needs to be optimized.
+                {% for function in by_category["function"] %}
+                    if (strcmp(procName, "{{as_cMethod(None, function.name)}}") == 0) {
+                        return reinterpret_cast<{{c_prefix}}Proc>(Native{{as_cppType(function.name)}});
+                    }
+
+                {% endfor %}
+                return nullptr;
+            {% else %}
+                return ToAPI({{as_cppType(function.return_type.name)}}Base::Create(
+                    {%- for arg in function.arguments -%}
+                        FromAPI({% if not loop.first %}, {% endif %}{{as_varName(arg.name)}})
+                    {%- endfor -%}
+                ));
+            {% endif %}
+        }
+
+    {% endfor %}
+
+    std::vector<const char*> GetProcMapNamesForTestingInternal() {
+        std::vector<const char*> result;
+        result.reserve(sProcMapSize);
+        for (const ProcEntry& entry : sProcMap) {
+            result.push_back(entry.name);
+        }
+        return result;
+    }
+
+    static {{Prefix}}ProcTable gProcTable = {
+        {% for function in by_category["function"] %}
+            Native{{as_cppType(function.name)}},
+        {% endfor %}
+        {% for type in by_category["object"] %}
+            {% for method in c_methods(type) %}
+                Native{{as_MethodSuffix(type.name, method.name)}},
+            {% endfor %}
+        {% endfor %}
+    };
+
+    const {{Prefix}}ProcTable& GetProcsAutogen() {
+        return gProcTable;
+    }
+}
diff --git a/generator/templates/dawn/native/ValidationUtils.cpp b/generator/templates/dawn/native/ValidationUtils.cpp
new file mode 100644
index 0000000..1cb78c6
--- /dev/null
+++ b/generator/templates/dawn/native/ValidationUtils.cpp
@@ -0,0 +1,48 @@
+//* Copyright 2018 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/ValidationUtils_autogen.h"
+
+namespace {{native_namespace}} {
+
+    {% set namespace = metadata.namespace %}
+    {% for type in by_category["enum"] %}
+        MaybeError Validate{{type.name.CamelCase()}}({{namespace}}::{{as_cppType(type.name)}} value) {
+            switch (value) {
+                {% for value in type.values if value.valid %}
+                    case {{namespace}}::{{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}:
+                        return {};
+                {% endfor %}
+                default:
+                    return DAWN_VALIDATION_ERROR("Invalid value for {{as_cType(type.name)}}");
+            }
+        }
+
+    {% endfor %}
+
+    {% for type in by_category["bitmask"] %}
+        MaybeError Validate{{type.name.CamelCase()}}({{namespace}}::{{as_cppType(type.name)}} value) {
+            if ((value & static_cast<{{namespace}}::{{as_cppType(type.name)}}>(~{{type.full_mask}})) == 0) {
+                return {};
+            }
+            return DAWN_VALIDATION_ERROR("Invalid value for {{as_cType(type.name)}}");
+        }
+
+    {% endfor %}
+
+} // namespace {{native_namespace}}
diff --git a/generator/templates/dawn/native/ValidationUtils.h b/generator/templates/dawn/native/ValidationUtils.h
new file mode 100644
index 0000000..06d3cc7
--- /dev/null
+++ b/generator/templates/dawn/native/ValidationUtils.h
@@ -0,0 +1,37 @@
+//* Copyright 2018 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef BACKEND_VALIDATIONUTILS_H_
+#define BACKEND_VALIDATIONUTILS_H_
+
+{% set api = metadata.api.lower() %}
+#include "dawn/{{api}}_cpp.h"
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/Error.h"
+
+namespace {{native_namespace}} {
+
+    // Helper functions to check the value of enums and bitmasks
+    {% for type in by_category["enum"] + by_category["bitmask"] %}
+        {% set namespace = metadata.namespace %}
+        MaybeError Validate{{type.name.CamelCase()}}({{namespace}}::{{as_cppType(type.name)}} value);
+    {% endfor %}
+
+} // namespace {{native_namespace}}
+
+#endif  // BACKEND_VALIDATIONUTILS_H_
diff --git a/generator/templates/dawn/native/api_absl_format.cpp b/generator/templates/dawn/native/api_absl_format.cpp
new file mode 100644
index 0000000..a3b7ea2
--- /dev/null
+++ b/generator/templates/dawn/native/api_absl_format.cpp
@@ -0,0 +1,173 @@
+//* Copyright 2021 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+{% set api = metadata.api.lower() %}
+#include "{{native_dir}}/{{api}}_absl_format_autogen.h"
+
+#include "{{native_dir}}/ObjectType_autogen.h"
+
+namespace {{native_namespace}} {
+
+    //
+    // Descriptors
+    //
+
+    {% for type in by_category["structure"] %}
+        {% for member in type.members %}
+            {% if member.name.canonical_case() == "label" %}
+                absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+                AbslFormatConvert(const {{as_cppType(type.name)}}* value,
+                                    const absl::FormatConversionSpec& spec,
+                                    absl::FormatSink* s) {
+                    if (value == nullptr) {
+                        s->Append("[null]");
+                        return {true};
+                    }
+                    s->Append("[{{as_cppType(type.name)}}");
+                    if (value->label != nullptr) {
+                        s->Append(absl::StrFormat(" \"%s\"", value->label));
+                    }
+                    s->Append("]");
+                    return {true};
+                }
+            {% endif %}
+        {% endfor %}
+    {% endfor %}
+
+    //
+    // Compatible with absl::StrFormat (Needs to be disjoint from having a 'label' for now.)
+    // Currently uses a hard-coded list to determine which structures are actually supported. If
+    // additional structures are added, be sure to update the header file's list as well.
+    //
+    using absl::ParsedFormat;
+
+    {% for type in by_category["structure"] %}
+        {% if type.name.get() in [
+             "buffer binding layout",
+             "sampler binding layout",
+             "texture binding layout",
+             "storage texture binding layout"
+           ]
+        %}
+        absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+            AbslFormatConvert(const {{as_cppType(type.name)}}& value,
+                              const absl::FormatConversionSpec& spec,
+                              absl::FormatSink* s) {
+            {% set members = [] %}
+            {% set format = [] %}
+            {% set template = [] %}
+            {% for member in type.members %}
+                {% set memberName = member.name.camelCase() %}
+                {% do members.append("value." + memberName) %}
+                {% do format.append(memberName + ": %" + as_formatType(member)) %}
+                {% do template.append("'" + as_formatType(member) + "'") %}
+            {% endfor %}
+            static const auto* const fmt =
+                new ParsedFormat<{{template|join(",")}}>("{ {{format|join(", ")}} }");
+            s->Append(absl::StrFormat(*fmt, {{members|join(", ")}}));
+            return {true};
+        }
+        {% endif %}
+    {% endfor %}
+
+}  // namespace {{native_namespace}}
+
+{% set namespace = metadata.namespace %}
+namespace {{namespace}} {
+
+    //
+    // Enums
+    //
+
+    {% for type in by_category["enum"] %}
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString|absl::FormatConversionCharSet::kIntegral>
+    AbslFormatConvert({{as_cppType(type.name)}} value,
+                      const absl::FormatConversionSpec& spec,
+                      absl::FormatSink* s) {
+        if (spec.conversion_char() == absl::FormatConversionChar::s) {
+            s->Append("{{as_cppType(type.name)}}::");
+            switch (value) {
+            {% for value in type.values %}
+                case {{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}:
+                    s->Append("{{as_cppEnum(value.name)}}");
+                    break;
+            {% endfor %}
+            }
+        } else {
+            s->Append(absl::StrFormat("%u", static_cast<typename std::underlying_type<{{as_cppType(type.name)}}>::type>(value)));
+        }
+        return {true};
+    }
+    {% endfor %}
+
+    //
+    // Bitmasks
+    //
+
+    {% for type in by_category["bitmask"] %}
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString|absl::FormatConversionCharSet::kIntegral>
+    AbslFormatConvert({{as_cppType(type.name)}} value,
+                      const absl::FormatConversionSpec& spec,
+                      absl::FormatSink* s) {
+        if (spec.conversion_char() == absl::FormatConversionChar::s) {
+            s->Append("{{as_cppType(type.name)}}::");
+            if (!static_cast<bool>(value)) {
+                {% for value in type.values if value.value == 0 %}
+                    // 0 is often explicitly declared as None.
+                    s->Append("{{as_cppEnum(value.name)}}");
+                {% else %}
+                    s->Append(absl::StrFormat("{{as_cppType(type.name)}}::%x", 0));
+                {% endfor %}
+                return {true};
+            }
+
+            bool moreThanOneBit = !HasZeroOrOneBits(value);
+            if (moreThanOneBit) {
+                s->Append("(");
+            }
+
+            bool first = true;
+            {% for value in type.values if value.value != 0 %}
+                if (value & {{as_cppType(type.name)}}::{{as_cppEnum(value.name)}}) {
+                    if (!first) {
+                        s->Append("|");
+                    }
+                    first = false;
+                    s->Append("{{as_cppEnum(value.name)}}");
+                    value &= ~{{as_cppType(type.name)}}::{{as_cppEnum(value.name)}};
+                }
+            {% endfor %}
+
+            if (static_cast<bool>(value)) {
+                if (!first) {
+                    s->Append("|");
+                }
+                s->Append(absl::StrFormat("{{as_cppType(type.name)}}::%x", static_cast<typename std::underlying_type<{{as_cppType(type.name)}}>::type>(value)));
+            }
+
+            if (moreThanOneBit) {
+                s->Append(")");
+            }
+        } else {
+            s->Append(absl::StrFormat("%u", static_cast<typename std::underlying_type<{{as_cppType(type.name)}}>::type>(value)));
+        }
+        return {true};
+    }
+    {% endfor %}
+
+}  // namespace {{namespace}}
diff --git a/generator/templates/dawn/native/api_absl_format.h b/generator/templates/dawn/native/api_absl_format.h
new file mode 100644
index 0000000..ab06098
--- /dev/null
+++ b/generator/templates/dawn/native/api_absl_format.h
@@ -0,0 +1,95 @@
+//* Copyright 2021 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set API = metadata.api.upper() %}
+#ifndef {{API}}_ABSL_FORMAT_H_
+#define {{API}}_ABSL_FORMAT_H_
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+{% set prefix = metadata.proc_table_prefix.lower() %}
+#include "{{native_dir}}/{{prefix}}_platform.h"
+
+#include "absl/strings/str_format.h"
+
+namespace {{native_namespace}} {
+
+    //
+    // Descriptors
+    //
+
+    // Only includes structures that have a 'label' member.
+    {% for type in by_category["structure"] %}
+        {% for member in type.members %}
+            {% if member.name.canonical_case() == "label" %}
+                absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+                    AbslFormatConvert(const {{as_cppType(type.name)}}* value,
+                                      const absl::FormatConversionSpec& spec,
+                                      absl::FormatSink* s);
+            {% endif %}
+        {% endfor %}
+    {% endfor %}
+
+    //
+    // Compatible with absl::StrFormat (Needs to be disjoint from having a 'label' for now.)
+    // Currently uses a hard-coded list to determine which structures are actually supported. If
+    // additional structures are added, be sure to update the cpp file's list as well.
+    //
+    {% for type in by_category["structure"] %}
+        {% if type.name.get() in [
+             "buffer binding layout",
+             "sampler binding layout",
+             "texture binding layout",
+             "storage texture binding layout"
+           ]
+        %}
+        absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+            AbslFormatConvert(const {{as_cppType(type.name)}}& value,
+                              const absl::FormatConversionSpec& spec,
+                              absl::FormatSink* s);
+        {% endif %}
+    {% endfor %}
+
+} // namespace {{native_namespace}}
+
+{% set namespace = metadata.namespace %}
+namespace {{namespace}} {
+
+    //
+    // Enums
+    //
+
+    {% for type in by_category["enum"] %}
+        absl::FormatConvertResult<absl::FormatConversionCharSet::kString|absl::FormatConversionCharSet::kIntegral>
+        AbslFormatConvert({{as_cppType(type.name)}} value,
+                          const absl::FormatConversionSpec& spec,
+                          absl::FormatSink* s);
+    {% endfor %}
+
+    //
+    // Bitmasks
+    //
+
+    {% for type in by_category["bitmask"] %}
+        absl::FormatConvertResult<absl::FormatConversionCharSet::kString|absl::FormatConversionCharSet::kIntegral>
+        AbslFormatConvert({{as_cppType(type.name)}} value,
+                          const absl::FormatConversionSpec& spec,
+                          absl::FormatSink* s);
+    {% endfor %}
+
+}  // namespace {{namespace}}
+
+#endif // {{API}}_ABSL_FORMAT_H_
diff --git a/generator/templates/dawn/native/api_dawn_native_proc.cpp b/generator/templates/dawn/native/api_dawn_native_proc.cpp
new file mode 100644
index 0000000..f9147c6
--- /dev/null
+++ b/generator/templates/dawn/native/api_dawn_native_proc.cpp
@@ -0,0 +1,75 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <dawn/{{metadata.api.lower()}}.h>
+
+namespace dawn::native {
+
+// This file should be kept in sync with generator/templates/dawn/native/ProcTable.cpp
+
+{% for function in by_category["function"] %}
+    extern {{as_cType(function.return_type.name)}} Native{{as_cppType(function.name)}}(
+        {%- for arg in function.arguments -%}
+            {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+        {%- endfor -%}
+    );
+{% endfor %}
+{% for type in by_category["object"] %}
+    {% for method in c_methods(type) %}
+        extern {{as_cType(method.return_type.name)}} Native{{as_MethodSuffix(type.name, method.name)}}(
+            {{-as_cType(type.name)}} cSelf
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        );
+    {% endfor %}
+{% endfor %}
+
+}
+
+extern "C" {
+    using namespace dawn::native;
+
+    {% for function in by_category["function"] %}
+        {{as_cType(function.return_type.name)}} {{metadata.namespace}}{{as_cppType(function.name)}} (
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        ) {
+            return Native{{as_cppType(function.name)}}(
+                {%- for arg in function.arguments -%}
+                    {% if not loop.first %}, {% endif %}{{as_varName(arg.name)}}
+                {%- endfor -%}
+            );
+        }
+    {% endfor %}
+
+    {% for type in by_category["object"] %}
+        {% for method in c_methods(type) %}
+            {{as_cType(method.return_type.name)}} {{metadata.namespace}}{{as_MethodSuffix(type.name, method.name)}}(
+                {{-as_cType(type.name)}} cSelf
+                {%- for arg in method.arguments -%}
+                    , {{as_annotated_cType(arg)}}
+                {%- endfor -%}
+            ) {
+                return Native{{as_MethodSuffix(type.name, method.name)}}(
+                    cSelf
+                    {%- for arg in method.arguments -%}
+                        , {{as_varName(arg.name)}}
+                    {%- endfor -%}
+                );
+            }
+        {% endfor %}
+    {% endfor %}
+}
diff --git a/generator/templates/dawn/native/api_structs.cpp b/generator/templates/dawn/native/api_structs.cpp
new file mode 100644
index 0000000..86f54f0
--- /dev/null
+++ b/generator/templates/dawn/native/api_structs.cpp
@@ -0,0 +1,75 @@
+//* Copyright 2018 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+{% set namespace = metadata.namespace %}
+#include "{{native_dir}}/{{namespace}}_structs_autogen.h"
+
+#include <tuple>
+
+#ifdef __GNUC__
+// error: 'offsetof' within non-standard-layout type '{{namespace}}::XXX' is conditionally-supported
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+
+namespace {{native_namespace}} {
+
+    {% set c_prefix = metadata.c_prefix %}
+    static_assert(sizeof(ChainedStruct) == sizeof({{c_prefix}}ChainedStruct),
+            "sizeof mismatch for ChainedStruct");
+    static_assert(alignof(ChainedStruct) == alignof({{c_prefix}}ChainedStruct),
+            "alignof mismatch for ChainedStruct");
+    static_assert(offsetof(ChainedStruct, nextInChain) == offsetof({{c_prefix}}ChainedStruct, next),
+            "offsetof mismatch for ChainedStruct::nextInChain");
+    static_assert(offsetof(ChainedStruct, sType) == offsetof({{c_prefix}}ChainedStruct, sType),
+            "offsetof mismatch for ChainedStruct::sType");
+
+    {% for type in by_category["structure"] %}
+        {% set CppType = as_cppType(type.name) %}
+        {% set CType = as_cType(type.name) %}
+
+        static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
+        static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
+
+        {% if type.extensible %}
+            static_assert(offsetof({{CppType}}, nextInChain) == offsetof({{CType}}, nextInChain),
+                    "offsetof mismatch for {{CppType}}::nextInChain");
+        {% endif %}
+        {% for member in type.members %}
+            {% set memberName = member.name.camelCase() %}
+            static_assert(offsetof({{CppType}}, {{memberName}}) == offsetof({{CType}}, {{memberName}}),
+                    "offsetof mismatch for {{CppType}}::{{memberName}}");
+        {% endfor %}
+
+        bool {{CppType}}::operator==(const {{as_cppType(type.name)}}& rhs) const {
+            return {% if type.extensible or type.chained -%}
+                (nextInChain == rhs.nextInChain) &&
+            {%- endif %} std::tie(
+                {% for member in type.members %}
+                    {{member.name.camelCase()-}}
+                    {{ "," if not loop.last else "" }}
+                {% endfor %}
+            ) == std::tie(
+                {% for member in type.members %}
+                    rhs.{{member.name.camelCase()-}}
+                    {{ "," if not loop.last else "" }}
+                {% endfor %}
+            );
+        }
+
+    {% endfor %}
+} // namespace {{native_namespace}}
diff --git a/generator/templates/dawn/native/api_structs.h b/generator/templates/dawn/native/api_structs.h
new file mode 100644
index 0000000..d655344
--- /dev/null
+++ b/generator/templates/dawn/native/api_structs.h
@@ -0,0 +1,87 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set DIR = namespace_name.concatcase().upper() %}
+{% set namespace = metadata.namespace %}
+#ifndef {{DIR}}_{{namespace.upper()}}_STRUCTS_H_
+#define {{DIR}}_{{namespace.upper()}}_STRUCTS_H_
+
+{% set api = metadata.api.lower() %}
+#include "dawn/{{api}}_cpp.h"
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/Forward.h"
+#include <cmath>
+
+namespace {{native_namespace}} {
+
+{% macro render_cpp_default_value(member) -%}
+    {%- if member.annotation in ["*", "const*"] and member.optional or member.default_value == "nullptr" -%}
+        {{" "}}= nullptr
+    {%- elif member.type.category == "object" and member.optional -%}
+        {{" "}}= nullptr
+    {%- elif member.type.category in ["enum", "bitmask"] and member.default_value != None -%}
+        {{" "}}= {{namespace}}::{{as_cppType(member.type.name)}}::{{as_cppEnum(Name(member.default_value))}}
+    {%- elif member.type.category == "native" and member.default_value != None -%}
+        {{" "}}= {{member.default_value}}
+    {%- elif member.default_value != None -%}
+        {{" "}}= {{member.default_value}}
+    {%- else -%}
+        {{assert(member.default_value == None)}}
+    {%- endif -%}
+{%- endmacro %}
+
+    struct ChainedStruct {
+        ChainedStruct const * nextInChain = nullptr;
+        {{namespace}}::SType sType = {{namespace}}::SType::Invalid;
+    };
+
+    {% for type in by_category["structure"] %}
+        {% if type.chained %}
+            struct {{as_cppType(type.name)}} : ChainedStruct {
+                {{as_cppType(type.name)}}() {
+                    sType = {{namespace}}::SType::{{type.name.CamelCase()}};
+                }
+        {% else %}
+            struct {{as_cppType(type.name)}} {
+        {% endif %}
+            {% if type.extensible %}
+                ChainedStruct const * nextInChain = nullptr;
+            {% endif %}
+            {% for member in type.members %}
+                {% set member_declaration = as_annotated_frontendType(member) + render_cpp_default_value(member) %}
+                {% if type.chained and loop.first %}
+                    //* Align the first member to ChainedStruct to match the C struct layout.
+                    alignas(ChainedStruct) {{member_declaration}};
+                {% else %}
+                    {{member_declaration}};
+                {% endif %}
+            {% endfor %}
+
+            // Equality operators, mostly for testing. Note that this tests
+            // strict pointer-pointer equality if the struct contains member pointers.
+            bool operator==(const {{as_cppType(type.name)}}& rhs) const;
+        };
+
+    {% endfor %}
+
+    {% for typeDef in by_category["typedef"] if typeDef.type.category == "structure" %}
+        using {{as_cppType(typeDef.name)}} = {{as_cppType(typeDef.type.name)}};
+    {% endfor %}
+
+} // namespace {{native_namespace}}
+
+#endif  // {{DIR}}_{{namespace.upper()}}_STRUCTS_H_
diff --git a/generator/templates/dawn/native/dawn_platform.h b/generator/templates/dawn/native/dawn_platform.h
new file mode 100644
index 0000000..e3f1c91
--- /dev/null
+++ b/generator/templates/dawn/native/dawn_platform.h
@@ -0,0 +1,82 @@
+//* Copyright 2021 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set namespace_name = Name(metadata.native_namespace) %}
+{% set NATIVE_DIR = namespace_name.concatcase().upper() %}
+{% set PREFIX = metadata.proc_table_prefix.upper() %}
+#ifndef {{NATIVE_DIR}}_{{PREFIX}}_PLATFORM_AUTOGEN_H_
+#define {{NATIVE_DIR}}_{{PREFIX}}_PLATFORM_AUTOGEN_H_
+
+{% set api = metadata.api.lower() %}
+#include "dawn/{{api}}_cpp.h"
+{% set impl_dir = metadata.impl_dir + "/" if metadata.impl_dir else "" %}
+{% set native_namespace = namespace_name.namespace_case() %}
+{% set native_dir = impl_dir + namespace_name.Dirs() %}
+#include "{{native_dir}}/Forward.h"
+
+{% set namespace = metadata.namespace %}
+// Use our autogenerated version of the {{namespace}} structures that point to {{native_namespace}} object types
+// (wgpu::Buffer is dawn::native::BufferBase*)
+#include <{{native_dir}}/{{namespace}}_structs_autogen.h>
+
+namespace {{native_namespace}} {
+
+    {% for type in by_category["structure"] %}
+        inline const {{as_cType(type.name)}}* ToAPI(const {{as_cppType(type.name)}}* rhs) {
+            return reinterpret_cast<const {{as_cType(type.name)}}*>(rhs);
+        }
+
+        inline {{as_cType(type.name)}}* ToAPI({{as_cppType(type.name)}}* rhs) {
+            return reinterpret_cast<{{as_cType(type.name)}}*>(rhs);
+        }
+
+        inline const {{as_cppType(type.name)}}* FromAPI(const {{as_cType(type.name)}}* rhs) {
+            return reinterpret_cast<const {{as_cppType(type.name)}}*>(rhs);
+        }
+
+        inline {{as_cppType(type.name)}}* FromAPI({{as_cType(type.name)}}* rhs) {
+            return reinterpret_cast<{{as_cppType(type.name)}}*>(rhs);
+        }
+    {% endfor %}
+
+    {% for type in by_category["object"] %}
+        inline const {{as_cType(type.name)}}Impl* ToAPI(const {{as_cppType(type.name)}}Base* rhs) {
+            return reinterpret_cast<const {{as_cType(type.name)}}Impl*>(rhs);
+        }
+
+        inline {{as_cType(type.name)}}Impl* ToAPI({{as_cppType(type.name)}}Base* rhs) {
+            return reinterpret_cast<{{as_cType(type.name)}}Impl*>(rhs);
+        }
+
+        inline const {{as_cppType(type.name)}}Base* FromAPI(const {{as_cType(type.name)}}Impl* rhs) {
+            return reinterpret_cast<const {{as_cppType(type.name)}}Base*>(rhs);
+        }
+
+        inline {{as_cppType(type.name)}}Base* FromAPI({{as_cType(type.name)}}Impl* rhs) {
+            return reinterpret_cast<{{as_cppType(type.name)}}Base*>(rhs);
+        }
+    {% endfor %}
+
+    template <typename T>
+    struct EnumCount;
+
+    {% for e in by_category["enum"] if e.contiguousFromZero %}
+        template<>
+        struct EnumCount<{{namespace}}::{{as_cppType(e.name)}}> {
+            static constexpr uint32_t value = {{len(e.values)}};
+        };
+    {% endfor %}
+}
+
+#endif  // {{NATIVE_DIR}}_{{PREFIX}}_PLATFORM_AUTOGEN_H_
diff --git a/generator/templates/dawn/wire/ObjectType.h b/generator/templates/dawn/wire/ObjectType.h
new file mode 100644
index 0000000..54ae08e
--- /dev/null
+++ b/generator/templates/dawn/wire/ObjectType.h
@@ -0,0 +1,34 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNWIRE_OBJECTTPYE_AUTOGEN_H_
+#define DAWNWIRE_OBJECTTPYE_AUTOGEN_H_
+
+#include "dawn/common/ityp_array.h"
+
+namespace dawn::wire {
+
+    enum class ObjectType : uint32_t {
+        {% for type in by_category["object"] %}
+            {{type.name.CamelCase()}},
+        {% endfor %}
+    };
+
+    template <typename T>
+    using PerObjectType = ityp::array<ObjectType, T, {{len(by_category["object"])}}>;
+
+} // namespace dawn::wire
+
+
+#endif  // DAWNWIRE_OBJECTTPYE_AUTOGEN_H_
diff --git a/generator/templates/dawn/wire/WireCmd.cpp b/generator/templates/dawn/wire/WireCmd.cpp
new file mode 100644
index 0000000..c945bee
--- /dev/null
+++ b/generator/templates/dawn/wire/WireCmd.cpp
@@ -0,0 +1,855 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/wire/BufferConsumer_impl.h"
+#include "dawn/wire/Wire.h"
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+
+#ifdef __GNUC__
+// error: 'offsetof' within non-standard-layout type 'wgpu::XXX' is conditionally-supported
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+
+//* Helper macros so that the main [de]serialization functions can be written in a generic manner.
+
+//* Outputs an rvalue that's the number of elements a pointer member points to.
+{% macro member_length(member, record_accessor) -%}
+    {%- if member.length == "constant" -%}
+        {{member.constant_length}}u
+    {%- else -%}
+        {{record_accessor}}{{as_varName(member.length.name)}}
+    {%- endif -%}
+{%- endmacro %}
+
+//* Outputs the type that will be used on the wire for the member
+{% macro member_transfer_type(member) -%}
+    {%- if member.type.category == "object" -%}
+        ObjectId
+    {%- elif member.type.category == "structure" -%}
+        {{as_cType(member.type.name)}}Transfer
+    {%- elif member.type.category == "bitmask" -%}
+        {{as_cType(member.type.name)}}Flags
+    {%- else -%}
+        {{ assert(as_cType(member.type.name) != "size_t") }}
+        {{as_cType(member.type.name)}}
+    {%- endif -%}
+{%- endmacro %}
+
+//* Outputs the size of one element of the type that will be used on the wire for the member
+{% macro member_transfer_sizeof(member) -%}
+    sizeof({{member_transfer_type(member)}})
+{%- endmacro %}
+
+//* Outputs the serialization code to put `in` in `out`
+{% macro serialize_member(member, in, out) %}
+    {%- if member.type.category == "object" -%}
+        {%- set Optional = "Optional" if member.optional else "" -%}
+        WIRE_TRY(provider.Get{{Optional}}Id({{in}}, &{{out}}));
+    {%- elif member.type.category == "structure" -%}
+        {%- if member.type.is_wire_transparent -%}
+            static_assert(sizeof({{out}}) == sizeof({{in}}), "Serialize memcpy size must match.");
+            memcpy(&{{out}}, &{{in}}, {{member_transfer_sizeof(member)}});
+        {%- else -%}
+            {%- set Provider = ", provider" if member.type.may_have_dawn_object else "" -%}
+            WIRE_TRY({{as_cType(member.type.name)}}Serialize({{in}}, &{{out}}, buffer{{Provider}}));
+        {%- endif -%}
+    {%- else -%}
+        {{out}} = {{in}};
+    {%- endif -%}
+{% endmacro %}
+
+//* Outputs the deserialization code to put `in` in `out`
+{% macro deserialize_member(member, in, out) %}
+    {%- if member.type.category == "object" -%}
+        {%- set Optional = "Optional" if member.optional else "" -%}
+        WIRE_TRY(resolver.Get{{Optional}}FromId({{in}}, &{{out}}));
+    {%- elif member.type.category == "structure" -%}
+        {%- if member.type.is_wire_transparent -%}
+            static_assert(sizeof({{out}}) == sizeof({{in}}), "Deserialize memcpy size must match.");
+            memcpy(&{{out}}, const_cast<const {{member_transfer_type(member)}}*>(&{{in}}), {{member_transfer_sizeof(member)}});
+        {%- else -%}
+            WIRE_TRY({{as_cType(member.type.name)}}Deserialize(&{{out}}, &{{in}}, deserializeBuffer, allocator
+                {%- if member.type.may_have_dawn_object -%}
+                    , resolver
+                {%- endif -%}
+            ));
+        {%- endif -%}
+    {%- else -%}
+        static_assert(sizeof({{out}}) >= sizeof({{in}}), "Deserialize assignment may not narrow.");
+        {{out}} = {{in}};
+    {%- endif -%}
+{% endmacro %}
+
+//* The main [de]serialization macro
+//* Methods are very similar to structures that have one member corresponding to each arguments.
+//* This macro takes advantage of the similarity to output [de]serialization code for a record
+//* that is either a structure or a method, with some special cases for each.
+{% macro write_record_serialization_helpers(record, name, members, is_cmd=False, is_return_command=False) %}
+    {% set Return = "Return" if is_return_command else "" %}
+    {% set Cmd = "Cmd" if is_cmd else "" %}
+    {% set Inherits = " : CmdHeader" if is_cmd else "" %}
+
+    //* Structure for the wire format of each of the records. Members that are values
+    //* are embedded directly in the structure. Other members are assumed to be in the
+    //* memory directly following the structure in the buffer.
+    struct {{Return}}{{name}}Transfer{{Inherits}} {
+        static_assert({{[is_cmd, record.extensible, record.chained].count(True)}} <= 1,
+                      "Record must be at most one of is_cmd, extensible, and chained.");
+        {% if is_cmd %}
+            //* Start the transfer structure with the command ID, so that casting to WireCmd gives the ID.
+            {{Return}}WireCmd commandId;
+        {% elif record.extensible %}
+            bool hasNextInChain;
+        {% elif record.chained %}
+            WGPUChainedStructTransfer chain;
+        {% endif %}
+
+        //* Value types are directly in the command, objects being replaced with their IDs.
+        {% for member in members if member.annotation == "value" %}
+            {{member_transfer_type(member)}} {{as_varName(member.name)}};
+        {% endfor %}
+
+        //* const char* have their length embedded directly in the command.
+        {% for member in members if member.length == "strlen" %}
+            uint64_t {{as_varName(member.name)}}Strlen;
+        {% endfor %}
+
+        {% for member in members if member.optional and member.annotation != "value" and member.type.category != "object" %}
+            bool has_{{as_varName(member.name)}};
+        {% endfor %}
+    };
+
+    {% if is_cmd %}
+        static_assert(offsetof({{Return}}{{name}}Transfer, commandSize) == 0);
+        static_assert(offsetof({{Return}}{{name}}Transfer, commandId) == sizeof(CmdHeader));
+    {% endif %}
+
+    {% if record.chained %}
+        static_assert(offsetof({{Return}}{{name}}Transfer, chain) == 0);
+    {% endif %}
+
+    //* Returns the required transfer size for `record` in addition to the transfer structure.
+    DAWN_DECLARE_UNUSED size_t {{Return}}{{name}}GetExtraRequiredSize(const {{Return}}{{name}}{{Cmd}}& record) {
+        DAWN_UNUSED(record);
+
+        size_t result = 0;
+
+        //* Gather how much space will be needed for the extension chain.
+        {% if record.extensible %}
+            if (record.nextInChain != nullptr) {
+                result += GetChainedStructExtraRequiredSize(record.nextInChain);
+            }
+        {% endif %}
+
+        //* Special handling of const char* that have their length embedded directly in the command
+        {% for member in members if member.length == "strlen" %}
+            {% set memberName = as_varName(member.name) %}
+
+            {% if member.optional %}
+                bool has_{{memberName}} = record.{{memberName}} != nullptr;
+                if (has_{{memberName}})
+            {% endif %}
+            {
+            result += std::strlen(record.{{memberName}});
+            }
+        {% endfor %}
+
+        //* Gather how much space will be needed for pointer members.
+        {% for member in members if member.length != "strlen" and not member.skip_serialize %}
+            {% if member.type.category != "object" and member.optional %}
+                if (record.{{as_varName(member.name)}} != nullptr)
+            {% endif %}
+            {
+                {% if member.annotation != "value" %}
+                    {{ assert(member.annotation != "const*const*") }}
+                    auto memberLength = {{member_length(member, "record.")}};
+                    result += memberLength * {{member_transfer_sizeof(member)}};
+                    //* Structures might contain more pointers so we need to add their extra size as well.
+                    {% if member.type.category == "structure" %}
+                        for (decltype(memberLength) i = 0; i < memberLength; ++i) {
+                            {{assert(member.annotation == "const*")}}
+                            result += {{as_cType(member.type.name)}}GetExtraRequiredSize(record.{{as_varName(member.name)}}[i]);
+                        }
+                    {% endif %}
+                {% elif member.type.category == "structure" %}
+                    result += {{as_cType(member.type.name)}}GetExtraRequiredSize(record.{{as_varName(member.name)}});
+                {% endif %}
+            }
+        {% endfor %}
+
+        return result;
+    }
+    // GetExtraRequiredSize isn't used for structures that are value members of other structures
+    // because we assume they cannot contain pointers themselves.
+    DAWN_UNUSED_FUNC({{Return}}{{name}}GetExtraRequiredSize);
+
+    //* Serializes `record` into `transfer`, using `buffer` to get more space for pointed-to data
+    //* and `provider` to serialize objects.
+    DAWN_DECLARE_UNUSED WireResult {{Return}}{{name}}Serialize(
+        const {{Return}}{{name}}{{Cmd}}& record,
+        {{Return}}{{name}}Transfer* transfer,
+        SerializeBuffer* buffer
+        {%- if record.may_have_dawn_object -%}
+            , const ObjectIdProvider& provider
+        {%- endif -%}
+    ) {
+        DAWN_UNUSED(buffer);
+
+        //* Handle special transfer members of methods.
+        {% if is_cmd %}
+            transfer->commandId = {{Return}}WireCmd::{{name}};
+        {% endif %}
+
+        //* Value types are directly in the transfer record, objects being replaced with their IDs.
+        {% for member in members if member.annotation == "value" %}
+            {% set memberName = as_varName(member.name) %}
+            {{serialize_member(member, "record." + memberName, "transfer->" + memberName)}}
+        {% endfor %}
+
+        {% if record.extensible %}
+            if (record.nextInChain != nullptr) {
+                transfer->hasNextInChain = true;
+                WIRE_TRY(SerializeChainedStruct(record.nextInChain, buffer, provider));
+            } else {
+                transfer->hasNextInChain = false;
+            }
+        {% endif %}
+
+        {% if record.chained %}
+            //* Should be set by the root descriptor's call to SerializeChainedStruct.
+            ASSERT(transfer->chain.sType == {{as_cEnum(types["s type"].name, record.name)}});
+            ASSERT(transfer->chain.hasNext == (record.chain.next != nullptr));
+        {% endif %}
+
+        //* Special handling of const char* that have their length embedded directly in the command
+        {% for member in members if member.length == "strlen" %}
+            {% set memberName = as_varName(member.name) %}
+
+            {% if member.optional %}
+                bool has_{{memberName}} = record.{{memberName}} != nullptr;
+                transfer->has_{{memberName}} = has_{{memberName}};
+                if (has_{{memberName}})
+            {% endif %}
+            {
+                transfer->{{memberName}}Strlen = std::strlen(record.{{memberName}});
+
+                char* stringInBuffer;
+                WIRE_TRY(buffer->NextN(transfer->{{memberName}}Strlen, &stringInBuffer));
+                memcpy(stringInBuffer, record.{{memberName}}, transfer->{{memberName}}Strlen);
+            }
+        {% endfor %}
+
+        //* Allocate space and write the non-value arguments in it.
+        {% for member in members if member.annotation != "value" and member.length != "strlen" and not member.skip_serialize %}
+            {{ assert(member.annotation != "const*const*") }}
+            {% set memberName = as_varName(member.name) %}
+
+            {% if member.type.category != "object" and member.optional %}
+                bool has_{{memberName}} = record.{{memberName}} != nullptr;
+                transfer->has_{{memberName}} = has_{{memberName}};
+                if (has_{{memberName}})
+            {% endif %}
+            {
+                auto memberLength = {{member_length(member, "record.")}};
+
+                {{member_transfer_type(member)}}* memberBuffer;
+                WIRE_TRY(buffer->NextN(memberLength, &memberBuffer));
+
+                {% if member.type.is_wire_transparent %}
+                    memcpy(
+                        memberBuffer, record.{{memberName}},
+                        {{member_transfer_sizeof(member)}} * memberLength);
+                {% else %}
+                    //* This loop cannot overflow because it iterates up to |memberLength|. Even if
+                    //* memberLength were the maximum integer value, |i| would become equal to it
+                    //* just before exiting the loop, but not increment past or wrap around.
+                    for (decltype(memberLength) i = 0; i < memberLength; ++i) {
+                        {{serialize_member(member, "record." + memberName + "[i]", "memberBuffer[i]" )}}
+                    }
+                {% endif %}
+            }
+        {% endfor %}
+        return WireResult::Success;
+    }
+    DAWN_UNUSED_FUNC({{Return}}{{name}}Serialize);
+
+    //* Deserializes `transfer` into `record` getting more serialized data from `buffer` and `size`
+    //* if needed, using `allocator` to store pointed-to values and `resolver` to translate object
+    //* Ids to actual objects.
+    DAWN_DECLARE_UNUSED WireResult {{Return}}{{name}}Deserialize(
+        {{Return}}{{name}}{{Cmd}}* record,
+        const volatile {{Return}}{{name}}Transfer* transfer,
+        DeserializeBuffer* deserializeBuffer,
+        DeserializeAllocator* allocator
+        {%- if record.may_have_dawn_object -%}
+            , const ObjectIdResolver& resolver
+        {%- endif -%}
+    ) {
+        DAWN_UNUSED(allocator);
+
+        {% if is_cmd %}
+            ASSERT(transfer->commandId == {{Return}}WireCmd::{{name}});
+        {% endif %}
+
+        {% if record.derived_method %}
+            record->selfId = transfer->self;
+        {% endif %}
+
+        //* Value types are directly in the transfer record, objects being replaced with their IDs.
+        {% for member in members if member.annotation == "value" %}
+            {% set memberName = as_varName(member.name) %}
+            {{deserialize_member(member, "transfer->" + memberName, "record->" + memberName)}}
+        {% endfor %}
+
+        {% if record.extensible %}
+            record->nextInChain = nullptr;
+            if (transfer->hasNextInChain) {
+                WIRE_TRY(DeserializeChainedStruct(&record->nextInChain, deserializeBuffer, allocator, resolver));
+            }
+        {% endif %}
+
+        {% if record.chained %}
+            //* Should be set by the root descriptor's call to DeserializeChainedStruct.
+            //* Don't check |record->chain.next| matches because it is not set until the
+            //* next iteration inside DeserializeChainedStruct.
+            ASSERT(record->chain.sType == {{as_cEnum(types["s type"].name, record.name)}});
+            ASSERT(record->chain.next == nullptr);
+        {% endif %}
+
+        //* Special handling of const char* that have their length embedded directly in the command
+        {% for member in members if member.length == "strlen" %}
+            {% set memberName = as_varName(member.name) %}
+
+            {% if member.optional %}
+                bool has_{{memberName}} = transfer->has_{{memberName}};
+                record->{{memberName}} = nullptr;
+                if (has_{{memberName}})
+            {% endif %}
+            {
+                uint64_t stringLength64 = transfer->{{memberName}}Strlen;
+                if (stringLength64 >= std::numeric_limits<size_t>::max()) {
+                    //* Cannot allocate space for the string. It can be at most
+                    //* size_t::max() - 1. We need 1 byte for the null-terminator.
+                    return WireResult::FatalError;
+                }
+                size_t stringLength = static_cast<size_t>(stringLength64);
+
+                const volatile char* stringInBuffer;
+                WIRE_TRY(deserializeBuffer->ReadN(stringLength, &stringInBuffer));
+
+                char* copiedString;
+                WIRE_TRY(GetSpace(allocator, stringLength + 1, &copiedString));
+                //* We can cast away the volatile qualifier because DeserializeBuffer::ReadN already
+                //* validated that the range [stringInBuffer, stringInBuffer + stringLength) is valid.
+                //* memcpy may have an unknown access pattern, but this is fine since the string is only
+                //* data and won't affect control flow of this function.
+                memcpy(copiedString, const_cast<const char*>(stringInBuffer), stringLength);
+                copiedString[stringLength] = '\0';
+                record->{{memberName}} = copiedString;
+            }
+        {% endfor %}
+
+        //* Get extra buffer data, and copy pointed to values in extra allocated space.
+        {% for member in members if member.annotation != "value" and member.length != "strlen" %}
+            {{ assert(member.annotation != "const*const*") }}
+            {% set memberName = as_varName(member.name) %}
+
+            {% if member.type.category != "object" and member.optional %}
+                //* Non-constant length optional members use length=0 to denote they aren't present.
+                //* Otherwise we could have length=N and has_member=false, causing reads from an
+                //* uninitialized pointer.
+                {{ assert(member.length == "constant") }}
+                bool has_{{memberName}} = transfer->has_{{memberName}};
+                record->{{memberName}} = nullptr;
+                if (has_{{memberName}})
+            {% endif %}
+            {
+                auto memberLength = {{member_length(member, "record->")}};
+                const volatile {{member_transfer_type(member)}}* memberBuffer;
+                WIRE_TRY(deserializeBuffer->ReadN(memberLength, &memberBuffer));
+
+                //* For data-only members (e.g. "data" in WriteBuffer and WriteTexture), they are
+                //* not security sensitive so we can directly refer the data inside the transfer
+                //* buffer in dawn_native. For other members, as prevention of TOCTOU attacks is an
+                //* important feature of the wire, we must make sure every single value returned to
+                //* dawn_native must be a copy of what's in the wire.
+                {% if member.json_data["wire_is_data_only"] %}
+                    record->{{memberName}} =
+                        const_cast<const {{member_transfer_type(member)}}*>(memberBuffer);
+
+                {% else %}
+                    {{as_cType(member.type.name)}}* copiedMembers;
+                    WIRE_TRY(GetSpace(allocator, memberLength, &copiedMembers));
+                    record->{{memberName}} = copiedMembers;
+
+                    {% if member.type.is_wire_transparent %}
+                        //* memcpy is not allowed to copy from volatile objects. However, these
+                        //* arrays are just used as plain data, and don't impact control flow. So if
+                        //* the underlying data were changed while the copy was still executing, we
+                        //* would get different data - but it wouldn't cause unexpected downstream
+                        //* effects.
+                        memcpy(
+                            copiedMembers,
+                            const_cast<const {{member_transfer_type(member)}}*>(memberBuffer),
+                           {{member_transfer_sizeof(member)}} * memberLength);
+                    {% else %}
+                        //* This loop cannot overflow because it iterates up to |memberLength|. Even
+                        //* if memberLength were the maximum integer value, |i| would become equal
+                        //* to it just before exiting the loop, but not increment past or wrap
+                        //* around.
+                        for (decltype(memberLength) i = 0; i < memberLength; ++i) {
+                            {{deserialize_member(member, "memberBuffer[i]", "copiedMembers[i]")}}
+                        }
+                    {% endif %}
+                {% endif %}
+            }
+        {% endfor %}
+
+        return WireResult::Success;
+    }
+    DAWN_UNUSED_FUNC({{Return}}{{name}}Deserialize);
+{% endmacro %}
+
+{% macro write_command_serialization_methods(command, is_return) %}
+    {% set Return = "Return" if is_return else "" %}
+    {% set Name = Return + command.name.CamelCase() %}
+    {% set Cmd = Name + "Cmd" %}
+
+    size_t {{Cmd}}::GetRequiredSize() const {
+        size_t size = sizeof({{Name}}Transfer) + {{Name}}GetExtraRequiredSize(*this);
+        return size;
+    }
+
+    {% if command.may_have_dawn_object %}
+        WireResult {{Cmd}}::Serialize(
+            size_t commandSize,
+            SerializeBuffer* buffer,
+            const ObjectIdProvider& provider
+        ) const {
+            {{Name}}Transfer* transfer;
+            WIRE_TRY(buffer->Next(&transfer));
+            transfer->commandSize = commandSize;
+            return ({{Name}}Serialize(*this, transfer, buffer, provider));
+        }
+        WireResult {{Cmd}}::Serialize(size_t commandSize, SerializeBuffer* buffer) const {
+            ErrorObjectIdProvider provider;
+            return Serialize(commandSize, buffer, provider);
+        }
+
+        WireResult {{Cmd}}::Deserialize(
+            DeserializeBuffer* deserializeBuffer,
+            DeserializeAllocator* allocator,
+            const ObjectIdResolver& resolver
+        ) {
+            const volatile {{Name}}Transfer* transfer;
+            WIRE_TRY(deserializeBuffer->Read(&transfer));
+            return {{Name}}Deserialize(this, transfer, deserializeBuffer, allocator, resolver);
+        }
+        WireResult {{Cmd}}::Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator) {
+            ErrorObjectIdResolver resolver;
+            return Deserialize(deserializeBuffer, allocator, resolver);
+        }
+    {% else %}
+        WireResult {{Cmd}}::Serialize(size_t commandSize, SerializeBuffer* buffer) const {
+            {{Name}}Transfer* transfer;
+            WIRE_TRY(buffer->Next(&transfer));
+            transfer->commandSize = commandSize;
+            return ({{Name}}Serialize(*this, transfer, buffer));
+        }
+        WireResult {{Cmd}}::Serialize(
+            size_t commandSize,
+            SerializeBuffer* buffer,
+            const ObjectIdProvider&
+        ) const {
+            return Serialize(commandSize, buffer);
+        }
+
+        WireResult {{Cmd}}::Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator) {
+            const volatile {{Name}}Transfer* transfer;
+            WIRE_TRY(deserializeBuffer->Read(&transfer));
+            return {{Name}}Deserialize(this, transfer, deserializeBuffer, allocator);
+        }
+        WireResult {{Cmd}}::Deserialize(
+            DeserializeBuffer* deserializeBuffer,
+            DeserializeAllocator* allocator,
+            const ObjectIdResolver&
+        ) {
+            return Deserialize(deserializeBuffer, allocator);
+        }
+    {% endif %}
+{% endmacro %}
+
+{% macro make_chained_struct_serialization_helpers(out=None) %}
+        {% set ChainedStructPtr = "WGPUChainedStructOut*" if out else "const WGPUChainedStruct*" %}
+        {% set ChainedStruct = "WGPUChainedStructOut" if out else "WGPUChainedStruct" %}
+        size_t GetChainedStructExtraRequiredSize({{ChainedStructPtr}} chainedStruct) {
+            ASSERT(chainedStruct != nullptr);
+            size_t result = 0;
+            while (chainedStruct != nullptr) {
+                switch (chainedStruct->sType) {
+                    {% for sType in types["s type"].values if (
+                            sType.valid and
+                            (sType.name.CamelCase() not in client_side_structures) and
+                            (types[sType.name.get()].output == out)
+                    ) %}
+                        case {{as_cEnum(types["s type"].name, sType.name)}}: {
+                            const auto& typedStruct = *reinterpret_cast<{{as_cType(sType.name)}} const *>(chainedStruct);
+                            result += sizeof({{as_cType(sType.name)}}Transfer);
+                            result += {{as_cType(sType.name)}}GetExtraRequiredSize(typedStruct);
+                            chainedStruct = typedStruct.chain.next;
+                            break;
+                        }
+                    {% endfor %}
+                    // Explicitly list the Invalid enum. MSVC complains about no case labels.
+                    case WGPUSType_Invalid:
+                    default:
+                        // Invalid enum. Reserve space just for the transfer header (sType and hasNext).
+                        result += sizeof(WGPUChainedStructTransfer);
+                        chainedStruct = chainedStruct->next;
+                        break;
+                }
+            }
+            return result;
+        }
+
+        [[nodiscard]] WireResult SerializeChainedStruct({{ChainedStructPtr}} chainedStruct,
+                                                          SerializeBuffer* buffer,
+                                                          const ObjectIdProvider& provider) {
+            ASSERT(chainedStruct != nullptr);
+            ASSERT(buffer != nullptr);
+            do {
+                switch (chainedStruct->sType) {
+                    {% for sType in types["s type"].values if (
+                            sType.valid and
+                            (sType.name.CamelCase() not in client_side_structures) and
+                            (types[sType.name.get()].output == out)
+                    ) %}
+                        {% set CType = as_cType(sType.name) %}
+                        case {{as_cEnum(types["s type"].name, sType.name)}}: {
+
+                            {{CType}}Transfer* transfer;
+                            WIRE_TRY(buffer->Next(&transfer));
+                            transfer->chain.sType = chainedStruct->sType;
+                            transfer->chain.hasNext = chainedStruct->next != nullptr;
+
+                            WIRE_TRY({{CType}}Serialize(*reinterpret_cast<{{CType}} const*>(chainedStruct), transfer, buffer
+                                {%- if types[sType.name.get()].may_have_dawn_object -%}
+                                , provider
+                                {%- endif -%}
+                            ));
+
+                            chainedStruct = chainedStruct->next;
+                        } break;
+                    {% endfor %}
+                    // Explicitly list the Invalid enum. MSVC complains about no case labels.
+                    case WGPUSType_Invalid:
+                    default: {
+                        // Invalid enum. Serialize just the transfer header with Invalid as the sType.
+                        // TODO(crbug.com/dawn/369): Unknown sTypes are silently discarded.
+                        if (chainedStruct->sType != WGPUSType_Invalid) {
+                            dawn::WarningLog() << "Unknown sType " << chainedStruct->sType << " discarded.";
+                        }
+
+                        WGPUChainedStructTransfer* transfer;
+                        WIRE_TRY(buffer->Next(&transfer));
+                        transfer->sType = WGPUSType_Invalid;
+                        transfer->hasNext = chainedStruct->next != nullptr;
+
+                        // Still move on in case there are valid structs after this.
+                        chainedStruct = chainedStruct->next;
+                        break;
+                    }
+                }
+            } while (chainedStruct != nullptr);
+            return WireResult::Success;
+        }
+
+        WireResult DeserializeChainedStruct({{ChainedStructPtr}}* outChainNext,
+                                            DeserializeBuffer* deserializeBuffer,
+                                            DeserializeAllocator* allocator,
+                                            const ObjectIdResolver& resolver) {
+            bool hasNext;
+            do {
+                const volatile WGPUChainedStructTransfer* header;
+                WIRE_TRY(deserializeBuffer->Peek(&header));
+                WGPUSType sType = header->sType;
+                switch (sType) {
+                    {% for sType in types["s type"].values if (
+                            sType.valid and
+                            (sType.name.CamelCase() not in client_side_structures) and
+                            (types[sType.name.get()].output == out)
+                    ) %}
+                        {% set CType = as_cType(sType.name) %}
+                        case {{as_cEnum(types["s type"].name, sType.name)}}: {
+                            const volatile {{CType}}Transfer* transfer;
+                            WIRE_TRY(deserializeBuffer->Read(&transfer));
+
+                            {{CType}}* outStruct;
+                            WIRE_TRY(GetSpace(allocator, sizeof({{CType}}), &outStruct));
+                            outStruct->chain.sType = sType;
+                            outStruct->chain.next = nullptr;
+
+                            *outChainNext = &outStruct->chain;
+                            outChainNext = &outStruct->chain.next;
+
+                            WIRE_TRY({{CType}}Deserialize(outStruct, transfer, deserializeBuffer, allocator
+                                {%- if types[sType.name.get()].may_have_dawn_object -%}
+                                    , resolver
+                                {%- endif -%}
+                            ));
+
+                            hasNext = transfer->chain.hasNext;
+                        } break;
+                    {% endfor %}
+                    // Explicitly list the Invalid enum. MSVC complains about no case labels.
+                    case WGPUSType_Invalid:
+                    default: {
+                        // Invalid enum. Deserialize just the transfer header with Invalid as the sType.
+                        // TODO(crbug.com/dawn/369): Unknown sTypes are silently discarded.
+                        if (sType != WGPUSType_Invalid) {
+                            dawn::WarningLog() << "Unknown sType " << sType << " discarded.";
+                        }
+
+                        const volatile WGPUChainedStructTransfer* transfer;
+                        WIRE_TRY(deserializeBuffer->Read(&transfer));
+
+                        {{ChainedStruct}}* outStruct;
+                        WIRE_TRY(GetSpace(allocator, sizeof({{ChainedStruct}}), &outStruct));
+                        outStruct->sType = WGPUSType_Invalid;
+                        outStruct->next = nullptr;
+
+                        // Still move on in case there are valid structs after this.
+                        *outChainNext = outStruct;
+                        outChainNext = &outStruct->next;
+                        hasNext = transfer->hasNext;
+                        break;
+                    }
+                }
+            } while (hasNext);
+
+            return WireResult::Success;
+        }
+{% endmacro %}
+
+namespace dawn::wire {
+
+    ObjectHandle::ObjectHandle() = default;
+    ObjectHandle::ObjectHandle(ObjectId id, ObjectGeneration generation)
+        : id(id), generation(generation) {
+    }
+
+    ObjectHandle::ObjectHandle(const volatile ObjectHandle& rhs)
+        : id(rhs.id), generation(rhs.generation) {
+    }
+    ObjectHandle& ObjectHandle::operator=(const volatile ObjectHandle& rhs) {
+        id = rhs.id;
+        generation = rhs.generation;
+        return *this;
+    }
+
+    ObjectHandle& ObjectHandle::AssignFrom(const ObjectHandle& rhs) {
+        id = rhs.id;
+        generation = rhs.generation;
+        return *this;
+    }
+    ObjectHandle& ObjectHandle::AssignFrom(const volatile ObjectHandle& rhs) {
+        id = rhs.id;
+        generation = rhs.generation;
+        return *this;
+    }
+
+    namespace {
+        // Allocates enough space from allocator to countain T[count] and return it in out.
+        // Return FatalError if the allocator couldn't allocate the memory.
+        // Always writes to |out| on success.
+        template <typename T, typename N>
+        WireResult GetSpace(DeserializeAllocator* allocator, N count, T** out) {
+            constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+            if (count > kMaxCountWithoutOverflows) {
+                return WireResult::FatalError;
+            }
+
+            size_t totalSize = sizeof(T) * count;
+            *out = static_cast<T*>(allocator->GetSpace(totalSize));
+            if (*out == nullptr) {
+                return WireResult::FatalError;
+            }
+
+            return WireResult::Success;
+        }
+
+        struct WGPUChainedStructTransfer {
+            WGPUSType sType;
+            bool hasNext;
+        };
+
+        size_t GetChainedStructExtraRequiredSize(const WGPUChainedStruct* chainedStruct);
+        [[nodiscard]] WireResult SerializeChainedStruct(const WGPUChainedStruct* chainedStruct,
+                                                          SerializeBuffer* buffer,
+                                                          const ObjectIdProvider& provider);
+        WireResult DeserializeChainedStruct(const WGPUChainedStruct** outChainNext,
+                                            DeserializeBuffer* deserializeBuffer,
+                                            DeserializeAllocator* allocator,
+                                            const ObjectIdResolver& resolver);
+
+        size_t GetChainedStructExtraRequiredSize(WGPUChainedStructOut* chainedStruct);
+        [[nodiscard]] WireResult SerializeChainedStruct(WGPUChainedStructOut* chainedStruct,
+                                                          SerializeBuffer* buffer,
+                                                          const ObjectIdProvider& provider);
+        WireResult DeserializeChainedStruct(WGPUChainedStructOut** outChainNext,
+                                            DeserializeBuffer* deserializeBuffer,
+                                            DeserializeAllocator* allocator,
+                                            const ObjectIdResolver& resolver);
+
+        //* Output structure [de]serialization first because it is used by commands.
+        {% for type in by_category["structure"] %}
+            {% set name = as_cType(type.name) %}
+            {% if type.name.CamelCase() not in client_side_structures %}
+                {{write_record_serialization_helpers(type, name, type.members, is_cmd=False)}}
+            {% endif %}
+        {% endfor %}
+
+
+        {{ make_chained_struct_serialization_helpers(out=False) }}
+        {{ make_chained_struct_serialization_helpers(out=True) }}
+
+        //* Output [de]serialization helpers for commands
+        {% for command in cmd_records["command"] %}
+            {% set name = command.name.CamelCase() %}
+            {{write_record_serialization_helpers(command, name, command.members, is_cmd=True)}}
+        {% endfor %}
+
+        //* Output [de]serialization helpers for return commands
+        {% for command in cmd_records["return command"] %}
+            {% set name = command.name.CamelCase() %}
+            {{write_record_serialization_helpers(command, name, command.members,
+                                                 is_cmd=True, is_return_command=True)}}
+        {% endfor %}
+
+        // Implementation of ObjectIdResolver that always errors.
+        // Used when the generator adds a provider argument because of a chained
+        // struct, but in practice, a chained struct in that location is invalid.
+        class ErrorObjectIdResolver final : public ObjectIdResolver {
+            public:
+                {% for type in by_category["object"] %}
+                    WireResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const override {
+                        return WireResult::FatalError;
+                    }
+                    WireResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const override {
+                        return WireResult::FatalError;
+                    }
+                {% endfor %}
+        };
+
+        // Implementation of ObjectIdProvider that always errors.
+        // Used when the generator adds a provider argument because of a chained
+        // struct, but in practice, a chained struct in that location is invalid.
+        class ErrorObjectIdProvider final : public ObjectIdProvider {
+            public:
+                {% for type in by_category["object"] %}
+                    WireResult GetId({{as_cType(type.name)}} object, ObjectId* out) const override {
+                        return WireResult::FatalError;
+                    }
+                    WireResult GetOptionalId({{as_cType(type.name)}} object, ObjectId* out) const override {
+                        return WireResult::FatalError;
+                    }
+                {% endfor %}
+        };
+
+    }  // anonymous namespace
+
+    {% for command in cmd_records["command"] %}
+        {{ write_command_serialization_methods(command, False) }}
+    {% endfor %}
+
+    {% for command in cmd_records["return command"] %}
+        {{ write_command_serialization_methods(command, True) }}
+    {% endfor %}
+
+    // Implementations of serialization/deserialization of WPGUDeviceProperties.
+    size_t SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties) {
+        return sizeof(WGPUDeviceProperties) +
+               WGPUDevicePropertiesGetExtraRequiredSize(*deviceProperties);
+    }
+
+    void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
+                                       char* buffer) {
+        SerializeBuffer serializeBuffer(buffer, SerializedWGPUDevicePropertiesSize(deviceProperties));
+
+        WGPUDevicePropertiesTransfer* transfer;
+
+        WireResult result = serializeBuffer.Next(&transfer);
+        ASSERT(result == WireResult::Success);
+
+        ErrorObjectIdProvider provider;
+        result = WGPUDevicePropertiesSerialize(*deviceProperties, transfer, &serializeBuffer, provider);
+        ASSERT(result == WireResult::Success);
+    }
+
+    bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
+                                         const volatile char* buffer,
+                                         size_t size) {
+        const volatile WGPUDevicePropertiesTransfer* transfer;
+        DeserializeBuffer deserializeBuffer(buffer, size);
+        if (deserializeBuffer.Read(&transfer) != WireResult::Success) {
+            return false;
+        }
+
+        ErrorObjectIdResolver resolver;
+        return WGPUDevicePropertiesDeserialize(deviceProperties, transfer, &deserializeBuffer,
+                                               nullptr, resolver) == WireResult::Success;
+    }
+
+    size_t SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits) {
+        return sizeof(WGPUSupportedLimits) +
+               WGPUSupportedLimitsGetExtraRequiredSize(*supportedLimits);
+    }
+
+    void SerializeWGPUSupportedLimits(
+        const WGPUSupportedLimits* supportedLimits,
+        char* buffer) {
+        SerializeBuffer serializeBuffer(buffer, SerializedWGPUSupportedLimitsSize(supportedLimits));
+
+        WGPUSupportedLimitsTransfer* transfer;
+
+        WireResult result = serializeBuffer.Next(&transfer);
+        ASSERT(result == WireResult::Success);
+
+        ErrorObjectIdProvider provider;
+        result = WGPUSupportedLimitsSerialize(*supportedLimits, transfer, &serializeBuffer, provider);
+        ASSERT(result == WireResult::Success);
+    }
+
+    bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
+                                        const volatile char* buffer,
+                                        size_t size) {
+        const volatile WGPUSupportedLimitsTransfer* transfer;
+        DeserializeBuffer deserializeBuffer(buffer, size);
+        if (deserializeBuffer.Read(&transfer) != WireResult::Success) {
+            return false;
+        }
+
+        ErrorObjectIdResolver resolver;
+        return WGPUSupportedLimitsDeserialize(supportedLimits, transfer, &deserializeBuffer,
+                                              nullptr, resolver) == WireResult::Success;
+    }
+
+}  // namespace dawn::wire
diff --git a/generator/templates/dawn/wire/WireCmd.h b/generator/templates/dawn/wire/WireCmd.h
new file mode 100644
index 0000000..f8c2762
--- /dev/null
+++ b/generator/templates/dawn/wire/WireCmd.h
@@ -0,0 +1,138 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNWIRE_WIRECMD_AUTOGEN_H_
+#define DAWNWIRE_WIRECMD_AUTOGEN_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/BufferConsumer.h"
+#include "dawn/wire/ObjectType_autogen.h"
+#include "dawn/wire/WireResult.h"
+
+namespace dawn::wire {
+
+    using ObjectId = uint32_t;
+    using ObjectGeneration = uint32_t;
+    struct ObjectHandle {
+      ObjectId id;
+      ObjectGeneration generation;
+
+      ObjectHandle();
+      ObjectHandle(ObjectId id, ObjectGeneration generation);
+
+      ObjectHandle(const volatile ObjectHandle& rhs);
+      ObjectHandle& operator=(const volatile ObjectHandle& rhs);
+
+      // MSVC has a bug where it thinks the volatile copy assignment is a duplicate.
+      // Workaround this by forwarding to a different function AssignFrom.
+      template <typename T>
+      ObjectHandle& operator=(const T& rhs) {
+          return AssignFrom(rhs);
+      }
+      ObjectHandle& AssignFrom(const ObjectHandle& rhs);
+      ObjectHandle& AssignFrom(const volatile ObjectHandle& rhs);
+    };
+
+    // Interface to allocate more space to deserialize pointed-to data.
+    // nullptr is treated as an error.
+    class DeserializeAllocator {
+        public:
+            virtual void* GetSpace(size_t size) = 0;
+    };
+
+    // Interface to convert an ID to a server object, if possible.
+    // Methods return FatalError if the ID is for a non-existent object and Success otherwise.
+    class ObjectIdResolver {
+        public:
+            {% for type in by_category["object"] %}
+                virtual WireResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
+                virtual WireResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
+            {% endfor %}
+    };
+
+    // Interface to convert a client object to its ID for the wiring.
+    class ObjectIdProvider {
+        public:
+            {% for type in by_category["object"] %}
+                virtual WireResult GetId({{as_cType(type.name)}} object, ObjectId* out) const = 0;
+                virtual WireResult GetOptionalId({{as_cType(type.name)}} object, ObjectId* out) const = 0;
+            {% endfor %}
+    };
+
+    //* Enum used as a prefix to each command on the wire format.
+    enum class WireCmd : uint32_t {
+        {% for command in cmd_records["command"] %}
+            {{command.name.CamelCase()}},
+        {% endfor %}
+    };
+
+    //* Enum used as a prefix to each command on the return wire format.
+    enum class ReturnWireCmd : uint32_t {
+        {% for command in cmd_records["return command"] %}
+            {{command.name.CamelCase()}},
+        {% endfor %}
+    };
+
+    struct CmdHeader {
+        uint64_t commandSize;
+    };
+
+{% macro write_command_struct(command, is_return_command) %}
+    {% set Return = "Return" if is_return_command else "" %}
+    {% set Cmd = command.name.CamelCase() + "Cmd" %}
+    struct {{Return}}{{Cmd}} {
+        //* From a filled structure, compute how much size will be used in the serialization buffer.
+        size_t GetRequiredSize() const;
+
+        //* Serialize the structure and everything it points to into serializeBuffer which must be
+        //* big enough to contain all the data (as queried from GetRequiredSize).
+        WireResult Serialize(size_t commandSize, SerializeBuffer* serializeBuffer, const ObjectIdProvider& objectIdProvider) const;
+        // Override which produces a FatalError if any object is used.
+        WireResult Serialize(size_t commandSize, SerializeBuffer* serializeBuffer) const;
+
+        //* Deserializes the structure from a buffer, consuming a maximum of *size bytes. When this
+        //* function returns, buffer and size will be updated by the number of bytes consumed to
+        //* deserialize the structure. Structures containing pointers will use allocator to get
+        //* scratch space to deserialize the pointed-to data.
+        //* Deserialize returns:
+        //*  - Success if everything went well (yay!)
+        //*  - FatalError is something bad happened (buffer too small for example)
+        WireResult Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator, const ObjectIdResolver& resolver);
+        // Override which produces a FatalError if any object is used.
+        WireResult Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator);
+
+        {% if command.derived_method %}
+            //* Command handlers want to know the object ID in addition to the backing object.
+            //* Doesn't need to be filled before Serialize, or GetRequiredSize.
+            ObjectId selfId;
+        {% endif %}
+
+        {% for member in command.members %}
+            {{as_annotated_cType(member)}};
+        {% endfor %}
+    };
+{% endmacro %}
+
+    {% for command in cmd_records["command"] %}
+        {{write_command_struct(command, False)}}
+    {% endfor %}
+
+    {% for command in cmd_records["return command"] %}
+        {{write_command_struct(command, True)}}
+    {% endfor %}
+
+}  // namespace dawn::wire
+
+#endif // DAWNWIRE_WIRECMD_AUTOGEN_H_
diff --git a/generator/templates/dawn/wire/client/ApiObjects.h b/generator/templates/dawn/wire/client/ApiObjects.h
new file mode 100644
index 0000000..8c1729d
--- /dev/null
+++ b/generator/templates/dawn/wire/client/ApiObjects.h
@@ -0,0 +1,53 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_APIOBJECTS_AUTOGEN_H_
+#define DAWNWIRE_CLIENT_APIOBJECTS_AUTOGEN_H_
+
+#include "dawn/wire/ObjectType_autogen.h"
+#include "dawn/wire/client/ObjectBase.h"
+
+namespace dawn::wire::client {
+
+    template <typename T>
+    struct ObjectTypeToTypeEnum {
+        static constexpr ObjectType value = static_cast<ObjectType>(-1);
+    };
+
+    {% for type in by_category["object"] %}
+        {% set Type = type.name.CamelCase() %}
+        {% if type.name.CamelCase() in client_special_objects %}
+            class {{Type}};
+        {% else %}
+            struct {{type.name.CamelCase()}} final : ObjectBase {
+                using ObjectBase::ObjectBase;
+            };
+        {% endif %}
+
+        inline {{Type}}* FromAPI(WGPU{{Type}} obj) {
+            return reinterpret_cast<{{Type}}*>(obj);
+        }
+        inline WGPU{{Type}} ToAPI({{Type}}* obj) {
+            return reinterpret_cast<WGPU{{Type}}>(obj);
+        }
+
+        template <>
+        struct ObjectTypeToTypeEnum<{{Type}}> {
+            static constexpr ObjectType value = ObjectType::{{Type}};
+        };
+
+    {% endfor %}
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_APIOBJECTS_AUTOGEN_H_
diff --git a/generator/templates/dawn/wire/client/ApiProcs.cpp b/generator/templates/dawn/wire/client/ApiProcs.cpp
new file mode 100644
index 0000000..d6e5279
--- /dev/null
+++ b/generator/templates/dawn/wire/client/ApiProcs.cpp
@@ -0,0 +1,178 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/common/Log.h"
+#include "dawn/wire/client/ApiObjects.h"
+#include "dawn/wire/client/Client.h"
+
+#include <algorithm>
+#include <cstring>
+#include <string>
+#include <vector>
+
+namespace dawn::wire::client {
+
+    //* Outputs an rvalue that's the number of elements a pointer member points to.
+    {% macro member_length(member, accessor) -%}
+        {%- if member.length == "constant" -%}
+            {{member.constant_length}}
+        {%- else -%}
+            {{accessor}}{{as_varName(member.length.name)}}
+        {%- endif -%}
+    {%- endmacro %}
+
+    //* Implementation of the client API functions.
+    {% for type in by_category["object"] %}
+        {% set Type = type.name.CamelCase() %}
+        {% set cType = as_cType(type.name) %}
+
+        {% for method in type.methods %}
+            {% set Suffix = as_MethodSuffix(type.name, method.name) %}
+
+            {% if Suffix in client_handwritten_commands %}
+                static
+            {% endif %}
+            {{as_cType(method.return_type.name)}} Client{{Suffix}}(
+                {{-cType}} cSelf
+                {%- for arg in method.arguments -%}
+                    , {{as_annotated_cType(arg)}}
+                {%- endfor -%}
+            ) {
+                auto self = reinterpret_cast<{{as_wireType(type)}}>(cSelf);
+                {% if Suffix not in client_handwritten_commands %}
+                    {{Suffix}}Cmd cmd;
+
+                    //* Create the structure going on the wire on the stack and fill it with the value
+                    //* arguments so it can compute its size.
+                    cmd.self = cSelf;
+
+                    //* For object creation, store the object ID the client will use for the result.
+                    {% if method.return_type.category == "object" %}
+                        auto* allocation = self->client->{{method.return_type.name.CamelCase()}}Allocator().New(self->client);
+                        cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+                    {% endif %}
+
+                    {% for arg in method.arguments %}
+                        //* Commands with mutable pointers should not be autogenerated.
+                        {{assert(arg.annotation != "*")}}
+                        cmd.{{as_varName(arg.name)}} = {{as_varName(arg.name)}};
+                    {% endfor %}
+
+                    //* Allocate space to send the command and copy the value args over.
+                    self->client->SerializeCommand(cmd);
+
+                    {% if method.return_type.category == "object" %}
+                        return reinterpret_cast<{{as_cType(method.return_type.name)}}>(allocation->object.get());
+                    {% endif %}
+                {% else %}
+                    return self->{{method.name.CamelCase()}}(
+                        {%- for arg in method.arguments -%}
+                            {%if not loop.first %}, {% endif %} {{as_varName(arg.name)}}
+                        {%- endfor -%});
+                {% endif %}
+            }
+        {% endfor %}
+
+        //* When an object's refcount reaches 0, notify the server side of it and delete it.
+        void Client{{as_MethodSuffix(type.name, Name("release"))}}({{cType}} cObj) {
+            {{Type}}* obj = reinterpret_cast<{{Type}}*>(cObj);
+            obj->refcount --;
+
+            if (obj->refcount > 0) {
+                return;
+            }
+
+            DestroyObjectCmd cmd;
+            cmd.objectType = ObjectType::{{type.name.CamelCase()}};
+            cmd.objectId = obj->id;
+
+            obj->client->SerializeCommand(cmd);
+            obj->client->{{type.name.CamelCase()}}Allocator().Free(obj);
+        }
+
+        void Client{{as_MethodSuffix(type.name, Name("reference"))}}({{cType}} cObj) {
+            {{Type}}* obj = reinterpret_cast<{{Type}}*>(cObj);
+            obj->refcount ++;
+        }
+    {% endfor %}
+
+    namespace {
+        WGPUInstance ClientCreateInstance(WGPUInstanceDescriptor const* descriptor) {
+            UNREACHABLE();
+            return nullptr;
+        }
+
+        struct ProcEntry {
+            WGPUProc proc;
+            const char* name;
+        };
+        static const ProcEntry sProcMap[] = {
+            {% for (type, method) in c_methods_sorted_by_name %}
+                { reinterpret_cast<WGPUProc>(Client{{as_MethodSuffix(type.name, method.name)}}), "{{as_cMethod(type.name, method.name)}}" },
+            {% endfor %}
+        };
+        static constexpr size_t sProcMapSize = sizeof(sProcMap) / sizeof(sProcMap[0]);
+    }  // anonymous namespace
+
+    WGPUProc ClientGetProcAddress(WGPUDevice, const char* procName) {
+        if (procName == nullptr) {
+            return nullptr;
+        }
+
+        const ProcEntry* entry = std::lower_bound(&sProcMap[0], &sProcMap[sProcMapSize], procName,
+            [](const ProcEntry &a, const char *b) -> bool {
+                return strcmp(a.name, b) < 0;
+            }
+        );
+
+        if (entry != &sProcMap[sProcMapSize] && strcmp(entry->name, procName) == 0) {
+            return entry->proc;
+        }
+
+        // Special case the two free-standing functions of the API.
+        if (strcmp(procName, "wgpuGetProcAddress") == 0) {
+            return reinterpret_cast<WGPUProc>(ClientGetProcAddress);
+        }
+
+        if (strcmp(procName, "wgpuCreateInstance") == 0) {
+            return reinterpret_cast<WGPUProc>(ClientCreateInstance);
+        }
+
+        return nullptr;
+    }
+
+    std::vector<const char*> GetProcMapNamesForTesting() {
+        std::vector<const char*> result;
+        result.reserve(sProcMapSize);
+        for (const ProcEntry& entry : sProcMap) {
+            result.push_back(entry.name);
+        }
+        return result;
+    }
+
+    {% set Prefix = metadata.proc_table_prefix %}
+    static {{Prefix}}ProcTable gProcTable = {
+        {% for function in by_category["function"] %}
+            Client{{as_cppType(function.name)}},
+        {% endfor %}
+        {% for type in by_category["object"] %}
+            {% for method in c_methods(type) %}
+                Client{{as_MethodSuffix(type.name, method.name)}},
+            {% endfor %}
+        {% endfor %}
+    };
+    const {{Prefix}}ProcTable& GetProcs() {
+        return gProcTable;
+    }
+}  // namespace dawn::wire::client
diff --git a/generator/templates/dawn/wire/client/ClientBase.h b/generator/templates/dawn/wire/client/ClientBase.h
new file mode 100644
index 0000000..0f9cbfe
--- /dev/null
+++ b/generator/templates/dawn/wire/client/ClientBase.h
@@ -0,0 +1,74 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_CLIENTBASE_AUTOGEN_H_
+#define DAWNWIRE_CLIENT_CLIENTBASE_AUTOGEN_H_
+
+#include "dawn/wire/ChunkedCommandHandler.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/ApiObjects.h"
+#include "dawn/wire/client/ObjectAllocator.h"
+
+namespace dawn::wire::client {
+
+    class ClientBase : public ChunkedCommandHandler, public ObjectIdProvider {
+      public:
+        ClientBase() = default;
+        virtual ~ClientBase() = default;
+
+        {% for type in by_category["object"] %}
+            const ObjectAllocator<{{type.name.CamelCase()}}>& {{type.name.CamelCase()}}Allocator() const {
+                return m{{type.name.CamelCase()}}Allocator;
+            }
+            ObjectAllocator<{{type.name.CamelCase()}}>& {{type.name.CamelCase()}}Allocator() {
+                return m{{type.name.CamelCase()}}Allocator;
+            }
+        {% endfor %}
+
+        void FreeObject(ObjectType objectType, ObjectBase* obj) {
+            switch (objectType) {
+                {% for type in by_category["object"] %}
+                    case ObjectType::{{type.name.CamelCase()}}:
+                        m{{type.name.CamelCase()}}Allocator.Free(static_cast<{{type.name.CamelCase()}}*>(obj));
+                        break;
+                {% endfor %}
+            }
+        }
+
+      private:
+        // Implementation of the ObjectIdProvider interface
+        {% for type in by_category["object"] %}
+            WireResult GetId({{as_cType(type.name)}} object, ObjectId* out) const final {
+                ASSERT(out != nullptr);
+                if (object == nullptr) {
+                    return WireResult::FatalError;
+                }
+                *out = reinterpret_cast<{{as_wireType(type)}}>(object)->id;
+                return WireResult::Success;
+            }
+            WireResult GetOptionalId({{as_cType(type.name)}} object, ObjectId* out) const final {
+                ASSERT(out != nullptr);
+                *out = (object == nullptr ? 0 : reinterpret_cast<{{as_wireType(type)}}>(object)->id);
+                return WireResult::Success;
+            }
+        {% endfor %}
+
+        {% for type in by_category["object"] %}
+            ObjectAllocator<{{type.name.CamelCase()}}> m{{type.name.CamelCase()}}Allocator;
+        {% endfor %}
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_CLIENTBASE_AUTOGEN_H_
diff --git a/generator/templates/dawn/wire/client/ClientHandlers.cpp b/generator/templates/dawn/wire/client/ClientHandlers.cpp
new file mode 100644
index 0000000..ace8475
--- /dev/null
+++ b/generator/templates/dawn/wire/client/ClientHandlers.cpp
@@ -0,0 +1,97 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/client/Client.h"
+
+#include <string>
+
+namespace dawn::wire::client {
+    {% for command in cmd_records["return command"] %}
+        bool Client::Handle{{command.name.CamelCase()}}(DeserializeBuffer* deserializeBuffer) {
+            Return{{command.name.CamelCase()}}Cmd cmd;
+            WireResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator);
+
+            if (deserializeResult == WireResult::FatalError) {
+                return false;
+            }
+
+            {% for member in command.members if member.handle_type %}
+                {% set Type = member.handle_type.name.CamelCase() %}
+                {% set name = as_varName(member.name) %}
+
+                {% if member.type.dict_name == "ObjectHandle" %}
+                    {{Type}}* {{name}} = {{Type}}Allocator().GetObject(cmd.{{name}}.id);
+                    uint32_t {{name}}Generation = {{Type}}Allocator().GetGeneration(cmd.{{name}}.id);
+                    if ({{name}}Generation != cmd.{{name}}.generation) {
+                        {{name}} = nullptr;
+                    }
+                {% endif %}
+            {% endfor %}
+
+            return Do{{command.name.CamelCase()}}(
+                {%- for member in command.members -%}
+                    {%- if member.handle_type -%}
+                        {{as_varName(member.name)}}
+                    {%- else -%}
+                        cmd.{{as_varName(member.name)}}
+                    {%- endif -%}
+                    {%- if not loop.last -%}, {% endif %}
+                {%- endfor -%}
+            );
+        }
+    {% endfor %}
+
+    const volatile char* Client::HandleCommandsImpl(const volatile char* commands, size_t size) {
+        DeserializeBuffer deserializeBuffer(commands, size);
+
+        while (deserializeBuffer.AvailableSize() >= sizeof(CmdHeader) + sizeof(ReturnWireCmd)) {
+            // Start by chunked command handling, if it is done, then it means the whole buffer
+            // was consumed by it, so we return a pointer to the end of the commands.
+            switch (HandleChunkedCommands(deserializeBuffer.Buffer(), deserializeBuffer.AvailableSize())) {
+                case ChunkedCommandsResult::Consumed:
+                    return commands + size;
+                case ChunkedCommandsResult::Error:
+                    return nullptr;
+                case ChunkedCommandsResult::Passthrough:
+                    break;
+            }
+
+            ReturnWireCmd cmdId = *static_cast<const volatile ReturnWireCmd*>(static_cast<const volatile void*>(
+                deserializeBuffer.Buffer() + sizeof(CmdHeader)));
+            bool success = false;
+            switch (cmdId) {
+                {% for command in cmd_records["return command"] %}
+                    {% set Suffix = command.name.CamelCase() %}
+                    case ReturnWireCmd::{{Suffix}}:
+                        success = Handle{{Suffix}}(&deserializeBuffer);
+                        break;
+                {% endfor %}
+                default:
+                    success = false;
+            }
+
+            if (!success) {
+                return nullptr;
+            }
+            mAllocator.Reset();
+        }
+
+        if (deserializeBuffer.AvailableSize() != 0) {
+            return nullptr;
+        }
+
+        return commands;
+    }
+}  // namespace dawn::wire::client
diff --git a/generator/templates/dawn/wire/client/ClientPrototypes.inc b/generator/templates/dawn/wire/client/ClientPrototypes.inc
new file mode 100644
index 0000000..3a5f62f
--- /dev/null
+++ b/generator/templates/dawn/wire/client/ClientPrototypes.inc
@@ -0,0 +1,32 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+//* Return command handlers
+{% for command in cmd_records["return command"] %}
+    bool Handle{{command.name.CamelCase()}}(DeserializeBuffer* deserializeBuffer);
+{% endfor %}
+
+//* Return command doers
+{% for command in cmd_records["return command"] %}
+    bool Do{{command.name.CamelCase()}}(
+        {%- for member in command.members -%}
+            {%- if member.handle_type -%}
+                {{as_wireType(member.handle_type)}} {{as_varName(member.name)}}
+            {%- else -%}
+                {{as_annotated_wireType(member)}}
+            {%- endif -%}
+            {%- if not loop.last -%}, {% endif %}
+        {%- endfor -%}
+    );
+{% endfor %}
diff --git a/generator/templates/dawn/wire/server/ServerBase.h b/generator/templates/dawn/wire/server/ServerBase.h
new file mode 100644
index 0000000..8fef34a
--- /dev/null
+++ b/generator/templates/dawn/wire/server/ServerBase.h
@@ -0,0 +1,105 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_SERVERBASE_H_
+#define DAWNWIRE_SERVER_SERVERBASE_H_
+
+#include "dawn/dawn_proc_table.h"
+#include "dawn/wire/ChunkedCommandHandler.h"
+#include "dawn/wire/Wire.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireDeserializeAllocator.h"
+#include "dawn/wire/server/ObjectStorage.h"
+
+namespace dawn::wire::server {
+
+    class ServerBase : public ChunkedCommandHandler, public ObjectIdResolver {
+      public:
+        ServerBase() = default;
+        virtual ~ServerBase() = default;
+
+      protected:
+        void DestroyAllObjects(const DawnProcTable& procs) {
+            //* Free all objects when the server is destroyed
+            {% for type in by_category["object"] if type.name.get() != "device" %}
+                {
+                    std::vector<{{as_cType(type.name)}}> handles = mKnown{{type.name.CamelCase()}}.AcquireAllHandles();
+                    for ({{as_cType(type.name)}} handle : handles) {
+                        procs.{{as_varName(type.name, Name("release"))}}(handle);
+                    }
+                }
+            {% endfor %}
+            //* Release devices last because dawn_native requires this.
+            {
+                std::vector<WGPUDevice> handles = mKnownDevice.AcquireAllHandles();
+                for (WGPUDevice handle : handles) {
+                    procs.deviceRelease(handle);
+                }
+            }
+        }
+
+        {% for type in by_category["object"] %}
+            const KnownObjects<{{as_cType(type.name)}}>& {{type.name.CamelCase()}}Objects() const {
+                return mKnown{{type.name.CamelCase()}};
+            }
+            KnownObjects<{{as_cType(type.name)}}>& {{type.name.CamelCase()}}Objects() {
+                return mKnown{{type.name.CamelCase()}};
+            }
+        {% endfor %}
+
+        {% for type in by_category["object"] if type.name.CamelCase() in server_reverse_lookup_objects %}
+            const ObjectIdLookupTable<{{as_cType(type.name)}}>& {{type.name.CamelCase()}}ObjectIdTable() const {
+                return m{{type.name.CamelCase()}}IdTable;
+            }
+            ObjectIdLookupTable<{{as_cType(type.name)}}>& {{type.name.CamelCase()}}ObjectIdTable() {
+                return m{{type.name.CamelCase()}}IdTable;
+            }
+        {% endfor %}
+
+      private:
+        // Implementation of the ObjectIdResolver interface
+        {% for type in by_category["object"] %}
+            WireResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
+                auto data = mKnown{{type.name.CamelCase()}}.Get(id);
+                if (data == nullptr) {
+                    return WireResult::FatalError;
+                }
+
+                *out = data->handle;
+                return WireResult::Success;
+            }
+
+            WireResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
+                if (id == 0) {
+                    *out = nullptr;
+                    return WireResult::Success;
+                }
+
+                return GetFromId(id, out);
+            }
+        {% endfor %}
+
+        //* The list of known IDs for each object type.
+        {% for type in by_category["object"] %}
+            KnownObjects<{{as_cType(type.name)}}> mKnown{{type.name.CamelCase()}};
+        {% endfor %}
+
+        {% for type in by_category["object"] if type.name.CamelCase() in server_reverse_lookup_objects %}
+            ObjectIdLookupTable<{{as_cType(type.name)}}> m{{type.name.CamelCase()}}IdTable;
+        {% endfor %}
+    };
+
+}  // namespace dawn::wire::server
+
+#endif  // DAWNWIRE_SERVER_SERVERBASE_H_
diff --git a/generator/templates/dawn/wire/server/ServerDoers.cpp b/generator/templates/dawn/wire/server/ServerDoers.cpp
new file mode 100644
index 0000000..9c6df80
--- /dev/null
+++ b/generator/templates/dawn/wire/server/ServerDoers.cpp
@@ -0,0 +1,121 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+    //* Implementation of the command doers
+    {% for command in cmd_records["command"] %}
+        {% set type = command.derived_object %}
+        {% set method = command.derived_method %}
+        {% set is_method = method is not none %}
+
+        {% set Suffix = command.name.CamelCase() %}
+        {% if Suffix not in client_side_commands %}
+            {% if is_method and Suffix not in server_handwritten_commands %}
+                bool Server::Do{{Suffix}}(
+                    {%- for member in command.members -%}
+                        {%- if member.is_return_value -%}
+                            {%- if member.handle_type -%}
+                                {{as_cType(member.handle_type.name)}}* {{as_varName(member.name)}}
+                            {%- else -%}
+                                {{as_cType(member.type.name)}}* {{as_varName(member.name)}}
+                            {%- endif -%}
+                        {%- else -%}
+                            {{as_annotated_cType(member)}}
+                        {%- endif -%}
+                        {%- if not loop.last -%}, {% endif %}
+                    {%- endfor -%}
+                ) {
+                    {% set ret = command.members|selectattr("is_return_value")|list %}
+                    //* If there is a return value, assign it.
+                    {% if ret|length == 1 %}
+                        *{{as_varName(ret[0].name)}} =
+                    {% else %}
+                        //* Only one member should be a return value.
+                        {{ assert(ret|length == 0) }}
+                    {% endif %}
+                    mProcs.{{as_varName(type.name, method.name)}}(
+                        {%- for member in command.members if not member.is_return_value -%}
+                            {{as_varName(member.name)}}
+                            {%- if not loop.last -%}, {% endif %}
+                        {%- endfor -%}
+                    );
+                    {% if ret|length == 1 %}
+                        //* WebGPU error handling guarantees that no null object can be returned by
+                        //* object creation functions.
+                        ASSERT(*{{as_varName(ret[0].name)}} != nullptr);
+                    {% endif %}
+                    return true;
+                }
+            {% endif %}
+        {% endif %}
+    {% endfor %}
+
+    bool Server::DoDestroyObject(ObjectType objectType, ObjectId objectId) {
+        //* ID 0 are reserved for nullptr and cannot be destroyed.
+        if (objectId == 0) {
+            return false;
+        }
+
+        switch(objectType) {
+            {% for type in by_category["object"] %}
+                case ObjectType::{{type.name.CamelCase()}}: {
+                    auto* data = {{type.name.CamelCase()}}Objects().Get(objectId);
+                    if (data == nullptr) {
+                        return false;
+                    }
+                    if (data->deviceInfo != nullptr) {
+                        if (!UntrackDeviceChild(data->deviceInfo, objectType, objectId)) {
+                            return false;
+                        }
+                    }
+                    if (data->state == AllocationState::Allocated) {
+                        ASSERT(data->handle != nullptr);
+                        {% if type.name.CamelCase() in server_reverse_lookup_objects %}
+                            {{type.name.CamelCase()}}ObjectIdTable().Remove(data->handle);
+                        {% endif %}
+
+                        {% if type.name.get() == "device" %}
+                            //* TODO(crbug.com/dawn/384): This is a hack to make sure that all child objects
+                            //* are destroyed before their device. We should have a solution in
+                            //* Dawn native that makes all child objects internally null if their
+                            //* Device is destroyed.
+                            while (data->info->childObjectTypesAndIds.size() > 0) {
+                                auto [childObjectType, childObjectId] = UnpackObjectTypeAndId(
+                                    *data->info->childObjectTypesAndIds.begin());
+                                if (!DoDestroyObject(childObjectType, childObjectId)) {
+                                    return false;
+                                }
+                            }
+                            if (data->handle != nullptr) {
+                                //* Deregisters uncaptured error and device lost callbacks since
+                                //* they should not be forwarded if the device no longer exists on the wire.
+                                ClearDeviceCallbacks(data->handle);
+                            }
+                        {% endif %}
+
+                        mProcs.{{as_varName(type.name, Name("release"))}}(data->handle);
+                    }
+                    {{type.name.CamelCase()}}Objects().Free(objectId);
+                    return true;
+                }
+            {% endfor %}
+            default:
+                return false;
+        }
+    }
+
+}  // namespace dawn::wire::server
diff --git a/generator/templates/dawn/wire/server/ServerHandlers.cpp b/generator/templates/dawn/wire/server/ServerHandlers.cpp
new file mode 100644
index 0000000..5514a33
--- /dev/null
+++ b/generator/templates/dawn/wire/server/ServerHandlers.cpp
@@ -0,0 +1,150 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+    {% for command in cmd_records["command"] %}
+        {% set method = command.derived_method %}
+        {% set is_method = method != None %}
+        {% set returns = is_method and method.return_type.name.canonical_case() != "void" %}
+
+        {% set Suffix = command.name.CamelCase() %}
+        //* The generic command handlers
+        bool Server::Handle{{Suffix}}(DeserializeBuffer* deserializeBuffer) {
+            {{Suffix}}Cmd cmd;
+            WireResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator
+                {%- if command.may_have_dawn_object -%}
+                    , *this
+                {%- endif -%}
+            );
+
+            if (deserializeResult == WireResult::FatalError) {
+                return false;
+            }
+
+            {% if Suffix in server_custom_pre_handler_commands %}
+                if (!PreHandle{{Suffix}}(cmd)) {
+                    return false;
+                }
+            {% endif %}
+
+            //* Allocate any result objects
+            {%- for member in command.members if member.is_return_value -%}
+                {{ assert(member.handle_type) }}
+                {% set Type = member.handle_type.name.CamelCase() %}
+                {% set name = as_varName(member.name) %}
+
+                auto* {{name}}Data = {{Type}}Objects().Allocate(cmd.{{name}}.id);
+                if ({{name}}Data == nullptr) {
+                    return false;
+                }
+                {{name}}Data->generation = cmd.{{name}}.generation;
+
+                //* TODO(crbug.com/dawn/384): This is a hack to make sure that all child objects
+                //* are destroyed before their device. The dawn_native device needs to track all child objects so
+                //* it can destroy them if the device is destroyed first.
+                {% if command.derived_object %}
+                    {% set type = command.derived_object %}
+                    {% if type.name.get() == "device" %}
+                        {{name}}Data->deviceInfo = DeviceObjects().Get(cmd.selfId)->info.get();
+                    {% else %}
+                        auto* selfData = {{type.name.CamelCase()}}Objects().Get(cmd.selfId);
+                        {{name}}Data->deviceInfo = selfData->deviceInfo;
+                    {% endif %}
+                    if ({{name}}Data->deviceInfo != nullptr) {
+                        if (!TrackDeviceChild({{name}}Data->deviceInfo, ObjectType::{{Type}}, cmd.{{name}}.id)) {
+                            return false;
+                        }
+                    }
+                {% endif %}
+            {% endfor %}
+
+            //* Do command
+            bool success = Do{{Suffix}}(
+                {%- for member in command.members -%}
+                    {%- if member.is_return_value -%}
+                        {%- if member.handle_type -%}
+                            &{{as_varName(member.name)}}Data->handle //* Pass the handle of the output object to be written by the doer
+                        {%- else -%}
+                            &cmd.{{as_varName(member.name)}}
+                        {%- endif -%}
+                    {%- else -%}
+                        cmd.{{as_varName(member.name)}}
+                    {%- endif -%}
+                    {%- if not loop.last -%}, {% endif %}
+                {%- endfor -%}
+            );
+
+            if (!success) {
+                return false;
+            }
+
+            {%- for member in command.members if member.is_return_value and member.handle_type -%}
+                {% set Type = member.handle_type.name.CamelCase() %}
+                {% set name = as_varName(member.name) %}
+
+                {% if Type in server_reverse_lookup_objects %}
+                    //* For created objects, store a mapping from them back to their client IDs
+                    {{Type}}ObjectIdTable().Store({{name}}Data->handle, cmd.{{name}}.id);
+                {% endif %}
+            {% endfor %}
+
+            return true;
+        }
+    {% endfor %}
+
+    const volatile char* Server::HandleCommandsImpl(const volatile char* commands, size_t size) {
+        DeserializeBuffer deserializeBuffer(commands, size);
+
+        while (deserializeBuffer.AvailableSize() >= sizeof(CmdHeader) + sizeof(WireCmd)) {
+            // Start by chunked command handling, if it is done, then it means the whole buffer
+            // was consumed by it, so we return a pointer to the end of the commands.
+            switch (HandleChunkedCommands(deserializeBuffer.Buffer(), deserializeBuffer.AvailableSize())) {
+                case ChunkedCommandsResult::Consumed:
+                    return commands + size;
+                case ChunkedCommandsResult::Error:
+                    return nullptr;
+                case ChunkedCommandsResult::Passthrough:
+                    break;
+            }
+
+            WireCmd cmdId = *static_cast<const volatile WireCmd*>(static_cast<const volatile void*>(
+                deserializeBuffer.Buffer() + sizeof(CmdHeader)));
+            bool success = false;
+            switch (cmdId) {
+                {% for command in cmd_records["command"] %}
+                    case WireCmd::{{command.name.CamelCase()}}:
+                        success = Handle{{command.name.CamelCase()}}(&deserializeBuffer);
+                        break;
+                {% endfor %}
+                default:
+                    success = false;
+            }
+
+            if (!success) {
+                return nullptr;
+            }
+            mAllocator.Reset();
+        }
+
+        if (deserializeBuffer.AvailableSize() != 0) {
+            return nullptr;
+        }
+
+        return commands;
+    }
+
+}  // namespace dawn::wire::server
diff --git a/generator/templates/dawn/wire/server/ServerPrototypes.inc b/generator/templates/dawn/wire/server/ServerPrototypes.inc
new file mode 100644
index 0000000..31af0ed
--- /dev/null
+++ b/generator/templates/dawn/wire/server/ServerPrototypes.inc
@@ -0,0 +1,38 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+// Command handlers & doers
+{% for command in cmd_records["command"] %}
+    {% set Suffix = command.name.CamelCase() %}
+    bool Handle{{Suffix}}(DeserializeBuffer* deserializeBuffer);
+
+    bool Do{{Suffix}}(
+        {%- for member in command.members -%}
+            {%- if member.is_return_value -%}
+                {%- if member.handle_type -%}
+                    {{as_cType(member.handle_type.name)}}* {{as_varName(member.name)}}
+                {%- else -%}
+                    {{as_cType(member.type.name)}}* {{as_varName(member.name)}}
+                {%- endif -%}
+            {%- else -%}
+                {{as_annotated_cType(member)}}
+            {%- endif -%}
+            {%- if not loop.last -%}, {% endif %}
+        {%- endfor -%}
+    );
+{% endfor %}
+
+{% for CommandName in server_custom_pre_handler_commands %}
+    bool PreHandle{{CommandName}}(const {{CommandName}}Cmd& cmd);
+{% endfor %}
diff --git a/generator/templates/dawn_proc.c b/generator/templates/dawn_proc.c
new file mode 100644
index 0000000..68970c6
--- /dev/null
+++ b/generator/templates/dawn_proc.c
@@ -0,0 +1,63 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set Prefix = metadata.proc_table_prefix %}
+{% set prefix = Prefix.lower() %}
+#include "dawn/{{prefix}}_proc.h"
+
+static {{Prefix}}ProcTable procs;
+
+static {{Prefix}}ProcTable nullProcs;
+
+void {{prefix}}ProcSetProcs(const {{Prefix}}ProcTable* procs_) {
+    if (procs_) {
+        procs = *procs_;
+    } else {
+        procs = nullProcs;
+    }
+}
+
+{% for function in by_category["function"] %}
+    {{as_cType(function.return_type.name)}} {{as_cMethod(None, function.name)}}(
+        {%- for arg in function.arguments -%}
+            {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+        {%- endfor -%}
+    ) {
+        {% if function.return_type.name.canonical_case() != "void" %}return {% endif %}
+        procs.{{as_varName(function.name)}}(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_varName(arg.name)}}
+            {%- endfor -%}
+        );
+    }
+{% endfor %}
+
+{% for type in by_category["object"] %}
+    {% for method in c_methods(type) %}
+        {{as_cType(method.return_type.name)}} {{as_cMethod(type.name, method.name)}}(
+            {{-as_cType(type.name)}} {{as_varName(type.name)}}
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        ) {
+            {% if method.return_type.name.canonical_case() != "void" %}return {% endif %}
+            procs.{{as_varName(type.name, method.name)}}({{as_varName(type.name)}}
+                {%- for arg in method.arguments -%}
+                    , {{as_varName(arg.name)}}
+                {%- endfor -%}
+            );
+        }
+    {% endfor %}
+
+{% endfor %}
diff --git a/generator/templates/dawn_proc_table.h b/generator/templates/dawn_proc_table.h
new file mode 100644
index 0000000..16f3fc2
--- /dev/null
+++ b/generator/templates/dawn_proc_table.h
@@ -0,0 +1,35 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set Prefix = metadata.proc_table_prefix %}
+#ifndef DAWN_{{Prefix.upper()}}_PROC_TABLE_H_
+#define DAWN_{{Prefix.upper()}}_PROC_TABLE_H_
+
+#include "dawn/{{metadata.api.lower()}}.h"
+
+// Note: Often allocated as a static global. Do not add a complex constructor.
+typedef struct {{Prefix}}ProcTable {
+    {% for function in by_category["function"] %}
+        {{as_cProc(None, function.name)}} {{as_varName(function.name)}};
+    {% endfor %}
+
+    {% for type in by_category["object"] %}
+        {% for method in c_methods(type) %}
+            {{as_cProc(type.name, method.name)}} {{as_varName(type.name, method.name)}};
+        {% endfor %}
+
+    {% endfor %}
+} {{Prefix}}ProcTable;
+
+#endif  // DAWN_{{Prefix.upper()}}_PROC_TABLE_H_
diff --git a/generator/templates/dawn_thread_dispatch_proc.cpp b/generator/templates/dawn_thread_dispatch_proc.cpp
new file mode 100644
index 0000000..fc79464
--- /dev/null
+++ b/generator/templates/dawn_thread_dispatch_proc.cpp
@@ -0,0 +1,62 @@
+{% set Prefix = metadata.proc_table_prefix %}
+{% set prefix = Prefix.lower() %}
+#include "dawn/{{prefix}}_thread_dispatch_proc.h"
+
+#include <thread>
+
+static {{Prefix}}ProcTable nullProcs;
+thread_local {{Prefix}}ProcTable perThreadProcs;
+
+void {{prefix}}ProcSetPerThreadProcs(const {{Prefix}}ProcTable* procs) {
+    if (procs) {
+        perThreadProcs = *procs;
+    } else {
+        perThreadProcs = nullProcs;
+    }
+}
+
+{% for function in by_category["function"] %}
+    static {{as_cType(function.return_type.name)}} ThreadDispatch{{as_cppType(function.name)}}(
+        {%- for arg in function.arguments -%}
+            {% if not loop.first %}, {% endif %}{{as_annotated_cType(arg)}}
+        {%- endfor -%}
+    ) {
+        {% if function.return_type.name.canonical_case() != "void" %}return {% endif %}
+        perThreadProcs.{{as_varName(function.name)}}(
+            {%- for arg in function.arguments -%}
+                {% if not loop.first %}, {% endif %}{{as_varName(arg.name)}}
+            {%- endfor -%}
+        );
+    }
+{% endfor %}
+
+{% for type in by_category["object"] %}
+    {% for method in c_methods(type) %}
+        static {{as_cType(method.return_type.name)}} ThreadDispatch{{as_MethodSuffix(type.name, method.name)}}(
+            {{-as_cType(type.name)}} {{as_varName(type.name)}}
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        ) {
+            {% if method.return_type.name.canonical_case() != "void" %}return {% endif %}
+            perThreadProcs.{{as_varName(type.name, method.name)}}({{as_varName(type.name)}}
+                {%- for arg in method.arguments -%}
+                    , {{as_varName(arg.name)}}
+                {%- endfor -%}
+            );
+        }
+    {% endfor %}
+{% endfor %}
+
+extern "C" {
+    {{Prefix}}ProcTable {{prefix}}ThreadDispatchProcTable = {
+        {% for function in by_category["function"] %}
+            ThreadDispatch{{as_cppType(function.name)}},
+        {% endfor %}
+        {% for type in by_category["object"] %}
+            {% for method in c_methods(type) %}
+                ThreadDispatch{{as_MethodSuffix(type.name, method.name)}},
+            {% endfor %}
+        {% endfor %}
+    };
+}
diff --git a/generator/templates/library_api_enum_tables.js b/generator/templates/library_api_enum_tables.js
new file mode 100644
index 0000000..2ec4eb6
--- /dev/null
+++ b/generator/templates/library_api_enum_tables.js
@@ -0,0 +1,35 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+//* This generator is used to produce the number-to-string mappings for
+//* Emscripten's library_webgpu.js.
+//* https://github.com/emscripten-core/emscripten/blob/master/src/library_webgpu.js
+//*
+    {% for type in by_category["enum"] if not type.json_data.get("emscripten_no_enum_table") %}
+        {{type.name.CamelCase()}}: {% if type.contiguousFromZero -%}
+            [
+                {% for value in type.values %}
+                  {{as_jsEnumValue(value)}},
+                {% endfor %}
+            ]
+        {%- else -%}
+            {
+                {% for value in type.values %}
+                  {{value.value}}: {{as_jsEnumValue(value)}},
+                {% endfor %}
+            }
+        {%- endif -%}
+        ,
+    {% endfor %}
diff --git a/generator/templates/mock_api.cpp b/generator/templates/mock_api.cpp
new file mode 100644
index 0000000..bf8f871
--- /dev/null
+++ b/generator/templates/mock_api.cpp
@@ -0,0 +1,110 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set api = metadata.api.lower() %}
+#include "mock_{{api}}.h"
+
+using namespace testing;
+
+namespace {
+    {% for type in by_category["object"] %}
+        {% for method in c_methods(type) %}
+            {{as_cType(method.return_type.name)}} Forward{{as_MethodSuffix(type.name, method.name)}}(
+                {{-as_cType(type.name)}} self
+                {%- for arg in method.arguments -%}
+                    , {{as_annotated_cType(arg)}}
+                {%- endfor -%}
+            ) {
+                auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
+                return object->procs->{{as_MethodSuffix(type.name, method.name)}}(self
+                    {%- for arg in method.arguments -%}
+                        , {{as_varName(arg.name)}}
+                    {%- endfor -%}
+                );
+            }
+        {% endfor %}
+
+    {% endfor %}
+}
+
+ProcTableAsClass::~ProcTableAsClass() {
+}
+
+{% set Prefix = metadata.proc_table_prefix %}
+void ProcTableAsClass::GetProcTable({{Prefix}}ProcTable* table) {
+    {% for type in by_category["object"] %}
+        {% for method in c_methods(type) %}
+            table->{{as_varName(type.name, method.name)}} = reinterpret_cast<{{as_cProc(type.name, method.name)}}>(Forward{{as_MethodSuffix(type.name, method.name)}});
+        {% endfor %}
+    {% endfor %}
+}
+
+{% for type in by_category["object"] %}
+    {% for method in type.methods if has_callback_arguments(method) %}
+        {% set Suffix = as_MethodSuffix(type.name, method.name) %}
+
+        {{as_cType(method.return_type.name)}} ProcTableAsClass::{{Suffix}}(
+            {{-as_cType(type.name)}} {{as_varName(type.name)}}
+            {%- for arg in method.arguments -%}
+                , {{as_annotated_cType(arg)}}
+            {%- endfor -%}
+        ) {
+            ProcTableAsClass::Object* object = reinterpret_cast<ProcTableAsClass::Object*>({{as_varName(type.name)}});
+            {% for callback_arg in method.arguments if callback_arg.type.category == 'function pointer' %}
+                object->m{{as_MethodSuffix(type.name, method.name)}}Callback = {{as_varName(callback_arg.name)}};
+            {% endfor %}
+            object->userdata = userdata;
+            return On{{as_MethodSuffix(type.name, method.name)}}(
+                {{-as_varName(type.name)}}
+                {%- for arg in method.arguments -%}
+                    , {{as_varName(arg.name)}}
+                {%- endfor -%}
+            );
+        }
+
+        {% for callback_arg in method.arguments if callback_arg.type.category == 'function pointer' %}
+            void ProcTableAsClass::Call{{Suffix}}Callback(
+                {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                {%- for arg in callback_arg.type.arguments -%}
+                    {%- if not loop.last -%}, {{as_annotated_cType(arg)}}{%- endif -%}
+                {%- endfor -%}
+            ) {
+                ProcTableAsClass::Object* object = reinterpret_cast<ProcTableAsClass::Object*>({{as_varName(type.name)}});
+                object->m{{Suffix}}Callback(
+                    {%- for arg in callback_arg.type.arguments -%}
+                        {%- if not loop.last -%}{{as_varName(arg.name)}}, {% endif -%}
+                    {%- endfor -%}
+                    object->userdata);
+            }
+        {% endfor %}
+    {% endfor %}
+{% endfor %}
+
+{% for type in by_category["object"] %}
+    {{as_cType(type.name)}} ProcTableAsClass::GetNew{{type.name.CamelCase()}}() {
+        mObjects.emplace_back(new Object);
+        mObjects.back()->procs = this;
+        return reinterpret_cast<{{as_cType(type.name)}}>(mObjects.back().get());
+    }
+{% endfor %}
+
+MockProcTable::MockProcTable() = default;
+
+MockProcTable::~MockProcTable() = default;
+
+void MockProcTable::IgnoreAllReleaseCalls() {
+    {% for type in by_category["object"] %}
+        EXPECT_CALL(*this, {{as_MethodSuffix(type.name, Name("release"))}}(_)).Times(AnyNumber());
+    {% endfor %}
+}
diff --git a/generator/templates/mock_api.h b/generator/templates/mock_api.h
new file mode 100644
index 0000000..1c0a880
--- /dev/null
+++ b/generator/templates/mock_api.h
@@ -0,0 +1,138 @@
+//* Copyright 2017 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+{% set API = metadata.api.upper() %}
+{% set api = API.lower() %}
+#ifndef MOCK_{{API}}_H
+#define MOCK_{{API}}_H
+
+{% set Prefix = metadata.proc_table_prefix %}
+{% set prefix = Prefix.lower() %}
+#include <dawn/{{prefix}}_proc_table.h>
+#include <dawn/{{api}}.h>
+#include <gmock/gmock.h>
+
+#include <memory>
+
+// An abstract base class representing a proc table so that API calls can be mocked. Most API calls
+// are directly represented by a delete virtual method but others need minimal state tracking to be
+// useful as mocks.
+class ProcTableAsClass {
+    public:
+        virtual ~ProcTableAsClass();
+
+        void GetProcTable({{Prefix}}ProcTable* table);
+
+        // Creates an object that can be returned by a mocked call as in WillOnce(Return(foo)).
+        // It returns an object of the write type that isn't equal to any previously returned object.
+        // Otherwise some mock expectation could be triggered by two different objects having the same
+        // value.
+        {% for type in by_category["object"] %}
+            {{as_cType(type.name)}} GetNew{{type.name.CamelCase()}}();
+        {% endfor %}
+
+        {% for type in by_category["object"] %}
+            {% for method in type.methods if not has_callback_arguments(method) %}
+                virtual {{as_cType(method.return_type.name)}} {{as_MethodSuffix(type.name, method.name)}}(
+                    {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                    {%- for arg in method.arguments -%}
+                        , {{as_annotated_cType(arg)}}
+                    {%- endfor -%}
+                ) = 0;
+            {% endfor %}
+
+            virtual void {{as_MethodSuffix(type.name, Name("reference"))}}({{as_cType(type.name)}} self) = 0;
+            virtual void {{as_MethodSuffix(type.name, Name("release"))}}({{as_cType(type.name)}} self) = 0;
+
+            {% for method in type.methods if has_callback_arguments(method) %}
+                {% set Suffix = as_MethodSuffix(type.name, method.name) %}
+                //* Stores callback and userdata and calls the On* method.
+                {{as_cType(method.return_type.name)}} {{Suffix}}(
+                    {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                    {%- for arg in method.arguments -%}
+                        , {{as_annotated_cType(arg)}}
+                    {%- endfor -%}
+                );
+                //* The virtual function to call after saving the callback and userdata in the proc.
+                //* This function can be mocked.
+                virtual {{as_cType(method.return_type.name)}} On{{Suffix}}(
+                    {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                    {%- for arg in method.arguments -%}
+                        , {{as_annotated_cType(arg)}}
+                    {%- endfor -%}
+                ) = 0;
+
+                //* Calls the stored callback.
+                {% for callback_arg in method.arguments if callback_arg.type.category == 'function pointer' %}
+                    void Call{{as_MethodSuffix(type.name, method.name)}}Callback(
+                        {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                        {%- for arg in callback_arg.type.arguments -%}
+                            {%- if not loop.last -%}, {{as_annotated_cType(arg)}}{%- endif -%}
+                        {%- endfor -%}
+                    );
+                {% endfor %}
+            {% endfor %}
+        {% endfor %}
+
+        struct Object {
+            ProcTableAsClass* procs = nullptr;
+            {% for type in by_category["object"] %}
+                {% for method in type.methods if has_callback_arguments(method) %}
+                    {% for callback_arg in method.arguments if callback_arg.type.category == 'function pointer' %}
+                        {{as_cType(callback_arg.type.name)}} m{{as_MethodSuffix(type.name, method.name)}}Callback = nullptr;
+                    {% endfor %}
+                {% endfor %}
+            {% endfor %}
+            void* userdata = 0;
+        };
+
+    private:
+        // Remembers the values returned by GetNew* so they can be freed.
+        std::vector<std::unique_ptr<Object>> mObjects;
+};
+
+class MockProcTable : public ProcTableAsClass {
+    public:
+        MockProcTable();
+        ~MockProcTable() override;
+
+        void IgnoreAllReleaseCalls();
+
+        {% for type in by_category["object"] %}
+            {% for method in type.methods if not has_callback_arguments(method) %}
+                MOCK_METHOD({{as_cType(method.return_type.name)}},{{" "}}
+                    {{-as_MethodSuffix(type.name, method.name)}}, (
+                        {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                        {%- for arg in method.arguments -%}
+                            , {{as_annotated_cType(arg)}}
+                        {%- endfor -%}
+                    ), (override));
+            {% endfor %}
+
+            MOCK_METHOD(void, {{as_MethodSuffix(type.name, Name("reference"))}}, ({{as_cType(type.name)}} self), (override));
+            MOCK_METHOD(void, {{as_MethodSuffix(type.name, Name("release"))}}, ({{as_cType(type.name)}} self), (override));
+
+            {% for method in type.methods if has_callback_arguments(method) %}
+                MOCK_METHOD({{as_cType(method.return_type.name)}},{{" "-}}
+                    On{{as_MethodSuffix(type.name, method.name)}}, (
+                        {{-as_cType(type.name)}} {{as_varName(type.name)}}
+                        {%- for arg in method.arguments -%}
+                            , {{as_annotated_cType(arg)}}
+                        {%- endfor -%}
+                    ), (override));
+            {% endfor %}
+        {% endfor %}
+};
+
+#endif  // MOCK_{{API}}_H
diff --git a/generator/templates/opengl/OpenGLFunctionsBase.cpp b/generator/templates/opengl/OpenGLFunctionsBase.cpp
new file mode 100644
index 0000000..acddd4f
--- /dev/null
+++ b/generator/templates/opengl/OpenGLFunctionsBase.cpp
@@ -0,0 +1,70 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
+
+namespace dawn::native::opengl {
+
+template<typename T>
+MaybeError OpenGLFunctionsBase::LoadProc(GetProcAddress getProc, T* memberProc, const char* name) {
+    *memberProc = reinterpret_cast<T>(getProc(name));
+    if (DAWN_UNLIKELY(memberProc == nullptr)) {
+        return DAWN_INTERNAL_ERROR(std::string("Couldn't load GL proc: ") + name);
+    }
+    return {};
+}
+
+MaybeError OpenGLFunctionsBase::LoadOpenGLESProcs(GetProcAddress getProc, int majorVersion, int minorVersion) {
+    {% for block in gles_blocks %}
+        // OpenGL ES {{block.version.major}}.{{block.version.minor}}
+        if (majorVersion > {{block.version.major}} || (majorVersion == {{block.version.major}} && minorVersion >= {{block.version.minor}})) {
+            {% for proc in block.procs %}
+                DAWN_TRY(LoadProc(getProc, &{{proc.ProcName()}}, "{{proc.glProcName()}}"));
+            {% endfor %}
+        }
+
+    {% endfor %}
+
+    {% for block in extension_gles_blocks %}
+        // {{block.extension}}
+        {% for proc in block.procs %}
+            DAWN_TRY(LoadProc(getProc, &{{proc.ProcName()}}, "{{proc.glProcName()}}"));
+        {% endfor %}
+    {% endfor %}
+
+    return {};
+}
+
+MaybeError OpenGLFunctionsBase::LoadDesktopGLProcs(GetProcAddress getProc, int majorVersion, int minorVersion) {
+    {% for block in desktop_gl_blocks %}
+        // Desktop OpenGL {{block.version.major}}.{{block.version.minor}}
+        if (majorVersion > {{block.version.major}} || (majorVersion == {{block.version.major}} && minorVersion >= {{block.version.minor}})) {
+            {% for proc in block.procs %}
+                DAWN_TRY(LoadProc(getProc, &{{proc.ProcName()}}, "{{proc.glProcName()}}"));
+            {% endfor %}
+        }
+
+    {% endfor %}
+
+    {% for block in extension_desktop_gl_blocks %}
+        // {{block.extension}}
+        {% for proc in block.procs %}
+            DAWN_TRY(LoadProc(getProc, &{{proc.ProcName()}}, "{{proc.glProcName()}}"));
+        {% endfor %}
+    {% endfor %}
+
+    return {};
+}
+
+}  // namespace dawn::native::opengl
diff --git a/generator/templates/opengl/OpenGLFunctionsBase.h b/generator/templates/opengl/OpenGLFunctionsBase.h
new file mode 100644
index 0000000..ac313c4
--- /dev/null
+++ b/generator/templates/opengl/OpenGLFunctionsBase.h
@@ -0,0 +1,45 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_OPENGLFUNCTIONSBASE_H_
+#define DAWNNATIVE_OPENGL_OPENGLFUNCTIONSBASE_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+    using GetProcAddress = void* (*) (const char*);
+
+    struct OpenGLFunctionsBase {
+      public:
+        {% for block in header_blocks %}
+            // {{block.description}}
+            {% for proc in block.procs %}
+                {{proc.PFNGLPROCNAME()}} {{proc.ProcName()}} = nullptr;
+            {% endfor %}
+
+        {% endfor%}
+
+      protected:
+        MaybeError LoadDesktopGLProcs(GetProcAddress getProc, int majorVersion, int minorVersion);
+        MaybeError LoadOpenGLESProcs(GetProcAddress getProc, int majorVersion, int minorVersion);
+
+      private:
+        template<typename T>
+        MaybeError LoadProc(GetProcAddress getProc, T* memberProc, const char* name);
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_OPENGLFUNCTIONSBASE_H_
diff --git a/generator/templates/opengl/opengl_platform.h b/generator/templates/opengl/opengl_platform.h
new file mode 100644
index 0000000..c2063b7
--- /dev/null
+++ b/generator/templates/opengl/opengl_platform.h
@@ -0,0 +1,73 @@
+//* Copyright 2019 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//*     http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+
+#include <KHR/khrplatform.h>
+
+using GLvoid = void;
+using GLchar = char;
+using GLenum = unsigned int;
+using GLboolean = unsigned char;
+using GLbitfield = unsigned int;
+using GLbyte = khronos_int8_t;
+using GLshort = short;
+using GLint = int;
+using GLsizei = int;
+using GLubyte = khronos_uint8_t;
+using GLushort = unsigned short;
+using GLuint = unsigned int;
+using GLfloat = khronos_float_t;
+using GLclampf = khronos_float_t;
+using GLdouble = double;
+using GLclampd = double;
+using GLfixed = khronos_int32_t;
+using GLintptr = khronos_intptr_t;
+using GLsizeiptr = khronos_ssize_t;
+using GLhalf = unsigned short;
+using GLint64 = khronos_int64_t;
+using GLuint64 = khronos_uint64_t;
+using GLsync = struct __GLsync*;
+using GLeglImageOES = void*;
+using GLDEBUGPROC = void(KHRONOS_APIENTRY*)(GLenum source,
+                                            GLenum type,
+                                            GLuint id,
+                                            GLenum severity,
+                                            GLsizei length,
+                                            const GLchar* message,
+                                            const void* userParam);
+using GLDEBUGPROCARB = GLDEBUGPROC;
+using GLDEBUGPROCKHR = GLDEBUGPROC;
+using GLDEBUGPROCAMD = void(KHRONOS_APIENTRY*)(GLuint id,
+                                               GLenum category,
+                                               GLenum severity,
+                                               GLsizei length,
+                                               const GLchar* message,
+                                               void* userParam);
+
+{% for block in header_blocks %}
+    // {{block.description}}
+    {% for enum in block.enums %}
+        #define {{enum.name}} {{enum.value}}
+    {% endfor %}
+
+    {% for proc in block.procs %}
+        using {{proc.PFNGLPROCNAME()}} = {{proc.return_type}}(KHRONOS_APIENTRY *)(
+            {%- for param in proc.params -%}
+                {%- if not loop.first %}, {% endif -%}
+                {{param.type}} {{param.name}}
+            {%- endfor -%}
+        );
+    {% endfor %}
+
+{% endfor%}
+#undef DAWN_GL_APIENTRY
diff --git a/include/dawn/BUILD.gn b/include/dawn/BUILD.gn
new file mode 100644
index 0000000..d493820
--- /dev/null
+++ b/include/dawn/BUILD.gn
@@ -0,0 +1,84 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+
+###############################################################################
+# Dawn headers
+###############################################################################
+
+dawn_json_generator("headers_gen") {
+  target = "headers"
+  outputs = [
+    "include/dawn/dawn_proc_table.h",
+    "include/dawn/webgpu.h",
+  ]
+}
+
+source_set("headers") {
+  all_dependent_configs = [ ":public" ]
+  public_deps = [ ":headers_gen" ]
+
+  sources = get_target_outputs(":headers_gen")
+  sources += [ "${dawn_root}/include/dawn/dawn_wsi.h" ]
+}
+
+###############################################################################
+# Dawn C++ headers
+###############################################################################
+
+dawn_json_generator("cpp_headers_gen") {
+  target = "cpp_headers"
+  outputs = [
+    "include/dawn/webgpu_cpp.h",
+    "include/dawn/webgpu_cpp_print.h",
+  ]
+}
+
+source_set("cpp_headers") {
+  public_deps = [
+    ":cpp_headers_gen",
+    ":headers",
+  ]
+
+  sources = get_target_outputs(":cpp_headers_gen")
+  sources += [ "${dawn_root}/include/dawn/EnumClassBitmasks.h" ]
+}
+
+###############################################################################
+# Dawn public include directories
+###############################################################################
+
+config("public") {
+  include_dirs = [
+    "${target_gen_dir}/../../include",
+    "${dawn_root}/include",
+
+    "${dawn_root}/src/include",  # TODO(crbug.com/dawn/1275) - remove
+  ]
+}
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawncpp_headers") {
+  public_deps = [ ":cpp_headers" ]
+}
+group("dawn_headers") {
+  public_deps = [ ":headers" ]
+}
diff --git a/include/dawn/EnumClassBitmasks.h b/include/dawn/EnumClassBitmasks.h
new file mode 100644
index 0000000..3947f00
--- /dev/null
+++ b/include/dawn/EnumClassBitmasks.h
@@ -0,0 +1,156 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_ENUM_CLASS_BITMASKS_H_
+#define DAWN_ENUM_CLASS_BITMASKS_H_
+
+#include <type_traits>
+
+// The operators in dawn:: namespace need be introduced into other namespaces with
+// using-declarations for C++ Argument Dependent Lookup to work.
+#define DAWN_IMPORT_BITMASK_OPERATORS \
+    using dawn::operator|;            \
+    using dawn::operator&;            \
+    using dawn::operator^;            \
+    using dawn::operator~;            \
+    using dawn::operator&=;           \
+    using dawn::operator|=;           \
+    using dawn::operator^=;           \
+    using dawn::HasZeroOrOneBits;
+
+namespace dawn {
+
+    template <typename T>
+    struct IsDawnBitmask {
+        static constexpr bool enable = false;
+    };
+
+    template <typename T, typename Enable = void>
+    struct LowerBitmask {
+        static constexpr bool enable = false;
+    };
+
+    template <typename T>
+    struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
+        static constexpr bool enable = true;
+        using type = T;
+        constexpr static T Lower(T t) {
+            return t;
+        }
+    };
+
+    template <typename T>
+    struct BoolConvertible {
+        using Integral = typename std::underlying_type<T>::type;
+
+        constexpr BoolConvertible(Integral value) : value(value) {
+        }
+        constexpr operator bool() const {
+            return value != 0;
+        }
+        constexpr operator T() const {
+            return static_cast<T>(value);
+        }
+
+        Integral value;
+    };
+
+    template <typename T>
+    struct LowerBitmask<BoolConvertible<T>> {
+        static constexpr bool enable = true;
+        using type = T;
+        static constexpr type Lower(BoolConvertible<T> t) {
+            return t;
+        }
+    };
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator&=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l & r;
+        return l;
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator|=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l | r;
+        return l;
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator^=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l ^ r;
+        return l;
+    }
+
+    template <typename T>
+    constexpr bool HasZeroOrOneBits(T value) {
+        using Integral = typename std::underlying_type<T>::type;
+        return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
+    }
+
+}  // namespace dawn
+
+#endif  // DAWN_ENUM_CLASS_BITMASKS_H_
diff --git a/include/dawn/dawn_proc.h b/include/dawn/dawn_proc.h
new file mode 100644
index 0000000..adeec46
--- /dev/null
+++ b/include/dawn/dawn_proc.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_DAWN_PROC_H_
+#define DAWN_DAWN_PROC_H_
+
+#include "dawn/dawn_proc_table.h"
+#include "dawn/webgpu.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Sets the static proctable used by libdawn_proc to implement the Dawn entrypoints. Passing NULL
+// for `procs` sets up the null proctable that contains only null function pointers. It is the
+// default value of the proctable. Setting the proctable back to null is good practice when you
+// are done using libdawn_proc since further usage will cause a segfault instead of calling an
+// unexpected function.
+WGPU_EXPORT void dawnProcSetProcs(const DawnProcTable* procs);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // DAWN_DAWN_PROC_H_
diff --git a/include/dawn/dawn_thread_dispatch_proc.h b/include/dawn/dawn_thread_dispatch_proc.h
new file mode 100644
index 0000000..4d08ba8
--- /dev/null
+++ b/include/dawn/dawn_thread_dispatch_proc.h
@@ -0,0 +1,33 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_DAWN_THREAD_DISPATCH_PROC_H_
+#define DAWN_DAWN_THREAD_DISPATCH_PROC_H_
+
+#include "dawn/dawn_proc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Call dawnProcSetProcs(&dawnThreadDispatchProcTable) and then use dawnProcSetPerThreadProcs
+// to set per-thread procs.
+WGPU_EXPORT extern DawnProcTable dawnThreadDispatchProcTable;
+WGPU_EXPORT void dawnProcSetPerThreadProcs(const DawnProcTable* procs);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // DAWN_DAWN_THREAD_DISPATCH_PROC_H_
diff --git a/include/dawn/dawn_wsi.h b/include/dawn/dawn_wsi.h
new file mode 100644
index 0000000..f1a6047
--- /dev/null
+++ b/include/dawn/dawn_wsi.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_DAWN_WSI_H_
+#define DAWN_DAWN_WSI_H_
+
+#include <dawn/webgpu.h>
+
+// Error message (or nullptr if there was no error)
+typedef const char* DawnSwapChainError;
+constexpr DawnSwapChainError DAWN_SWAP_CHAIN_NO_ERROR = nullptr;
+
+typedef struct {
+    /// Backend-specific texture id/name/pointer
+    union {
+        void* ptr;
+        uint64_t u64;
+        uint32_t u32;
+    } texture;
+} DawnSwapChainNextTexture;
+
+typedef struct {
+    /// Initialize the swap chain implementation.
+    ///   (*wsiContext) is one of DawnWSIContext{D3D12,Metal,GL}
+    void (*Init)(void* userData, void* wsiContext);
+
+    /// Destroy the swap chain implementation.
+    void (*Destroy)(void* userData);
+
+    /// Configure/reconfigure the swap chain.
+    DawnSwapChainError (*Configure)(void* userData,
+                                    WGPUTextureFormat format,
+                                    WGPUTextureUsage allowedUsage,
+                                    uint32_t width,
+                                    uint32_t height);
+
+    /// Acquire the next texture from the swap chain.
+    DawnSwapChainError (*GetNextTexture)(void* userData, DawnSwapChainNextTexture* nextTexture);
+
+    /// Present the last acquired texture to the screen.
+    DawnSwapChainError (*Present)(void* userData);
+
+    /// Each function is called with userData as its first argument.
+    void* userData;
+
+    /// For use by the D3D12 and Vulkan backends: how the swapchain will use the texture.
+    WGPUTextureUsage textureUsage;
+} DawnSwapChainImplementation;
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12) && defined(__cplusplus)
+struct DawnWSIContextD3D12 {
+    WGPUDevice device = nullptr;
+};
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
+#    import <Metal/Metal.h>
+
+struct DawnWSIContextMetal {
+    id<MTLDevice> device = nil;
+    id<MTLCommandQueue> queue = nil;
+};
+#endif
+
+#ifdef DAWN_ENABLE_BACKEND_OPENGL
+typedef struct {
+} DawnWSIContextGL;
+#endif
+
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+typedef struct {
+} DawnWSIContextVulkan;
+#endif
+
+#endif  // DAWN_DAWN_WSI_H
diff --git a/include/dawn/native/D3D12Backend.h b/include/dawn/native/D3D12Backend.h
new file mode 100644
index 0000000..6f11bb7
--- /dev/null
+++ b/include/dawn/native/D3D12Backend.h
@@ -0,0 +1,111 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12BACKEND_H_
+#define DAWNNATIVE_D3D12BACKEND_H_
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/native/DawnNative.h>
+
+#include <DXGI1_4.h>
+#include <d3d12.h>
+#include <windows.h>
+#include <wrl/client.h>
+
+#include <memory>
+
+struct ID3D12Device;
+struct ID3D12Resource;
+
+namespace dawn::native::d3d12 {
+
+    class D3D11on12ResourceCache;
+
+    DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
+    DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                             HWND window);
+    DAWN_NATIVE_EXPORT WGPUTextureFormat
+    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+
+    enum MemorySegment {
+        Local,
+        NonLocal,
+    };
+
+    DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
+                                                             uint64_t requestedReservationSize,
+                                                             MemorySegment memorySegment);
+
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
+      public:
+        ExternalImageDescriptorDXGISharedHandle();
+
+        // Note: SharedHandle must be a handle to a texture object.
+        HANDLE sharedHandle;
+    };
+
+    // Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
+    constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;
+
+    struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
+        : ExternalImageAccessDescriptor {
+      public:
+        // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
+        // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
+        uint64_t acquireMutexKey;
+        uint64_t releaseMutexKey;
+        bool isSwapChainTexture = false;
+    };
+
+    class DAWN_NATIVE_EXPORT ExternalImageDXGI {
+      public:
+        ~ExternalImageDXGI();
+
+        // Note: SharedHandle must be a handle to a texture object.
+        static std::unique_ptr<ExternalImageDXGI> Create(
+            WGPUDevice device,
+            const ExternalImageDescriptorDXGISharedHandle* descriptor);
+
+        WGPUTexture ProduceTexture(WGPUDevice device,
+                                   const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+
+      private:
+        ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
+                          const WGPUTextureDescriptor* descriptor);
+
+        Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
+
+        // Contents of WGPUTextureDescriptor are stored individually since the descriptor
+        // could outlive this image.
+        WGPUTextureUsageFlags mUsage;
+        WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
+        WGPUTextureDimension mDimension;
+        WGPUExtent3D mSize;
+        WGPUTextureFormat mFormat;
+        uint32_t mMipLevelCount;
+        uint32_t mSampleCount;
+
+        std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
+    };
+
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+        AdapterDiscoveryOptions();
+        AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
+
+        Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12BACKEND_H_
diff --git a/include/dawn/native/DawnNative.h b/include/dawn/native/DawnNative.h
new file mode 100644
index 0000000..d62d0ef
--- /dev/null
+++ b/include/dawn/native/DawnNative.h
@@ -0,0 +1,261 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DAWNNATIVE_H_
+#define DAWNNATIVE_DAWNNATIVE_H_
+
+#include <dawn/dawn_proc_table.h>
+#include <dawn/native/dawn_native_export.h>
+#include <dawn/webgpu.h>
+
+#include <string>
+#include <vector>
+
+namespace dawn::platform {
+    class Platform;
+}  // namespace dawn::platform
+
+namespace wgpu {
+    struct AdapterProperties;
+    struct DeviceDescriptor;
+}  // namespace wgpu
+
+namespace dawn::native {
+
+    class InstanceBase;
+    class AdapterBase;
+
+    // An optional parameter of Adapter::CreateDevice() to send additional information when creating
+    // a Device. For example, we can use it to enable a workaround, optimization or feature.
+    struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
+        std::vector<const char*> requiredFeatures;
+        std::vector<const char*> forceEnabledToggles;
+        std::vector<const char*> forceDisabledToggles;
+
+        const WGPURequiredLimits* requiredLimits = nullptr;
+    };
+
+    // A struct to record the information of a toggle. A toggle is a code path in Dawn device that
+    // can be manually configured to run or not outside Dawn, including workarounds, special
+    // features and optimizations.
+    struct ToggleInfo {
+        const char* name;
+        const char* description;
+        const char* url;
+    };
+
+    // A struct to record the information of a feature. A feature is a GPU feature that is not
+    // required to be supported by all Dawn backends and can only be used when it is enabled on the
+    // creation of device.
+    using FeatureInfo = ToggleInfo;
+
+    // An adapter is an object that represent on possibility of creating devices in the system.
+    // Most of the time it will represent a combination of a physical GPU and an API. Not that the
+    // same GPU can be represented by multiple adapters but on different APIs.
+    //
+    // The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
+    // a reference to an underlying adapter.
+    class DAWN_NATIVE_EXPORT Adapter {
+      public:
+        Adapter();
+        Adapter(AdapterBase* impl);
+        ~Adapter();
+
+        Adapter(const Adapter& other);
+        Adapter& operator=(const Adapter& other);
+
+        // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
+        // dawn.json
+        void GetProperties(wgpu::AdapterProperties* properties) const;
+        void GetProperties(WGPUAdapterProperties* properties) const;
+
+        std::vector<const char*> GetSupportedExtensions() const;
+        std::vector<const char*> GetSupportedFeatures() const;
+        WGPUDeviceProperties GetAdapterProperties() const;
+        bool GetLimits(WGPUSupportedLimits* limits) const;
+
+        void SetUseTieredLimits(bool useTieredLimits);
+
+        // Check that the Adapter is able to support importing external images. This is necessary
+        // to implement the swapchain and interop APIs in Chromium.
+        bool SupportsExternalImages() const;
+
+        explicit operator bool() const;
+
+        // Create a device on this adapter. On an error, nullptr is returned.
+        WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
+        WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
+        WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);
+
+        void RequestDevice(const DawnDeviceDescriptor* descriptor,
+                           WGPURequestDeviceCallback callback,
+                           void* userdata);
+        void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+                           WGPURequestDeviceCallback callback,
+                           void* userdata);
+        void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                           WGPURequestDeviceCallback callback,
+                           void* userdata);
+
+        // Returns the underlying WGPUAdapter object.
+        WGPUAdapter Get() const;
+
+        // Reset the backend device object for testing purposes.
+        void ResetInternalDeviceForTesting();
+
+      private:
+        AdapterBase* mImpl = nullptr;
+    };
+
+    // Base class for options passed to Instance::DiscoverAdapters.
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
+      public:
+        const WGPUBackendType backendType;
+
+      protected:
+        AdapterDiscoveryOptionsBase(WGPUBackendType type);
+    };
+
+    enum BackendValidationLevel { Full, Partial, Disabled };
+
+    // Represents a connection to dawn_native and is used for dependency injection, discovering
+    // system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
+    //
+    // This is an RAII class for Dawn instances and also controls the lifetime of all adapters
+    // for this instance.
+    class DAWN_NATIVE_EXPORT Instance {
+      public:
+        explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
+        ~Instance();
+
+        Instance(const Instance& other) = delete;
+        Instance& operator=(const Instance& other) = delete;
+
+        // Gather all adapters in the system that can be accessed with no special options. These
+        // adapters will later be returned by GetAdapters.
+        void DiscoverDefaultAdapters();
+
+        // Adds adapters that can be discovered with the options provided (like a getProcAddress).
+        // The backend is chosen based on the type of the options used. Returns true on success.
+        bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+
+        // Returns all the adapters that the instance knows about.
+        std::vector<Adapter> GetAdapters() const;
+
+        const ToggleInfo* GetToggleInfo(const char* toggleName);
+        const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);
+
+        // Enables backend validation layers
+        void EnableBackendValidation(bool enableBackendValidation);
+        void SetBackendValidationLevel(BackendValidationLevel validationLevel);
+
+        // Enable debug capture on Dawn startup
+        void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+
+        void SetPlatform(dawn::platform::Platform* platform);
+
+        // Returns the underlying WGPUInstance object.
+        WGPUInstance Get() const;
+
+      private:
+        InstanceBase* mImpl = nullptr;
+    };
+
+    // Backend-agnostic API for dawn_native
+    DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
+
+    // Query the names of all the toggles that are enabled in device
+    DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
+
+    // Backdoor to get the number of lazy clears for testing
+    DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
+
+    // Backdoor to get the number of deprecation warnings for testing
+    DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
+
+    //  Query if texture has been initialized
+    DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
+        WGPUTexture texture,
+        uint32_t baseMipLevel,
+        uint32_t levelCount,
+        uint32_t baseArrayLayer,
+        uint32_t layerCount,
+        WGPUTextureAspect aspect = WGPUTextureAspect_All);
+
+    // Backdoor to get the order of the ProcMap for testing
+    DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+
+    DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
+
+    // ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
+    DAWN_NATIVE_EXPORT void EnableErrorInjector();
+    DAWN_NATIVE_EXPORT void DisableErrorInjector();
+    DAWN_NATIVE_EXPORT void ClearErrorInjector();
+    DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
+    DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
+
+    // The different types of external images
+    enum ExternalImageType {
+        OpaqueFD,
+        DmaBuf,
+        IOSurface,
+        DXGISharedHandle,
+        EGLImage,
+    };
+
+    // Common properties of external images
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
+      public:
+        const WGPUTextureDescriptor* cTextureDescriptor;  // Must match image creation params
+        bool isInitialized;  // Whether the texture is initialized on import
+        ExternalImageType GetType() const;
+
+      protected:
+        ExternalImageDescriptor(ExternalImageType type);
+
+      private:
+        ExternalImageType mType;
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
+      public:
+        bool isInitialized;  // Whether the texture is initialized on import
+        WGPUTextureUsageFlags usage;
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
+      public:
+        bool isInitialized;  // Whether the texture is initialized after export
+        ExternalImageType GetType() const;
+
+      protected:
+        ExternalImageExportInfo(ExternalImageType type);
+
+      private:
+        ExternalImageType mType;
+    };
+
+    DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
+
+    DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
+
+    DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
+                                                                   WGPUBindGroupLayout b);
+
+}  // namespace dawn::native
+
+// TODO(dawn:824): Remove once the deprecation period is passed.
+namespace dawn_native = dawn::native;
+
+#endif  // DAWNNATIVE_DAWNNATIVE_H_
diff --git a/include/dawn/native/MetalBackend.h b/include/dawn/native/MetalBackend.h
new file mode 100644
index 0000000..6db34a1
--- /dev/null
+++ b/include/dawn/native/MetalBackend.h
@@ -0,0 +1,73 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METALBACKEND_H_
+#define DAWNNATIVE_METALBACKEND_H_
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/native/DawnNative.h>
+
+// The specifics of the Metal backend expose types in function signatures that might not be
+// available in dependent's minimum supported SDK version. Suppress all availability errors using
+// clang's pragmas. Dependents using the types without guarded availability will still get errors
+// when using the types.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+
+struct __IOSurface;
+typedef __IOSurface* IOSurfaceRef;
+
+#ifdef __OBJC__
+#    import <Metal/Metal.h>
+#endif  //__OBJC__
+
+namespace dawn::native::metal {
+
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+        AdapterDiscoveryOptions();
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
+      public:
+        ExternalImageDescriptorIOSurface();
+
+        IOSurfaceRef ioSurface;
+
+        // This has been deprecated.
+        uint32_t plane;
+    };
+
+    DAWN_NATIVE_EXPORT WGPUTexture
+    WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
+
+    // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
+    // mean that the operations will be visible to other APIs/Metal devices right away. macOS
+    // does have a global queue of graphics operations, but the command buffers are inserted there
+    // when they are "scheduled". Submitting other operations before the command buffer is
+    // scheduled could lead to races in who gets scheduled first and incorrect rendering.
+    DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
+
+}  // namespace dawn::native::metal
+
+#ifdef __OBJC__
+namespace dawn::native::metal {
+
+    DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
+
+}  // namespace dawn::native::metal
+#endif  // __OBJC__
+
+#pragma clang diagnostic pop
+
+#endif  // DAWNNATIVE_METALBACKEND_H_
diff --git a/include/dawn/native/NullBackend.h b/include/dawn/native/NullBackend.h
new file mode 100644
index 0000000..d2799e3
--- /dev/null
+++ b/include/dawn/native/NullBackend.h
@@ -0,0 +1,25 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_NULLBACKEND_H_
+#define DAWNNATIVE_NULLBACKEND_H_
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/native/DawnNative.h>
+
+namespace dawn::native::null {
+    DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
+}  // namespace dawn::native::null
+
+#endif  // DAWNNATIVE_NULLBACKEND_H_
diff --git a/include/dawn/native/OpenGLBackend.h b/include/dawn/native/OpenGLBackend.h
new file mode 100644
index 0000000..53c878c
--- /dev/null
+++ b/include/dawn/native/OpenGLBackend.h
@@ -0,0 +1,55 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGLBACKEND_H_
+#define DAWNNATIVE_OPENGLBACKEND_H_
+
+typedef void* EGLImage;
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/native/DawnNative.h>
+
+namespace dawn::native::opengl {
+
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+        AdapterDiscoveryOptions();
+
+        void* (*getProc)(const char*);
+    };
+
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
+        AdapterDiscoveryOptionsES();
+
+        void* (*getProc)(const char*);
+    };
+
+    using PresentCallback = void (*)(void*);
+    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+    CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
+    DAWN_NATIVE_EXPORT WGPUTextureFormat
+    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
+      public:
+        ExternalImageDescriptorEGLImage();
+
+        ::EGLImage image;
+    };
+
+    DAWN_NATIVE_EXPORT WGPUTexture
+    WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGLBACKEND_H_
diff --git a/include/dawn/native/VulkanBackend.h b/include/dawn/native/VulkanBackend.h
new file mode 100644
index 0000000..a02cc3c
--- /dev/null
+++ b/include/dawn/native/VulkanBackend.h
@@ -0,0 +1,140 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKANBACKEND_H_
+#define DAWNNATIVE_VULKANBACKEND_H_
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/native/DawnNative.h>
+
+#include <vulkan/vulkan.h>
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
+
+    DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
+
+    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+    CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
+    DAWN_NATIVE_EXPORT WGPUTextureFormat
+    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+
+    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+        AdapterDiscoveryOptions();
+
+        bool forceSwiftShader = false;
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
+      public:
+        // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
+        // since the import does not need to preserve texture contents.
+
+        // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
+        // operation old/new layouts must match exactly the layouts in the release operation. So
+        // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
+        // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
+        // The first barrier is the queue transfer, the second is the layout transition to our
+        // desired usage.
+        VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
+        VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+      protected:
+        using ExternalImageDescriptor::ExternalImageDescriptor;
+    };
+
+    struct ExternalImageExportInfoVk : ExternalImageExportInfo {
+      public:
+        // See comments in |ExternalImageDescriptorVk|
+        // Contains the old/new layouts used in the queue release operation.
+        VkImageLayout releasedOldLayout;
+        VkImageLayout releasedNewLayout;
+
+      protected:
+        using ExternalImageExportInfo::ExternalImageExportInfo;
+    };
+
+// Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
+#ifdef __linux__
+
+    // Common properties of external images represented by FDs. On successful import the file
+    // descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
+    // used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
+    // caller can assume the FD is always consumed.
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
+      public:
+        int memoryFD;              // A file descriptor from an export of the memory of the image
+        std::vector<int> waitFDs;  // File descriptors of semaphores which will be waited on
+
+      protected:
+        using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
+    };
+
+    // Descriptor for opaque file descriptor image import
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
+        ExternalImageDescriptorOpaqueFD();
+
+        VkDeviceSize allocationSize;  // Must match VkMemoryAllocateInfo from image creation
+        uint32_t memoryTypeIndex;     // Must match VkMemoryAllocateInfo from image creation
+    };
+
+    // Descriptor for dma-buf file descriptor image import
+    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
+        ExternalImageDescriptorDmaBuf();
+
+        uint32_t stride;       // Stride of the buffer in bytes
+        uint64_t drmModifier;  // DRM modifier of the buffer
+    };
+
+    // Info struct that is written to in |ExportVulkanImage|.
+    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
+      public:
+        // Contains the exported semaphore handles.
+        std::vector<int> semaphoreHandles;
+
+      protected:
+        using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
+        ExternalImageExportInfoOpaqueFD();
+    };
+
+    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
+        ExternalImageExportInfoDmaBuf();
+    };
+
+#endif  // __linux__
+
+    // Imports external memory into a Vulkan image. Internally, this uses external memory /
+    // semaphore extensions to import the image and wait on the provided synchronizaton
+    // primitives before the texture can be used.
+    // On failure, returns a nullptr.
+    DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
+                                                   const ExternalImageDescriptorVk* descriptor);
+
+    // Exports external memory from a Vulkan image. This must be called on wrapped textures
+    // before they are destroyed. It writes the semaphore to wait on and the old/new image
+    // layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
+    // perform a layout transition.
+    DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
+                                              VkImageLayout desiredLayout,
+                                              ExternalImageExportInfoVk* info);
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKANBACKEND_H_
diff --git a/include/dawn/native/dawn_native_export.h b/include/dawn/native/dawn_native_export.h
new file mode 100644
index 0000000..ffbd9cc
--- /dev/null
+++ b/include/dawn/native/dawn_native_export.h
@@ -0,0 +1,36 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_EXPORT_H_
+#define DAWNNATIVE_EXPORT_H_
+
+#if defined(DAWN_NATIVE_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined(DAWN_NATIVE_IMPLEMENTATION)
+#            define DAWN_NATIVE_EXPORT __declspec(dllexport)
+#        else
+#            define DAWN_NATIVE_EXPORT __declspec(dllimport)
+#        endif
+#    else  // defined(_WIN32)
+#        if defined(DAWN_NATIVE_IMPLEMENTATION)
+#            define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
+#        else
+#            define DAWN_NATIVE_EXPORT
+#        endif
+#    endif  // defined(_WIN32)
+#else       // defined(DAWN_NATIVE_SHARED_LIBRARY)
+#    define DAWN_NATIVE_EXPORT
+#endif  // defined(DAWN_NATIVE_SHARED_LIBRARY)
+
+#endif  // DAWNNATIVE_EXPORT_H_
diff --git a/include/dawn/platform/DawnPlatform.h b/include/dawn/platform/DawnPlatform.h
new file mode 100644
index 0000000..d983794
--- /dev/null
+++ b/include/dawn/platform/DawnPlatform.h
@@ -0,0 +1,119 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNPLATFORM_DAWNPLATFORM_H_
+#define DAWNPLATFORM_DAWNPLATFORM_H_
+
+#include "dawn/platform/dawn_platform_export.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include <dawn/webgpu.h>
+
+namespace dawn::platform {
+
+    enum class TraceCategory {
+        General,     // General trace events
+        Validation,  // Dawn validation
+        Recording,   // Native command recording
+        GPUWork,     // Actual GPU work
+    };
+
+    class DAWN_PLATFORM_EXPORT CachingInterface {
+      public:
+        CachingInterface();
+        virtual ~CachingInterface();
+
+        // LoadData has two modes. The first mode is used to get a value which
+        // corresponds to the |key|. The |valueOut| is a caller provided buffer
+        // allocated to the size |valueSize| which is loaded with data of the
+        // size returned. The second mode is used to query for the existence of
+        // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
+        // The return size is non-zero if the |key| exists.
+        virtual size_t LoadData(const WGPUDevice device,
+                                const void* key,
+                                size_t keySize,
+                                void* valueOut,
+                                size_t valueSize) = 0;
+
+        // StoreData puts a |value| in the cache which corresponds to the |key|.
+        virtual void StoreData(const WGPUDevice device,
+                               const void* key,
+                               size_t keySize,
+                               const void* value,
+                               size_t valueSize) = 0;
+
+      private:
+        CachingInterface(const CachingInterface&) = delete;
+        CachingInterface& operator=(const CachingInterface&) = delete;
+    };
+
+    class DAWN_PLATFORM_EXPORT WaitableEvent {
+      public:
+        WaitableEvent() = default;
+        virtual ~WaitableEvent() = default;
+        virtual void Wait() = 0;        // Wait for completion
+        virtual bool IsComplete() = 0;  // Non-blocking check if the event is complete
+    };
+
+    using PostWorkerTaskCallback = void (*)(void* userdata);
+
+    class DAWN_PLATFORM_EXPORT WorkerTaskPool {
+      public:
+        WorkerTaskPool() = default;
+        virtual ~WorkerTaskPool() = default;
+        virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
+                                                              void* userdata) = 0;
+    };
+
+    class DAWN_PLATFORM_EXPORT Platform {
+      public:
+        Platform();
+        virtual ~Platform();
+
+        virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
+
+        virtual double MonotonicallyIncreasingTime();
+
+        virtual uint64_t AddTraceEvent(char phase,
+                                       const unsigned char* categoryGroupEnabled,
+                                       const char* name,
+                                       uint64_t id,
+                                       double timestamp,
+                                       int numArgs,
+                                       const char** argNames,
+                                       const unsigned char* argTypes,
+                                       const uint64_t* argValues,
+                                       unsigned char flags);
+
+        // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
+        // when the fingerprint changes. The returned CachingInterface is expected to outlive the
+        // device which uses it to persistently cache objects.
+        virtual CachingInterface* GetCachingInterface(const void* fingerprint,
+                                                      size_t fingerprintSize);
+        virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
+
+      private:
+        Platform(const Platform&) = delete;
+        Platform& operator=(const Platform&) = delete;
+    };
+
+}  // namespace dawn::platform
+
+// TODO(dawn:824): Remove once the deprecation period is passed.
+namespace dawn_platform = dawn::platform;
+
+#endif  // DAWNPLATFORM_DAWNPLATFORM_H_
diff --git a/include/dawn/platform/dawn_platform_export.h b/include/dawn/platform/dawn_platform_export.h
new file mode 100644
index 0000000..0626467
--- /dev/null
+++ b/include/dawn/platform/dawn_platform_export.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNPLATFORM_EXPORT_H_
+#define DAWNPLATFORM_EXPORT_H_
+
+#if defined(DAWN_PLATFORM_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#            define DAWN_PLATFORM_EXPORT __declspec(dllexport)
+#        else
+#            define DAWN_PLATFORM_EXPORT __declspec(dllimport)
+#        endif
+#    else  // defined(_WIN32)
+#        if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#            define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
+#        else
+#            define DAWN_PLATFORM_EXPORT
+#        endif
+#    endif  // defined(_WIN32)
+#else       // defined(DAWN_PLATFORM_SHARED_LIBRARY)
+#    define DAWN_PLATFORM_EXPORT
+#endif  // defined(DAWN_PLATFORM_SHARED_LIBRARY)
+
+#endif  // DAWNPLATFORM_EXPORT_H_
diff --git a/include/dawn/wire/Wire.h b/include/dawn/wire/Wire.h
new file mode 100644
index 0000000..6e63b3e
--- /dev/null
+++ b/include/dawn/wire/Wire.h
@@ -0,0 +1,79 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRE_H_
+#define DAWNWIRE_WIRE_H_
+
+#include <cstdint>
+#include <limits>
+
+#include "dawn/webgpu.h"
+#include "dawn/wire/dawn_wire_export.h"
+
+namespace dawn::wire {
+
+    class DAWN_WIRE_EXPORT CommandSerializer {
+      public:
+        CommandSerializer();
+        virtual ~CommandSerializer();
+        CommandSerializer(const CommandSerializer& rhs) = delete;
+        CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
+
+        // Get space for serializing commands.
+        // GetCmdSpace will never be called with a value larger than
+        // what GetMaximumAllocationSize returns. Return nullptr to indicate
+        // a fatal error.
+        virtual void* GetCmdSpace(size_t size) = 0;
+        virtual bool Flush() = 0;
+        virtual size_t GetMaximumAllocationSize() const = 0;
+        virtual void OnSerializeError();
+    };
+
+    class DAWN_WIRE_EXPORT CommandHandler {
+      public:
+        CommandHandler();
+        virtual ~CommandHandler();
+        CommandHandler(const CommandHandler& rhs) = delete;
+        CommandHandler& operator=(const CommandHandler& rhs) = delete;
+
+        virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
+    };
+
+    DAWN_WIRE_EXPORT size_t
+    SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
+
+    DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
+        const WGPUDeviceProperties* deviceProperties,
+        char* serializeBuffer);
+
+    DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
+                                                          const volatile char* deserializeBuffer,
+                                                          size_t deserializeBufferSize);
+
+    DAWN_WIRE_EXPORT size_t
+    SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
+
+    DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
+                                                       char* serializeBuffer);
+
+    DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
+                                                         const volatile char* deserializeBuffer,
+                                                         size_t deserializeBufferSize);
+
+}  // namespace dawn::wire
+
+// TODO(dawn:824): Remove once the deprecation period is passed.
+namespace dawn_wire = dawn::wire;
+
+#endif  // DAWNWIRE_WIRE_H_
diff --git a/include/dawn/wire/WireClient.h b/include/dawn/wire/WireClient.h
new file mode 100644
index 0000000..d5e9629
--- /dev/null
+++ b/include/dawn/wire/WireClient.h
@@ -0,0 +1,183 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRECLIENT_H_
+#define DAWNWIRE_WIRECLIENT_H_
+
+#include "dawn/dawn_proc_table.h"
+#include "dawn/wire/Wire.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::wire {
+
+    namespace client {
+        class Client;
+        class MemoryTransferService;
+
+        DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
+    }  // namespace client
+
+    struct ReservedTexture {
+        WGPUTexture texture;
+        uint32_t id;
+        uint32_t generation;
+        uint32_t deviceId;
+        uint32_t deviceGeneration;
+    };
+
+    struct ReservedSwapChain {
+        WGPUSwapChain swapchain;
+        uint32_t id;
+        uint32_t generation;
+        uint32_t deviceId;
+        uint32_t deviceGeneration;
+    };
+
+    struct ReservedDevice {
+        WGPUDevice device;
+        uint32_t id;
+        uint32_t generation;
+    };
+
+    struct ReservedInstance {
+        WGPUInstance instance;
+        uint32_t id;
+        uint32_t generation;
+    };
+
+    struct DAWN_WIRE_EXPORT WireClientDescriptor {
+        CommandSerializer* serializer;
+        client::MemoryTransferService* memoryTransferService = nullptr;
+    };
+
+    class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
+      public:
+        WireClient(const WireClientDescriptor& descriptor);
+        ~WireClient() override;
+
+        const volatile char* HandleCommands(const volatile char* commands,
+                                            size_t size) override final;
+
+        ReservedTexture ReserveTexture(WGPUDevice device);
+        ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+        ReservedDevice ReserveDevice();
+        ReservedInstance ReserveInstance();
+
+        void ReclaimTextureReservation(const ReservedTexture& reservation);
+        void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+        void ReclaimDeviceReservation(const ReservedDevice& reservation);
+        void ReclaimInstanceReservation(const ReservedInstance& reservation);
+
+        // Disconnects the client.
+        // Commands allocated after this point will not be sent.
+        void Disconnect();
+
+      private:
+        std::unique_ptr<client::Client> mImpl;
+    };
+
+    namespace client {
+        class DAWN_WIRE_EXPORT MemoryTransferService {
+          public:
+            MemoryTransferService();
+            virtual ~MemoryTransferService();
+
+            class ReadHandle;
+            class WriteHandle;
+
+            // Create a handle for reading server data.
+            // This may fail and return nullptr.
+            virtual ReadHandle* CreateReadHandle(size_t) = 0;
+
+            // Create a handle for writing server data.
+            // This may fail and return nullptr.
+            virtual WriteHandle* CreateWriteHandle(size_t) = 0;
+
+            class DAWN_WIRE_EXPORT ReadHandle {
+              public:
+                ReadHandle();
+                virtual ~ReadHandle();
+
+                // Get the required serialization size for SerializeCreate
+                virtual size_t SerializeCreateSize() = 0;
+
+                // Serialize the handle into |serializePointer| so it can be received by the server.
+                virtual void SerializeCreate(void* serializePointer) = 0;
+
+                // Simply return the base address of the allocation (without applying any offset)
+                // Returns nullptr if the allocation failed.
+                // The data must live at least until the ReadHandle is destructued
+                virtual const void* GetData() = 0;
+
+                // Gets called when a MapReadCallback resolves.
+                // deserialize the data update and apply
+                // it to the range (offset, offset + size) of allocation
+                // There could be nothing to be deserialized (if using shared memory)
+                // Needs to check potential offset/size OOB and overflow
+                virtual bool DeserializeDataUpdate(const void* deserializePointer,
+                                                   size_t deserializeSize,
+                                                   size_t offset,
+                                                   size_t size) = 0;
+
+              private:
+                ReadHandle(const ReadHandle&) = delete;
+                ReadHandle& operator=(const ReadHandle&) = delete;
+            };
+
+            class DAWN_WIRE_EXPORT WriteHandle {
+              public:
+                WriteHandle();
+                virtual ~WriteHandle();
+
+                // Get the required serialization size for SerializeCreate
+                virtual size_t SerializeCreateSize() = 0;
+
+                // Serialize the handle into |serializePointer| so it can be received by the server.
+                virtual void SerializeCreate(void* serializePointer) = 0;
+
+                // Simply return the base address of the allocation (without applying any offset)
+                // The data returned should be zero-initialized.
+                // The data returned must live at least until the WriteHandle is destructed.
+                // On failure, the pointer returned should be null.
+                virtual void* GetData() = 0;
+
+                // Get the required serialization size for SerializeDataUpdate
+                virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
+
+                // Serialize a command to send the modified contents of
+                // the subrange (offset, offset + size) of the allocation at buffer unmap
+                // This subrange is always the whole mapped region for now
+                // There could be nothing to be serialized (if using shared memory)
+                virtual void SerializeDataUpdate(void* serializePointer,
+                                                 size_t offset,
+                                                 size_t size) = 0;
+
+              private:
+                WriteHandle(const WriteHandle&) = delete;
+                WriteHandle& operator=(const WriteHandle&) = delete;
+            };
+
+          private:
+            MemoryTransferService(const MemoryTransferService&) = delete;
+            MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+        };
+
+        // Backdoor to get the order of the ProcMap for testing
+        DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+    }  // namespace client
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_WIRECLIENT_H_
diff --git a/include/dawn/wire/WireServer.h b/include/dawn/wire/WireServer.h
new file mode 100644
index 0000000..b561bbb
--- /dev/null
+++ b/include/dawn/wire/WireServer.h
@@ -0,0 +1,150 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRESERVER_H_
+#define DAWNWIRE_WIRESERVER_H_
+
+#include <memory>
+
+#include "dawn/wire/Wire.h"
+
+struct DawnProcTable;
+
+namespace dawn::wire {
+
+    namespace server {
+        class Server;
+        class MemoryTransferService;
+    }  // namespace server
+
+    struct DAWN_WIRE_EXPORT WireServerDescriptor {
+        const DawnProcTable* procs;
+        CommandSerializer* serializer;
+        server::MemoryTransferService* memoryTransferService = nullptr;
+    };
+
+    class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
+      public:
+        WireServer(const WireServerDescriptor& descriptor);
+        ~WireServer() override;
+
+        const volatile char* HandleCommands(const volatile char* commands,
+                                            size_t size) override final;
+
+        bool InjectTexture(WGPUTexture texture,
+                           uint32_t id,
+                           uint32_t generation,
+                           uint32_t deviceId,
+                           uint32_t deviceGeneration);
+        bool InjectSwapChain(WGPUSwapChain swapchain,
+                             uint32_t id,
+                             uint32_t generation,
+                             uint32_t deviceId,
+                             uint32_t deviceGeneration);
+
+        bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+        bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+        // Look up a device by (id, generation) pair. Returns nullptr if the generation
+        // has expired or the id is not found.
+        // The Wire does not have destroy hooks to allow an embedder to observe when an object
+        // has been destroyed, but in Chrome, we need to know the list of live devices so we
+        // can call device.Tick() on all of them periodically to ensure progress on asynchronous
+        // work is made. Getting this list can be done by tracking the (id, generation) of
+        // previously injected devices, and observing if GetDevice(id, generation) returns non-null.
+        WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+
+      private:
+        std::unique_ptr<server::Server> mImpl;
+    };
+
+    namespace server {
+        class DAWN_WIRE_EXPORT MemoryTransferService {
+          public:
+            MemoryTransferService();
+            virtual ~MemoryTransferService();
+
+            class ReadHandle;
+            class WriteHandle;
+
+            // Deserialize data to create Read/Write handles. These handles are for the client
+            // to Read/Write data.
+            virtual bool DeserializeReadHandle(const void* deserializePointer,
+                                               size_t deserializeSize,
+                                               ReadHandle** readHandle) = 0;
+            virtual bool DeserializeWriteHandle(const void* deserializePointer,
+                                                size_t deserializeSize,
+                                                WriteHandle** writeHandle) = 0;
+
+            class DAWN_WIRE_EXPORT ReadHandle {
+              public:
+                ReadHandle();
+                virtual ~ReadHandle();
+
+                // Return the size of the command serialized if
+                // SerializeDataUpdate is called with the same offset/size args
+                virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
+
+                // Gets called when a MapReadCallback resolves.
+                // Serialize the data update for the range (offset, offset + size) into
+                // |serializePointer| to the client There could be nothing to be serialized (if
+                // using shared memory)
+                virtual void SerializeDataUpdate(const void* data,
+                                                 size_t offset,
+                                                 size_t size,
+                                                 void* serializePointer) = 0;
+
+              private:
+                ReadHandle(const ReadHandle&) = delete;
+                ReadHandle& operator=(const ReadHandle&) = delete;
+            };
+
+            class DAWN_WIRE_EXPORT WriteHandle {
+              public:
+                WriteHandle();
+                virtual ~WriteHandle();
+
+                // Set the target for writes from the client. DeserializeFlush should copy data
+                // into the target.
+                void SetTarget(void* data);
+                // Set Staging data length for OOB check
+                void SetDataLength(size_t dataLength);
+
+                // This function takes in the serialized result of
+                // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
+                // Needs to check potential offset/size OOB and overflow
+                virtual bool DeserializeDataUpdate(const void* deserializePointer,
+                                                   size_t deserializeSize,
+                                                   size_t offset,
+                                                   size_t size) = 0;
+
+              protected:
+                void* mTargetData = nullptr;
+                size_t mDataLength = 0;
+
+              private:
+                WriteHandle(const WriteHandle&) = delete;
+                WriteHandle& operator=(const WriteHandle&) = delete;
+            };
+
+          private:
+            MemoryTransferService(const MemoryTransferService&) = delete;
+            MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+        };
+    }  // namespace server
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_WIRESERVER_H_
diff --git a/include/dawn/wire/dawn_wire_export.h b/include/dawn/wire/dawn_wire_export.h
new file mode 100644
index 0000000..8043f61
--- /dev/null
+++ b/include/dawn/wire/dawn_wire_export.h
@@ -0,0 +1,36 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_EXPORT_H_
+#define DAWNWIRE_EXPORT_H_
+
+#if defined(DAWN_WIRE_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined(DAWN_WIRE_IMPLEMENTATION)
+#            define DAWN_WIRE_EXPORT __declspec(dllexport)
+#        else
+#            define DAWN_WIRE_EXPORT __declspec(dllimport)
+#        endif
+#    else  // defined(_WIN32)
+#        if defined(DAWN_WIRE_IMPLEMENTATION)
+#            define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
+#        else
+#            define DAWN_WIRE_EXPORT
+#        endif
+#    endif  // defined(_WIN32)
+#else       // defined(DAWN_WIRE_SHARED_LIBRARY)
+#    define DAWN_WIRE_EXPORT
+#endif  // defined(DAWN_WIRE_SHARED_LIBRARY)
+
+#endif  // DAWNWIRE_EXPORT_H_
diff --git a/include/webgpu/webgpu.h b/include/webgpu/webgpu.h
new file mode 100644
index 0000000..4a29d37
--- /dev/null
+++ b/include/webgpu/webgpu.h
@@ -0,0 +1 @@
+#include "dawn/webgpu.h"
diff --git a/include/webgpu/webgpu_cpp.h b/include/webgpu/webgpu_cpp.h
new file mode 100644
index 0000000..5bbd869
--- /dev/null
+++ b/include/webgpu/webgpu_cpp.h
@@ -0,0 +1 @@
+#include <dawn/webgpu_cpp.h>
diff --git a/infra/OWNERS b/infra/OWNERS
new file mode 100644
index 0000000..0473960
--- /dev/null
+++ b/infra/OWNERS
@@ -0,0 +1,2 @@
+rharrison@chromium.org
+enga@chromium.org
diff --git a/infra/config/OWNERS b/infra/config/OWNERS
new file mode 100644
index 0000000..6b5005f
--- /dev/null
+++ b/infra/config/OWNERS
@@ -0,0 +1,2 @@
+cwallez@chromium.org
+tandrii@chromium.org
diff --git a/infra/config/PRESUBMIT.py b/infra/config/PRESUBMIT.py
index 6f2e2a0..6193c41 100644
--- a/infra/config/PRESUBMIT.py
+++ b/infra/config/PRESUBMIT.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The Tint Authors
+# Copyright 2018 The Dawn Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/infra/config/global/generated/commit-queue.cfg b/infra/config/global/generated/commit-queue.cfg
index 6158aa5..ac774d1 100644
--- a/infra/config/global/generated/commit-queue.cfg
+++ b/infra/config/global/generated/commit-queue.cfg
@@ -12,59 +12,68 @@
   }
 }
 config_groups {
-  name: "Tint-CQ"
+  name: "Dawn-CQ"
   gerrit {
     url: "https://dawn-review.googlesource.com"
     projects {
-      name: "tint"
+      name: "dawn"
       ref_regexp: "refs/heads/.+"
     }
   }
   verifiers {
     gerrit_cq_ability {
-      committer_list: "project-tint-committers"
-      dry_run_access_list: "project-tint-tryjobs-access"
+      committer_list: "project-dawn-committers"
+      dry_run_access_list: "project-dawn-tryjob-access"
     }
     tryjob {
       builders {
-        name: "tint/try/linux-clang-dbg-x64"
+        name: "chromium/try/linux-dawn-rel"
       }
       builders {
-        name: "tint/try/linux-clang-dbg-x86"
+        name: "chromium/try/mac-dawn-rel"
       }
       builders {
-        name: "tint/try/linux-clang-rel-x64"
+        name: "chromium/try/win-dawn-rel"
       }
       builders {
-        name: "tint/try/linux-clang-rel-x86"
+        name: "dawn/try/linux-clang-dbg-x64"
       }
       builders {
-        name: "tint/try/mac-dbg"
+        name: "dawn/try/linux-clang-dbg-x86"
       }
       builders {
-        name: "tint/try/mac-rel"
+        name: "dawn/try/linux-clang-rel-x64"
       }
       builders {
-        name: "tint/try/presubmit"
+        name: "dawn/try/linux-clang-rel-x86"
+      }
+      builders {
+        name: "dawn/try/mac-dbg"
+      }
+      builders {
+        name: "dawn/try/mac-rel"
+      }
+      builders {
+        name: "dawn/try/presubmit"
         disable_reuse: true
       }
       builders {
-        name: "tint/try/win-clang-dbg-x64"
+        name: "dawn/try/win-clang-dbg-x64"
       }
       builders {
-        name: "tint/try/win-clang-dbg-x86"
+        name: "dawn/try/win-clang-dbg-x86"
       }
       builders {
-        name: "tint/try/win-clang-rel-x64"
+        name: "dawn/try/win-clang-rel-x64"
       }
       builders {
-        name: "tint/try/win-clang-rel-x86"
+        name: "dawn/try/win-clang-rel-x86"
       }
       builders {
-        name: "tint/try/win-msvc-dbg-x64"
+        name: "dawn/try/win-msvc-dbg-x64"
       }
       builders {
-        name: "tint/try/win-msvc-rel-x64"
+        name: "dawn/try/win-msvc-rel-x64"
       }
       retry_config {
         single_quota: 1
diff --git a/infra/config/global/generated/cr-buildbucket.cfg b/infra/config/global/generated/cr-buildbucket.cfg
index b94912c..732cf74 100644
--- a/infra/config/global/generated/cr-buildbucket.cfg
+++ b/infra/config/global/generated/cr-buildbucket.cfg
@@ -11,13 +11,35 @@
   }
   swarming {
     builders {
+      name: "cron-linux-clang-rel-x64"
+      swarming_host: "chromium-swarm.appspot.com"
+      dimensions: "cpu:x86-64"
+      dimensions: "os:Ubuntu-18.04"
+      dimensions: "pool:luci.flex.ci"
+      recipe {
+        name: "dawn"
+        cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
+        cipd_version: "refs/heads/master"
+        properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
+        properties_j: "clang:true"
+        properties_j: "debug:false"
+        properties_j: "gen_fuzz_corpus:true"
+        properties_j: "target_cpu:\"x64\""
+      }
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      experiments {
+        key: "luci.recipes.use_python3"
+        value: 100
+      }
+    }
+    builders {
       name: "linux-clang-dbg-x64"
       swarming_host: "chromium-swarm.appspot.com"
       dimensions: "cpu:x86-64"
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -25,7 +47,7 @@
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -38,7 +60,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -46,7 +68,7 @@
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x86\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -59,7 +81,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -67,7 +89,7 @@
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -80,7 +102,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -88,7 +110,7 @@
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x86\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -101,7 +123,7 @@
       dimensions: "os:Mac-10.15|Mac-11"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -113,7 +135,7 @@
         name: "osx_sdk"
         path: "osx_sdk"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -126,7 +148,7 @@
       dimensions: "os:Mac-10.15|Mac-11"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -138,7 +160,7 @@
         name: "osx_sdk"
         path: "osx_sdk"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -151,7 +173,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -163,7 +185,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -176,7 +198,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -188,7 +210,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -201,7 +223,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -213,7 +235,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -226,7 +248,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -238,7 +260,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -251,14 +273,14 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "clang:false"
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -271,14 +293,14 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.ci"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "clang:false"
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -293,7 +315,7 @@
   }
   acls {
     role: SCHEDULER
-    group: "project-tint-tryjob-access"
+    group: "project-dawn-tryjob-access"
   }
   acls {
     role: SCHEDULER
@@ -307,7 +329,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -316,7 +338,7 @@
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -329,7 +351,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -338,7 +360,7 @@
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x86\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -351,7 +373,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -360,7 +382,7 @@
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -373,7 +395,7 @@
       dimensions: "os:Ubuntu-18.04"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -382,7 +404,7 @@
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x86\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -395,7 +417,7 @@
       dimensions: "os:Mac-10.15|Mac-11"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -408,7 +430,7 @@
         name: "osx_sdk"
         path: "osx_sdk"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -421,7 +443,7 @@
       dimensions: "os:Mac-10.15|Mac-11"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -434,7 +456,7 @@
         name: "osx_sdk"
         path: "osx_sdk"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -451,10 +473,10 @@
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
-        properties_j: "repo_name:\"tint\""
+        properties_j: "repo_name:\"dawn\""
         properties_j: "runhooks:true"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -467,7 +489,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -480,7 +502,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -493,7 +515,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -506,7 +528,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -519,7 +541,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -532,7 +554,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -545,7 +567,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$build/goma:{\"enable_ats\":true,\"rpc_extra_params\":\"?prod\",\"server_host\":\"goma.chromium.org\"}"
@@ -558,7 +580,7 @@
         name: "win_toolchain"
         path: "win_toolchain"
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -571,7 +593,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
@@ -579,7 +601,7 @@
         properties_j: "debug:true"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
@@ -592,7 +614,7 @@
       dimensions: "os:Windows-10"
       dimensions: "pool:luci.flex.try"
       recipe {
-        name: "tint"
+        name: "dawn"
         cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
         cipd_version: "refs/heads/master"
         properties_j: "$depot_tools/bot_update:{\"apply_patch_on_gclient\":true}"
@@ -600,7 +622,7 @@
         properties_j: "debug:false"
         properties_j: "target_cpu:\"x64\""
       }
-      service_account: "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+      service_account: "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
       experiments {
         key: "luci.recipes.use_python3"
         value: 100
diff --git a/infra/config/global/generated/luci-milo.cfg b/infra/config/global/generated/luci-milo.cfg
index 075a076..5770d7d 100644
--- a/infra/config/global/generated/luci-milo.cfg
+++ b/infra/config/global/generated/luci-milo.cfg
@@ -6,112 +6,118 @@
 
 consoles {
   id: "ci"
-  name: "Tint CI Builders"
-  repo_url: "https://dawn.googlesource.com/tint"
+  name: "Dawn CI Builders"
+  repo_url: "https://dawn.googlesource.com/dawn"
   refs: "regexp:refs/heads/main"
   manifest_name: "REVISION"
   builders {
-    name: "buildbucket/luci.tint.ci/linux-clang-dbg-x64"
+    name: "buildbucket/luci.dawn.ci/linux-clang-dbg-x64"
     category: "linux|clang|dbg"
     short_name: "x64"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/linux-clang-rel-x64"
-    category: "linux|clang|rel"
-    short_name: "x64"
-  }
-  builders {
-    name: "buildbucket/luci.tint.ci/linux-clang-dbg-x86"
+    name: "buildbucket/luci.dawn.ci/linux-clang-dbg-x86"
     category: "linux|clang|dbg"
     short_name: "x86"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/linux-clang-rel-x86"
+    name: "buildbucket/luci.dawn.ci/linux-clang-rel-x64"
+    category: "linux|clang|rel"
+    short_name: "x64"
+  }
+  builders {
+    name: "buildbucket/luci.dawn.ci/linux-clang-rel-x86"
     category: "linux|clang|rel"
     short_name: "x86"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/mac-dbg"
+    name: "buildbucket/luci.dawn.ci/mac-dbg"
     category: "mac"
     short_name: "dbg"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/mac-rel"
+    name: "buildbucket/luci.dawn.ci/mac-rel"
     category: "mac"
     short_name: "rel"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/win-clang-dbg-x64"
+    name: "buildbucket/luci.dawn.ci/win-clang-dbg-x64"
     category: "win|clang|dbg"
     short_name: "x64"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/win-clang-rel-x64"
-    category: "win|clang|rel"
-    short_name: "x64"
-  }
-  builders {
-    name: "buildbucket/luci.tint.ci/win-clang-dbg-x86"
+    name: "buildbucket/luci.dawn.ci/win-clang-dbg-x86"
     category: "win|clang|dbg"
     short_name: "x86"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/win-clang-rel-x86"
+    name: "buildbucket/luci.dawn.ci/win-clang-rel-x64"
+    category: "win|clang|rel"
+    short_name: "x64"
+  }
+  builders {
+    name: "buildbucket/luci.dawn.ci/win-clang-rel-x86"
     category: "win|clang|rel"
     short_name: "x86"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/win-msvc-dbg-x64"
+    name: "buildbucket/luci.dawn.ci/win-msvc-dbg-x64"
     category: "win|msvc"
     short_name: "dbg"
   }
   builders {
-    name: "buildbucket/luci.tint.ci/win-msvc-rel-x64"
+    name: "buildbucket/luci.dawn.ci/win-msvc-rel-x64"
     category: "win|msvc"
     short_name: "rel"
   }
+  builders {
+    name: "buildbucket/luci.dawn.ci/cron-linux-clang-rel-x64"
+    category: "cron|linux|clang|rel"
+    short_name: "x64"
+  }
 }
 consoles {
   id: "try"
-  name: "Tint try Builders"
+  name: "Dawn try Builders"
   builders {
-    name: "buildbucket/luci.tint.try/presubmit"
+    name: "buildbucket/luci.dawn.try/presubmit"
   }
   builders {
-    name: "buildbucket/luci.tint.try/linux-clang-dbg-x64"
+    name: "buildbucket/luci.dawn.try/linux-clang-dbg-x64"
   }
   builders {
-    name: "buildbucket/luci.tint.try/linux-clang-rel-x64"
+    name: "buildbucket/luci.dawn.try/linux-clang-dbg-x86"
   }
   builders {
-    name: "buildbucket/luci.tint.try/linux-clang-dbg-x86"
+    name: "buildbucket/luci.dawn.try/linux-clang-rel-x64"
   }
   builders {
-    name: "buildbucket/luci.tint.try/linux-clang-rel-x86"
+    name: "buildbucket/luci.dawn.try/linux-clang-rel-x86"
   }
   builders {
-    name: "buildbucket/luci.tint.try/mac-dbg"
+    name: "buildbucket/luci.dawn.try/mac-dbg"
   }
   builders {
-    name: "buildbucket/luci.tint.try/mac-rel"
+    name: "buildbucket/luci.dawn.try/mac-rel"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-clang-dbg-x64"
+    name: "buildbucket/luci.dawn.try/win-clang-dbg-x64"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-clang-rel-x64"
+    name: "buildbucket/luci.dawn.try/win-clang-dbg-x86"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-clang-dbg-x86"
+    name: "buildbucket/luci.dawn.try/win-clang-rel-x64"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-clang-rel-x86"
+    name: "buildbucket/luci.dawn.try/win-clang-rel-x86"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-msvc-dbg-x64"
+    name: "buildbucket/luci.dawn.try/win-msvc-dbg-x64"
   }
   builders {
-    name: "buildbucket/luci.tint.try/win-msvc-rel-x64"
+    name: "buildbucket/luci.dawn.try/win-msvc-rel-x64"
   }
   builder_view_only: true
 }
+logo_url: "https://storage.googleapis.com/chrome-infra-public/logo/dawn-logo.png"
diff --git a/infra/config/global/generated/luci-scheduler.cfg b/infra/config/global/generated/luci-scheduler.cfg
index ccdeef0..6774014 100644
--- a/infra/config/global/generated/luci-scheduler.cfg
+++ b/infra/config/global/generated/luci-scheduler.cfg
@@ -5,6 +5,17 @@
 #   https://luci-config.appspot.com/schemas/projects:luci-scheduler.cfg
 
 job {
+  id: "cron-linux-clang-rel-x64"
+  realm: "ci"
+  schedule: "0 0 0 * * * *"
+  acl_sets: "ci"
+  buildbucket {
+    server: "cr-buildbucket.appspot.com"
+    bucket: "ci"
+    builder: "cron-linux-clang-rel-x64"
+  }
+}
+job {
   id: "linux-clang-dbg-x64"
   realm: "ci"
   acl_sets: "ci"
@@ -141,7 +152,7 @@
   triggers: "win-msvc-dbg-x64"
   triggers: "win-msvc-rel-x64"
   gitiles {
-    repo: "https://dawn.googlesource.com/tint"
+    repo: "https://dawn.googlesource.com/dawn"
     refs: "regexp:refs/heads/main"
   }
 }
@@ -149,7 +160,7 @@
   name: "ci"
   acls {
     role: OWNER
-    granted_to: "group:project-tint-admins"
+    granted_to: "group:project-dawn-admins"
   }
   acls {
     granted_to: "group:all"
diff --git a/infra/config/global/generated/project.cfg b/infra/config/global/generated/project.cfg
index 59dd096..06a9172 100644
--- a/infra/config/global/generated/project.cfg
+++ b/infra/config/global/generated/project.cfg
@@ -4,7 +4,7 @@
 # For the schema of this file, see ProjectCfg message:
 #   https://luci-config.appspot.com/schemas/projects:project.cfg
 
-name: "tint"
+name: "dawn"
 access: "group:all"
 lucicfg {
   version: "1.30.9"
diff --git a/infra/config/global/generated/realms.cfg b/infra/config/global/generated/realms.cfg
index 4f4827d..94dd87b 100644
--- a/infra/config/global/generated/realms.cfg
+++ b/infra/config/global/generated/realms.cfg
@@ -16,7 +16,7 @@
   }
   bindings {
     role: "role/configs.validator"
-    principals: "user:tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+    principals: "user:dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
   }
   bindings {
     role: "role/logdog.reader"
@@ -28,7 +28,7 @@
   }
   bindings {
     role: "role/scheduler.owner"
-    principals: "group:project-tint-admins"
+    principals: "group:project-dawn-admins"
   }
   bindings {
     role: "role/scheduler.reader"
@@ -39,22 +39,30 @@
   name: "ci"
   bindings {
     role: "role/buildbucket.builderServiceAccount"
-    principals: "user:tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+    principals: "user:dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
   }
   bindings {
     role: "role/buildbucket.reader"
     principals: "group:all"
   }
+  bindings {
+    role: "role/swarming.taskTriggerer"
+    principals: "group:flex-ci-led-users"
+  }
 }
 realms {
   name: "try"
   bindings {
     role: "role/buildbucket.builderServiceAccount"
-    principals: "user:tint-try-builder@chops-service-accounts.iam.gserviceaccount.com"
+    principals: "user:dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com"
   }
   bindings {
     role: "role/buildbucket.triggerer"
-    principals: "group:project-tint-tryjob-access"
+    principals: "group:project-dawn-tryjob-access"
     principals: "group:service-account-cq"
   }
+  bindings {
+    role: "role/swarming.taskTriggerer"
+    principals: "group:flex-try-led-users"
+  }
 }
diff --git a/infra/config/global/main.star b/infra/config/global/main.star
index 3a0b597..7331e9a 100755
--- a/infra/config/global/main.star
+++ b/infra/config/global/main.star
@@ -1,11 +1,11 @@
 #!/usr/bin/env lucicfg
 #
-# Copyright 2021 The Tint Authors. All rights reserved.
+# Copyright 2021 The Dawn Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
 """
-main.star: lucicfg configuration for Tint's standalone builers.
+main.star: lucicfg configuration for Dawn's standalone builers.
 """
 
 # Use LUCI Scheduler BBv2 names and add Scheduler realms configs.
@@ -18,7 +18,7 @@
 lucicfg.config(fail_on_warnings = True)
 
 luci.project(
-    name = "tint",
+    name = "dawn",
     buildbucket = "cr-buildbucket.appspot.com",
     logdog = "luci-logdog.appspot.com",
     milo = "luci-milo.appspot.com",
@@ -39,7 +39,7 @@
             roles = [
                 acl.SCHEDULER_OWNER,
             ],
-            groups = "project-tint-admins",
+            groups = "project-dawn-admins",
         ),
         acl.entry(
             roles = [
@@ -51,7 +51,7 @@
     bindings = [
         luci.binding(
             roles = "role/configs.validator",
-            users = "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com",
+            users = "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com",
         ),
     ],
 )
@@ -73,24 +73,39 @@
     ],
 )
 
+# Allow LED users to trigger swarming tasks directly when debugging ci
+# builders.
+luci.binding(
+    realm = "ci",
+    roles = "role/swarming.taskTriggerer",
+    groups = "flex-ci-led-users",
+)
+
 luci.bucket(
     name = "try",
     acls = [
         acl.entry(
             acl.BUILDBUCKET_TRIGGERER,
             groups = [
-                "project-tint-tryjob-access",
+                "project-dawn-tryjob-access",
                 "service-account-cq",
             ],
         ),
     ],
 )
 
+# Allow LED users to trigger swarming tasks directly when debugging try
+# builders.
+luci.binding(
+    realm = "try",
+    roles = "role/swarming.taskTriggerer",
+    groups = "flex-try-led-users",
+)
+
 os_category = struct(
     LINUX = "Linux",
     MAC = "Mac",
     WINDOWS = "Windows",
-    UNKNOWN = "Unknown",
 )
 
 def os_enum(dimension, category, console_name):
@@ -100,7 +115,6 @@
     LINUX = os_enum("Ubuntu-18.04", os_category.LINUX, "linux"),
     MAC = os_enum("Mac-10.15|Mac-11", os_category.MAC, "mac"),
     WINDOWS = os_enum("Windows-10", os_category.WINDOWS, "win"),
-    UNKNOWN = os_enum("Unknown", os_category.UNKNOWN, "unknown"),
 )
 
 # Recipes
@@ -112,7 +126,19 @@
       A luci.recipe
     """
     return luci.recipe(
-        name = "tint",
+        name = "dawn",
+        cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
+        cipd_version = "refs/heads/master",
+    )
+
+def get_presubmit_executable():
+    """Get standard executable for presubmit
+
+    Returns:
+      A luci.recipe
+    """
+    return luci.recipe(
+        name = "run_presubmit",
         cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
         cipd_version = "refs/heads/master",
     )
@@ -128,13 +154,13 @@
 
     """
 
-    if arg.startswith("linux"):
+    if arg.find("linux") != -1:
         return os.LINUX
-    if arg.startswith("win"):
+    if arg.find("win") != -1:
         return os.WINDOWS
-    if arg.startswith("mac"):
+    if arg.find("mac") != -1:
         return os.MAC
-    return os.UNKNOWN
+    return os.MAC
 
 def get_default_caches(os, clang):
     """Get standard caches for builders
@@ -162,7 +188,6 @@
 
     Returns:
       A dimension dict
-
     """
     dimensions = {}
 
@@ -172,7 +197,7 @@
 
     return dimensions
 
-def get_default_properties(os, clang, debug, cpu):
+def get_default_properties(os, clang, debug, cpu, fuzzer):
     """Get the properties for a builder that don't depend on being CI vs Try
 
     Args:
@@ -180,6 +205,7 @@
       clang: is this builder running clang
       debug: is this builder generating debug builds
       cpu: string representing the target CPU architecture
+      fuzzer: is this builder running the fuzzers
 
     Returns:
       A properties dict
@@ -192,7 +218,10 @@
     properties["clang"] = clang
     msvc = os.category == os_category.WINDOWS and not clang
 
-    if msvc != True:
+    if fuzzer:
+        properties["gen_fuzz_corpus"] = True
+
+    if not msvc:
         goma_props = {}
         goma_props.update({
             "server_host": "goma.chromium.org",
@@ -204,7 +233,7 @@
 
     return properties
 
-def add_ci_builder(name, os, clang, debug, cpu):
+def add_ci_builder(name, os, clang, debug, cpu, fuzzer):
     """Add a CI builder
 
     Args:
@@ -213,23 +242,30 @@
       clang: is this builder running clang
       debug: is this builder generating debug builds
       cpu: string representing the target CPU architecture
+      fuzzer: is this builder running the fuzzers
     """
     dimensions_ci = get_default_dimensions(os)
     dimensions_ci["pool"] = "luci.flex.ci"
-    properties_ci = get_default_properties(os, clang, debug, cpu)
-    triggered_by_ci = ["primary-poller"]
+    properties_ci = get_default_properties(os, clang, debug, cpu, fuzzer)
+    schedule_ci = None
+    if fuzzer:
+        schedule_ci = "0 0 0 * * * *"
+    triggered_by_ci = None
+    if not fuzzer:
+        triggered_by_ci = ["primary-poller"]
     luci.builder(
         name = name,
         bucket = "ci",
+        schedule = schedule_ci,
         triggered_by = triggered_by_ci,
         executable = get_builder_executable(),
         properties = properties_ci,
         dimensions = dimensions_ci,
         caches = get_default_caches(os, clang),
-        service_account = "tint-ci-builder@chops-service-accounts.iam.gserviceaccount.com",
+        service_account = "dawn-ci-builder@chops-service-accounts.iam.gserviceaccount.com",
     )
 
-def add_try_builder(name, os, clang, debug, cpu):
+def add_try_builder(name, os, clang, debug, cpu, fuzzer):
     """Add a Try builder
 
     Args:
@@ -238,10 +274,11 @@
       clang: is this builder running clang
       debug: is this builder generating debug builds
       cpu: string representing the target CPU architecture
+      fuzzer: is this builder running the fuzzers
     """
     dimensions_try = get_default_dimensions(os)
     dimensions_try["pool"] = "luci.flex.try"
-    properties_try = get_default_properties(os, clang, debug, cpu)
+    properties_try = get_default_properties(os, clang, debug, cpu, fuzzer)
     properties_try["$depot_tools/bot_update"] = {
         "apply_patch_on_gclient": True,
     }
@@ -252,23 +289,25 @@
         properties = properties_try,
         dimensions = dimensions_try,
         caches = get_default_caches(os, clang),
-        service_account = "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com",
+        service_account = "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com",
     )
 
-def tint_standalone_builder(name, clang, debug, cpu):
-    """Adds both the CI and Try standalone builders
+def dawn_standalone_builder(name, clang, debug, cpu, fuzzer = False):
+    """Adds both the CI and Try standalone builders as appropriate
 
     Args:
       name: builder's name in string form
       clang: is this builder running clang
       debug: is this builder generating debug builds
       cpu: string representing the target CPU architecture
+      fuzzer: enable building fuzzer corpus
 
     """
     os = get_os_from_arg(name)
 
-    add_ci_builder(name, os, clang, debug, cpu)
-    add_try_builder(name, os, clang, debug, cpu)
+    add_ci_builder(name, os, clang, debug, cpu, fuzzer)
+    if not fuzzer:
+        add_try_builder(name, os, clang, debug, cpu, fuzzer)
 
     config = ""
     if clang:
@@ -276,7 +315,10 @@
     elif os.category == os_category.WINDOWS:
         config = "msvc"
 
-    category = os.console_name
+    category = ""
+    if fuzzer:
+        category += "cron|"
+    category += os.console_name
 
     if os.category != os_category.MAC:
         category += "|" + config
@@ -295,20 +337,32 @@
         short_name = short_name,
     )
 
-    luci.list_view_entry(
-        list_view = "try",
-        builder = "try/" + name,
-    )
+    if not fuzzer:
+        luci.list_view_entry(
+            list_view = "try",
+            builder = "try/" + name,
+        )
 
+        luci.cq_tryjob_verifier(
+            cq_group = "Dawn-CQ",
+            builder = "dawn:try/" + name,
+        )
+
+def chromium_dawn_tryjob(os):
+    """Adds a tryjob that tests against Chromium
+
+    Args:
+      os: string for the OS, should be one or linux|mac|win
+    """
     luci.cq_tryjob_verifier(
-        cq_group = "Tint-CQ",
-        builder = "tint:try/" + name,
+        cq_group = "Dawn-CQ",
+        builder = "chromium:try/" + os + "-dawn-rel",
     )
 
 luci.gitiles_poller(
     name = "primary-poller",
     bucket = "ci",
-    repo = "https://dawn.googlesource.com/tint",
+    repo = "https://dawn.googlesource.com/dawn",
     refs = [
         "refs/heads/main",
     ],
@@ -322,52 +376,57 @@
 luci.builder(
     name = "presubmit",
     bucket = "try",
-    executable = luci.recipe(
-        name = "run_presubmit",
-        cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
-        cipd_version = "refs/heads/master",
-    ),
+    executable = get_presubmit_executable(),
     dimensions = {
         "cpu": "x86-64",
         "os": os.LINUX.dimension,
         "pool": "luci.flex.try",
     },
     properties = {
-        "repo_name": "tint",
+        "repo_name": "dawn",
         "runhooks": True,
         "$depot_tools/bot_update": {
             "apply_patch_on_gclient": True,
         },
     },
-    service_account = "tint-try-builder@chops-service-accounts.iam.gserviceaccount.com",
+    service_account = "dawn-try-builder@chops-service-accounts.iam.gserviceaccount.com",
 )
 
-#                        name, clang, debug, cpu
-tint_standalone_builder("linux-clang-dbg-x64", True, True, "x64")
-tint_standalone_builder("linux-clang-rel-x64", True, False, "x64")
-tint_standalone_builder("linux-clang-dbg-x86", True, True, "x86")
-tint_standalone_builder("linux-clang-rel-x86", True, False, "x86")
-tint_standalone_builder("mac-dbg", True, True, "x64")
-tint_standalone_builder("mac-rel", True, False, "x64")
-tint_standalone_builder("win-clang-dbg-x64", True, True, "x64")
-tint_standalone_builder("win-clang-rel-x64", True, False, "x64")
-tint_standalone_builder("win-clang-dbg-x86", True, True, "x86")
-tint_standalone_builder("win-clang-rel-x86", True, False, "x86")
-tint_standalone_builder("win-msvc-dbg-x64", False, True, "x64")
-tint_standalone_builder("win-msvc-rel-x64", False, False, "x64")
+#                        name, clang, debug, cpu(, fuzzer)
+dawn_standalone_builder("linux-clang-dbg-x64", True, True, "x64")
+dawn_standalone_builder("linux-clang-dbg-x86", True, True, "x86")
+dawn_standalone_builder("linux-clang-rel-x64", True, False, "x64")
+dawn_standalone_builder("linux-clang-rel-x86", True, False, "x86")
+dawn_standalone_builder("mac-dbg", True, True, "x64")
+dawn_standalone_builder("mac-rel", True, False, "x64")
+dawn_standalone_builder("win-clang-dbg-x64", True, True, "x64")
+dawn_standalone_builder("win-clang-dbg-x86", True, True, "x86")
+dawn_standalone_builder("win-clang-rel-x64", True, False, "x64")
+dawn_standalone_builder("win-clang-rel-x86", True, False, "x86")
+dawn_standalone_builder("win-msvc-dbg-x64", False, True, "x64")
+dawn_standalone_builder("win-msvc-rel-x64", False, False, "x64")
+dawn_standalone_builder("cron-linux-clang-rel-x64", True, False, "x64", True)
+
+chromium_dawn_tryjob("linux")
+chromium_dawn_tryjob("mac")
+chromium_dawn_tryjob("win")
 
 # Views
 
+luci.milo(
+    logo = "https://storage.googleapis.com/chrome-infra-public/logo/dawn-logo.png",
+)
+
 luci.console_view(
     name = "ci",
-    title = "Tint CI Builders",
-    repo = "https://dawn.googlesource.com/tint",
+    title = "Dawn CI Builders",
+    repo = "https://dawn.googlesource.com/dawn",
     refs = ["refs/heads/main"],
 )
 
 luci.list_view(
     name = "try",
-    title = "Tint try Builders",
+    title = "Dawn try Builders",
 )
 
 # CQ
@@ -379,24 +438,24 @@
 )
 
 luci.cq_group(
-    name = "Tint-CQ",
+    name = "Dawn-CQ",
     watch = cq.refset(
-        "https://dawn.googlesource.com/tint",
+        "https://dawn.googlesource.com/dawn",
         refs = ["refs/heads/.+"],
     ),
     acls = [
         acl.entry(
             acl.CQ_COMMITTER,
-            groups = "project-tint-committers",
+            groups = "project-dawn-committers",
         ),
         acl.entry(
             acl.CQ_DRY_RUNNER,
-            groups = "project-tint-tryjobs-access",
+            groups = "project-dawn-tryjob-access",
         ),
     ],
     verifiers = [
         luci.cq_tryjob_verifier(
-            builder = "tint:try/presubmit",
+            builder = "dawn:try/presubmit",
             disable_reuse = True,
         ),
     ],
diff --git a/samples/dawn/Animometer.cpp b/samples/dawn/Animometer.cpp
new file mode 100644
index 0000000..9ac7cbe
--- /dev/null
+++ b/samples/dawn/Animometer.cpp
@@ -0,0 +1,192 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SampleUtils.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <vector>
+
+wgpu::Device device;
+wgpu::Queue queue;
+wgpu::SwapChain swapchain;
+wgpu::RenderPipeline pipeline;
+wgpu::BindGroup bindGroup;
+wgpu::Buffer ubo;
+
+float RandomFloat(float min, float max) {
+    float zeroOne = rand() / float(RAND_MAX);
+    return zeroOne * (max - min) + min;
+}
+
+constexpr size_t kNumTriangles = 10000;
+
+// Aligned as minUniformBufferOffsetAlignment
+struct alignas(256) ShaderData {
+    float scale;
+    float time;
+    float offsetX;
+    float offsetY;
+    float scalar;
+    float scalarOffset;
+};
+
+static std::vector<ShaderData> shaderData;
+
+void init() {
+    device = CreateCppDawnDevice();
+
+    queue = device.GetQueue();
+    swapchain = GetSwapChain(device);
+    swapchain.Configure(GetPreferredSwapChainTextureFormat(), wgpu::TextureUsage::RenderAttachment,
+                        640, 480);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct Constants {
+            scale : f32;
+            time : f32;
+            offsetX : f32;
+            offsetY : f32;
+            scalar : f32;
+            scalarOffset : f32;
+        };
+        @group(0) @binding(0) var<uniform> c : Constants;
+
+        struct VertexOut {
+            @location(0) v_color : vec4<f32>;
+            @builtin(position) Position : vec4<f32>;
+        };
+
+        @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+            var positions : array<vec4<f32>, 3> = array<vec4<f32>, 3>(
+                vec4<f32>( 0.0,  0.1, 0.0, 1.0),
+                vec4<f32>(-0.1, -0.1, 0.0, 1.0),
+                vec4<f32>( 0.1, -0.1, 0.0, 1.0)
+            );
+
+            var colors : array<vec4<f32>, 3> = array<vec4<f32>, 3>(
+                vec4<f32>(1.0, 0.0, 0.0, 1.0),
+                vec4<f32>(0.0, 1.0, 0.0, 1.0),
+                vec4<f32>(0.0, 0.0, 1.0, 1.0)
+            );
+
+            var position : vec4<f32> = positions[VertexIndex];
+            var color : vec4<f32> = colors[VertexIndex];
+
+            // TODO(dawn:572): Revisit once modf has been reworked in WGSL.
+            var fade : f32 = c.scalarOffset + c.time * c.scalar / 10.0;
+            fade = fade - floor(fade);
+            if (fade < 0.5) {
+                fade = fade * 2.0;
+            } else {
+                fade = (1.0 - fade) * 2.0;
+            }
+
+            var xpos : f32 = position.x * c.scale;
+            var ypos : f32 = position.y * c.scale;
+            let angle : f32 = 3.14159 * 2.0 * fade;
+            let xrot : f32 = xpos * cos(angle) - ypos * sin(angle);
+            let yrot : f32 = xpos * sin(angle) + ypos * cos(angle);
+            xpos = xrot + c.offsetX;
+            ypos = yrot + c.offsetY;
+
+            var output : VertexOut;
+            output.v_color = vec4<f32>(fade, 1.0 - fade, 0.0, 1.0) + color;
+            output.Position = vec4<f32>(xpos, ypos, 0.0, 1.0);
+            return output;
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main(@location(0) v_color : vec4<f32>) -> @location(0) vec4<f32> {
+            return v_color;
+        })");
+
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform, true}});
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+    pipeline = device.CreateRenderPipeline(&descriptor);
+
+    shaderData.resize(kNumTriangles);
+    for (auto& data : shaderData) {
+        data.scale = RandomFloat(0.2f, 0.4f);
+        data.time = 0.0;
+        data.offsetX = RandomFloat(-0.9f, 0.9f);
+        data.offsetY = RandomFloat(-0.9f, 0.9f);
+        data.scalar = RandomFloat(0.5f, 2.0f);
+        data.scalarOffset = RandomFloat(0.0f, 10.0f);
+    }
+
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = kNumTriangles * sizeof(ShaderData);
+    bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+    ubo = device.CreateBuffer(&bufferDesc);
+
+    bindGroup = utils::MakeBindGroup(device, bgl, {{0, ubo, 0, sizeof(ShaderData)}});
+}
+
+void frame() {
+    wgpu::TextureView backbufferView = swapchain.GetCurrentTextureView();
+
+    static int f = 0;
+    f++;
+    for (auto& data : shaderData) {
+        data.time = f / 60.0f;
+    }
+    queue.WriteBuffer(ubo, 0, shaderData.data(), kNumTriangles * sizeof(ShaderData));
+
+    utils::ComboRenderPassDescriptor renderPass({backbufferView});
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline);
+
+        for (size_t i = 0; i < kNumTriangles; i++) {
+            uint32_t offset = i * sizeof(ShaderData);
+            pass.SetBindGroup(0, bindGroup, 1, &offset);
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+    swapchain.Present();
+    DoFlush();
+    fprintf(stderr, "frame %i\n", f);
+}
+
+int main(int argc, const char* argv[]) {
+    if (!InitSample(argc, argv)) {
+        return 1;
+    }
+    init();
+
+    while (!ShouldQuit()) {
+        utils::ScopedAutoreleasePool pool;
+        frame();
+        utils::USleep(16000);
+    }
+}
diff --git a/samples/dawn/BUILD.gn b/samples/dawn/BUILD.gn
new file mode 100644
index 0000000..c7e04a5
--- /dev/null
+++ b/samples/dawn/BUILD.gn
@@ -0,0 +1,77 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../scripts/dawn_overrides_with_defaults.gni")
+
+group("samples") {
+  deps = [
+    ":Animometer",
+    ":CHelloTriangle",
+    ":ComputeBoids",
+    ":CppHelloTriangle",
+    ":ManualSwapChainTest",
+  ]
+}
+
+# Static library to contain code and dependencies common to all samples
+static_library("utils") {
+  sources = [
+    "SampleUtils.cpp",
+    "SampleUtils.h",
+  ]
+
+  # Export all of these as public deps so that `gn check` allows includes
+  public_deps = [
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/utils:bindings",
+    "${dawn_root}/src/dawn/utils:glfw",
+    "${dawn_root}/src/dawn/wire",
+  ]
+  public_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+}
+
+# Template for samples to avoid listing utils as a dep every time
+template("sample") {
+  executable(target_name) {
+    deps = [ ":utils" ]
+    forward_variables_from(invoker, "*", [ "deps" ])
+
+    if (defined(invoker.deps)) {
+      deps += invoker.deps
+    }
+  }
+}
+
+sample("CppHelloTriangle") {
+  sources = [ "CppHelloTriangle.cpp" ]
+}
+
+sample("CHelloTriangle") {
+  sources = [ "CHelloTriangle.cpp" ]
+}
+
+sample("ComputeBoids") {
+  sources = [ "ComputeBoids.cpp" ]
+}
+
+sample("Animometer") {
+  sources = [ "Animometer.cpp" ]
+}
+
+sample("ManualSwapChainTest") {
+  sources = [ "ManualSwapChainTest.cpp" ]
+}
diff --git a/samples/dawn/CHelloTriangle.cpp b/samples/dawn/CHelloTriangle.cpp
new file mode 100644
index 0000000..1f7e374
--- /dev/null
+++ b/samples/dawn/CHelloTriangle.cpp
@@ -0,0 +1,155 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SampleUtils.h"
+
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+WGPUDevice device;
+WGPUQueue queue;
+WGPUSwapChain swapchain;
+WGPURenderPipeline pipeline;
+
+WGPUTextureFormat swapChainFormat;
+
+void init() {
+    device = CreateCppDawnDevice().Release();
+    queue = wgpuDeviceGetQueue(device);
+
+    {
+        WGPUSwapChainDescriptor descriptor = {};
+        descriptor.implementation = GetSwapChainImplementation();
+        swapchain = wgpuDeviceCreateSwapChain(device, nullptr, &descriptor);
+    }
+    swapChainFormat = static_cast<WGPUTextureFormat>(GetPreferredSwapChainTextureFormat());
+    wgpuSwapChainConfigure(swapchain, swapChainFormat, WGPUTextureUsage_RenderAttachment, 640, 480);
+
+    const char* vs = R"(
+        @stage(vertex) fn main(
+            @builtin(vertex_index) VertexIndex : u32
+        ) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>( 0.0,  0.5),
+                vec2<f32>(-0.5, -0.5),
+                vec2<f32>( 0.5, -0.5)
+            );
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })";
+    WGPUShaderModule vsModule = utils::CreateShaderModule(device, vs).Release();
+
+    const char* fs = R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        })";
+    WGPUShaderModule fsModule = utils::CreateShaderModule(device, fs).Release();
+
+    {
+        WGPURenderPipelineDescriptor descriptor = {};
+
+        // Fragment state
+        WGPUBlendState blend = {};
+        blend.color.operation = WGPUBlendOperation_Add;
+        blend.color.srcFactor = WGPUBlendFactor_One;
+        blend.color.dstFactor = WGPUBlendFactor_One;
+        blend.alpha.operation = WGPUBlendOperation_Add;
+        blend.alpha.srcFactor = WGPUBlendFactor_One;
+        blend.alpha.dstFactor = WGPUBlendFactor_One;
+
+        WGPUColorTargetState colorTarget = {};
+        colorTarget.format = swapChainFormat;
+        colorTarget.blend = &blend;
+        colorTarget.writeMask = WGPUColorWriteMask_All;
+
+        WGPUFragmentState fragment = {};
+        fragment.module = fsModule;
+        fragment.entryPoint = "main";
+        fragment.targetCount = 1;
+        fragment.targets = &colorTarget;
+        descriptor.fragment = &fragment;
+
+        // Other state
+        descriptor.layout = nullptr;
+        descriptor.depthStencil = nullptr;
+
+        descriptor.vertex.module = vsModule;
+        descriptor.vertex.entryPoint = "main";
+        descriptor.vertex.bufferCount = 0;
+        descriptor.vertex.buffers = nullptr;
+
+        descriptor.multisample.count = 1;
+        descriptor.multisample.mask = 0xFFFFFFFF;
+        descriptor.multisample.alphaToCoverageEnabled = false;
+
+        descriptor.primitive.frontFace = WGPUFrontFace_CCW;
+        descriptor.primitive.cullMode = WGPUCullMode_None;
+        descriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+        descriptor.primitive.stripIndexFormat = WGPUIndexFormat_Undefined;
+
+        pipeline = wgpuDeviceCreateRenderPipeline(device, &descriptor);
+    }
+
+    wgpuShaderModuleRelease(vsModule);
+    wgpuShaderModuleRelease(fsModule);
+}
+
+void frame() {
+    WGPUTextureView backbufferView = wgpuSwapChainGetCurrentTextureView(swapchain);
+    WGPURenderPassDescriptor renderpassInfo = {};
+    WGPURenderPassColorAttachment colorAttachment = {};
+    {
+        colorAttachment.view = backbufferView;
+        colorAttachment.resolveTarget = nullptr;
+        colorAttachment.clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+        colorAttachment.loadOp = WGPULoadOp_Clear;
+        colorAttachment.storeOp = WGPUStoreOp_Store;
+        renderpassInfo.colorAttachmentCount = 1;
+        renderpassInfo.colorAttachments = &colorAttachment;
+        renderpassInfo.depthStencilAttachment = nullptr;
+    }
+    WGPUCommandBuffer commands;
+    {
+        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+        WGPURenderPassEncoder pass = wgpuCommandEncoderBeginRenderPass(encoder, &renderpassInfo);
+        wgpuRenderPassEncoderSetPipeline(pass, pipeline);
+        wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
+        wgpuRenderPassEncoderEnd(pass);
+        wgpuRenderPassEncoderRelease(pass);
+
+        commands = wgpuCommandEncoderFinish(encoder, nullptr);
+        wgpuCommandEncoderRelease(encoder);
+    }
+
+    wgpuQueueSubmit(queue, 1, &commands);
+    wgpuCommandBufferRelease(commands);
+    wgpuSwapChainPresent(swapchain);
+    wgpuTextureViewRelease(backbufferView);
+
+    DoFlush();
+}
+
+int main(int argc, const char* argv[]) {
+    if (!InitSample(argc, argv)) {
+        return 1;
+    }
+    init();
+
+    while (!ShouldQuit()) {
+        utils::ScopedAutoreleasePool pool;
+        frame();
+        utils::USleep(16000);
+    }
+}
diff --git a/samples/dawn/CMakeLists.txt b/samples/dawn/CMakeLists.txt
new file mode 100644
index 0000000..3fc9ec9
--- /dev/null
+++ b/samples/dawn/CMakeLists.txt
@@ -0,0 +1,41 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_sample_utils STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_sample_utils PRIVATE
+    "SampleUtils.cpp"
+    "SampleUtils.h"
+)
+target_link_libraries(dawn_sample_utils PUBLIC
+    dawn_internal_config
+    dawncpp
+    dawn_proc
+    dawn_common
+    dawn_native
+    dawn_wire
+    dawn_utils
+    glfw
+)
+
+add_executable(CppHelloTriangle "CppHelloTriangle.cpp")
+target_link_libraries(CppHelloTriangle dawn_sample_utils)
+
+add_executable(CHelloTriangle "CHelloTriangle.cpp")
+target_link_libraries(CHelloTriangle dawn_sample_utils)
+
+add_executable(ComputeBoids "ComputeBoids.cpp")
+target_link_libraries(ComputeBoids dawn_sample_utils)
+
+add_executable(Animometer "Animometer.cpp")
+target_link_libraries(Animometer dawn_sample_utils)
diff --git a/samples/dawn/ComputeBoids.cpp b/samples/dawn/ComputeBoids.cpp
new file mode 100644
index 0000000..f8c3764
--- /dev/null
+++ b/samples/dawn/ComputeBoids.cpp
@@ -0,0 +1,330 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SampleUtils.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+#include <cstring>
+#include <random>
+
+wgpu::Device device;
+wgpu::Queue queue;
+wgpu::SwapChain swapchain;
+wgpu::TextureView depthStencilView;
+
+wgpu::Buffer modelBuffer;
+std::array<wgpu::Buffer, 2> particleBuffers;
+
+wgpu::RenderPipeline renderPipeline;
+
+wgpu::Buffer updateParams;
+wgpu::ComputePipeline updatePipeline;
+std::array<wgpu::BindGroup, 2> updateBGs;
+
+size_t pingpong = 0;
+
+static const uint32_t kNumParticles = 1000;
+
+struct Particle {
+    std::array<float, 2> pos;
+    std::array<float, 2> vel;
+};
+
+struct SimParams {
+    float deltaT;
+    float rule1Distance;
+    float rule2Distance;
+    float rule3Distance;
+    float rule1Scale;
+    float rule2Scale;
+    float rule3Scale;
+    int particleCount;
+};
+
+void initBuffers() {
+    std::array<std::array<float, 2>, 3> model = {{
+        {-0.01, -0.02},
+        {0.01, -0.02},
+        {0.00, 0.02},
+    }};
+    modelBuffer =
+        utils::CreateBufferFromData(device, &model, sizeof(model), wgpu::BufferUsage::Vertex);
+
+    SimParams params = {0.04f, 0.1f, 0.025f, 0.025f, 0.02f, 0.05f, 0.005f, kNumParticles};
+    updateParams =
+        utils::CreateBufferFromData(device, &params, sizeof(params), wgpu::BufferUsage::Uniform);
+
+    std::vector<Particle> initialParticles(kNumParticles);
+    {
+        std::mt19937 generator;
+        std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
+        for (auto& p : initialParticles) {
+            p.pos = {dist(generator), dist(generator)};
+            p.vel = {dist(generator) * 0.1f, dist(generator) * 0.1f};
+        }
+    }
+
+    for (size_t i = 0; i < 2; i++) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = sizeof(Particle) * kNumParticles;
+        descriptor.usage =
+            wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage;
+        particleBuffers[i] = device.CreateBuffer(&descriptor);
+
+        queue.WriteBuffer(particleBuffers[i], 0,
+                          reinterpret_cast<uint8_t*>(initialParticles.data()),
+                          sizeof(Particle) * kNumParticles);
+    }
+}
+
+void initRender() {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct VertexIn {
+            @location(0) a_particlePos : vec2<f32>;
+            @location(1) a_particleVel : vec2<f32>;
+            @location(2) a_pos : vec2<f32>;
+        };
+
+        @stage(vertex)
+        fn main(input : VertexIn) -> @builtin(position) vec4<f32> {
+            var angle : f32 = -atan2(input.a_particleVel.x, input.a_particleVel.y);
+            var pos : vec2<f32> = vec2<f32>(
+                (input.a_pos.x * cos(angle)) - (input.a_pos.y * sin(angle)),
+                (input.a_pos.x * sin(angle)) + (input.a_pos.y * cos(angle)));
+            return vec4<f32>(pos + input.a_particlePos, 0.0, 1.0);
+        }
+    )");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+        }
+    )");
+
+    depthStencilView = CreateDefaultDepthStencilView(device);
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+
+    descriptor.vertex.module = vsModule;
+    descriptor.vertex.bufferCount = 2;
+    descriptor.cBuffers[0].arrayStride = sizeof(Particle);
+    descriptor.cBuffers[0].stepMode = wgpu::VertexStepMode::Instance;
+    descriptor.cBuffers[0].attributeCount = 2;
+    descriptor.cAttributes[0].offset = offsetof(Particle, pos);
+    descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+    descriptor.cAttributes[1].shaderLocation = 1;
+    descriptor.cAttributes[1].offset = offsetof(Particle, vel);
+    descriptor.cAttributes[1].format = wgpu::VertexFormat::Float32x2;
+    descriptor.cBuffers[1].arrayStride = 2 * sizeof(float);
+    descriptor.cBuffers[1].attributeCount = 1;
+    descriptor.cBuffers[1].attributes = &descriptor.cAttributes[2];
+    descriptor.cAttributes[2].shaderLocation = 2;
+    descriptor.cAttributes[2].format = wgpu::VertexFormat::Float32x2;
+
+    descriptor.cFragment.module = fsModule;
+    descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+    descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+    renderPipeline = device.CreateRenderPipeline(&descriptor);
+}
+
+void initSim() {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Particle {
+            pos : vec2<f32>;
+            vel : vec2<f32>;
+        };
+        struct SimParams {
+            deltaT : f32;
+            rule1Distance : f32;
+            rule2Distance : f32;
+            rule3Distance : f32;
+            rule1Scale : f32;
+            rule2Scale : f32;
+            rule3Scale : f32;
+            particleCount : u32;
+        };
+        struct Particles {
+            particles : array<Particle>;
+        };
+        @binding(0) @group(0) var<uniform> params : SimParams;
+        @binding(1) @group(0) var<storage, read_write> particlesA : Particles;
+        @binding(2) @group(0) var<storage, read_write> particlesB : Particles;
+
+        // https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            var index : u32 = GlobalInvocationID.x;
+            if (index >= params.particleCount) {
+                return;
+            }
+            var vPos : vec2<f32> = particlesA.particles[index].pos;
+            var vVel : vec2<f32> = particlesA.particles[index].vel;
+            var cMass : vec2<f32> = vec2<f32>(0.0, 0.0);
+            var cVel : vec2<f32> = vec2<f32>(0.0, 0.0);
+            var colVel : vec2<f32> = vec2<f32>(0.0, 0.0);
+            var cMassCount : u32 = 0u;
+            var cVelCount : u32 = 0u;
+            var pos : vec2<f32>;
+            var vel : vec2<f32>;
+
+            for (var i : u32 = 0u; i < params.particleCount; i = i + 1u) {
+                if (i == index) {
+                    continue;
+                }
+
+                pos = particlesA.particles[i].pos.xy;
+                vel = particlesA.particles[i].vel.xy;
+                if (distance(pos, vPos) < params.rule1Distance) {
+                    cMass = cMass + pos;
+                    cMassCount = cMassCount + 1u;
+                }
+                if (distance(pos, vPos) < params.rule2Distance) {
+                    colVel = colVel - (pos - vPos);
+                }
+                if (distance(pos, vPos) < params.rule3Distance) {
+                    cVel = cVel + vel;
+                    cVelCount = cVelCount + 1u;
+                }
+            }
+
+            if (cMassCount > 0u) {
+                cMass = (cMass / vec2<f32>(f32(cMassCount), f32(cMassCount))) - vPos;
+            }
+
+            if (cVelCount > 0u) {
+                cVel = cVel / vec2<f32>(f32(cVelCount), f32(cVelCount));
+            }
+            vVel = vVel + (cMass * params.rule1Scale) + (colVel * params.rule2Scale) +
+                (cVel * params.rule3Scale);
+
+            // clamp velocity for a more pleasing simulation
+            vVel = normalize(vVel) * clamp(length(vVel), 0.0, 0.1);
+            // kinematic update
+            vPos = vPos + (vVel * params.deltaT);
+
+            // Wrap around boundary
+            if (vPos.x < -1.0) {
+                vPos.x = 1.0;
+            }
+            if (vPos.x > 1.0) {
+                vPos.x = -1.0;
+            }
+            if (vPos.y < -1.0) {
+                vPos.y = 1.0;
+            }
+            if (vPos.y > 1.0) {
+                vPos.y = -1.0;
+            }
+
+            // Write back
+            particlesB.particles[index].pos = vPos;
+            particlesB.particles[index].vel = vVel;
+            return;
+        }
+    )");
+
+    auto bgl = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                    {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                    {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                });
+
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.layout = pl;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+    updatePipeline = device.CreateComputePipeline(&csDesc);
+
+    for (uint32_t i = 0; i < 2; ++i) {
+        updateBGs[i] = utils::MakeBindGroup(
+            device, bgl,
+            {
+                {0, updateParams, 0, sizeof(SimParams)},
+                {1, particleBuffers[i], 0, kNumParticles * sizeof(Particle)},
+                {2, particleBuffers[(i + 1) % 2], 0, kNumParticles * sizeof(Particle)},
+            });
+    }
+}
+
+wgpu::CommandBuffer createCommandBuffer(const wgpu::TextureView backbufferView, size_t i) {
+    auto& bufferDst = particleBuffers[(i + 1) % 2];
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(updatePipeline);
+        pass.SetBindGroup(0, updateBGs[i]);
+        pass.Dispatch(kNumParticles);
+        pass.End();
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({backbufferView}, depthStencilView);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(renderPipeline);
+        pass.SetVertexBuffer(0, bufferDst);
+        pass.SetVertexBuffer(1, modelBuffer);
+        pass.Draw(3, kNumParticles);
+        pass.End();
+    }
+
+    return encoder.Finish();
+}
+
+void init() {
+    device = CreateCppDawnDevice();
+
+    queue = device.GetQueue();
+    swapchain = GetSwapChain(device);
+    swapchain.Configure(GetPreferredSwapChainTextureFormat(), wgpu::TextureUsage::RenderAttachment,
+                        640, 480);
+
+    initBuffers();
+    initRender();
+    initSim();
+}
+
+void frame() {
+    wgpu::TextureView backbufferView = swapchain.GetCurrentTextureView();
+
+    wgpu::CommandBuffer commandBuffer = createCommandBuffer(backbufferView, pingpong);
+    queue.Submit(1, &commandBuffer);
+    swapchain.Present();
+    DoFlush();
+
+    pingpong = (pingpong + 1) % 2;
+}
+
+int main(int argc, const char* argv[]) {
+    if (!InitSample(argc, argv)) {
+        return 1;
+    }
+    init();
+
+    while (!ShouldQuit()) {
+        utils::ScopedAutoreleasePool pool;
+        frame();
+        utils::USleep(16000);
+    }
+}
diff --git a/samples/dawn/CppHelloTriangle.cpp b/samples/dawn/CppHelloTriangle.cpp
new file mode 100644
index 0000000..14b4f55
--- /dev/null
+++ b/samples/dawn/CppHelloTriangle.cpp
@@ -0,0 +1,184 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SampleUtils.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <vector>
+
+wgpu::Device device;
+
+wgpu::Buffer indexBuffer;
+wgpu::Buffer vertexBuffer;
+
+wgpu::Texture texture;
+wgpu::Sampler sampler;
+
+wgpu::Queue queue;
+wgpu::SwapChain swapchain;
+wgpu::TextureView depthStencilView;
+wgpu::RenderPipeline pipeline;
+wgpu::BindGroup bindGroup;
+
+void initBuffers() {
+    static const uint32_t indexData[3] = {
+        0,
+        1,
+        2,
+    };
+    indexBuffer =
+        utils::CreateBufferFromData(device, indexData, sizeof(indexData), wgpu::BufferUsage::Index);
+
+    static const float vertexData[12] = {
+        0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
+    };
+    vertexBuffer = utils::CreateBufferFromData(device, vertexData, sizeof(vertexData),
+                                               wgpu::BufferUsage::Vertex);
+}
+
+void initTextures() {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 1024;
+    descriptor.size.height = 1024;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    texture = device.CreateTexture(&descriptor);
+
+    sampler = device.CreateSampler();
+
+    // Initialize the texture with arbitrary data until we can load images
+    std::vector<uint8_t> data(4 * 1024 * 1024, 0);
+    for (size_t i = 0; i < data.size(); ++i) {
+        data[i] = static_cast<uint8_t>(i % 253);
+    }
+
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, 4 * 1024);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {1024, 1024, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+
+    wgpu::CommandBuffer copy = encoder.Finish();
+    queue.Submit(1, &copy);
+}
+
+void init() {
+    device = CreateCppDawnDevice();
+
+    queue = device.GetQueue();
+    swapchain = GetSwapChain(device);
+    swapchain.Configure(GetPreferredSwapChainTextureFormat(), wgpu::TextureUsage::RenderAttachment,
+                        640, 480);
+
+    initBuffers();
+    initTextures();
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(@location(0) pos : vec4<f32>)
+                            -> @builtin(position) vec4<f32> {
+            return pos;
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var mySampler: sampler;
+        @group(0) @binding(1) var myTexture : texture_2d<f32>;
+
+        @stage(fragment) fn main(@builtin(position) FragCoord : vec4<f32>)
+                              -> @location(0) vec4<f32> {
+            return textureSample(myTexture, mySampler, FragCoord.xy / vec2<f32>(640.0, 480.0));
+        })");
+
+    auto bgl = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+                    {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+                });
+
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
+
+    depthStencilView = CreateDefaultDepthStencilView(device);
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+    descriptor.vertex.module = vsModule;
+    descriptor.vertex.bufferCount = 1;
+    descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+    descriptor.cBuffers[0].attributeCount = 1;
+    descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    descriptor.cFragment.module = fsModule;
+    descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+    descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+
+    pipeline = device.CreateRenderPipeline(&descriptor);
+
+    wgpu::TextureView view = texture.CreateView();
+
+    bindGroup = utils::MakeBindGroup(device, bgl, {{0, sampler}, {1, view}});
+}
+
+struct {
+    uint32_t a;
+    float b;
+} s;
+void frame() {
+    s.a = (s.a + 1) % 256;
+    s.b += 0.02f;
+    if (s.b >= 1.0f) {
+        s.b = 0.0f;
+    }
+
+    wgpu::TextureView backbufferView = swapchain.GetCurrentTextureView();
+    utils::ComboRenderPassDescriptor renderPass({backbufferView}, depthStencilView);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+    swapchain.Present();
+    DoFlush();
+}
+
+int main(int argc, const char* argv[]) {
+    if (!InitSample(argc, argv)) {
+        return 1;
+    }
+    init();
+
+    while (!ShouldQuit()) {
+        utils::ScopedAutoreleasePool pool;
+        frame();
+        utils::USleep(16000);
+    }
+}
diff --git a/samples/dawn/ManualSwapChainTest.cpp b/samples/dawn/ManualSwapChainTest.cpp
new file mode 100644
index 0000000..9c6e757
--- /dev/null
+++ b/samples/dawn/ManualSwapChainTest.cpp
@@ -0,0 +1,364 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is an example to manually test swapchain code. Controls are the following, scoped to the
+// currently focused window:
+//  - W: creates a new window.
+//  - L: Latches the current swapchain, to check what happens when the window changes but not the
+//    swapchain.
+//  - R: switches the rendering mode, between "The Red Triangle" and color-cycling clears that's
+//    (WARNING) likely seizure inducing.
+//  - D: cycles the divisor for the swapchain size.
+//  - P: switches present modes.
+//
+// Closing all the windows exits the example. ^C also works.
+//
+// Things to test manually:
+//
+//  - Basic tests (with the triangle render mode):
+//    - Check the triangle is red on a black background and with the pointy side up.
+//    - Cycle render modes a bunch and check that the triangle background is always solid black.
+//    - Check that rendering triangles to multiple windows works.
+//
+//  - Present mode single-window tests (with cycling color render mode):
+//    - Check that Fifo cycles at about 1 cycle per second and has no tearing.
+//    - Check that Mailbox cycles faster than Fifo and has no tearing.
+//    - Check that Immediate cycles faster than Fifo, it is allowed to have tearing. (dragging
+//      between two monitors can help see tearing)
+//
+//  - Present mode multi-window tests, it should have the same results as single-window tests when
+//    all windows are in the same present mode. In mixed present modes only Immediate windows are
+//    allowed to tear.
+//
+//  - Resizing tests (with the triangle render mode):
+//    - Check that cycling divisors on the triangle produces lower and lower resolution triangles.
+//    - Check latching the swapchain config and resizing the window a bunch (smaller, bigger, and
+//      diagonal aspect ratio).
+//
+//  - Config change tests:
+//    - Check that cycling between present modes works.
+//    - TODO can't be tested yet: check cycling the same window over multiple devices.
+//    - TODO can't be tested yet: check cycling the same window over multiple formats.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/GLFWUtils.h"
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <dawn/dawn_proc.h>
+#include <dawn/native/DawnNative.h>
+#include <dawn/webgpu_cpp.h>
+#include "GLFW/glfw3.h"
+
+#include <memory>
+#include <unordered_map>
+
+struct WindowData {
+    GLFWwindow* window = nullptr;
+    uint64_t serial = 0;
+
+    float clearCycle = 1.0f;
+    bool latched = false;
+    bool renderTriangle = true;
+    uint32_t divisor = 1;
+
+    wgpu::Surface surface = nullptr;
+    wgpu::SwapChain swapchain = nullptr;
+
+    wgpu::SwapChainDescriptor currentDesc;
+    wgpu::SwapChainDescriptor targetDesc;
+};
+
+static std::unordered_map<GLFWwindow*, std::unique_ptr<WindowData>> windows;
+static uint64_t windowSerial = 0;
+
+static std::unique_ptr<dawn::native::Instance> instance;
+static wgpu::Device device;
+static wgpu::Queue queue;
+static wgpu::RenderPipeline trianglePipeline;
+
+bool IsSameDescriptor(const wgpu::SwapChainDescriptor& a, const wgpu::SwapChainDescriptor& b) {
+    return a.usage == b.usage && a.format == b.format && a.width == b.width &&
+           a.height == b.height && a.presentMode == b.presentMode;
+}
+
+void OnKeyPress(GLFWwindow* window, int key, int, int action, int);
+
+void SyncFromWindow(WindowData* data) {
+    int width;
+    int height;
+    glfwGetFramebufferSize(data->window, &width, &height);
+
+    data->targetDesc.width = std::max(1u, width / data->divisor);
+    data->targetDesc.height = std::max(1u, height / data->divisor);
+}
+
+void AddWindow() {
+    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+    GLFWwindow* window = glfwCreateWindow(400, 400, "", nullptr, nullptr);
+    glfwSetKeyCallback(window, OnKeyPress);
+
+    wgpu::SwapChainDescriptor descriptor;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    descriptor.format = wgpu::TextureFormat::BGRA8Unorm;
+    descriptor.width = 0;
+    descriptor.height = 0;
+    descriptor.presentMode = wgpu::PresentMode::Fifo;
+
+    std::unique_ptr<WindowData> data = std::make_unique<WindowData>();
+    data->window = window;
+    data->serial = windowSerial++;
+    data->surface = utils::CreateSurfaceForWindow(instance->Get(), window);
+    data->currentDesc = descriptor;
+    data->targetDesc = descriptor;
+    SyncFromWindow(data.get());
+
+    windows[window] = std::move(data);
+}
+
+void DoRender(WindowData* data) {
+    wgpu::TextureView view = data->swapchain.GetCurrentTextureView();
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    if (data->renderTriangle) {
+        utils::ComboRenderPassDescriptor desc({view});
+        // Use Load to check the swapchain is lazy cleared (we shouldn't see garbage from previous
+        // frames).
+        desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
+        pass.SetPipeline(trianglePipeline);
+        pass.Draw(3);
+        pass.End();
+    } else {
+        data->clearCycle -= 1.0 / 60.f;
+        if (data->clearCycle < 0.0) {
+            data->clearCycle = 1.0f;
+        }
+
+        utils::ComboRenderPassDescriptor desc({view});
+        desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        desc.cColorAttachments[0].clearValue = {data->clearCycle, 1.0f - data->clearCycle, 0.0f,
+                                                1.0f};
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    data->swapchain.Present();
+}
+
+std::ostream& operator<<(std::ostream& o, const wgpu::SwapChainDescriptor& desc) {
+    // For now only render attachment is possible.
+    ASSERT(desc.usage == wgpu::TextureUsage::RenderAttachment);
+    o << "RenderAttachment ";
+    o << desc.width << "x" << desc.height << " ";
+
+    // For now only BGRA is allowed
+    ASSERT(desc.format == wgpu::TextureFormat::BGRA8Unorm);
+    o << "BGRA8Unorm ";
+
+    switch (desc.presentMode) {
+        case wgpu::PresentMode::Immediate:
+            o << "Immediate";
+            break;
+        case wgpu::PresentMode::Fifo:
+            o << "Fifo";
+            break;
+        case wgpu::PresentMode::Mailbox:
+            o << "Mailbox";
+            break;
+    }
+    return o;
+}
+
+void UpdateTitle(WindowData* data) {
+    std::ostringstream o;
+
+    o << data->serial << " ";
+    if (data->divisor != 1) {
+        o << "Divisor:" << data->divisor << " ";
+    }
+
+    if (data->latched) {
+        o << "Latched: (" << data->currentDesc << ") ";
+        o << "Target: (" << data->targetDesc << ")";
+    } else {
+        o << "(" << data->currentDesc << ")";
+    }
+
+    glfwSetWindowTitle(data->window, o.str().c_str());
+}
+
+void OnKeyPress(GLFWwindow* window, int key, int, int action, int) {
+    if (action != GLFW_PRESS) {
+        return;
+    }
+
+    ASSERT(windows.count(window) == 1);
+
+    WindowData* data = windows[window].get();
+    switch (key) {
+        case GLFW_KEY_W:
+            AddWindow();
+            break;
+
+        case GLFW_KEY_L:
+            data->latched = !data->latched;
+            UpdateTitle(data);
+            break;
+
+        case GLFW_KEY_R:
+            data->renderTriangle = !data->renderTriangle;
+            UpdateTitle(data);
+            break;
+
+        case GLFW_KEY_D:
+            data->divisor *= 2;
+            if (data->divisor > 32) {
+                data->divisor = 1;
+            }
+            break;
+
+        case GLFW_KEY_P:
+            switch (data->targetDesc.presentMode) {
+                case wgpu::PresentMode::Immediate:
+                    data->targetDesc.presentMode = wgpu::PresentMode::Fifo;
+                    break;
+                case wgpu::PresentMode::Fifo:
+                    data->targetDesc.presentMode = wgpu::PresentMode::Mailbox;
+                    break;
+                case wgpu::PresentMode::Mailbox:
+                    data->targetDesc.presentMode = wgpu::PresentMode::Immediate;
+                    break;
+            }
+            break;
+
+        default:
+            break;
+    }
+}
+
+int main(int argc, const char* argv[]) {
+    // Setup GLFW
+    glfwSetErrorCallback([](int code, const char* message) {
+        dawn::ErrorLog() << "GLFW error " << code << " " << message;
+    });
+    if (!glfwInit()) {
+        return 1;
+    }
+
+    // Choose an adapter we like.
+    // TODO: allow switching the window between devices.
+    DawnProcTable procs = dawn::native::GetProcs();
+    dawnProcSetProcs(&procs);
+
+    instance = std::make_unique<dawn::native::Instance>();
+    instance->DiscoverDefaultAdapters();
+
+    std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+    dawn::native::Adapter chosenAdapter;
+    for (dawn::native::Adapter& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+        if (properties.backendType != wgpu::BackendType::Null) {
+            chosenAdapter = adapter;
+            break;
+        }
+    }
+    ASSERT(chosenAdapter);
+
+    // Setup the device on that adapter.
+    device = wgpu::Device::Acquire(chosenAdapter.CreateDevice());
+    device.SetUncapturedErrorCallback(
+        [](WGPUErrorType errorType, const char* message, void*) {
+            const char* errorTypeName = "";
+            switch (errorType) {
+                case WGPUErrorType_Validation:
+                    errorTypeName = "Validation";
+                    break;
+                case WGPUErrorType_OutOfMemory:
+                    errorTypeName = "Out of memory";
+                    break;
+                case WGPUErrorType_Unknown:
+                    errorTypeName = "Unknown";
+                    break;
+                case WGPUErrorType_DeviceLost:
+                    errorTypeName = "Device lost";
+                    break;
+                default:
+                    UNREACHABLE();
+                    return;
+            }
+            dawn::ErrorLog() << errorTypeName << " error: " << message;
+        },
+        nullptr);
+    queue = device.GetQueue();
+
+    // The hacky pipeline to render a triangle.
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32)
+                            -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>( 0.0,  0.5),
+                vec2<f32>(-0.5, -0.5),
+                vec2<f32>( 0.5, -0.5)
+            );
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        })");
+    // BGRA shouldn't be hardcoded. Consider having a map[format -> pipeline].
+    pipelineDesc.cTargets[0].format = wgpu::TextureFormat::BGRA8Unorm;
+    trianglePipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    // Craete the first window, since the example exits when there are no windows.
+    AddWindow();
+
+    while (windows.size() != 0) {
+        utils::ScopedAutoreleasePool pool;
+        glfwPollEvents();
+
+        for (auto it = windows.begin(); it != windows.end();) {
+            GLFWwindow* window = it->first;
+
+            if (glfwWindowShouldClose(window)) {
+                glfwDestroyWindow(window);
+                it = windows.erase(it);
+            } else {
+                it++;
+            }
+        }
+
+        for (auto& it : windows) {
+            WindowData* data = it.second.get();
+
+            SyncFromWindow(data);
+            if (!IsSameDescriptor(data->currentDesc, data->targetDesc) && !data->latched) {
+                data->swapchain = device.CreateSwapChain(data->surface, &data->targetDesc);
+                data->currentDesc = data->targetDesc;
+            }
+            UpdateTitle(data);
+            DoRender(data);
+        }
+    }
+}
diff --git a/samples/dawn/OWNERS b/samples/dawn/OWNERS
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/samples/dawn/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/samples/dawn/SampleUtils.cpp b/samples/dawn/SampleUtils.cpp
new file mode 100644
index 0000000..db14027
--- /dev/null
+++ b/samples/dawn/SampleUtils.cpp
@@ -0,0 +1,279 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SampleUtils.h"
+
+#include "GLFW/glfw3.h"
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/utils/BackendBinding.h"
+#include "dawn/utils/GLFWUtils.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <cstring>
+
+void PrintDeviceError(WGPUErrorType errorType, const char* message, void*) {
+    const char* errorTypeName = "";
+    switch (errorType) {
+        case WGPUErrorType_Validation:
+            errorTypeName = "Validation";
+            break;
+        case WGPUErrorType_OutOfMemory:
+            errorTypeName = "Out of memory";
+            break;
+        case WGPUErrorType_Unknown:
+            errorTypeName = "Unknown";
+            break;
+        case WGPUErrorType_DeviceLost:
+            errorTypeName = "Device lost";
+            break;
+        default:
+            UNREACHABLE();
+            return;
+    }
+    dawn::ErrorLog() << errorTypeName << " error: " << message;
+}
+
+void PrintGLFWError(int code, const char* message) {
+    dawn::ErrorLog() << "GLFW error: " << code << " - " << message;
+}
+
+enum class CmdBufType {
+    None,
+    Terrible,
+    // TODO(cwallez@chromium.org): double terrible cmdbuf
+};
+
+// Default to D3D12, Metal, Vulkan, OpenGL in that order as D3D12 and Metal are the preferred on
+// their respective platforms, and Vulkan is preferred to OpenGL
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+static wgpu::BackendType backendType = wgpu::BackendType::D3D12;
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+static wgpu::BackendType backendType = wgpu::BackendType::Metal;
+#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
+static wgpu::BackendType backendType = wgpu::BackendType::Vulkan;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGLES)
+static wgpu::BackendType backendType = wgpu::BackendType::OpenGLES;
+#elif defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+static wgpu::BackendType backendType = wgpu::BackendType::OpenGL;
+#else
+#    error
+#endif
+
+static CmdBufType cmdBufType = CmdBufType::Terrible;
+static std::unique_ptr<dawn::native::Instance> instance;
+static utils::BackendBinding* binding = nullptr;
+
+static GLFWwindow* window = nullptr;
+
+static dawn::wire::WireServer* wireServer = nullptr;
+static dawn::wire::WireClient* wireClient = nullptr;
+static utils::TerribleCommandBuffer* c2sBuf = nullptr;
+static utils::TerribleCommandBuffer* s2cBuf = nullptr;
+
+wgpu::Device CreateCppDawnDevice() {
+    ScopedEnvironmentVar angleDefaultPlatform;
+    if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
+        angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
+    }
+
+    glfwSetErrorCallback(PrintGLFWError);
+    if (!glfwInit()) {
+        return wgpu::Device();
+    }
+
+    // Create the test window and discover adapters using it (esp. for OpenGL)
+    utils::SetupGLFWWindowHintsForBackend(backendType);
+    glfwWindowHint(GLFW_COCOA_RETINA_FRAMEBUFFER, GLFW_FALSE);
+    window = glfwCreateWindow(640, 480, "Dawn window", nullptr, nullptr);
+    if (!window) {
+        return wgpu::Device();
+    }
+
+    instance = std::make_unique<dawn::native::Instance>();
+    utils::DiscoverAdapter(instance.get(), window, backendType);
+
+    // Get an adapter for the backend to use, and create the device.
+    dawn::native::Adapter backendAdapter;
+    {
+        std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+        auto adapterIt = std::find_if(adapters.begin(), adapters.end(),
+                                      [](const dawn::native::Adapter adapter) -> bool {
+                                          wgpu::AdapterProperties properties;
+                                          adapter.GetProperties(&properties);
+                                          return properties.backendType == backendType;
+                                      });
+        ASSERT(adapterIt != adapters.end());
+        backendAdapter = *adapterIt;
+    }
+
+    WGPUDevice backendDevice = backendAdapter.CreateDevice();
+    DawnProcTable backendProcs = dawn::native::GetProcs();
+
+    binding = utils::CreateBinding(backendType, window, backendDevice);
+    if (binding == nullptr) {
+        return wgpu::Device();
+    }
+
+    // Choose whether to use the backend procs and devices directly, or set up the wire.
+    WGPUDevice cDevice = nullptr;
+    DawnProcTable procs;
+
+    switch (cmdBufType) {
+        case CmdBufType::None:
+            procs = backendProcs;
+            cDevice = backendDevice;
+            break;
+
+        case CmdBufType::Terrible: {
+            c2sBuf = new utils::TerribleCommandBuffer();
+            s2cBuf = new utils::TerribleCommandBuffer();
+
+            dawn::wire::WireServerDescriptor serverDesc = {};
+            serverDesc.procs = &backendProcs;
+            serverDesc.serializer = s2cBuf;
+
+            wireServer = new dawn::wire::WireServer(serverDesc);
+            c2sBuf->SetHandler(wireServer);
+
+            dawn::wire::WireClientDescriptor clientDesc = {};
+            clientDesc.serializer = c2sBuf;
+
+            wireClient = new dawn::wire::WireClient(clientDesc);
+            procs = dawn::wire::client::GetProcs();
+            s2cBuf->SetHandler(wireClient);
+
+            auto deviceReservation = wireClient->ReserveDevice();
+            wireServer->InjectDevice(backendDevice, deviceReservation.id,
+                                     deviceReservation.generation);
+
+            cDevice = deviceReservation.device;
+        } break;
+    }
+
+    dawnProcSetProcs(&procs);
+    procs.deviceSetUncapturedErrorCallback(cDevice, PrintDeviceError, nullptr);
+    return wgpu::Device::Acquire(cDevice);
+}
+
+uint64_t GetSwapChainImplementation() {
+    return binding->GetSwapChainImplementation();
+}
+
+wgpu::TextureFormat GetPreferredSwapChainTextureFormat() {
+    DoFlush();
+    return static_cast<wgpu::TextureFormat>(binding->GetPreferredSwapChainTextureFormat());
+}
+
+wgpu::SwapChain GetSwapChain(const wgpu::Device& device) {
+    wgpu::SwapChainDescriptor swapChainDesc;
+    swapChainDesc.implementation = GetSwapChainImplementation();
+    return device.CreateSwapChain(nullptr, &swapChainDesc);
+}
+
+wgpu::TextureView CreateDefaultDepthStencilView(const wgpu::Device& device) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 640;
+    descriptor.size.height = 480;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    auto depthStencilTexture = device.CreateTexture(&descriptor);
+    return depthStencilTexture.CreateView();
+}
+
+bool InitSample(int argc, const char** argv) {
+    for (int i = 1; i < argc; i++) {
+        if (std::string("-b") == argv[i] || std::string("--backend") == argv[i]) {
+            i++;
+            if (i < argc && std::string("d3d12") == argv[i]) {
+                backendType = wgpu::BackendType::D3D12;
+                continue;
+            }
+            if (i < argc && std::string("metal") == argv[i]) {
+                backendType = wgpu::BackendType::Metal;
+                continue;
+            }
+            if (i < argc && std::string("null") == argv[i]) {
+                backendType = wgpu::BackendType::Null;
+                continue;
+            }
+            if (i < argc && std::string("opengl") == argv[i]) {
+                backendType = wgpu::BackendType::OpenGL;
+                continue;
+            }
+            if (i < argc && std::string("opengles") == argv[i]) {
+                backendType = wgpu::BackendType::OpenGLES;
+                continue;
+            }
+            if (i < argc && std::string("vulkan") == argv[i]) {
+                backendType = wgpu::BackendType::Vulkan;
+                continue;
+            }
+            fprintf(stderr,
+                    "--backend expects a backend name (opengl, opengles, metal, d3d12, null, "
+                    "vulkan)\n");
+            return false;
+        }
+        if (std::string("-c") == argv[i] || std::string("--command-buffer") == argv[i]) {
+            i++;
+            if (i < argc && std::string("none") == argv[i]) {
+                cmdBufType = CmdBufType::None;
+                continue;
+            }
+            if (i < argc && std::string("terrible") == argv[i]) {
+                cmdBufType = CmdBufType::Terrible;
+                continue;
+            }
+            fprintf(stderr, "--command-buffer expects a command buffer name (none, terrible)\n");
+            return false;
+        }
+        if (std::string("-h") == argv[i] || std::string("--help") == argv[i]) {
+            printf("Usage: %s [-b BACKEND] [-c COMMAND_BUFFER]\n", argv[0]);
+            printf("  BACKEND is one of: d3d12, metal, null, opengl, opengles, vulkan\n");
+            printf("  COMMAND_BUFFER is one of: none, terrible\n");
+            return false;
+        }
+    }
+    return true;
+}
+
+void DoFlush() {
+    if (cmdBufType == CmdBufType::Terrible) {
+        bool c2sSuccess = c2sBuf->Flush();
+        bool s2cSuccess = s2cBuf->Flush();
+
+        ASSERT(c2sSuccess && s2cSuccess);
+    }
+    glfwPollEvents();
+}
+
+bool ShouldQuit() {
+    return glfwWindowShouldClose(window);
+}
+
+GLFWwindow* GetGLFWWindow() {
+    return window;
+}
diff --git a/samples/dawn/SampleUtils.h b/samples/dawn/SampleUtils.h
new file mode 100644
index 0000000..8c6fdd7
--- /dev/null
+++ b/samples/dawn/SampleUtils.h
@@ -0,0 +1,29 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <dawn/dawn_wsi.h>
+#include <dawn/webgpu_cpp.h>
+
+bool InitSample(int argc, const char** argv);
+void DoFlush();
+bool ShouldQuit();
+
+struct GLFWwindow;
+struct GLFWwindow* GetGLFWWindow();
+
+wgpu::Device CreateCppDawnDevice();
+uint64_t GetSwapChainImplementation();
+wgpu::TextureFormat GetPreferredSwapChainTextureFormat();
+wgpu::SwapChain GetSwapChain(const wgpu::Device& device);
+wgpu::TextureView CreateDefaultDepthStencilView(const wgpu::Device& device);
diff --git a/scripts/dawn_component.gni b/scripts/dawn_component.gni
new file mode 100644
index 0000000..8a69794
--- /dev/null
+++ b/scripts/dawn_component.gni
@@ -0,0 +1,140 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/build.gni")
+import("dawn_features.gni")
+import("dawn_overrides_with_defaults.gni")
+
+###############################################################################
+# Template to produce a component for one of Dawn's libraries.
+###############################################################################
+
+# Template that produces static and shared versions of the same library as well
+# as a target similar to Chromium's component targets.
+#  - The shared version exports symbols and has dependent import the symbols
+#    as libdawn_${name}.so. If the target name matches the package directory
+#    name, then the shared library target will be named 'shared', otherwise
+#    '${target_name}_shared'.
+#  - The static library doesn't export symbols nor make dependents import them.
+#    If the target name matches the package directory name, then the static
+#    library target will be named 'static', otherwise '${target_name}_static'.
+#  - The libname target is similar to a Chromium component and is an alias for
+#    either the static or the shared library.
+#
+# The DEFINE_PREFIX must be provided and must match the respective "_export.h"
+# file.
+#
+# Example usage:
+#
+#   dawn_component("my_library") {
+#     // my_library_export.h must use the MY_LIBRARY_IMPLEMENTATION and
+#     // MY_LIBRARY_SHARED_LIBRARY macros.
+#     DEFINE_PREFIX = "MY_LIBRARY"
+#
+#     sources = [...]
+#     deps = [...]
+#     configs = [...]
+#   }
+#
+#   executable("foo") {
+#     deps = [ ":my_library_shared" ] // or :my_library for the same effect
+#   }
+template("dawn_component") {
+  # Copy the target_name in the local scope so it doesn't get shadowed in the
+  # definition of targets.
+  name = target_name
+
+  prefix = "${name}_"
+
+  # Remove prefix if the target name matches directory
+  if (get_label_info(get_label_info(":$target_name", "dir"), "name") == name) {
+    prefix = ""
+  }
+
+  # The config that will apply to dependents of the shared library so they know
+  # they should "import" the symbols
+  config("${prefix}shared_public_config") {
+    defines = [ "${invoker.DEFINE_PREFIX}_SHARED_LIBRARY" ]
+
+    # Executable needs an rpath to find our shared libraries on OSX and Linux
+    if (is_mac) {
+      ldflags = [
+        "-rpath",
+        "@executable_path/",
+      ]
+    }
+    if ((is_linux || is_chromeos) && dawn_has_build) {
+      configs = [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
+    }
+  }
+
+  shared_library("${prefix}shared") {
+    # The "tool" for creating shared libraries will automatically add the "lib" prefix
+    output_name = "dawn_${name}"
+
+    # Copy all variables except "configs", which has a default value
+    forward_variables_from(invoker, "*", [ "configs" ])
+    if (defined(invoker.configs)) {
+      configs += invoker.configs
+    }
+
+    # Tell dependents where to find this shared library
+    if (is_mac) {
+      ldflags = [
+        "-install_name",
+        "@rpath/lib${name}.dylib",
+      ]
+    }
+
+    # Use the config that makes the ${DEFINE_PREFIX}_EXPORT macro do something
+    if (!defined(public_configs)) {
+      public_configs = []
+    }
+    public_configs += [ ":${prefix}shared_public_config" ]
+
+    # Tell sources of this library to export the symbols (and not import)
+    if (!defined(defines)) {
+      defines = []
+    }
+    defines += [ "${invoker.DEFINE_PREFIX}_IMPLEMENTATION" ]
+
+    # Chromium adds a config that uses a special linker script that removes
+    # all symbols except JNI ones. Remove this config so that our
+    # shared_library symbols are visible. This matches what Chromium's
+    # component template does.
+    if (build_with_chromium && is_android) {
+      configs -= [ "//build/config/android:hide_all_but_jni_onload" ]
+    }
+  }
+
+  static_library("${prefix}static") {
+    output_name = "dawn_${name}_static"
+
+    complete_static_lib = dawn_complete_static_libs
+
+    # Copy all variables except "configs", which has a default value
+    forward_variables_from(invoker, "*", [ "configs" ])
+    if (defined(invoker.configs)) {
+      configs += invoker.configs
+    }
+  }
+
+  group(name) {
+    if (is_component_build) {
+      public_deps = [ ":${prefix}shared" ]
+    } else {
+      public_deps = [ ":${prefix}static" ]
+    }
+  }
+}
diff --git a/scripts/dawn_features.gni b/scripts/dawn_features.gni
new file mode 100644
index 0000000..234791c
--- /dev/null
+++ b/scripts/dawn_features.gni
@@ -0,0 +1,98 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/build.gni")
+
+if (build_with_chromium) {
+  import("//build/config/ozone.gni")
+  import("//build/config/sanitizers/sanitizers.gni")
+
+  dawn_use_x11 = ozone_platform_x11
+} else {
+  declare_args() {
+    # Whether Dawn should enable X11 support.
+    dawn_use_x11 = is_linux && !is_chromeos
+  }
+}
+
+# Enable the compilation for UWP
+dawn_is_winuwp = is_win && target_os == "winuwp"
+
+declare_args() {
+  dawn_use_angle = true
+
+  # Enables SwiftShader as the fallback adapter. Requires dawn_swiftshader_dir
+  # to be set to take effect.
+  dawn_use_swiftshader = true
+}
+
+declare_args() {
+  # Enable Dawn's ASSERTs even in release builds
+  dawn_always_assert = false
+
+  # Should the Dawn static libraries be fully linked vs. GN's default of
+  # treating them as source sets. This is useful for people using Dawn
+  # standalone to produce static libraries to use in their projects.
+  dawn_complete_static_libs = false
+
+  # Enables the compilation of Dawn's D3D12 backend
+  dawn_enable_d3d12 = is_win
+
+  # Enables the compilation of Dawn's Metal backend
+  dawn_enable_metal = is_mac
+
+  # Enables the compilation of Dawn's Null backend
+  # (required for unittests, obviously non-conformant)
+  dawn_enable_null = true
+
+  # Enables the compilation of Dawn's OpenGL backend
+  # (best effort, non-conformant)
+  dawn_enable_desktop_gl = is_linux && !is_chromeos
+
+  # Enables the compilation of Dawn's OpenGLES backend
+  # (WebGPU/Compat subset)
+  # Disables OpenGLES when compiling for UWP, since UWP only supports d3d
+  dawn_enable_opengles =
+      (is_linux && !is_chromeos) || (is_win && !dawn_is_winuwp)
+
+  # Enables the compilation of Dawn's Vulkan backend
+  # Disables vulkan when compiling for UWP, since UWP only supports d3d
+  dawn_enable_vulkan = is_linux || is_chromeos || (is_win && !dawn_is_winuwp) ||
+                       is_fuchsia || is_android || dawn_use_swiftshader
+
+  # Enables error injection for faking failures to native API calls
+  dawn_enable_error_injection =
+      is_debug || (build_with_chromium && use_fuzzing_engine)
+}
+
+# GN does not allow reading a variable defined in the same declare_args().
+# Put them in two separate declare_args() when setting the value of one
+# argument based on another.
+declare_args() {
+  # Uses our built version of the Vulkan validation layers
+  dawn_enable_vulkan_validation_layers =
+      dawn_enable_vulkan && ((is_linux && !is_chromeos) || is_win || is_mac)
+
+  # Uses our built version of the Vulkan loader on platforms where we can't
+  # assume to have one present at the system level.
+  dawn_enable_vulkan_loader =
+      dawn_enable_vulkan && (is_mac || (is_linux && !is_android))
+}
+
+# UWP only supports CoreWindow for windowing
+dawn_supports_glfw_for_windowing =
+    (is_win && !dawn_is_winuwp) || (is_linux && !is_chromeos) || is_mac
+
+# Much of the GL backend code is shared, so define a convenience var.
+dawn_enable_opengl = dawn_enable_opengles || dawn_enable_desktop_gl
diff --git a/scripts/dawn_overrides_with_defaults.gni b/scripts/dawn_overrides_with_defaults.gni
new file mode 100644
index 0000000..46f44ef
--- /dev/null
+++ b/scripts/dawn_overrides_with_defaults.gni
@@ -0,0 +1,80 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This files imports the overrides for Dawn but sets the defaults so that
+# projects including Dawn don't have to set dirs if they happen to use the
+# same.
+# It takes advantage of GN's variable scoping rules to define global variables
+# inside if constructs.
+
+import("//build_overrides/dawn.gni")
+
+if (!defined(dawn_standalone)) {
+  dawn_standalone = false
+}
+
+if (!defined(dawn_has_build)) {
+  dawn_has_build = true
+}
+
+if (!defined(dawn_root)) {
+  dawn_root = get_path_info("..", "abspath")
+}
+dawn_gen_root = get_path_info("${dawn_root}", "gen_dir")
+
+if (!defined(dawn_jinja2_dir)) {
+  dawn_jinja2_dir = "//third_party/jinja2"
+}
+
+if (!defined(dawn_glfw_dir)) {
+  dawn_glfw_dir = "//third_party/glfw"
+}
+
+if (!defined(dawn_googletest_dir)) {
+  dawn_googletest_dir = "//third_party/googletest"
+}
+
+if (!defined(dawn_spirv_tools_dir)) {
+  dawn_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
+}
+
+if (!defined(dawn_swiftshader_dir)) {
+  # Default to swiftshader not being available.
+  dawn_swiftshader_dir = ""
+}
+
+if (!defined(dawn_vulkan_headers_dir)) {
+  dawn_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+  if (dawn_standalone) {
+    dawn_vulkan_headers_dir =
+        "${dawn_root}/third_party/vulkan-deps/vulkan-headers/src"
+  }
+}
+
+if (!defined(dawn_vulkan_loader_dir)) {
+  # Default to the Vulkan loader not being available except in standalone.
+  dawn_vulkan_loader_dir = ""
+  if (dawn_standalone) {
+    dawn_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
+  }
+}
+
+if (!defined(dawn_vulkan_validation_layers_dir)) {
+  # Default to VVLs not being available.
+  dawn_vulkan_validation_layers_dir = ""
+}
+
+if (!defined(dawn_abseil_dir)) {
+  dawn_abseil_dir = "//third_party/abseil-cpp"
+}
diff --git a/scripts/extract.py b/scripts/extract.py
new file mode 100644
index 0000000..ed263f4
--- /dev/null
+++ b/scripts/extract.py
@@ -0,0 +1,182 @@
+# Copyright (c) 2015, Google Inc.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""Extracts archives."""
+
+import hashlib
+import optparse
+import os
+import os.path
+import tarfile
+import shutil
+import sys
+import zipfile
+
+
+def CheckedJoin(output, path):
+    """
+  CheckedJoin returns os.path.join(output, path). It does sanity checks to
+  ensure the resulting path is under output, but shouldn't be used on untrusted
+  input.
+  """
+    path = os.path.normpath(path)
+    if os.path.isabs(path) or path.startswith('.'):
+        raise ValueError(path)
+    return os.path.join(output, path)
+
+
+class FileEntry(object):
+    def __init__(self, path, mode, fileobj):
+        self.path = path
+        self.mode = mode
+        self.fileobj = fileobj
+
+
+class SymlinkEntry(object):
+    def __init__(self, path, mode, target):
+        self.path = path
+        self.mode = mode
+        self.target = target
+
+
+def IterateZip(path):
+    """
+  IterateZip opens the zip file at path and returns a generator of entry objects
+  for each file in it.
+  """
+    with zipfile.ZipFile(path, 'r') as zip_file:
+        for info in zip_file.infolist():
+            if info.filename.endswith('/'):
+                continue
+            yield FileEntry(info.filename, None, zip_file.open(info))
+
+
+def IterateTar(path, compression):
+    """
+  IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
+  entry objects for each file in it.
+  """
+    with tarfile.open(path, 'r:' + compression) as tar_file:
+        for info in tar_file:
+            if info.isdir():
+                pass
+            elif info.issym():
+                yield SymlinkEntry(info.name, None, info.linkname)
+            elif info.isfile():
+                yield FileEntry(info.name, info.mode,
+                                tar_file.extractfile(info))
+            else:
+                raise ValueError('Unknown entry type "%s"' % (info.name, ))
+
+
+def main(args):
+    parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT')
+    parser.add_option('--no-prefix',
+                      dest='no_prefix',
+                      action='store_true',
+                      help='Do not remove a prefix from paths in the archive.')
+    options, args = parser.parse_args(args)
+
+    if len(args) != 2:
+        parser.print_help()
+        return 1
+
+    archive, output = args
+
+    if not os.path.exists(archive):
+        # Skip archives that weren't downloaded.
+        return 0
+
+    with open(archive, 'rb') as f:
+        sha256 = hashlib.sha256()
+        while True:
+            chunk = f.read(1024 * 1024)
+            if not chunk:
+                break
+            sha256.update(chunk)
+        digest = sha256.hexdigest()
+
+    stamp_path = os.path.join(output, ".dawn_archive_digest")
+    if os.path.exists(stamp_path):
+        with open(stamp_path) as f:
+            if f.read().strip() == digest:
+                print("Already up-to-date.")
+                return 0
+
+    if archive.endswith('.zip'):
+        entries = IterateZip(archive)
+    elif archive.endswith('.tar.gz'):
+        entries = IterateTar(archive, 'gz')
+    elif archive.endswith('.tar.bz2'):
+        entries = IterateTar(archive, 'bz2')
+    else:
+        raise ValueError(archive)
+
+    try:
+        if os.path.exists(output):
+            print("Removing %s" % (output, ))
+            shutil.rmtree(output)
+
+        print("Extracting %s to %s" % (archive, output))
+        prefix = None
+        num_extracted = 0
+        for entry in entries:
+            # Even on Windows, zip files must always use forward slashes.
+            if '\\' in entry.path or entry.path.startswith('/'):
+                raise ValueError(entry.path)
+
+            if not options.no_prefix:
+                new_prefix, rest = entry.path.split('/', 1)
+
+                # Ensure the archive is consistent.
+                if prefix is None:
+                    prefix = new_prefix
+                if prefix != new_prefix:
+                    raise ValueError((prefix, new_prefix))
+            else:
+                rest = entry.path
+
+            # Extract the file into the output directory.
+            fixed_path = CheckedJoin(output, rest)
+            if not os.path.isdir(os.path.dirname(fixed_path)):
+                os.makedirs(os.path.dirname(fixed_path))
+            if isinstance(entry, FileEntry):
+                with open(fixed_path, 'wb') as out:
+                    shutil.copyfileobj(entry.fileobj, out)
+            elif isinstance(entry, SymlinkEntry):
+                os.symlink(entry.target, fixed_path)
+            else:
+                raise TypeError('unknown entry type')
+
+            # Fix up permissions if needbe.
+            # TODO(davidben): To be extra tidy, this should only track the execute bit
+            # as in git.
+            if entry.mode is not None:
+                os.chmod(fixed_path, entry.mode)
+
+            # Print every 100 files, so bots do not time out on large archives.
+            num_extracted += 1
+            if num_extracted % 100 == 0:
+                print("Extracted %d files..." % (num_extracted, ))
+    finally:
+        entries.close()
+
+    with open(stamp_path, 'w') as f:
+        f.write(digest)
+
+    print("Done. Extracted %d files." % (num_extracted, ))
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/scripts/perf_test_runner.py b/scripts/perf_test_runner.py
new file mode 100755
index 0000000..157d449
--- /dev/null
+++ b/scripts/perf_test_runner.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Based on Angle's perf_test_runner.py
+
+import glob
+import subprocess
+import sys
+import os
+import re
+
+base_path = os.path.abspath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
+
+# Look for a [Rr]elease build.
+perftests_paths = glob.glob('out/*elease*')
+metric = 'wall_time'
+max_experiments = 10
+
+binary_name = 'dawn_perf_tests'
+if sys.platform == 'win32':
+    binary_name += '.exe'
+
+scores = []
+
+
+def mean(data):
+    """Return the sample arithmetic mean of data."""
+    n = len(data)
+    if n < 1:
+        raise ValueError('mean requires at least one data point')
+    return float(sum(data)) / float(n)  # in Python 2 use sum(data)/float(n)
+
+
+def sum_of_square_deviations(data, c):
+    """Return sum of square deviations of sequence data."""
+    ss = sum((float(x) - c)**2 for x in data)
+    return ss
+
+
+def coefficient_of_variation(data):
+    """Calculates the population coefficient of variation."""
+    n = len(data)
+    if n < 2:
+        raise ValueError('variance requires at least two data points')
+    c = mean(data)
+    ss = sum_of_square_deviations(data, c)
+    pvar = ss / n  # the population variance
+    stddev = (pvar**0.5)  # population standard deviation
+    return stddev / c
+
+
+def truncated_list(data, n):
+    """Compute a truncated list, n is truncation size"""
+    if len(data) < n * 2:
+        raise ValueError('list not large enough to truncate')
+    return sorted(data)[n:-n]
+
+
+def truncated_mean(data, n):
+    """Compute a truncated mean, n is truncation size"""
+    return mean(truncated_list(data, n))
+
+
+def truncated_cov(data, n):
+    """Compute a truncated coefficient of variation, n is truncation size"""
+    return coefficient_of_variation(truncated_list(data, n))
+
+
+# Find most recent binary
+newest_binary = None
+newest_mtime = None
+
+for path in perftests_paths:
+    binary_path = os.path.join(base_path, path, binary_name)
+    if os.path.exists(binary_path):
+        binary_mtime = os.path.getmtime(binary_path)
+        if (newest_binary is None) or (binary_mtime > newest_mtime):
+            newest_binary = binary_path
+            newest_mtime = binary_mtime
+
+perftests_path = newest_binary
+
+if perftests_path == None or not os.path.exists(perftests_path):
+    print('Cannot find Release %s!' % binary_name)
+    sys.exit(1)
+
+if len(sys.argv) >= 2:
+    test_name = sys.argv[1]
+
+print('Using test executable: ' + perftests_path)
+print('Test name: ' + test_name)
+
+
+def get_results(metric, extra_args=[]):
+    process = subprocess.Popen(
+        [perftests_path, '--gtest_filter=' + test_name] + extra_args,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE)
+    output, err = process.communicate()
+
+    m = re.search(r'Running (\d+) tests', output)
+    if m and int(m.group(1)) > 1:
+        print("Found more than one test result in output:")
+        print(output)
+        sys.exit(3)
+
+    pattern = metric + r'.*= ([0-9.]+)'
+    m = re.findall(pattern, output)
+    if not m:
+        print("Did not find the metric '%s' in the test output:" % metric)
+        print(output)
+        sys.exit(1)
+
+    return [float(value) for value in m]
+
+
+# Calibrate the number of steps
+steps = get_results("steps", ["--calibration"])[0]
+print("running with %d steps." % steps)
+
+# Loop 'max_experiments' times, running the tests.
+for experiment in range(max_experiments):
+    experiment_scores = get_results(metric, ["--override-steps", str(steps)])
+
+    for score in experiment_scores:
+        sys.stdout.write("%s: %.2f" % (metric, score))
+        scores.append(score)
+
+        if (len(scores) > 1):
+            sys.stdout.write(", mean: %.2f" % mean(scores))
+            sys.stdout.write(", variation: %.2f%%" %
+                             (coefficient_of_variation(scores) * 100.0))
+
+        if (len(scores) > 7):
+            truncation_n = len(scores) >> 3
+            sys.stdout.write(", truncated mean: %.2f" %
+                             truncated_mean(scores, truncation_n))
+            sys.stdout.write(", variation: %.2f%%" %
+                             (truncated_cov(scores, truncation_n) * 100.0))
+
+        print("")
diff --git a/scripts/standalone-with-node.gclient b/scripts/standalone-with-node.gclient
new file mode 100644
index 0000000..b695f8a
--- /dev/null
+++ b/scripts/standalone-with-node.gclient
@@ -0,0 +1,13 @@
+# Copy this file to <dawn clone dir>/.gclient to bootstrap gclient in a
+# standalone checkout of Dawn that also compiles dawn_node.
+
+solutions = [
+  { "name"        : ".",
+    "url"         : "https://dawn.googlesource.com/dawn",
+    "deps_file"   : "DEPS",
+    "managed"     : False,
+    "custom_vars" : {
+      "dawn_node" : True,
+    }
+  },
+]
diff --git a/scripts/standalone.gclient b/scripts/standalone.gclient
new file mode 100644
index 0000000..86a7f0c
--- /dev/null
+++ b/scripts/standalone.gclient
@@ -0,0 +1,10 @@
+# Copy this file to <dawn clone dir>/.gclient to bootstrap gclient in a
+# standalone checkout of Dawn.
+
+solutions = [
+  { "name"        : ".",
+    "url"         : "https://dawn.googlesource.com/dawn",
+    "deps_file"   : "DEPS",
+    "managed"     : False,
+  },
+]
diff --git a/src/Dummy.cpp b/src/Dummy.cpp
new file mode 100644
index 0000000..5959a87
--- /dev/null
+++ b/src/Dummy.cpp
@@ -0,0 +1,18 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// CMake requires that targets contain at least on file. This file is used when we want to create
+// empty targets.
+
+int someSymbolToMakeXCodeHappy = 0;
diff --git a/src/dawn/BUILD.gn b/src/dawn/BUILD.gn
new file mode 100644
index 0000000..67991ad
--- /dev/null
+++ b/src/dawn/BUILD.gn
@@ -0,0 +1,99 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+
+###############################################################################
+# Dawn C++ wrapper
+###############################################################################
+
+dawn_json_generator("cpp_gen") {
+  target = "cpp"
+  outputs = [ "src/dawn/webgpu_cpp.cpp" ]
+}
+
+source_set("cpp") {
+  deps = [
+    ":cpp_gen",
+    "${dawn_root}/include/dawn:cpp_headers",
+  ]
+  sources = get_target_outputs(":cpp_gen")
+}
+
+###############################################################################
+# Dawn proc
+###############################################################################
+
+dawn_json_generator("proc_gen") {
+  target = "proc"
+  outputs = [
+    "src/dawn/dawn_proc.c",
+    "src/dawn/dawn_thread_dispatch_proc.cpp",
+  ]
+}
+
+dawn_component("proc") {
+  DEFINE_PREFIX = "WGPU"
+
+  public_deps = [ "${dawn_root}/include/dawn:headers" ]
+  deps = [ ":proc_gen" ]
+  sources = get_target_outputs(":proc_gen")
+  sources += [
+    "${dawn_root}/include/dawn/dawn_proc.h",
+    "${dawn_root}/include/dawn/dawn_thread_dispatch_proc.h",
+  ]
+}
+
+###############################################################################
+# Other generated files (upstream header, emscripten header, emscripten bits)
+###############################################################################
+
+dawn_json_generator("webgpu_headers_gen") {
+  target = "webgpu_headers"
+  outputs = [ "webgpu-headers/webgpu.h" ]
+}
+
+dawn_json_generator("emscripten_bits_gen") {
+  target = "emscripten_bits"
+  outputs = [
+    "emscripten-bits/webgpu.h",
+    "emscripten-bits/webgpu_cpp.h",
+    "emscripten-bits/webgpu_cpp.cpp",
+    "emscripten-bits/webgpu_struct_info.json",
+    "emscripten-bits/library_webgpu_enum_tables.js",
+  ]
+}
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawncpp") {
+  public_deps = [ ":cpp" ]
+}
+group("dawncpp_headers") {
+  public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+}
+group("dawn_proc") {
+  public_deps = [ ":proc" ]
+}
+group("dawn_headers") {
+  public_deps = [ "${dawn_root}/include/dawn:headers" ]
+}
+group("dawn_cpp") {
+  public_deps = [ ":cpp" ]
+}
diff --git a/src/dawn/CMakeLists.txt b/src/dawn/CMakeLists.txt
new file mode 100644
index 0000000..578e61c
--- /dev/null
+++ b/src/dawn/CMakeLists.txt
@@ -0,0 +1,144 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###############################################################################
+# Dawn projects
+###############################################################################
+
+add_subdirectory(common)
+add_subdirectory(platform)
+add_subdirectory(native)
+add_subdirectory(wire)
+# TODO(dawn:269): Remove once the implementation-based swapchains are removed.
+add_subdirectory(utils)
+
+if (DAWN_BUILD_NODE_BINDINGS)
+    set(NODE_BINDING_DEPS
+        ${NODE_ADDON_API_DIR}
+        ${NODE_API_HEADERS_DIR}
+        ${WEBGPU_IDL_PATH}
+    )
+    foreach(DEP ${NODE_BINDING_DEPS})
+        if (NOT EXISTS ${DEP})
+            message(FATAL_ERROR
+                "DAWN_BUILD_NODE_BINDINGS requires missing dependency '${DEP}'\n"
+                "Please follow the 'Fetch dependencies' instructions at:\n"
+                "./src/dawn/node/README.md"
+            )
+        endif()
+    endforeach()
+    if (NOT CMAKE_POSITION_INDEPENDENT_CODE)
+        message(FATAL_ERROR "DAWN_BUILD_NODE_BINDINGS requires building with DAWN_ENABLE_PIC")
+    endif()
+
+    add_subdirectory(node)
+endif()
+
+###############################################################################
+# Dawn headers
+###############################################################################
+
+DawnJSONGenerator(
+    TARGET "headers"
+    PRINT_NAME "Dawn headers"
+    RESULT_VARIABLE "DAWN_HEADERS_GEN_SOURCES"
+)
+
+# Headers only INTERFACE library with generated headers don't work in CMake
+# because the GENERATED property is local to a directory. Instead we make a
+# STATIC library with a Dummy cpp file.
+#
+# INTERFACE libraries can only have INTERFACE sources so the sources get added
+# to the dependant's list of sources. If these dependents are in another
+# directory, they don't see the GENERATED property and fail to configure
+# because the file doesn't exist on disk.
+add_library(dawn_headers STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_headers PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h"
+    ${DAWN_HEADERS_GEN_SOURCES}
+)
+target_link_libraries(dawn_headers INTERFACE dawn_public_config)
+
+###############################################################################
+# Dawn C++ headers
+###############################################################################
+
+DawnJSONGenerator(
+    TARGET "cpp_headers"
+    PRINT_NAME "Dawn C++ headers"
+    RESULT_VARIABLE "DAWNCPP_HEADERS_GEN_SOURCES"
+)
+
+# This headers only library needs to be a STATIC library, see comment for
+# dawn_headers above.
+add_library(dawncpp_headers STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawncpp_headers PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h"
+    ${DAWNCPP_HEADERS_GEN_SOURCES}
+)
+target_link_libraries(dawncpp_headers INTERFACE dawn_headers)
+
+###############################################################################
+# Dawn C++ wrapper
+###############################################################################
+
+DawnJSONGenerator(
+    TARGET "cpp"
+    PRINT_NAME "Dawn C++ wrapper"
+    RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
+)
+
+add_library(dawncpp STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES})
+target_link_libraries(dawncpp PUBLIC dawncpp_headers)
+
+###############################################################################
+# libdawn_proc
+###############################################################################
+
+DawnJSONGenerator(
+    TARGET "proc"
+    PRINT_NAME "Dawn C++ wrapper"
+    RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
+)
+
+add_library(dawn_proc ${DAWN_DUMMY_FILE})
+target_compile_definitions(dawn_proc PRIVATE "WGPU_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(dawn_proc PRIVATE "WGPU_SHARED_LIBRARY")
+endif()
+target_sources(dawn_proc PRIVATE ${DAWNPROC_GEN_SOURCES})
+target_link_libraries(dawn_proc PUBLIC dawn_headers)
+
+###############################################################################
+# Other generated files (upstream header, emscripten header, emscripten bits)
+###############################################################################
+
+DawnJSONGenerator(
+    TARGET "webgpu_headers"
+    PRINT_NAME "WebGPU headers"
+    RESULT_VARIABLE "WEBGPU_HEADERS_GEN_SOURCES"
+)
+add_custom_target(webgpu_headers_gen
+    DEPENDS ${WEBGPU_HEADERS_GEN_SOURCES}
+)
+
+DawnJSONGenerator(
+    TARGET "emscripten_bits"
+    PRINT_NAME "Emscripten WebGPU bits"
+    RESULT_VARIABLE "EMSCRIPTEN_BITS_GEN_SOURCES"
+)
+add_custom_target(emscripten_bits_gen
+    DEPENDS ${EMSCRIPTEN_BITS_GEN_SOURCES}
+)
diff --git a/src/dawn/common/Alloc.h b/src/dawn/common/Alloc.h
new file mode 100644
index 0000000..940d5ff
--- /dev/null
+++ b/src/dawn/common/Alloc.h
@@ -0,0 +1,33 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ALLOC_H_
+#define COMMON_ALLOC_H_
+
+#include <cstddef>
+#include <new>
+
+template <typename T>
+T* AllocNoThrow(size_t count) {
+#if defined(ADDRESS_SANITIZER)
+    if (count * sizeof(T) >= 0x70000000) {
+        // std::nothrow isn't implemented on ASAN and it has a 2GB allocation limit.
+        // Catch large allocations and error out so fuzzers make progress.
+        return nullptr;
+    }
+#endif
+    return new (std::nothrow) T[count];
+}
+
+#endif  // COMMON_ALLOC_H_
diff --git a/src/dawn/common/Assert.cpp b/src/dawn/common/Assert.cpp
new file mode 100644
index 0000000..95d2efd
--- /dev/null
+++ b/src/dawn/common/Assert.cpp
@@ -0,0 +1,31 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+
+#include <cstdlib>
+
+void HandleAssertionFailure(const char* file,
+                            const char* function,
+                            int line,
+                            const char* condition) {
+    dawn::ErrorLog() << "Assertion failure at " << file << ":" << line << " (" << function
+                     << "): " << condition;
+#if defined(DAWN_ABORT_ON_ASSERT)
+    abort();
+#else
+    DAWN_BREAKPOINT();
+#endif
+}
diff --git a/src/dawn/common/Assert.h b/src/dawn/common/Assert.h
new file mode 100644
index 0000000..e7961d7
--- /dev/null
+++ b/src/dawn/common/Assert.h
@@ -0,0 +1,80 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ASSERT_H_
+#define COMMON_ASSERT_H_
+
+#include "dawn/common/Compiler.h"
+
+// Dawn asserts to be used instead of the regular C stdlib assert function (if you don't use assert
+// yet, you should start now!). In debug ASSERT(condition) will trigger an error, otherwise in
+// release it does nothing at runtime.
+//
+// In case of name clashes (with for example a testing library), you can define the
+// DAWN_SKIP_ASSERT_SHORTHANDS to only define the DAWN_ prefixed macros.
+//
+// These asserts feature:
+//     - Logging of the error with file, line and function information.
+//     - Breaking in the debugger when an assert is triggered and a debugger is attached.
+//     - Use the assert information to help the compiler optimizer in release builds.
+
+// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
+// points out that it looks like an owl face.
+#if defined(DAWN_COMPILER_MSVC)
+#    define DAWN_ASSERT_LOOP_CONDITION (0, 0)
+#else
+#    define DAWN_ASSERT_LOOP_CONDITION (0)
+#endif
+
+// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
+// expect of an assert and in release it tries to give hints to make the compiler generate better
+// code.
+#if defined(DAWN_ENABLE_ASSERTS)
+#    define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
+        do {                                                          \
+            if (!(condition)) {                                       \
+                HandleAssertionFailure(file, func, line, #condition); \
+            }                                                         \
+        } while (DAWN_ASSERT_LOOP_CONDITION)
+#else
+#    if defined(DAWN_COMPILER_MSVC)
+#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
+#    elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
+#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
+#    else
+#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+            do {                                                         \
+                DAWN_UNUSED(sizeof(condition));                          \
+            } while (DAWN_ASSERT_LOOP_CONDITION)
+#    endif
+#endif
+
+#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
+#define DAWN_UNREACHABLE()                                                 \
+    do {                                                                   \
+        DAWN_ASSERT(DAWN_ASSERT_LOOP_CONDITION && "Unreachable code hit"); \
+        DAWN_BUILTIN_UNREACHABLE();                                        \
+    } while (DAWN_ASSERT_LOOP_CONDITION)
+
+#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
+#    define ASSERT DAWN_ASSERT
+#    define UNREACHABLE DAWN_UNREACHABLE
+#endif
+
+void HandleAssertionFailure(const char* file,
+                            const char* function,
+                            int line,
+                            const char* condition);
+
+#endif  // COMMON_ASSERT_H_
diff --git a/src/dawn/common/BUILD.gn b/src/dawn/common/BUILD.gn
new file mode 100644
index 0000000..d0b0086
--- /dev/null
+++ b/src/dawn/common/BUILD.gn
@@ -0,0 +1,262 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//build_overrides/build.gni")
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+# Use Chromium's dcheck_always_on when available so that we respect it when
+# running tests on the GPU builders
+if (build_with_chromium) {
+  import("//build/config/dcheck_always_on.gni")
+} else {
+  dcheck_always_on = false
+}
+
+if (build_with_chromium) {
+  import("//build/config/sanitizers/sanitizers.gni")
+} else {
+  use_fuzzing_engine = false
+}
+
+###############################################################################
+# Common dawn configs
+###############################################################################
+
+config("internal_config") {
+  include_dirs = [
+    "${target_gen_dir}/../../../src",
+    "${dawn_root}/src",
+  ]
+
+  defines = []
+  if (dawn_always_assert || dcheck_always_on || is_debug ||
+      use_fuzzing_engine) {
+    defines += [ "DAWN_ENABLE_ASSERTS" ]
+  }
+
+  if (use_fuzzing_engine) {
+    # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
+    defines += [ "DAWN_ABORT_ON_ASSERT" ]
+  }
+
+  if (dawn_enable_d3d12) {
+    defines += [ "DAWN_ENABLE_BACKEND_D3D12" ]
+  }
+  if (dawn_enable_metal) {
+    defines += [ "DAWN_ENABLE_BACKEND_METAL" ]
+  }
+  if (dawn_enable_null) {
+    defines += [ "DAWN_ENABLE_BACKEND_NULL" ]
+  }
+  if (dawn_enable_opengl) {
+    defines += [ "DAWN_ENABLE_BACKEND_OPENGL" ]
+  }
+  if (dawn_enable_desktop_gl) {
+    defines += [ "DAWN_ENABLE_BACKEND_DESKTOP_GL" ]
+  }
+  if (dawn_enable_opengles) {
+    defines += [ "DAWN_ENABLE_BACKEND_OPENGLES" ]
+  }
+  if (dawn_enable_vulkan) {
+    defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
+  }
+
+  if (dawn_use_x11) {
+    defines += [ "DAWN_USE_X11" ]
+  }
+
+  if (dawn_enable_error_injection) {
+    defines += [ "DAWN_ENABLE_ERROR_INJECTION" ]
+  }
+
+  # Only internal Dawn targets can use this config, this means only targets in
+  # this BUILD.gn file and related subdirs.
+  visibility = [
+    "${dawn_root}/samples/dawn/*",
+    "${dawn_root}/src/dawn/*",
+  ]
+
+  cflags = []
+  if (is_clang) {
+    cflags += [ "-Wno-shadow" ]
+  }
+
+  # Enable more warnings that were found when using Dawn in other projects.
+  # Add them only when building in standalone because we control which clang
+  # version we use. Otherwise we risk breaking projects depending on Dawn when
+  # the use a different clang version.
+  if (dawn_standalone && is_clang) {
+    cflags += [
+      "-Wconditional-uninitialized",
+      "-Wcstring-format-directive",
+      "-Wc++11-narrowing",
+      "-Wdeprecated-copy",
+      "-Wdeprecated-copy-dtor",
+      "-Wduplicate-enum",
+      "-Wextra-semi-stmt",
+      "-Wimplicit-fallthrough",
+      "-Winconsistent-missing-destructor-override",
+      "-Winvalid-offsetof",
+      "-Wmissing-field-initializers",
+      "-Wnon-c-typedef-for-linkage",
+      "-Wpessimizing-move",
+      "-Wrange-loop-analysis",
+      "-Wredundant-move",
+      "-Wshadow-field",
+      "-Wstrict-prototypes",
+      "-Wtautological-unsigned-zero-compare",
+      "-Wunreachable-code-aggressive",
+      "-Wunused-but-set-variable",
+    ]
+
+    if (is_win) {
+      cflags += [
+        # clang-cl doesn't know -pedantic, pass it explicitly to the clang driver
+        "/clang:-pedantic",
+
+        # Allow the use of __uuidof()
+        "-Wno-language-extension-token",
+      ]
+    } else {
+      cflags += [ "-pedantic" ]
+    }
+  }
+
+  if (!is_clang && is_win) {
+    # Dawn extends wgpu enums with internal enums.
+    # MSVC considers these invalid switch values. crbug.com/dawn/397.
+    cflags += [ "/wd4063" ]
+
+    # MSVC things that a switch over all the enum values of an enum class is
+    # not sufficient to cover all control paths. Turn off this warning so that
+    # the respective clang warning tells us where to add switch cases
+    # (otherwise we have to add default: UNREACHABLE() that silences clang too)
+    cflags += [ "/wd4715" ]
+
+    # MSVC emits warnings when using constructs deprecated in C++17. Silence
+    # them until they are fixed.
+    # TODO(dawn:824): Fix all uses of C++ features deprecated in C++17.
+    defines += [ "_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS" ]
+    if (dawn_is_winuwp) {
+      # /ZW makes sure we don't add calls that are forbidden in UWP.
+      # and /EHsc is required to be used in combination with it,
+      # even if it is already added by the windows GN defaults,
+      # we still add it to make every /ZW paired with a /EHsc
+      cflags_cc = [
+        "/ZW:nostdlib",
+        "/EHsc",
+      ]
+    }
+  }
+}
+
+###############################################################################
+# Common dawn library
+###############################################################################
+
+dawn_generator("dawn_version_gen") {
+  script = "${dawn_root}/generator/dawn_version_generator.py"
+  args = [
+    "--dawn-dir",
+    rebase_path("${dawn_root}", root_build_dir),
+  ]
+  outputs = [ "src/dawn/common/Version_autogen.h" ]
+}
+
+# This GN file is discovered by all Chromium builds, but common doesn't support
+# all of Chromium's OSes so we explicitly make the target visible only on
+# systems we know Dawn is able to compile on.
+if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
+  static_library("common") {
+    sources = [
+      "Alloc.h",
+      "Assert.cpp",
+      "Assert.h",
+      "BitSetIterator.h",
+      "Compiler.h",
+      "ConcurrentCache.h",
+      "Constants.h",
+      "CoreFoundationRef.h",
+      "DynamicLib.cpp",
+      "DynamicLib.h",
+      "GPUInfo.cpp",
+      "GPUInfo.h",
+      "HashUtils.h",
+      "IOKitRef.h",
+      "LinkedList.h",
+      "Log.cpp",
+      "Log.h",
+      "Math.cpp",
+      "Math.h",
+      "NSRef.h",
+      "NonCopyable.h",
+      "PlacementAllocated.h",
+      "Platform.h",
+      "Preprocessor.h",
+      "RefBase.h",
+      "RefCounted.cpp",
+      "RefCounted.h",
+      "Result.cpp",
+      "Result.h",
+      "SerialMap.h",
+      "SerialQueue.h",
+      "SerialStorage.h",
+      "SlabAllocator.cpp",
+      "SlabAllocator.h",
+      "StackContainer.h",
+      "SwapChainUtils.h",
+      "SystemUtils.cpp",
+      "SystemUtils.h",
+      "TypeTraits.h",
+      "TypedInteger.h",
+      "UnderlyingType.h",
+      "ityp_array.h",
+      "ityp_bitset.h",
+      "ityp_span.h",
+      "ityp_stack_vec.h",
+      "ityp_vector.h",
+      "vulkan_platform.h",
+      "xlib_with_undefs.h",
+    ]
+
+    public_deps = [ ":dawn_version_gen" ]
+
+    if (is_mac) {
+      sources += [ "SystemUtils_mac.mm" ]
+    }
+
+    public_configs = [ ":internal_config" ]
+    deps = [
+      "${dawn_root}/include/dawn:cpp_headers",
+      "${dawn_root}/include/dawn:headers",
+    ]
+
+    if (is_win) {
+      sources += [
+        "WindowsUtils.cpp",
+        "WindowsUtils.h",
+        "windows_with_undefs.h",
+      ]
+    }
+    if (dawn_enable_vulkan) {
+      public_deps += [ "${dawn_vulkan_headers_dir}:vulkan_headers" ]
+    }
+    if (is_android) {
+      libs = [ "log" ]
+    }
+  }
+}
diff --git a/src/dawn/common/BitSetIterator.h b/src/dawn/common/BitSetIterator.h
new file mode 100644
index 0000000..f14a76c
--- /dev/null
+++ b/src/dawn/common/BitSetIterator.h
@@ -0,0 +1,139 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_BITSETITERATOR_H_
+#define COMMON_BITSETITERATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <bitset>
+#include <limits>
+
+// This is ANGLE's BitSetIterator class with a customizable return type
+// TODO(crbug.com/dawn/306): it could be optimized, in particular when N <= 64
+
+template <typename T>
+T roundUp(const T value, const T alignment) {
+    auto temp = value + alignment - static_cast<T>(1);
+    return temp - temp % alignment;
+}
+
+template <size_t N, typename T>
+class BitSetIterator final {
+  public:
+    BitSetIterator(const std::bitset<N>& bitset);
+    BitSetIterator(const BitSetIterator& other);
+    BitSetIterator& operator=(const BitSetIterator& other);
+
+    class Iterator final {
+      public:
+        Iterator(const std::bitset<N>& bits);
+        Iterator& operator++();
+
+        bool operator==(const Iterator& other) const;
+        bool operator!=(const Iterator& other) const;
+
+        T operator*() const {
+            using U = UnderlyingType<T>;
+            ASSERT(static_cast<U>(mCurrentBit) <= std::numeric_limits<U>::max());
+            return static_cast<T>(static_cast<U>(mCurrentBit));
+        }
+
+      private:
+        unsigned long getNextBit();
+
+        static constexpr size_t kBitsPerWord = sizeof(uint32_t) * 8;
+        std::bitset<N> mBits;
+        unsigned long mCurrentBit;
+        unsigned long mOffset;
+    };
+
+    Iterator begin() const {
+        return Iterator(mBits);
+    }
+    Iterator end() const {
+        return Iterator(std::bitset<N>(0));
+    }
+
+  private:
+    const std::bitset<N> mBits;
+};
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
+    mBits = other.mBits;
+    return *this;
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
+    : mBits(bits), mCurrentBit(0), mOffset(0) {
+    if (bits.any()) {
+        mCurrentBit = getNextBit();
+    } else {
+        mOffset = static_cast<unsigned long>(roundUp(N, kBitsPerWord));
+    }
+}
+
+template <size_t N, typename T>
+typename BitSetIterator<N, T>::Iterator& BitSetIterator<N, T>::Iterator::operator++() {
+    DAWN_ASSERT(mBits.any());
+    mBits.set(mCurrentBit - mOffset, 0);
+    mCurrentBit = getNextBit();
+    return *this;
+}
+
+template <size_t N, typename T>
+bool BitSetIterator<N, T>::Iterator::operator==(const Iterator& other) const {
+    return mOffset == other.mOffset && mBits == other.mBits;
+}
+
+template <size_t N, typename T>
+bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
+    return !(*this == other);
+}
+
+template <size_t N, typename T>
+unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
+    static std::bitset<N> wordMask(std::numeric_limits<uint32_t>::max());
+
+    while (mOffset < N) {
+        uint32_t wordBits = static_cast<uint32_t>((mBits & wordMask).to_ulong());
+        if (wordBits != 0ul) {
+            return ScanForward(wordBits) + mOffset;
+        }
+
+        mBits >>= kBitsPerWord;
+        mOffset += kBitsPerWord;
+    }
+    return 0;
+}
+
+// Helper to avoid needing to specify the template parameter size
+template <size_t N>
+BitSetIterator<N, uint32_t> IterateBitSet(const std::bitset<N>& bitset) {
+    return BitSetIterator<N, uint32_t>(bitset);
+}
+
+#endif  // COMMON_BITSETITERATOR_H_
diff --git a/src/dawn/common/CMakeLists.txt b/src/dawn/common/CMakeLists.txt
new file mode 100644
index 0000000..1c28e71
--- /dev/null
+++ b/src/dawn/common/CMakeLists.txt
@@ -0,0 +1,91 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnGenerator(
+    SCRIPT "${Dawn_SOURCE_DIR}/generator/dawn_version_generator.py"
+    PRINT_NAME "Dawn version based utilities"
+    ARGS "--dawn-dir"
+         "${Dawn_SOURCE_DIR}"
+    RESULT_VARIABLE "DAWN_VERSION_AUTOGEN_SOURCES"
+)
+
+add_library(dawn_common STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_common PRIVATE
+    ${DAWN_VERSION_AUTOGEN_SOURCES}
+    "Alloc.h"
+    "Assert.cpp"
+    "Assert.h"
+    "BitSetIterator.h"
+    "Compiler.h"
+    "ConcurrentCache.h"
+    "Constants.h"
+    "CoreFoundationRef.h"
+    "DynamicLib.cpp"
+    "DynamicLib.h"
+    "GPUInfo.cpp"
+    "GPUInfo.h"
+    "HashUtils.h"
+    "IOKitRef.h"
+    "LinkedList.h"
+    "Log.cpp"
+    "Log.h"
+    "Math.cpp"
+    "Math.h"
+    "NSRef.h"
+    "NonCopyable.h"
+    "PlacementAllocated.h"
+    "Platform.h"
+    "Preprocessor.h"
+    "RefBase.h"
+    "RefCounted.cpp"
+    "RefCounted.h"
+    "Result.cpp"
+    "Result.h"
+    "SerialMap.h"
+    "SerialQueue.h"
+    "SerialStorage.h"
+    "SlabAllocator.cpp"
+    "SlabAllocator.h"
+    "StackContainer.h"
+    "SwapChainUtils.h"
+    "SystemUtils.cpp"
+    "SystemUtils.h"
+    "TypeTraits.h"
+    "TypedInteger.h"
+    "UnderlyingType.h"
+    "ityp_array.h"
+    "ityp_bitset.h"
+    "ityp_span.h"
+    "ityp_stack_vec.h"
+    "ityp_vector.h"
+    "vulkan_platform.h"
+    "xlib_with_undefs.h"
+)
+
+if (WIN32)
+    target_sources(dawn_common PRIVATE
+        "WindowsUtils.cpp"
+        "WindowsUtils.h"
+        "windows_with_undefs.h"
+    )
+elseif(APPLE)
+    target_sources(dawn_common PRIVATE
+        "SystemUtils_mac.mm"
+    )
+endif()
+
+target_link_libraries(dawn_common PUBLIC dawncpp_headers PRIVATE dawn_internal_config)
+
+# TODO Android Log support
+# TODO Vulkan headers support
diff --git a/src/dawn/common/Compiler.h b/src/dawn/common/Compiler.h
new file mode 100644
index 0000000..ae4f5c0
--- /dev/null
+++ b/src/dawn/common/Compiler.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_COMPILER_H_
+#define COMMON_COMPILER_H_
+
+// Defines macros for compiler-specific functionality
+//  - DAWN_COMPILER_[CLANG|GCC|MSVC]: Compiler detection
+//  - DAWN_BREAKPOINT(): Raises an exception and breaks in the debugger
+//  - DAWN_BUILTIN_UNREACHABLE(): Hints the compiler that a code path is unreachable
+//  - DAWN_(UN)?LIKELY(EXPR): Where available, hints the compiler that the expression will be true
+//      (resp. false) to help it generate code that leads to better branch prediction.
+//  - DAWN_UNUSED(EXPR): Prevents unused variable/expression warnings on EXPR.
+//  - DAWN_UNUSED_FUNC(FUNC): Prevents unused function warnings on FUNC.
+//  - DAWN_DECLARE_UNUSED:    Prevents unused function warnings a subsequent declaration.
+//  Both DAWN_UNUSED_FUNC and DAWN_DECLARE_UNUSED may be necessary, e.g. to suppress clang's
+//  unneeded-internal-declaration warning.
+
+// Clang and GCC, check for __clang__ too to catch clang-cl masquarading as MSVC
+#if defined(__GNUC__) || defined(__clang__)
+#    if defined(__clang__)
+#        define DAWN_COMPILER_CLANG
+#    else
+#        define DAWN_COMPILER_GCC
+#    endif
+
+#    if defined(__i386__) || defined(__x86_64__)
+#        define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
+#    else
+// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
+#        define DAWN_BREAKPOINT()
+#    endif
+
+#    define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
+#    define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
+#    define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+#    if !defined(__has_cpp_attribute)
+#        define __has_cpp_attribute(name) 0
+#    endif
+
+#    define DAWN_DECLARE_UNUSED __attribute__((unused))
+#    if defined(NDEBUG)
+#        define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+#    endif
+#    define DAWN_NOINLINE __attribute__((noinline))
+
+// MSVC
+#elif defined(_MSC_VER)
+#    define DAWN_COMPILER_MSVC
+
+extern void __cdecl __debugbreak(void);
+#    define DAWN_BREAKPOINT() __debugbreak()
+
+#    define DAWN_BUILTIN_UNREACHABLE() __assume(false)
+
+#    define DAWN_DECLARE_UNUSED
+#    if defined(NDEBUG)
+#        define DAWN_FORCE_INLINE __forceinline
+#    endif
+#    define DAWN_NOINLINE __declspec(noinline)
+
+#else
+#    error "Unsupported compiler"
+#endif
+
+// It seems that (void) EXPR works on all compilers to silence the unused variable warning.
+#define DAWN_UNUSED(EXPR) (void)EXPR
+// Likewise using static asserting on sizeof(&FUNC) seems to make it tagged as used
+#define DAWN_UNUSED_FUNC(FUNC) static_assert(sizeof(&FUNC) == sizeof(void (*)()))
+
+// Add noop replacements for macros for features that aren't supported by the compiler.
+#if !defined(DAWN_LIKELY)
+#    define DAWN_LIKELY(X) X
+#endif
+#if !defined(DAWN_UNLIKELY)
+#    define DAWN_UNLIKELY(X) X
+#endif
+#if !defined(DAWN_FORCE_INLINE)
+#    define DAWN_FORCE_INLINE inline
+#endif
+#if !defined(DAWN_NOINLINE)
+#    define DAWN_NOINLINE
+#endif
+
+#endif  // COMMON_COMPILER_H_
diff --git a/src/dawn/common/ConcurrentCache.h b/src/dawn/common/ConcurrentCache.h
new file mode 100644
index 0000000..e11b646
--- /dev/null
+++ b/src/dawn/common/ConcurrentCache.h
@@ -0,0 +1,54 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_CONCURRENT_CACHE_H_
+#define COMMON_CONCURRENT_CACHE_H_
+
+#include "dawn/common/NonCopyable.h"
+
+#include <mutex>
+#include <unordered_set>
+#include <utility>
+
+template <typename T>
+class ConcurrentCache : public NonMovable {
+  public:
+    ConcurrentCache() = default;
+
+    T* Find(T* object) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        auto iter = mCache.find(object);
+        if (iter == mCache.end()) {
+            return nullptr;
+        }
+        return *iter;
+    }
+
+    std::pair<T*, bool> Insert(T* object) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        auto [value, inserted] = mCache.insert(object);
+        return {*value, inserted};
+    }
+
+    size_t Erase(T* object) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        return mCache.erase(object);
+    }
+
+  private:
+    std::mutex mMutex;
+    std::unordered_set<T*, typename T::HashFunc, typename T::EqualityFunc> mCache;
+};
+
+#endif
diff --git a/src/dawn/common/Constants.h b/src/dawn/common/Constants.h
new file mode 100644
index 0000000..13b5995
--- /dev/null
+++ b/src/dawn/common/Constants.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_CONSTANTS_H_
+#define COMMON_CONSTANTS_H_
+
+#include <cstdint>
+
+static constexpr uint32_t kMaxBindGroups = 4u;
+static constexpr uint8_t kMaxVertexAttributes = 16u;
+static constexpr uint8_t kMaxVertexBuffers = 8u;
+static constexpr uint32_t kMaxVertexBufferArrayStride = 2048u;
+static constexpr uint32_t kNumStages = 3;
+static constexpr uint8_t kMaxColorAttachments = 8u;
+static constexpr uint32_t kTextureBytesPerRowAlignment = 256u;
+static constexpr uint32_t kMaxInterStageShaderComponents = 60u;
+static constexpr uint32_t kMaxInterStageShaderVariables = kMaxInterStageShaderComponents / 4;
+
+// Per stage limits
+static constexpr uint32_t kMaxSampledTexturesPerShaderStage = 16;
+static constexpr uint32_t kMaxSamplersPerShaderStage = 16;
+static constexpr uint32_t kMaxStorageBuffersPerShaderStage = 8;
+static constexpr uint32_t kMaxStorageTexturesPerShaderStage = 4;
+static constexpr uint32_t kMaxUniformBuffersPerShaderStage = 12;
+
+// Per pipeline layout limits
+static constexpr uint32_t kMaxDynamicUniformBuffersPerPipelineLayout = 8u;
+static constexpr uint32_t kMaxDynamicStorageBuffersPerPipelineLayout = 4u;
+
+// Indirect command sizes
+static constexpr uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);
+static constexpr uint64_t kDrawIndirectSize = 4 * sizeof(uint32_t);
+static constexpr uint64_t kDrawIndexedIndirectSize = 5 * sizeof(uint32_t);
+
+// Non spec defined constants.
+static constexpr float kLodMin = 0.0;
+static constexpr float kLodMax = 1000.0;
+
+// Offset alignment for CopyB2B. Strictly speaking this alignment is required only
+// on macOS, but we decide to do it on all platforms.
+static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
+
+// The maximum size of visibilityResultBuffer is 256KB on Metal, to fit the restriction, limit the
+// maximum size of query set to 64KB. The size of a query is 8-bytes, the maximum query count is 64
+// * 1024 / 8.
+static constexpr uint32_t kMaxQueryCount = 8192u;
+
+// An external texture occupies multiple binding slots. These are the per-external-texture bindings
+// needed.
+static constexpr uint8_t kSampledTexturesPerExternalTexture = 4u;
+static constexpr uint8_t kSamplersPerExternalTexture = 1u;
+static constexpr uint8_t kUniformsPerExternalTexture = 1u;
+
+// A spec defined constant but that doesn't have a name.
+static constexpr uint32_t kMaxBindingNumber = 65535;
+
+#endif  // COMMON_CONSTANTS_H_
diff --git a/src/dawn/common/CoreFoundationRef.h b/src/dawn/common/CoreFoundationRef.h
new file mode 100644
index 0000000..e6cafbe
--- /dev/null
+++ b/src/dawn/common/CoreFoundationRef.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_COREFOUNDATIONREF_H_
+#define COMMON_COREFOUNDATIONREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+template <typename T>
+struct CoreFoundationRefTraits {
+    static constexpr T kNullValue = nullptr;
+    static void Reference(T value) {
+        CFRetain(value);
+    }
+    static void Release(T value) {
+        CFRelease(value);
+    }
+};
+
+template <typename T>
+class CFRef : public RefBase<T, CoreFoundationRefTraits<T>> {
+  public:
+    using RefBase<T, CoreFoundationRefTraits<T>>::RefBase;
+};
+
+template <typename T>
+CFRef<T> AcquireCFRef(T pointee) {
+    CFRef<T> ref;
+    ref.Acquire(pointee);
+    return ref;
+}
+
+#endif  // COMMON_COREFOUNDATIONREF_H_
diff --git a/src/dawn/common/DynamicLib.cpp b/src/dawn/common/DynamicLib.cpp
new file mode 100644
index 0000000..ab4f2d7
--- /dev/null
+++ b/src/dawn/common/DynamicLib.cpp
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/DynamicLib.h"
+
+#include "dawn/common/Platform.h"
+
+#if DAWN_PLATFORM_WINDOWS
+#    include "dawn/common/windows_with_undefs.h"
+#    if DAWN_PLATFORM_WINUWP
+#        include "dawn/common/WindowsUtils.h"
+#    endif
+#elif DAWN_PLATFORM_POSIX
+#    include <dlfcn.h>
+#else
+#    error "Unsupported platform for DynamicLib"
+#endif
+
+DynamicLib::~DynamicLib() {
+    Close();
+}
+
+DynamicLib::DynamicLib(DynamicLib&& other) {
+    std::swap(mHandle, other.mHandle);
+}
+
+DynamicLib& DynamicLib::operator=(DynamicLib&& other) {
+    std::swap(mHandle, other.mHandle);
+    return *this;
+}
+
+bool DynamicLib::Valid() const {
+    return mHandle != nullptr;
+}
+
+bool DynamicLib::Open(const std::string& filename, std::string* error) {
+#if DAWN_PLATFORM_WINDOWS
+#    if DAWN_PLATFORM_WINUWP
+    mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
+#    else
+    mHandle = LoadLibraryA(filename.c_str());
+#    endif
+    if (mHandle == nullptr && error != nullptr) {
+        *error = "Windows Error: " + std::to_string(GetLastError());
+    }
+#elif DAWN_PLATFORM_POSIX
+    mHandle = dlopen(filename.c_str(), RTLD_NOW);
+
+    if (mHandle == nullptr && error != nullptr) {
+        *error = dlerror();
+    }
+#else
+#    error "Unsupported platform for DynamicLib"
+#endif
+
+    return mHandle != nullptr;
+}
+
+void DynamicLib::Close() {
+    if (mHandle == nullptr) {
+        return;
+    }
+
+#if DAWN_PLATFORM_WINDOWS
+    FreeLibrary(static_cast<HMODULE>(mHandle));
+#elif DAWN_PLATFORM_POSIX
+    dlclose(mHandle);
+#else
+#    error "Unsupported platform for DynamicLib"
+#endif
+
+    mHandle = nullptr;
+}
+
+void* DynamicLib::GetProc(const std::string& procName, std::string* error) const {
+    void* proc = nullptr;
+
+#if DAWN_PLATFORM_WINDOWS
+    proc = reinterpret_cast<void*>(GetProcAddress(static_cast<HMODULE>(mHandle), procName.c_str()));
+
+    if (proc == nullptr && error != nullptr) {
+        *error = "Windows Error: " + std::to_string(GetLastError());
+    }
+#elif DAWN_PLATFORM_POSIX
+    proc = reinterpret_cast<void*>(dlsym(mHandle, procName.c_str()));
+
+    if (proc == nullptr && error != nullptr) {
+        *error = dlerror();
+    }
+#else
+#    error "Unsupported platform for DynamicLib"
+#endif
+
+    return proc;
+}
diff --git a/src/dawn/common/DynamicLib.h b/src/dawn/common/DynamicLib.h
new file mode 100644
index 0000000..66d846e
--- /dev/null
+++ b/src/dawn/common/DynamicLib.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_DYNAMICLIB_H_
+#define COMMON_DYNAMICLIB_H_
+
+#include "dawn/common/Assert.h"
+
+#include <string>
+#include <type_traits>
+
+class DynamicLib {
+  public:
+    DynamicLib() = default;
+    ~DynamicLib();
+
+    DynamicLib(const DynamicLib&) = delete;
+    DynamicLib& operator=(const DynamicLib&) = delete;
+
+    DynamicLib(DynamicLib&& other);
+    DynamicLib& operator=(DynamicLib&& other);
+
+    bool Valid() const;
+
+    bool Open(const std::string& filename, std::string* error = nullptr);
+    void Close();
+
+    void* GetProc(const std::string& procName, std::string* error = nullptr) const;
+
+    template <typename T>
+    bool GetProc(T** proc, const std::string& procName, std::string* error = nullptr) const {
+        ASSERT(proc != nullptr);
+        static_assert(std::is_function<T>::value);
+
+        *proc = reinterpret_cast<T*>(GetProc(procName, error));
+        return *proc != nullptr;
+    }
+
+  private:
+    void* mHandle = nullptr;
+};
+
+#endif  // COMMON_DYNAMICLIB_H_
diff --git a/src/dawn/common/GPUInfo.cpp b/src/dawn/common/GPUInfo.cpp
new file mode 100644
index 0000000..ddd8459
--- /dev/null
+++ b/src/dawn/common/GPUInfo.cpp
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/GPUInfo.h"
+
+#include "dawn/common/Assert.h"
+
+#include <algorithm>
+#include <array>
+
+namespace gpu_info {
+    namespace {
+        // Intel
+        // Referenced from the following Mesa source code:
+        // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+        // gen9
+        const std::array<uint32_t, 25> Skylake = {
+            {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
+             0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
+             0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
+        // gen9p5
+        const std::array<uint32_t, 20> Kabylake = {
+            {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
+             0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+        const std::array<uint32_t, 17> Coffeelake = {
+            {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
+             0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+        const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+        const std::array<uint32_t, 21> Cometlake = {
+            {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
+             0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+
+        // According to Intel graphics driver version schema, build number is generated from the
+        // last two fields.
+        // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+        // more details.
+        uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+            return driverVersion[2] * 10000 + driverVersion[3];
+        }
+
+    }  // anonymous namespace
+
+    bool IsAMD(PCIVendorID vendorId) {
+        return vendorId == kVendorID_AMD;
+    }
+    bool IsARM(PCIVendorID vendorId) {
+        return vendorId == kVendorID_ARM;
+    }
+    bool IsImgTec(PCIVendorID vendorId) {
+        return vendorId == kVendorID_ImgTec;
+    }
+    bool IsIntel(PCIVendorID vendorId) {
+        return vendorId == kVendorID_Intel;
+    }
+    bool IsMesa(PCIVendorID vendorId) {
+        return vendorId == kVendorID_Mesa;
+    }
+    bool IsNvidia(PCIVendorID vendorId) {
+        return vendorId == kVendorID_Nvidia;
+    }
+    bool IsQualcomm(PCIVendorID vendorId) {
+        return vendorId == kVendorID_Qualcomm;
+    }
+    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
+        return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
+    }
+    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+        return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+    }
+
+    int CompareD3DDriverVersion(PCIVendorID vendorId,
+                                const D3DDriverVersion& version1,
+                                const D3DDriverVersion& version2) {
+        if (IsIntel(vendorId)) {
+            uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
+            uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
+            return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
+        }
+
+        // TODO(crbug.com/dawn/823): support other GPU vendors
+        UNREACHABLE();
+        return 0;
+    }
+
+    // Intel GPUs
+    bool IsSkylake(PCIDeviceID deviceId) {
+        return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+    }
+    bool IsKabylake(PCIDeviceID deviceId) {
+        return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
+    }
+    bool IsCoffeelake(PCIDeviceID deviceId) {
+        return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
+               (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
+               (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
+    }
+}  // namespace gpu_info
diff --git a/src/dawn/common/GPUInfo.h b/src/dawn/common/GPUInfo.h
new file mode 100644
index 0000000..26c9103
--- /dev/null
+++ b/src/dawn/common/GPUInfo.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_GPUINFO_H
+#define COMMON_GPUINFO_H
+
+#include <array>
+#include <cstdint>
+
+using PCIVendorID = uint32_t;
+using PCIDeviceID = uint32_t;
+
+namespace gpu_info {
+
+    static constexpr PCIVendorID kVendorID_AMD = 0x1002;
+    static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
+    static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
+    static constexpr PCIVendorID kVendorID_Intel = 0x8086;
+    static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
+    static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
+    static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
+    static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
+    static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
+
+    static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
+    static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
+
+    bool IsAMD(PCIVendorID vendorId);
+    bool IsARM(PCIVendorID vendorId);
+    bool IsImgTec(PCIVendorID vendorId);
+    bool IsIntel(PCIVendorID vendorId);
+    bool IsMesa(PCIVendorID vendorId);
+    bool IsNvidia(PCIVendorID vendorId);
+    bool IsQualcomm(PCIVendorID vendorId);
+    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
+    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+
+    using D3DDriverVersion = std::array<uint16_t, 4>;
+
+    // Do comparison between two driver versions. Currently we only support the comparison between
+    // Intel D3D driver versions.
+    // - Return -1 if build number of version1 is smaller
+    // - Return 1 if build number of version1 is bigger
+    // - Return 0 if version1 and version2 represent same driver version
+    int CompareD3DDriverVersion(PCIVendorID vendorId,
+                                const D3DDriverVersion& version1,
+                                const D3DDriverVersion& version2);
+
+    // Intel architectures
+    bool IsSkylake(PCIDeviceID deviceId);
+    bool IsKabylake(PCIDeviceID deviceId);
+    bool IsCoffeelake(PCIDeviceID deviceId);
+
+}  // namespace gpu_info
+#endif  // COMMON_GPUINFO_H
diff --git a/src/dawn/common/HashUtils.h b/src/dawn/common/HashUtils.h
new file mode 100644
index 0000000..e59e8c5
--- /dev/null
+++ b/src/dawn/common/HashUtils.h
@@ -0,0 +1,101 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_HASHUTILS_H_
+#define COMMON_HASHUTILS_H_
+
+#include "dawn/common/Platform.h"
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_bitset.h"
+
+#include <bitset>
+#include <functional>
+
+// Wrapper around std::hash to make it a templated function instead of a functor. It is marginally
+// nicer, and avoids adding to the std namespace to add hashing of other types.
+template <typename T>
+size_t Hash(const T& value) {
+    return std::hash<T>()(value);
+}
+
+// Add hashing of TypedIntegers
+template <typename Tag, typename T>
+size_t Hash(const TypedInteger<Tag, T>& value) {
+    return Hash(static_cast<T>(value));
+}
+
+// When hashing sparse structures we want to iteratively build a hash value with only parts of the
+// data. HashCombine "hashes" together an existing hash and hashable values.
+//
+// Example usage to compute the hash of a mask and values corresponding to the mask:
+//
+//    size_t hash = Hash(mask):
+//    for (uint32_t i : IterateBitSet(mask)) { HashCombine(&hash, hashables[i]); }
+//    return hash;
+template <typename T>
+void HashCombine(size_t* hash, const T& value) {
+#if defined(DAWN_PLATFORM_64_BIT)
+    const size_t offset = 0x9e3779b97f4a7c16;
+#elif defined(DAWN_PLATFORM_32_BIT)
+    const size_t offset = 0x9e3779b9;
+#else
+#    error "Unsupported platform"
+#endif
+    *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
+}
+
+template <typename T, typename... Args>
+void HashCombine(size_t* hash, const T& value, const Args&... args) {
+    HashCombine(hash, value);
+    HashCombine(hash, args...);
+}
+
+// Workaround a bug between clang++ and libstdlibc++ by defining our own hashing for bitsets.
+// When _GLIBCXX_DEBUG is enabled libstdc++ wraps containers into debug containers. For bitset this
+// means what is normally std::bitset is defined as std::__cxx1988::bitset and is replaced by the
+// debug version of bitset.
+// When hashing, std::hash<std::bitset> proxies the call to std::hash<std::__cxx1998::bitset> and
+// fails on clang because the latter tries to access the private _M_getdata member of the bitset.
+// It looks like it should work because the non-debug bitset declares
+//
+//     friend struct std::hash<bitset> // bitset is the name of the class itself
+//
+// which should friend std::hash<std::__cxx1998::bitset> but somehow doesn't work on clang.
+#if defined(_GLIBCXX_DEBUG)
+template <size_t N>
+size_t Hash(const std::bitset<N>& value) {
+    constexpr size_t kWindowSize = sizeof(unsigned long long);
+
+    std::bitset<N> bits = value;
+    size_t hash = 0;
+    for (size_t processedBits = 0; processedBits < N; processedBits += kWindowSize) {
+        HashCombine(&hash, bits.to_ullong());
+        bits >>= kWindowSize;
+    }
+
+    return hash;
+}
+#endif
+
+namespace std {
+    template <typename Index, size_t N>
+    struct hash<ityp::bitset<Index, N>> {
+      public:
+        size_t operator()(const ityp::bitset<Index, N>& value) const {
+            return Hash(static_cast<const std::bitset<N>&>(value));
+        }
+    };
+}  // namespace std
+
+#endif  // COMMON_HASHUTILS_H_
diff --git a/src/dawn/common/IOKitRef.h b/src/dawn/common/IOKitRef.h
new file mode 100644
index 0000000..4ff4413
--- /dev/null
+++ b/src/dawn/common/IOKitRef.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_IOKITREF_H_
+#define COMMON_IOKITREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <IOKit/IOKitLib.h>
+
+template <typename T>
+struct IOKitRefTraits {
+    static constexpr T kNullValue = IO_OBJECT_NULL;
+    static void Reference(T value) {
+        IOObjectRetain(value);
+    }
+    static void Release(T value) {
+        IOObjectRelease(value);
+    }
+};
+
+template <typename T>
+class IORef : public RefBase<T, IOKitRefTraits<T>> {
+  public:
+    using RefBase<T, IOKitRefTraits<T>>::RefBase;
+};
+
+template <typename T>
+IORef<T> AcquireIORef(T pointee) {
+    IORef<T> ref;
+    ref.Acquire(pointee);
+    return ref;
+}
+
+#endif  // COMMON_IOKITREF_H_
diff --git a/src/dawn/common/LinkedList.h b/src/dawn/common/LinkedList.h
new file mode 100644
index 0000000..673f596
--- /dev/null
+++ b/src/dawn/common/LinkedList.h
@@ -0,0 +1,274 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a copy of Chromium's /src/base/containers/linked_list.h with the following
+// modifications:
+//   - Added iterators for ranged based iterations
+//   - Added in list check before removing node to prevent segfault, now returns true iff removed
+//   - Added MoveInto functionality for moving list elements to another list
+
+#ifndef COMMON_LINKED_LIST_H
+#define COMMON_LINKED_LIST_H
+
+#include "dawn/common/Assert.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+//   class MyNodeType : public LinkNode<MyNodeType> {
+//     ...
+//   };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+//   LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+//   LinkNode<MyNodeType>* n1 = ...;
+//   LinkNode<MyNodeType>* n2 = ...;
+//   LinkNode<MyNodeType>* n3 = ...;
+//
+//   list.Append(n1);
+//   list.Append(n3);
+//   n3->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.head();
+//        node != list.end();
+//        node = node->next()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+//   for (LinkNode<MyNodeType*> node : list) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Or to iterate the linked list backwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.tail();
+//        node != list.end();
+//        node = node->previous()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+//    performance. If you don't care about the performance differences
+//    then use an STL container, as it makes for better code readability.
+//
+//    Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+//    * Erasing an element of type T* from base::LinkedList<T> is
+//      an O(1) operation. Whereas for std::list<T*> it is O(n).
+//      That is because with std::list<T*> you must obtain an
+//      iterator to the T* element before you can call erase(iterator).
+//
+//    * Insertion operations with base::LinkedList<T> never require
+//      heap allocations.
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+//    "previous" pointers that reference other nodes in the list.
+//
+//    With base::LinkedList<T>, the type being inserted already reserves
+//    space for the "next" and "previous" pointers (base::LinkNode<T>*).
+//    Whereas with std::list<T> the type can be anything, so the implementation
+//    needs to glue on the "next" and "previous" pointers using
+//    some internal node type.
+
+// Forward declarations of the types in order for recursive referencing and friending.
+template <typename T>
+class LinkNode;
+template <typename T>
+class LinkedList;
+
+template <typename T>
+class LinkNode {
+  public:
+    LinkNode() : previous_(nullptr), next_(nullptr) {
+    }
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
+    }
+
+    LinkNode(LinkNode<T>&& rhs) {
+        next_ = rhs.next_;
+        rhs.next_ = nullptr;
+        previous_ = rhs.previous_;
+        rhs.previous_ = nullptr;
+
+        // If the node belongs to a list, next_ and previous_ are both non-null.
+        // Otherwise, they are both null.
+        if (next_) {
+            next_->previous_ = this;
+            previous_->next_ = this;
+        }
+    }
+
+    // Insert |this| into the linked list, before |e|.
+    void InsertBefore(LinkNode<T>* e) {
+        this->next_ = e;
+        this->previous_ = e->previous_;
+        e->previous_->next_ = this;
+        e->previous_ = this;
+    }
+
+    // Insert |this| into the linked list, after |e|.
+    void InsertAfter(LinkNode<T>* e) {
+        this->next_ = e->next_;
+        this->previous_ = e;
+        e->next_->previous_ = this;
+        e->next_ = this;
+    }
+
+    // Check if |this| is in a list.
+    bool IsInList() const {
+        ASSERT((this->previous_ == nullptr) == (this->next_ == nullptr));
+        return this->next_ != nullptr;
+    }
+
+    // Remove |this| from the linked list. Returns true iff removed from a list.
+    bool RemoveFromList() {
+        if (!IsInList()) {
+            return false;
+        }
+
+        this->previous_->next_ = this->next_;
+        this->next_->previous_ = this->previous_;
+        // next() and previous() return non-null if and only this node is not in any list.
+        this->next_ = nullptr;
+        this->previous_ = nullptr;
+        return true;
+    }
+
+    LinkNode<T>* previous() const {
+        return previous_;
+    }
+
+    LinkNode<T>* next() const {
+        return next_;
+    }
+
+    // Cast from the node-type to the value type.
+    const T* value() const {
+        return static_cast<const T*>(this);
+    }
+
+    T* value() {
+        return static_cast<T*>(this);
+    }
+
+  private:
+    friend class LinkedList<T>;
+    LinkNode<T>* previous_;
+    LinkNode<T>* next_;
+};
+
+template <typename T>
+class LinkedList {
+  public:
+    // The "root" node is self-referential, and forms the basis of a circular
+    // list (root_.next() will point back to the start of the list,
+    // and root_->previous() wraps around to the end of the list).
+    LinkedList() : root_(&root_, &root_) {
+    }
+
+    ~LinkedList() {
+        // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
+        // root_ even after it has been freed. We should remove root_ from the list to prevent any
+        // future access.
+        root_.RemoveFromList();
+    }
+
+    // Appends |e| to the end of the linked list.
+    void Append(LinkNode<T>* e) {
+        e->InsertBefore(&root_);
+    }
+
+    // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
+    void MoveInto(LinkedList<T>* l) {
+        if (empty()) {
+            return;
+        }
+        l->root_.previous_->next_ = root_.next_;
+        root_.next_->previous_ = l->root_.previous_;
+        l->root_.previous_ = root_.previous_;
+        root_.previous_->next_ = &l->root_;
+
+        root_.next_ = &root_;
+        root_.previous_ = &root_;
+    }
+
+    LinkNode<T>* head() const {
+        return root_.next();
+    }
+
+    LinkNode<T>* tail() const {
+        return root_.previous();
+    }
+
+    const LinkNode<T>* end() const {
+        return &root_;
+    }
+
+    bool empty() const {
+        return head() == end();
+    }
+
+  private:
+    LinkNode<T> root_;
+};
+
+template <typename T>
+class LinkedListIterator {
+  public:
+    LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
+    }
+
+    // We keep an early reference to the next node in the list so that even if the current element
+    // is modified or removed from the list, we have a valid next node.
+    LinkedListIterator<T> const& operator++() {
+        current_ = next_;
+        next_ = current_->next();
+        return *this;
+    }
+
+    bool operator!=(const LinkedListIterator<T>& other) const {
+        return current_ != other.current_;
+    }
+
+    LinkNode<T>* operator*() const {
+        return current_;
+    }
+
+  private:
+    LinkNode<T>* current_;
+    LinkNode<T>* next_;
+};
+
+template <typename T>
+LinkedListIterator<T> begin(LinkedList<T>& l) {
+    return LinkedListIterator<T>(l.head());
+}
+
+// Free end function does't use LinkedList<T>::end because of it's const nature. Instead we wrap
+// around from tail.
+template <typename T>
+LinkedListIterator<T> end(LinkedList<T>& l) {
+    return LinkedListIterator<T>(l.tail()->next());
+}
+
+#endif  // COMMON_LINKED_LIST_H
diff --git a/src/dawn/common/Log.cpp b/src/dawn/common/Log.cpp
new file mode 100644
index 0000000..b85094b
--- /dev/null
+++ b/src/dawn/common/Log.cpp
@@ -0,0 +1,116 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Log.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+
+#include <cstdio>
+
+#if defined(DAWN_PLATFORM_ANDROID)
+#    include <android/log.h>
+#endif
+
+namespace dawn {
+
+    namespace {
+
+        const char* SeverityName(LogSeverity severity) {
+            switch (severity) {
+                case LogSeverity::Debug:
+                    return "Debug";
+                case LogSeverity::Info:
+                    return "Info";
+                case LogSeverity::Warning:
+                    return "Warning";
+                case LogSeverity::Error:
+                    return "Error";
+                default:
+                    UNREACHABLE();
+                    return "";
+            }
+        }
+
+#if defined(DAWN_PLATFORM_ANDROID)
+        android_LogPriority AndroidLogPriority(LogSeverity severity) {
+            switch (severity) {
+                case LogSeverity::Debug:
+                    return ANDROID_LOG_INFO;
+                case LogSeverity::Info:
+                    return ANDROID_LOG_INFO;
+                case LogSeverity::Warning:
+                    return ANDROID_LOG_WARN;
+                case LogSeverity::Error:
+                    return ANDROID_LOG_ERROR;
+                default:
+                    UNREACHABLE();
+                    return ANDROID_LOG_ERROR;
+            }
+        }
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+    }  // anonymous namespace
+
+    LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+    }
+
+    LogMessage::~LogMessage() {
+        std::string fullMessage = mStream.str();
+
+        // If this message has been moved, its stream is empty.
+        if (fullMessage.empty()) {
+            return;
+        }
+
+        const char* severityName = SeverityName(mSeverity);
+
+#if defined(DAWN_PLATFORM_ANDROID)
+        android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+        __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+#else   // defined(DAWN_PLATFORM_ANDROID)
+        FILE* outputStream = stdout;
+        if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+            outputStream = stderr;
+        }
+
+        // Note: we use fprintf because <iostream> includes static initializers.
+        fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+        fflush(outputStream);
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+    }
+
+    LogMessage DebugLog() {
+        return {LogSeverity::Debug};
+    }
+
+    LogMessage InfoLog() {
+        return {LogSeverity::Info};
+    }
+
+    LogMessage WarningLog() {
+        return {LogSeverity::Warning};
+    }
+
+    LogMessage ErrorLog() {
+        return {LogSeverity::Error};
+    }
+
+    LogMessage DebugLog(const char* file, const char* function, int line) {
+        LogMessage message = DebugLog();
+        message << file << ":" << line << "(" << function << ")";
+        return message;
+    }
+
+}  // namespace dawn
diff --git a/src/dawn/common/Log.h b/src/dawn/common/Log.h
new file mode 100644
index 0000000..0504af6
--- /dev/null
+++ b/src/dawn/common/Log.h
@@ -0,0 +1,95 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_LOG_H_
+#define COMMON_LOG_H_
+
+// Dawn targets shouldn't use iostream or printf directly for several reasons:
+//  - iostream adds static initializers which we want to avoid.
+//  - printf and iostream don't show up in logcat on Android so printf debugging doesn't work but
+//  log-message debugging does.
+//  - log severity helps provide intent compared to a printf.
+//
+// Logging should in general be avoided: errors should go through the regular WebGPU error reporting
+// mechanism and others form of logging should (TODO: eventually) go through the logging dependency
+// injection, so for example they show up in Chromium's about:gpu page. Nonetheless there are some
+// cases where logging is necessary and when this file was first introduced we needed to replace all
+// uses of iostream so we could see them in Android's logcat.
+//
+// Regular logging is done using the [Debug|Info|Warning|Error]Log() function this way:
+//
+//   InfoLog() << things << that << ostringstream << supports; // No need for a std::endl or "\n"
+//
+// It creates a LogMessage object that isn't stored anywhere and gets its destructor called
+// immediately which outputs the stored ostringstream in the right place.
+//
+// This file also contains DAWN_DEBUG for "printf debugging" which works on Android and
+// additionally outputs the file, line and function name. Use it this way:
+//
+//   // Pepper this throughout code to get a log of the execution
+//   DAWN_DEBUG();
+//
+//   // Get more information
+//   DAWN_DEBUG() << texture.GetFormat();
+
+#include <sstream>
+
+namespace dawn {
+
+    // Log levels mostly used to signal intent where the log message is produced and used to route
+    // the message to the correct output.
+    enum class LogSeverity {
+        Debug,
+        Info,
+        Warning,
+        Error,
+    };
+
+    // Essentially an ostringstream that will print itself in its destructor.
+    class LogMessage {
+      public:
+        LogMessage(LogSeverity severity);
+        ~LogMessage();
+
+        LogMessage(LogMessage&& other) = default;
+        LogMessage& operator=(LogMessage&& other) = default;
+
+        template <typename T>
+        LogMessage& operator<<(T&& value) {
+            mStream << value;
+            return *this;
+        }
+
+      private:
+        LogMessage(const LogMessage& other) = delete;
+        LogMessage& operator=(const LogMessage& other) = delete;
+
+        LogSeverity mSeverity;
+        std::ostringstream mStream;
+    };
+
+    // Short-hands to create a LogMessage with the respective severity.
+    LogMessage DebugLog();
+    LogMessage InfoLog();
+    LogMessage WarningLog();
+    LogMessage ErrorLog();
+
+    // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+    // information
+    LogMessage DebugLog(const char* file, const char* function, int line);
+#define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
+
+}  // namespace dawn
+
+#endif  // COMMON_LOG_H_
diff --git a/src/dawn/common/Math.cpp b/src/dawn/common/Math.cpp
new file mode 100644
index 0000000..bd936a8
--- /dev/null
+++ b/src/dawn/common/Math.cpp
@@ -0,0 +1,160 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Math.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#if defined(DAWN_COMPILER_MSVC)
+#    include <intrin.h>
+#endif
+
+uint32_t ScanForward(uint32_t bits) {
+    ASSERT(bits != 0);
+#if defined(DAWN_COMPILER_MSVC)
+    unsigned long firstBitIndex = 0ul;
+    unsigned char ret = _BitScanForward(&firstBitIndex, bits);
+    ASSERT(ret != 0);
+    return firstBitIndex;
+#else
+    return static_cast<uint32_t>(__builtin_ctz(bits));
+#endif
+}
+
+uint32_t Log2(uint32_t value) {
+    ASSERT(value != 0);
+#if defined(DAWN_COMPILER_MSVC)
+    unsigned long firstBitIndex = 0ul;
+    unsigned char ret = _BitScanReverse(&firstBitIndex, value);
+    ASSERT(ret != 0);
+    return firstBitIndex;
+#else
+    return 31 - static_cast<uint32_t>(__builtin_clz(value));
+#endif
+}
+
+uint32_t Log2(uint64_t value) {
+    ASSERT(value != 0);
+#if defined(DAWN_COMPILER_MSVC)
+#    if defined(DAWN_PLATFORM_64_BIT)
+    unsigned long firstBitIndex = 0ul;
+    unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
+    ASSERT(ret != 0);
+    return firstBitIndex;
+#    else   // defined(DAWN_PLATFORM_64_BIT)
+    unsigned long firstBitIndex = 0ul;
+    if (_BitScanReverse(&firstBitIndex, value >> 32)) {
+        return firstBitIndex + 32;
+    }
+    unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
+    ASSERT(ret != 0);
+    return firstBitIndex;
+#    endif  // defined(DAWN_PLATFORM_64_BIT)
+#else       // defined(DAWN_COMPILER_MSVC)
+    return 63 - static_cast<uint32_t>(__builtin_clzll(value));
+#endif      // defined(DAWN_COMPILER_MSVC)
+}
+
+uint64_t NextPowerOfTwo(uint64_t n) {
+    if (n <= 1) {
+        return 1;
+    }
+
+    return 1ull << (Log2(n - 1) + 1);
+}
+
+bool IsPowerOfTwo(uint64_t n) {
+    ASSERT(n != 0);
+    return (n & (n - 1)) == 0;
+}
+
+bool IsPtrAligned(const void* ptr, size_t alignment) {
+    ASSERT(IsPowerOfTwo(alignment));
+    ASSERT(alignment != 0);
+    return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
+}
+
+bool IsAligned(uint32_t value, size_t alignment) {
+    ASSERT(alignment <= UINT32_MAX);
+    ASSERT(IsPowerOfTwo(alignment));
+    ASSERT(alignment != 0);
+    uint32_t alignment32 = static_cast<uint32_t>(alignment);
+    return (value & (alignment32 - 1)) == 0;
+}
+
+uint16_t Float32ToFloat16(float fp32) {
+    uint32_t fp32i = BitCast<uint32_t>(fp32);
+    uint32_t sign16 = (fp32i & 0x80000000) >> 16;
+    uint32_t mantissaAndExponent = fp32i & 0x7FFFFFFF;
+
+    if (mantissaAndExponent > 0x7F800000) {  // NaN
+        return 0x7FFF;
+    } else if (mantissaAndExponent > 0x47FFEFFF) {  // Infinity
+        return static_cast<uint16_t>(sign16 | 0x7C00);
+    } else if (mantissaAndExponent < 0x38800000) {  // Denormal
+        uint32_t mantissa = (mantissaAndExponent & 0x007FFFFF) | 0x00800000;
+        int32_t exponent = 113 - (mantissaAndExponent >> 23);
+
+        if (exponent < 24) {
+            mantissaAndExponent = mantissa >> exponent;
+        } else {
+            mantissaAndExponent = 0;
+        }
+
+        return static_cast<uint16_t>(
+            sign16 | (mantissaAndExponent + 0x00000FFF + ((mantissaAndExponent >> 13) & 1)) >> 13);
+    } else {
+        return static_cast<uint16_t>(sign16 | (mantissaAndExponent + 0xC8000000 + 0x00000FFF +
+                                               ((mantissaAndExponent >> 13) & 1)) >>
+                                                  13);
+    }
+}
+
+float Float16ToFloat32(uint16_t fp16) {
+    uint32_t tmp = (fp16 & 0x7fff) << 13 | (fp16 & 0x8000) << 16;
+    float tmp2 = *reinterpret_cast<float*>(&tmp);
+    return pow(2, 127 - 15) * tmp2;
+}
+
+bool IsFloat16NaN(uint16_t fp16) {
+    return (fp16 & 0x7FFF) > 0x7C00;
+}
+
+// Based on the Khronos Data Format Specification 1.2 Section 13.3 sRGB transfer functions
+float SRGBToLinear(float srgb) {
+    // sRGB is always used in unsigned normalized formats so clamp to [0.0, 1.0]
+    if (srgb <= 0.0f) {
+        return 0.0f;
+    } else if (srgb > 1.0f) {
+        return 1.0f;
+    }
+
+    if (srgb < 0.04045f) {
+        return srgb / 12.92f;
+    } else {
+        return std::pow((srgb + 0.055f) / 1.055f, 2.4f);
+    }
+}
+
+uint64_t RoundUp(uint64_t n, uint64_t m) {
+    ASSERT(m > 0);
+    ASSERT(n > 0);
+    ASSERT(m <= std::numeric_limits<uint64_t>::max() - n);
+    return ((n + m - 1) / m) * m;
+}
diff --git a/src/dawn/common/Math.h b/src/dawn/common/Math.h
new file mode 100644
index 0000000..9ef02d0
--- /dev/null
+++ b/src/dawn/common/Math.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_MATH_H_
+#define COMMON_MATH_H_
+
+#include "dawn/common/Assert.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include <limits>
+#include <type_traits>
+
+// The following are not valid for 0
+uint32_t ScanForward(uint32_t bits);
+uint32_t Log2(uint32_t value);
+uint32_t Log2(uint64_t value);
+bool IsPowerOfTwo(uint64_t n);
+uint64_t RoundUp(uint64_t n, uint64_t m);
+
+constexpr uint32_t ConstexprLog2(uint64_t v) {
+    return v <= 1 ? 0 : 1 + ConstexprLog2(v / 2);
+}
+
+constexpr uint32_t ConstexprLog2Ceil(uint64_t v) {
+    return v <= 1 ? 0 : ConstexprLog2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint32_t v) {
+    return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint64_t v) {
+    return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
+uint64_t NextPowerOfTwo(uint64_t n);
+bool IsPtrAligned(const void* ptr, size_t alignment);
+void* AlignVoidPtr(void* ptr, size_t alignment);
+bool IsAligned(uint32_t value, size_t alignment);
+
+template <typename T>
+T Align(T value, size_t alignment) {
+    ASSERT(value <= std::numeric_limits<T>::max() - (alignment - 1));
+    ASSERT(IsPowerOfTwo(alignment));
+    ASSERT(alignment != 0);
+    T alignmentT = static_cast<T>(alignment);
+    return (value + (alignmentT - 1)) & ~(alignmentT - 1);
+}
+
+template <typename T>
+DAWN_FORCE_INLINE T* AlignPtr(T* ptr, size_t alignment) {
+    ASSERT(IsPowerOfTwo(alignment));
+    ASSERT(alignment != 0);
+    return reinterpret_cast<T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+                                ~(alignment - 1));
+}
+
+template <typename T>
+DAWN_FORCE_INLINE const T* AlignPtr(const T* ptr, size_t alignment) {
+    ASSERT(IsPowerOfTwo(alignment));
+    ASSERT(alignment != 0);
+    return reinterpret_cast<const T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+                                      ~(alignment - 1));
+}
+
+template <typename destType, typename sourceType>
+destType BitCast(const sourceType& source) {
+    static_assert(sizeof(destType) == sizeof(sourceType), "BitCast: cannot lose precision.");
+    destType output;
+    std::memcpy(&output, &source, sizeof(destType));
+    return output;
+}
+
+uint16_t Float32ToFloat16(float fp32);
+float Float16ToFloat32(uint16_t fp16);
+bool IsFloat16NaN(uint16_t fp16);
+
+template <typename T>
+T FloatToUnorm(float value) {
+    return static_cast<T>(value * static_cast<float>(std::numeric_limits<T>::max()));
+}
+
+float SRGBToLinear(float srgb);
+
+template <typename T1,
+          typename T2,
+          typename Enable = typename std::enable_if<sizeof(T1) == sizeof(T2)>::type>
+constexpr bool IsSubset(T1 subset, T2 set) {
+    T2 bitsAlsoInSet = subset & set;
+    return bitsAlsoInSet == subset;
+}
+
+#endif  // COMMON_MATH_H_
diff --git a/src/dawn/common/NSRef.h b/src/dawn/common/NSRef.h
new file mode 100644
index 0000000..5bf4914
--- /dev/null
+++ b/src/dawn/common/NSRef.h
@@ -0,0 +1,123 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_NSREF_H_
+#define COMMON_NSREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#import <Foundation/NSObject.h>
+
+#if !defined(__OBJC__)
+#    error "NSRef can only be used in Objective C/C++ code."
+#endif
+
+// This file contains smart pointers that automatically reference and release Objective C objects
+// and prototocals in a manner very similar to Ref<>. Note that NSRef<> and NSPRef's constructor add
+// a reference to the object by default, so the pattern to get a reference for a newly created
+// NSObject is the following:
+//
+//    NSRef<NSFoo> foo = AcquireNSRef([NSFoo alloc]);
+//
+// NSRef overloads -> and * but these operators don't work extremely well with Objective C's
+// features. For example automatic dereferencing when doing the following doesn't work:
+//
+//    NSFoo* foo;
+//    foo.member = 1;
+//    someVar = foo.member;
+//
+// Instead use the message passing syntax:
+//
+//    NSRef<NSFoo> foo;
+//    [*foo setMember: 1];
+//    someVar = [*foo member];
+//
+// Also did you notive the extra '*' in the example above? That's because Objective C's message
+// passing doesn't automatically call a C++ operator to dereference smart pointers (like -> does) so
+// we have to dereference manually using '*'. In some cases the extra * or message passing syntax
+// can get a bit annoying so instead a local "naked" pointer can be borrowed from the NSRef. This
+// would change the syntax overload in the following:
+//
+//    NSRef<NSFoo> foo;
+//    [*foo setA:1];
+//    [*foo setB:2];
+//    [*foo setC:3];
+//
+// Into (note access to members of ObjC classes referenced via pointer is done with . and not ->):
+//
+//    NSRef<NSFoo> fooRef;
+//    NSFoo* foo = fooRef.Get();
+//    foo.a = 1;
+//    foo.b = 2;
+//    boo.c = 3;
+//
+// Which can be subjectively easier to read.
+
+template <typename T>
+struct NSRefTraits {
+    static constexpr T kNullValue = nullptr;
+    static void Reference(T value) {
+        [value retain];
+    }
+    static void Release(T value) {
+        [value release];
+    }
+};
+
+template <typename T>
+class NSRef : public RefBase<T*, NSRefTraits<T*>> {
+  public:
+    using RefBase<T*, NSRefTraits<T*>>::RefBase;
+
+    const T* operator*() const {
+        return this->Get();
+    }
+
+    T* operator*() {
+        return this->Get();
+    }
+};
+
+template <typename T>
+NSRef<T> AcquireNSRef(T* pointee) {
+    NSRef<T> ref;
+    ref.Acquire(pointee);
+    return ref;
+}
+
+// This is a RefBase<> for an Objective C protocol (hence the P). Objective C protocols must always
+// be referenced with id<ProtocolName> and not just ProtocolName* so they cannot use NSRef<>
+// itself. That's what the P in NSPRef stands for: Protocol.
+template <typename T>
+class NSPRef : public RefBase<T, NSRefTraits<T>> {
+  public:
+    using RefBase<T, NSRefTraits<T>>::RefBase;
+
+    const T operator*() const {
+        return this->Get();
+    }
+
+    T operator*() {
+        return this->Get();
+    }
+};
+
+template <typename T>
+NSPRef<T> AcquireNSPRef(T pointee) {
+    NSPRef<T> ref;
+    ref.Acquire(pointee);
+    return ref;
+}
+
+#endif  // COMMON_NSREF_H_
diff --git a/src/dawn/common/NonCopyable.h b/src/dawn/common/NonCopyable.h
new file mode 100644
index 0000000..2d217df
--- /dev/null
+++ b/src/dawn/common/NonCopyable.h
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_NONCOPYABLE_H_
+#define COMMON_NONCOPYABLE_H_
+
+// A base class to make a class non-copyable.
+class NonCopyable {
+  protected:
+    constexpr NonCopyable() = default;
+    ~NonCopyable() = default;
+
+    NonCopyable(NonCopyable&&) = default;
+    NonCopyable& operator=(NonCopyable&&) = default;
+
+  private:
+    NonCopyable(const NonCopyable&) = delete;
+    void operator=(const NonCopyable&) = delete;
+};
+
+// A base class to make a class non-movable.
+class NonMovable : NonCopyable {
+  protected:
+    constexpr NonMovable() = default;
+    ~NonMovable() = default;
+
+  private:
+    NonMovable(NonMovable&&) = delete;
+    void operator=(NonMovable&&) = delete;
+};
+
+#endif
diff --git a/src/dawn/common/PlacementAllocated.h b/src/dawn/common/PlacementAllocated.h
new file mode 100644
index 0000000..6c715ca
--- /dev/null
+++ b/src/dawn/common/PlacementAllocated.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_PLACEMENTALLOCATED_H_
+#define COMMON_PLACEMENTALLOCATED_H_
+
+#include <cstddef>
+
+class PlacementAllocated {
+  public:
+    // Delete the default new operator so this can only be created with placement new.
+    void* operator new(size_t) = delete;
+
+    void* operator new(size_t size, void* ptr) {
+        // Pass through the pointer of the allocation. This is essentially the default
+        // placement-new implementation, but we must define it if we delete the default
+        // new operator.
+        return ptr;
+    }
+
+    void operator delete(void* ptr) {
+        // Object is placement-allocated. Don't free the memory.
+    }
+
+    void operator delete(void*, void*) {
+        // This is added to match new(size_t size, void* ptr)
+        // Otherwise it triggers C4291 warning in MSVC
+    }
+};
+
+#endif  // COMMON_PLACEMENTALLOCATED_H_
diff --git a/src/dawn/common/Platform.h b/src/dawn/common/Platform.h
new file mode 100644
index 0000000..f947102
--- /dev/null
+++ b/src/dawn/common/Platform.h
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_PLATFORM_H_
+#define COMMON_PLATFORM_H_
+
+#if defined(_WIN32) || defined(_WIN64)
+#    include <winapifamily.h>
+#    define DAWN_PLATFORM_WINDOWS 1
+#    if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+#        define DAWN_PLATFORM_WIN32 1
+#    elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+#        define DAWN_PLATFORM_WINUWP 1
+#    else
+#        error "Unsupported Windows platform."
+#    endif
+
+#elif defined(__linux__)
+#    define DAWN_PLATFORM_LINUX 1
+#    define DAWN_PLATFORM_POSIX 1
+#    if defined(__ANDROID__)
+#        define DAWN_PLATFORM_ANDROID 1
+#    endif
+
+#elif defined(__APPLE__)
+#    define DAWN_PLATFORM_APPLE 1
+#    define DAWN_PLATFORM_POSIX 1
+#    include <TargetConditionals.h>
+#    if TARGET_OS_IPHONE
+#        define DAWN_PLATFORM_IOS
+#    elif TARGET_OS_MAC
+#        define DAWN_PLATFORM_MACOS
+#    else
+#        error "Unsupported Apple platform."
+#    endif
+
+#elif defined(__Fuchsia__)
+#    define DAWN_PLATFORM_FUCHSIA 1
+#    define DAWN_PLATFORM_POSIX 1
+
+#elif defined(__EMSCRIPTEN__)
+#    define DAWN_PLATFORM_EMSCRIPTEN 1
+#    define DAWN_PLATFORM_POSIX 1
+
+#else
+#    error "Unsupported platform."
+#endif
+
+// Distinguish mips32.
+#if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
+#    define __mips32__
+#endif
+
+// Distinguish mips64.
+#if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
+#    define __mips64__
+#endif
+
+#if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
+    defined(__s390x__) || defined(__PPC64__)
+#    define DAWN_PLATFORM_64_BIT 1
+static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
+#elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
+    defined(__s390__) || defined(__EMSCRIPTEN__)
+#    define DAWN_PLATFORM_32_BIT 1
+static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
+#else
+#    error "Unsupported platform"
+#endif
+
+#endif  // COMMON_PLATFORM_H_
diff --git a/src/dawn/common/Preprocessor.h b/src/dawn/common/Preprocessor.h
new file mode 100644
index 0000000..4eef736
--- /dev/null
+++ b/src/dawn/common/Preprocessor.h
@@ -0,0 +1,70 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_PREPROCESSOR_H_
+#define COMMON_PREPROCESSOR_H_
+
+// DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty
+// __VA_ARGS__ warnings.
+#define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
+#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, dummyArg)
+
+// DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in.
+#define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2)
+#define DAWN_PP_CONCATENATE_1(arg1, arg2) DAWN_PP_CONCATENATE_2(arg1, arg2)
+#define DAWN_PP_CONCATENATE_2(arg1, arg2) arg1##arg2
+
+// DAWN_PP_EXPAND: Needed to help expand __VA_ARGS__ out on MSVC
+#define DAWN_PP_EXPAND(...) __VA_ARGS__
+
+// Implementation of DAWN_PP_FOR_EACH, called by concatenating DAWN_PP_FOR_EACH_ with a number.
+#define DAWN_PP_FOR_EACH_1(func, x) func(x)
+#define DAWN_PP_FOR_EACH_2(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_1)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_3(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_2)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_4(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_3)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_5(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_4)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_6(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_5)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_7(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_6)(func, __VA_ARGS__))
+#define DAWN_PP_FOR_EACH_8(func, x, ...) \
+    func(x) DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_7)(func, __VA_ARGS__))
+
+// Implementation for DAWN_PP_FOR_EACH. Get the number of args in __VA_ARGS__ so we can concat
+// DAWN_PP_FOR_EACH_ and N.
+// ex.) DAWN_PP_FOR_EACH_NARG(a, b, c) ->
+//      DAWN_PP_FOR_EACH_NARG(a, b, c, DAWN_PP_FOR_EACH_RSEQ()) ->
+//      DAWN_PP_FOR_EACH_NARG_(a, b, c, 8, 7, 6, 5, 4, 3, 2, 1, 0) ->
+//      DAWN_PP_FOR_EACH_ARG_N(a, b, c, 8, 7, 6, 5, 4, 3, 2, 1, 0) ->
+//      DAWN_PP_FOR_EACH_ARG_N( ,  ,  ,  ,  ,  ,  , ,  N) ->
+//      3
+#define DAWN_PP_FOR_EACH_NARG(...) DAWN_PP_FOR_EACH_NARG_(__VA_ARGS__, DAWN_PP_FOR_EACH_RSEQ())
+#define DAWN_PP_FOR_EACH_NARG_(...) \
+    DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH_ARG_N)(__VA_ARGS__))
+#define DAWN_PP_FOR_EACH_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
+#define DAWN_PP_FOR_EACH_RSEQ() 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+// Implementation for DAWN_PP_FOR_EACH.
+// Creates a call to DAWN_PP_FOR_EACH_X where X is 1, 2, ..., etc.
+#define DAWN_PP_FOR_EACH_(N, func, ...) DAWN_PP_CONCATENATE(DAWN_PP_FOR_EACH_, N)(func, __VA_ARGS__)
+
+// DAWN_PP_FOR_EACH: Apply |func| to each argument in |x| and __VA_ARGS__
+#define DAWN_PP_FOR_EACH(func, ...) \
+    DAWN_PP_FOR_EACH_(DAWN_PP_FOR_EACH_NARG(__VA_ARGS__), func, __VA_ARGS__)
+
+#endif  // COMMON_PREPROCESSOR_H_
diff --git a/src/dawn/common/RefBase.h b/src/dawn/common/RefBase.h
new file mode 100644
index 0000000..5d10789
--- /dev/null
+++ b/src/dawn/common/RefBase.h
@@ -0,0 +1,183 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_REFBASE_H_
+#define COMMON_REFBASE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
+// A common class for various smart-pointers acting on referenceable/releasable pointer-like
+// objects. Logic for each specialization can be customized using a Traits type that looks
+// like the following:
+//
+//   struct {
+//      static constexpr T kNullValue = ...;
+//      static void Reference(T value) { ... }
+//      static void Release(T value) { ... }
+//   };
+//
+// RefBase supports
+template <typename T, typename Traits>
+class RefBase {
+  public:
+    // Default constructor and destructor.
+    RefBase() : mValue(Traits::kNullValue) {
+    }
+
+    ~RefBase() {
+        Release(mValue);
+    }
+
+    // Constructors from nullptr.
+    constexpr RefBase(std::nullptr_t) : RefBase() {
+    }
+
+    RefBase<T, Traits>& operator=(std::nullptr_t) {
+        Set(Traits::kNullValue);
+        return *this;
+    }
+
+    // Constructors from a value T.
+    RefBase(T value) : mValue(value) {
+        Reference(value);
+    }
+
+    RefBase<T, Traits>& operator=(const T& value) {
+        Set(value);
+        return *this;
+    }
+
+    // Constructors from a RefBase<T>
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
+        Reference(other.mValue);
+    }
+
+    RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
+        Set(other.mValue);
+        return *this;
+    }
+
+    RefBase(RefBase<T, Traits>&& other) {
+        mValue = other.Detach();
+    }
+
+    RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
+        if (&other != this) {
+            Release(mValue);
+            mValue = other.Detach();
+        }
+        return *this;
+    }
+
+    // Constructors from a RefBase<U>. Note that in the *-assignment operators this cannot be the
+    // same as `other` because overload resolution rules would have chosen the *-assignement
+    // operators defined with `other` == RefBase<T, Traits>.
+    template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
+    RefBase(const RefBase<U, UTraits>& other) : mValue(other.mValue) {
+        Reference(other.mValue);
+    }
+
+    template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
+    RefBase<T, Traits>& operator=(const RefBase<U, UTraits>& other) {
+        Set(other.mValue);
+        return *this;
+    }
+
+    template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
+    RefBase(RefBase<U, UTraits>&& other) {
+        mValue = other.Detach();
+    }
+
+    template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
+    RefBase<T, Traits>& operator=(RefBase<U, UTraits>&& other) {
+        Release(mValue);
+        mValue = other.Detach();
+        return *this;
+    }
+
+    // Comparison operators.
+    bool operator==(const T& other) const {
+        return mValue == other;
+    }
+
+    bool operator!=(const T& other) const {
+        return mValue != other;
+    }
+
+    const T operator->() const {
+        return mValue;
+    }
+    T operator->() {
+        return mValue;
+    }
+
+    // Smart pointer methods.
+    const T& Get() const {
+        return mValue;
+    }
+    T& Get() {
+        return mValue;
+    }
+
+    [[nodiscard]] T Detach() {
+        T value{std::move(mValue)};
+        mValue = Traits::kNullValue;
+        return value;
+    }
+
+    void Acquire(T value) {
+        Release(mValue);
+        mValue = value;
+    }
+
+    [[nodiscard]] T* InitializeInto() {
+        ASSERT(mValue == Traits::kNullValue);
+        return &mValue;
+    }
+
+  private:
+    // Friend is needed so that instances of RefBase<U> can call Reference and Release on
+    // RefBase<T>.
+    template <typename U, typename UTraits>
+    friend class RefBase;
+
+    static void Reference(T value) {
+        if (value != Traits::kNullValue) {
+            Traits::Reference(value);
+        }
+    }
+    static void Release(T value) {
+        if (value != Traits::kNullValue) {
+            Traits::Release(value);
+        }
+    }
+
+    void Set(T value) {
+        if (mValue != value) {
+            // Ensure that the new value is referenced before the old is released to prevent any
+            // transitive frees that may affect the new value.
+            Reference(value);
+            Release(mValue);
+            mValue = value;
+        }
+    }
+
+    T mValue;
+};
+
+#endif  // COMMON_REFBASE_H_
diff --git a/src/dawn/common/RefCounted.cpp b/src/dawn/common/RefCounted.cpp
new file mode 100644
index 0000000..6950d13
--- /dev/null
+++ b/src/dawn/common/RefCounted.cpp
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/RefCounted.h"
+
+#include "dawn/common/Assert.h"
+
+#include <cstddef>
+
+static constexpr size_t kPayloadBits = 1;
+static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
+static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);
+
+RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
+    ASSERT((payload & kPayloadMask) == payload);
+}
+
+uint64_t RefCounted::GetRefCountForTesting() const {
+    return mRefCount >> kPayloadBits;
+}
+
+uint64_t RefCounted::GetRefCountPayload() const {
+    // We only care about the payload bits of the refcount. These never change after
+    // initialization so we can use the relaxed memory order. The order doesn't guarantee
+    // anything except the atomicity of the load, which is enough since any past values of the
+    // atomic will have the correct payload bits.
+    return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
+}
+
+void RefCounted::Reference() {
+    ASSERT((mRefCount & ~kPayloadMask) != 0);
+
+    // The relaxed ordering guarantees only the atomicity of the update, which is enough here
+    // because the reference we are copying from still exists and makes sure other threads
+    // don't delete `this`.
+    // See the explanation in the Boost documentation:
+    //     https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+    mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
+}
+
+void RefCounted::Release() {
+    ASSERT((mRefCount & ~kPayloadMask) != 0);
+
+    // The release fence here is to make sure all accesses to the object on a thread A
+    // happen-before the object is deleted on a thread B. The release memory order ensures that
+    // all accesses on thread A happen-before the refcount is decreased and the atomic variable
+    // makes sure the refcount decrease in A happens-before the refcount decrease in B. Finally
+    // the acquire fence in the destruction case makes sure the refcount decrease in B
+    // happens-before the `delete this`.
+    //
+    // See the explanation in the Boost documentation:
+    //     https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+    uint64_t previousRefCount = mRefCount.fetch_sub(kRefCountIncrement, std::memory_order_release);
+
+    // Check that the previous reference count was strictly less than 2, ignoring payload bits.
+    if (previousRefCount < 2 * kRefCountIncrement) {
+        // Note that on ARM64 this will generate a `dmb ish` instruction which is a global
+        // memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
+        // should be enough and could end up being faster.
+        std::atomic_thread_fence(std::memory_order_acquire);
+        DeleteThis();
+    }
+}
+
+void RefCounted::APIReference() {
+    Reference();
+}
+
+void RefCounted::APIRelease() {
+    Release();
+}
+
+void RefCounted::DeleteThis() {
+    delete this;
+}
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
new file mode 100644
index 0000000..65f37b9
--- /dev/null
+++ b/src/dawn/common/RefCounted.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_REFCOUNTED_H_
+#define COMMON_REFCOUNTED_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <atomic>
+#include <cstdint>
+
+class RefCounted {
+  public:
+    RefCounted(uint64_t payload = 0);
+
+    uint64_t GetRefCountForTesting() const;
+    uint64_t GetRefCountPayload() const;
+
+    void Reference();
+    void Release();
+
+    void APIReference();
+    void APIRelease();
+
+  protected:
+    virtual ~RefCounted() = default;
+    // A Derived class may override this if they require a custom deleter.
+    virtual void DeleteThis();
+
+  private:
+    std::atomic<uint64_t> mRefCount;
+};
+
+template <typename T>
+struct RefCountedTraits {
+    static constexpr T* kNullValue = nullptr;
+    static void Reference(T* value) {
+        value->Reference();
+    }
+    static void Release(T* value) {
+        value->Release();
+    }
+};
+
+template <typename T>
+class Ref : public RefBase<T*, RefCountedTraits<T>> {
+  public:
+    using RefBase<T*, RefCountedTraits<T>>::RefBase;
+};
+
+template <typename T>
+Ref<T> AcquireRef(T* pointee) {
+    Ref<T> ref;
+    ref.Acquire(pointee);
+    return ref;
+}
+
+#endif  // COMMON_REFCOUNTED_H_
diff --git a/src/dawn/common/Result.cpp b/src/dawn/common/Result.cpp
new file mode 100644
index 0000000..2101e47
--- /dev/null
+++ b/src/dawn/common/Result.cpp
@@ -0,0 +1,30 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Result.h"
+
+// Implementation details of the tagged pointer Results
+namespace detail {
+
+    intptr_t MakePayload(const void* pointer, PayloadType type) {
+        intptr_t payload = reinterpret_cast<intptr_t>(pointer);
+        ASSERT((payload & 3) == 0);
+        return payload | type;
+    }
+
+    PayloadType GetPayloadType(intptr_t payload) {
+        return static_cast<PayloadType>(payload & 3);
+    }
+
+}  // namespace detail
diff --git a/src/dawn/common/Result.h b/src/dawn/common/Result.h
new file mode 100644
index 0000000..5566829
--- /dev/null
+++ b/src/dawn/common/Result.h
@@ -0,0 +1,526 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_RESULT_H_
+#define COMMON_RESULT_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+// Result<T, E> is the following sum type (Haskell notation):
+//
+//      data Result T E = Success T | Error E | Empty
+//
+// It is meant to be used as the return type of functions that might fail. The reason for the Empty
+// case is that a Result should never be discarded, only destructured (its error or success moved
+// out) or moved into a different Result. The Empty case tags Results that have been moved out and
+// Result's destructor should ASSERT on it being Empty.
+//
+// Since C++ doesn't have efficient sum types for the special cases we care about, we provide
+// template specializations for them.
+
+template <typename T, typename E>
+class Result;
+
+// The interface of Result<T, E> should look like the following.
+//  public:
+//    Result(T&& success);
+//    Result(std::unique_ptr<E> error);
+//
+//    Result(Result<T, E>&& other);
+//    Result<T, E>& operator=(Result<T, E>&& other);
+//
+//    ~Result();
+//
+//    bool IsError() const;
+//    bool IsSuccess() const;
+//
+//    T&& AcquireSuccess();
+//    std::unique_ptr<E> AcquireError();
+
+// Specialization of Result for returning errors only via pointers. It is basically a pointer
+// where nullptr is both Success and Empty.
+template <typename E>
+class [[nodiscard]] Result<void, E> {
+  public:
+    Result();
+    Result(std::unique_ptr<E> error);
+
+    Result(Result<void, E> && other);
+    Result<void, E>& operator=(Result<void, E>&& other);
+
+    ~Result();
+
+    bool IsError() const;
+    bool IsSuccess() const;
+
+    void AcquireSuccess();
+    std::unique_ptr<E> AcquireError();
+
+  private:
+    std::unique_ptr<E> mError;
+};
+
+// Uses SFINAE to try to get alignof(T) but fallback to Default if T isn't defined.
+template <typename T, size_t Default, typename = size_t>
+constexpr size_t alignof_if_defined_else_default = Default;
+
+template <typename T, size_t Default>
+constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T))> = alignof(T);
+
+// Specialization of Result when both the error an success are pointers. It is implemented as a
+// tagged pointer. The tag for Success is 0 so that returning the value is fastest.
+
+namespace detail {
+    // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
+    // but we really want them inlined so we keep them in the headers
+    enum PayloadType {
+        Success = 0,
+        Error = 1,
+        Empty = 2,
+    };
+
+    intptr_t MakePayload(const void* pointer, PayloadType type);
+    PayloadType GetPayloadType(intptr_t payload);
+
+    template <typename T>
+    static T* GetSuccessFromPayload(intptr_t payload);
+    template <typename E>
+    static E* GetErrorFromPayload(intptr_t payload);
+
+    constexpr static intptr_t kEmptyPayload = Empty;
+}  // namespace detail
+
+template <typename T, typename E>
+class [[nodiscard]] Result<T*, E> {
+  public:
+    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
+                  "Result<T*, E*> reserves two bits for tagging pointers");
+    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
+                  "Result<T*, E*> reserves two bits for tagging pointers");
+
+    Result(T * success);
+    Result(std::unique_ptr<E> error);
+
+    // Support returning a Result<T*, E*> from a Result<TChild*, E*>
+    template <typename TChild>
+    Result(Result<TChild*, E> && other);
+    template <typename TChild>
+    Result<T*, E>& operator=(Result<TChild*, E>&& other);
+
+    ~Result();
+
+    bool IsError() const;
+    bool IsSuccess() const;
+
+    T* AcquireSuccess();
+    std::unique_ptr<E> AcquireError();
+
+  private:
+    template <typename T2, typename E2>
+    friend class Result;
+
+    intptr_t mPayload = detail::kEmptyPayload;
+};
+
+template <typename T, typename E>
+class [[nodiscard]] Result<const T*, E> {
+  public:
+    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
+                  "Result<T*, E*> reserves two bits for tagging pointers");
+    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
+                  "Result<T*, E*> reserves two bits for tagging pointers");
+
+    Result(const T* success);
+    Result(std::unique_ptr<E> error);
+
+    Result(Result<const T*, E> && other);
+    Result<const T*, E>& operator=(Result<const T*, E>&& other);
+
+    ~Result();
+
+    bool IsError() const;
+    bool IsSuccess() const;
+
+    const T* AcquireSuccess();
+    std::unique_ptr<E> AcquireError();
+
+  private:
+    intptr_t mPayload = detail::kEmptyPayload;
+};
+
+template <typename T>
+class Ref;
+
+template <typename T, typename E>
+class [[nodiscard]] Result<Ref<T>, E> {
+  public:
+    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
+                  "Result<Ref<T>, E> reserves two bits for tagging pointers");
+    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
+                  "Result<Ref<T>, E> reserves two bits for tagging pointers");
+
+    template <typename U>
+    Result(Ref<U> && success);
+    template <typename U>
+    Result(const Ref<U>& success);
+    Result(std::unique_ptr<E> error);
+
+    template <typename U>
+    Result(Result<Ref<U>, E> && other);
+    template <typename U>
+    Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
+
+    ~Result();
+
+    bool IsError() const;
+    bool IsSuccess() const;
+
+    Ref<T> AcquireSuccess();
+    std::unique_ptr<E> AcquireError();
+
+  private:
+    template <typename T2, typename E2>
+    friend class Result;
+
+    intptr_t mPayload = detail::kEmptyPayload;
+};
+
+// Catchall definition of Result<T, E> implemented as a tagged struct. It could be improved to use
+// a tagged union instead if it turns out to be a hotspot. T and E must be movable and default
+// constructible.
+template <typename T, typename E>
+class [[nodiscard]] Result {
+  public:
+    Result(T && success);
+    Result(std::unique_ptr<E> error);
+
+    Result(Result<T, E> && other);
+    Result<T, E>& operator=(Result<T, E>&& other);
+
+    ~Result();
+
+    bool IsError() const;
+    bool IsSuccess() const;
+
+    T&& AcquireSuccess();
+    std::unique_ptr<E> AcquireError();
+
+  private:
+    enum PayloadType {
+        Success = 0,
+        Error = 1,
+        Acquired = 2,
+    };
+    PayloadType mType;
+
+    std::unique_ptr<E> mError;
+    T mSuccess;
+};
+
+// Implementation of Result<void, E>
+template <typename E>
+Result<void, E>::Result() {
+}
+
+template <typename E>
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
+}
+
+template <typename E>
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
+}
+
+template <typename E>
+Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
+    ASSERT(mError == nullptr);
+    mError = std::move(other.mError);
+    return *this;
+}
+
+template <typename E>
+Result<void, E>::~Result() {
+    ASSERT(mError == nullptr);
+}
+
+template <typename E>
+bool Result<void, E>::IsError() const {
+    return mError != nullptr;
+}
+
+template <typename E>
+bool Result<void, E>::IsSuccess() const {
+    return mError == nullptr;
+}
+
+template <typename E>
+void Result<void, E>::AcquireSuccess() {
+}
+
+template <typename E>
+std::unique_ptr<E> Result<void, E>::AcquireError() {
+    return std::move(mError);
+}
+
+// Implementation details of the tagged pointer Results
+namespace detail {
+
+    template <typename T>
+    T* GetSuccessFromPayload(intptr_t payload) {
+        ASSERT(GetPayloadType(payload) == Success);
+        return reinterpret_cast<T*>(payload);
+    }
+
+    template <typename E>
+    E* GetErrorFromPayload(intptr_t payload) {
+        ASSERT(GetPayloadType(payload) == Error);
+        return reinterpret_cast<E*>(payload ^ 1);
+    }
+
+}  // namespace detail
+
+// Implementation of Result<T*, E>
+template <typename T, typename E>
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
+}
+
+template <typename T, typename E>
+Result<T*, E>::Result(std::unique_ptr<E> error)
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+template <typename TChild>
+Result<T*, E>::Result(Result<TChild*, E>&& other) : mPayload(other.mPayload) {
+    other.mPayload = detail::kEmptyPayload;
+    static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
+}
+
+template <typename T, typename E>
+template <typename TChild>
+Result<T*, E>& Result<T*, E>::operator=(Result<TChild*, E>&& other) {
+    ASSERT(mPayload == detail::kEmptyPayload);
+    static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
+    mPayload = other.mPayload;
+    other.mPayload = detail::kEmptyPayload;
+    return *this;
+}
+
+template <typename T, typename E>
+Result<T*, E>::~Result() {
+    ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<T*, E>::IsError() const {
+    return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<T*, E>::IsSuccess() const {
+    return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+T* Result<T*, E>::AcquireSuccess() {
+    T* success = detail::GetSuccessFromPayload<T>(mPayload);
+    mPayload = detail::kEmptyPayload;
+    return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<T*, E>::AcquireError() {
+    std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+    mPayload = detail::kEmptyPayload;
+    return std::move(error);
+}
+
+// Implementation of Result<const T*, E*>
+template <typename T, typename E>
+Result<const T*, E>::Result(const T* success)
+    : mPayload(detail::MakePayload(success, detail::Success)) {
+}
+
+template <typename T, typename E>
+Result<const T*, E>::Result(std::unique_ptr<E> error)
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
+    other.mPayload = detail::kEmptyPayload;
+}
+
+template <typename T, typename E>
+Result<const T*, E>& Result<const T*, E>::operator=(Result<const T*, E>&& other) {
+    ASSERT(mPayload == detail::kEmptyPayload);
+    mPayload = other.mPayload;
+    other.mPayload = detail::kEmptyPayload;
+    return *this;
+}
+
+template <typename T, typename E>
+Result<const T*, E>::~Result() {
+    ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<const T*, E>::IsError() const {
+    return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<const T*, E>::IsSuccess() const {
+    return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+const T* Result<const T*, E>::AcquireSuccess() {
+    T* success = detail::GetSuccessFromPayload<T>(mPayload);
+    mPayload = detail::kEmptyPayload;
+    return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<const T*, E>::AcquireError() {
+    std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+    mPayload = detail::kEmptyPayload;
+    return std::move(error);
+}
+
+// Implementation of Result<Ref<T>, E>
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(Ref<U>&& success)
+    : mPayload(detail::MakePayload(success.Detach(), detail::Success)) {
+    static_assert(std::is_convertible<U*, T*>::value);
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
+}
+
+template <typename T, typename E>
+Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(Result<Ref<U>, E>&& other) : mPayload(other.mPayload) {
+    static_assert(std::is_convertible<U*, T*>::value);
+    other.mPayload = detail::kEmptyPayload;
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<U>, E>& Result<Ref<T>, E>::operator=(Result<Ref<U>, E>&& other) {
+    static_assert(std::is_convertible<U*, T*>::value);
+    ASSERT(mPayload == detail::kEmptyPayload);
+    mPayload = other.mPayload;
+    other.mPayload = detail::kEmptyPayload;
+    return *this;
+}
+
+template <typename T, typename E>
+Result<Ref<T>, E>::~Result() {
+    ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<Ref<T>, E>::IsError() const {
+    return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<Ref<T>, E>::IsSuccess() const {
+    return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+Ref<T> Result<Ref<T>, E>::AcquireSuccess() {
+    ASSERT(IsSuccess());
+    Ref<T> success = AcquireRef(detail::GetSuccessFromPayload<T>(mPayload));
+    mPayload = detail::kEmptyPayload;
+    return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
+    ASSERT(IsError());
+    std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+    mPayload = detail::kEmptyPayload;
+    return std::move(error);
+}
+
+// Implementation of Result<T, E>
+template <typename T, typename E>
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
+}
+
+template <typename T, typename E>
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
+}
+
+template <typename T, typename E>
+Result<T, E>::~Result() {
+    ASSERT(mType == Acquired);
+}
+
+template <typename T, typename E>
+Result<T, E>::Result(Result<T, E>&& other)
+    : mType(other.mType), mError(std::move(other.mError)), mSuccess(std::move(other.mSuccess)) {
+    other.mType = Acquired;
+}
+template <typename T, typename E>
+Result<T, E>& Result<T, E>::operator=(Result<T, E>&& other) {
+    mType = other.mType;
+    mError = std::move(other.mError);
+    mSuccess = std::move(other.mSuccess);
+    other.mType = Acquired;
+    return *this;
+}
+
+template <typename T, typename E>
+bool Result<T, E>::IsError() const {
+    return mType == Error;
+}
+
+template <typename T, typename E>
+bool Result<T, E>::IsSuccess() const {
+    return mType == Success;
+}
+
+template <typename T, typename E>
+T&& Result<T, E>::AcquireSuccess() {
+    ASSERT(mType == Success);
+    mType = Acquired;
+    return std::move(mSuccess);
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<T, E>::AcquireError() {
+    ASSERT(mType == Error);
+    mType = Acquired;
+    return std::move(mError);
+}
+
+#endif  // COMMON_RESULT_H_
diff --git a/src/dawn/common/SerialMap.h b/src/dawn/common/SerialMap.h
new file mode 100644
index 0000000..750f16e
--- /dev/null
+++ b/src/dawn/common/SerialMap.h
@@ -0,0 +1,76 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALMAP_H_
+#define COMMON_SERIALMAP_H_
+
+#include "dawn/common/SerialStorage.h"
+
+#include <map>
+#include <vector>
+
+template <typename Serial, typename Value>
+class SerialMap;
+
+template <typename SerialT, typename ValueT>
+struct SerialStorageTraits<SerialMap<SerialT, ValueT>> {
+    using Serial = SerialT;
+    using Value = ValueT;
+    using Storage = std::map<Serial, std::vector<Value>>;
+    using StorageIterator = typename Storage::iterator;
+    using ConstStorageIterator = typename Storage::const_iterator;
+};
+
+// SerialMap stores a map from Serial to Value.
+// Unlike SerialQueue, items may be enqueued with Serials in any
+// arbitrary order. SerialMap provides useful iterators for iterating
+// through Value items in order of increasing Serial.
+template <typename Serial, typename Value>
+class SerialMap : public SerialStorage<SerialMap<Serial, Value>> {
+  public:
+    void Enqueue(const Value& value, Serial serial);
+    void Enqueue(Value&& value, Serial serial);
+    void Enqueue(const std::vector<Value>& values, Serial serial);
+    void Enqueue(std::vector<Value>&& values, Serial serial);
+};
+
+// SerialMap
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(const Value& value, Serial serial) {
+    this->mStorage[serial].emplace_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(Value&& value, Serial serial) {
+    this->mStorage[serial].emplace_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
+    DAWN_ASSERT(values.size() > 0);
+    for (const Value& value : values) {
+        Enqueue(value, serial);
+    }
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
+    DAWN_ASSERT(values.size() > 0);
+    for (const Value& value : values) {
+        Enqueue(value, serial);
+    }
+}
+
+#endif  // COMMON_SERIALMAP_H_
diff --git a/src/dawn/common/SerialQueue.h b/src/dawn/common/SerialQueue.h
new file mode 100644
index 0000000..3e33f1e
--- /dev/null
+++ b/src/dawn/common/SerialQueue.h
@@ -0,0 +1,85 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALQUEUE_H_
+#define COMMON_SERIALQUEUE_H_
+
+#include "dawn/common/SerialStorage.h"
+
+#include <vector>
+
+template <typename Serial, typename Value>
+class SerialQueue;
+
+template <typename SerialT, typename ValueT>
+struct SerialStorageTraits<SerialQueue<SerialT, ValueT>> {
+    using Serial = SerialT;
+    using Value = ValueT;
+    using SerialPair = std::pair<Serial, std::vector<Value>>;
+    using Storage = std::vector<SerialPair>;
+    using StorageIterator = typename Storage::iterator;
+    using ConstStorageIterator = typename Storage::const_iterator;
+};
+
+// SerialQueue stores an associative list mapping a Serial to Value.
+// It enforces that the Serials enqueued are strictly non-decreasing.
+// This makes it very efficient iterate or clear all items added up
+// to some Serial value because they are stored contiguously in memory.
+template <typename Serial, typename Value>
+class SerialQueue : public SerialStorage<SerialQueue<Serial, Value>> {
+  public:
+    // The serial must be given in (not strictly) increasing order.
+    void Enqueue(const Value& value, Serial serial);
+    void Enqueue(Value&& value, Serial serial);
+    void Enqueue(const std::vector<Value>& values, Serial serial);
+    void Enqueue(std::vector<Value>&& values, Serial serial);
+};
+
+// SerialQueue
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(const Value& value, Serial serial) {
+    DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+
+    if (this->Empty() || this->mStorage.back().first < serial) {
+        this->mStorage.emplace_back(serial, std::vector<Value>{});
+    }
+    this->mStorage.back().second.push_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(Value&& value, Serial serial) {
+    DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+
+    if (this->Empty() || this->mStorage.back().first < serial) {
+        this->mStorage.emplace_back(serial, std::vector<Value>{});
+    }
+    this->mStorage.back().second.push_back(std::move(value));
+}
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
+    DAWN_ASSERT(values.size() > 0);
+    DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+    this->mStorage.emplace_back(serial, values);
+}
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
+    DAWN_ASSERT(values.size() > 0);
+    DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+    this->mStorage.emplace_back(serial, values);
+}
+
+#endif  // COMMON_SERIALQUEUE_H_
diff --git a/src/dawn/common/SerialStorage.h b/src/dawn/common/SerialStorage.h
new file mode 100644
index 0000000..8a103f5
--- /dev/null
+++ b/src/dawn/common/SerialStorage.h
@@ -0,0 +1,322 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALSTORAGE_H_
+#define COMMON_SERIALSTORAGE_H_
+
+#include "dawn/common/Assert.h"
+
+#include <cstdint>
+#include <utility>
+
+template <typename T>
+struct SerialStorageTraits {};
+
+template <typename Derived>
+class SerialStorage {
+  protected:
+    using Serial = typename SerialStorageTraits<Derived>::Serial;
+    using Value = typename SerialStorageTraits<Derived>::Value;
+    using Storage = typename SerialStorageTraits<Derived>::Storage;
+    using StorageIterator = typename SerialStorageTraits<Derived>::StorageIterator;
+    using ConstStorageIterator = typename SerialStorageTraits<Derived>::ConstStorageIterator;
+
+  public:
+    class Iterator {
+      public:
+        Iterator(StorageIterator start);
+        Iterator& operator++();
+
+        bool operator==(const Iterator& other) const;
+        bool operator!=(const Iterator& other) const;
+        Value& operator*() const;
+
+      private:
+        StorageIterator mStorageIterator;
+        // Special case the mSerialIterator when it should be equal to mStorageIterator.begin()
+        // otherwise we could ask mStorageIterator.begin() when mStorageIterator is mStorage.end()
+        // which is invalid. mStorageIterator.begin() is tagged with a nullptr.
+        Value* mSerialIterator;
+    };
+
+    class ConstIterator {
+      public:
+        ConstIterator(ConstStorageIterator start);
+        ConstIterator& operator++();
+
+        bool operator==(const ConstIterator& other) const;
+        bool operator!=(const ConstIterator& other) const;
+        const Value& operator*() const;
+
+      private:
+        ConstStorageIterator mStorageIterator;
+        const Value* mSerialIterator;
+    };
+
+    class BeginEnd {
+      public:
+        BeginEnd(StorageIterator start, StorageIterator end);
+
+        Iterator begin() const;
+        Iterator end() const;
+
+      private:
+        StorageIterator mStartIt;
+        StorageIterator mEndIt;
+    };
+
+    class ConstBeginEnd {
+      public:
+        ConstBeginEnd(ConstStorageIterator start, ConstStorageIterator end);
+
+        ConstIterator begin() const;
+        ConstIterator end() const;
+
+      private:
+        ConstStorageIterator mStartIt;
+        ConstStorageIterator mEndIt;
+    };
+
+    // Derived classes may specialize constraits for elements stored
+    // Ex.) SerialQueue enforces that the serial must be given in (not strictly)
+    //      increasing order
+    template <typename... Params>
+    void Enqueue(Params&&... args, Serial serial) {
+        Derived::Enqueue(std::forward<Params>(args)..., serial);
+    }
+
+    bool Empty() const;
+
+    // The UpTo variants of Iterate and Clear affect all values associated to a serial
+    // that is smaller OR EQUAL to the given serial. Iterating is done like so:
+    //     for (const T& value : queue.IterateAll()) { stuff(T); }
+    ConstBeginEnd IterateAll() const;
+    ConstBeginEnd IterateUpTo(Serial serial) const;
+    BeginEnd IterateAll();
+    BeginEnd IterateUpTo(Serial serial);
+
+    void Clear();
+    void ClearUpTo(Serial serial);
+
+    Serial FirstSerial() const;
+    Serial LastSerial() const;
+
+  protected:
+    // Returns the first StorageIterator that a serial bigger than serial.
+    ConstStorageIterator FindUpTo(Serial serial) const;
+    StorageIterator FindUpTo(Serial serial);
+    Storage mStorage;
+};
+
+// SerialStorage
+
+template <typename Derived>
+bool SerialStorage<Derived>::Empty() const {
+    return mStorage.empty();
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateAll() const {
+    return {mStorage.begin(), mStorage.end()};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateUpTo(
+    Serial serial) const {
+    return {mStorage.begin(), FindUpTo(serial)};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateAll() {
+    return {mStorage.begin(), mStorage.end()};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateUpTo(Serial serial) {
+    return {mStorage.begin(), FindUpTo(serial)};
+}
+
+template <typename Derived>
+void SerialStorage<Derived>::Clear() {
+    mStorage.clear();
+}
+
+template <typename Derived>
+void SerialStorage<Derived>::ClearUpTo(Serial serial) {
+    mStorage.erase(mStorage.begin(), FindUpTo(serial));
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Serial SerialStorage<Derived>::FirstSerial() const {
+    DAWN_ASSERT(!Empty());
+    return mStorage.begin()->first;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Serial SerialStorage<Derived>::LastSerial() const {
+    DAWN_ASSERT(!Empty());
+    return mStorage.back().first;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstStorageIterator SerialStorage<Derived>::FindUpTo(
+    Serial serial) const {
+    auto it = mStorage.begin();
+    while (it != mStorage.end() && it->first <= serial) {
+        it++;
+    }
+    return it;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpTo(Serial serial) {
+    auto it = mStorage.begin();
+    while (it != mStorage.end() && it->first <= serial) {
+        it++;
+    }
+    return it;
+}
+
+// SerialStorage::BeginEnd
+
+template <typename Derived>
+SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
+                                           typename SerialStorage<Derived>::StorageIterator end)
+    : mStartIt(start), mEndIt(end) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
+    return {mStartIt};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end() const {
+    return {mEndIt};
+}
+
+// SerialStorage::Iterator
+
+template <typename Derived>
+SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
+    : mStorageIterator(start), mSerialIterator(nullptr) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
+    Value* vectorData = mStorageIterator->second.data();
+
+    if (mSerialIterator == nullptr) {
+        mSerialIterator = vectorData + 1;
+    } else {
+        mSerialIterator++;
+    }
+
+    if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
+        mSerialIterator = nullptr;
+        mStorageIterator++;
+    }
+
+    return *this;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::Iterator::operator==(
+    const typename SerialStorage<Derived>::Iterator& other) const {
+    return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::Iterator::operator!=(
+    const typename SerialStorage<Derived>::Iterator& other) const {
+    return !(*this == other);
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Value& SerialStorage<Derived>::Iterator::operator*() const {
+    if (mSerialIterator == nullptr) {
+        return *mStorageIterator->second.begin();
+    }
+    return *mSerialIterator;
+}
+
+// SerialStorage::ConstBeginEnd
+
+template <typename Derived>
+SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
+    typename SerialStorage<Derived>::ConstStorageIterator start,
+    typename SerialStorage<Derived>::ConstStorageIterator end)
+    : mStartIt(start), mEndIt(end) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
+    const {
+    return {mStartIt};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::end() const {
+    return {mEndIt};
+}
+
+// SerialStorage::ConstIterator
+
+template <typename Derived>
+SerialStorage<Derived>::ConstIterator::ConstIterator(
+    typename SerialStorage<Derived>::ConstStorageIterator start)
+    : mStorageIterator(start), mSerialIterator(nullptr) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator&
+SerialStorage<Derived>::ConstIterator::operator++() {
+    const Value* vectorData = mStorageIterator->second.data();
+
+    if (mSerialIterator == nullptr) {
+        mSerialIterator = vectorData + 1;
+    } else {
+        mSerialIterator++;
+    }
+
+    if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
+        mSerialIterator = nullptr;
+        mStorageIterator++;
+    }
+
+    return *this;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::ConstIterator::operator==(
+    const typename SerialStorage<Derived>::ConstIterator& other) const {
+    return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::ConstIterator::operator!=(
+    const typename SerialStorage<Derived>::ConstIterator& other) const {
+    return !(*this == other);
+}
+
+template <typename Derived>
+const typename SerialStorage<Derived>::Value& SerialStorage<Derived>::ConstIterator::operator*()
+    const {
+    if (mSerialIterator == nullptr) {
+        return *mStorageIterator->second.begin();
+    }
+    return *mSerialIterator;
+}
+
+#endif  // COMMON_SERIALSTORAGE_H_
diff --git a/src/dawn/common/SlabAllocator.cpp b/src/dawn/common/SlabAllocator.cpp
new file mode 100644
index 0000000..d680ee3
--- /dev/null
+++ b/src/dawn/common/SlabAllocator.cpp
@@ -0,0 +1,247 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SlabAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <limits>
+#include <new>
+
+// IndexLinkNode
+
+SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
+    : index(index), nextIndex(nextIndex) {
+}
+
+// Slab
+
+SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
+}
+
+SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
+}
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
+    Slab* slab = this->next;
+    while (slab != nullptr) {
+        Slab* next = slab->next;
+        ASSERT(slab->blocksInUse == 0);
+        // Delete the slab's allocation. The slab is allocated inside slab->allocation.
+        delete[] slab->allocation;
+        slab = next;
+    }
+}
+
+// SlabAllocatorImpl
+
+SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
+    std::numeric_limits<SlabAllocatorImpl::Index>::max();
+
+SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
+                                     uint32_t objectSize,
+                                     uint32_t objectAlignment)
+    : mAllocationAlignment(std::max(static_cast<uint32_t>(alignof(Slab)), objectAlignment)),
+      mSlabBlocksOffset(Align(sizeof(Slab), objectAlignment)),
+      mIndexLinkNodeOffset(Align(objectSize, alignof(IndexLinkNode))),
+      mBlockStride(Align(mIndexLinkNodeOffset + sizeof(IndexLinkNode), objectAlignment)),
+      mBlocksPerSlab(blocksPerSlab),
+      mTotalAllocationSize(
+          // required allocation size
+          static_cast<size_t>(mSlabBlocksOffset) + mBlocksPerSlab * mBlockStride +
+          // Pad the allocation size by mAllocationAlignment so that the aligned allocation still
+          // fulfills the required size.
+          mAllocationAlignment) {
+    ASSERT(IsPowerOfTwo(mAllocationAlignment));
+}
+
+SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
+    : mAllocationAlignment(rhs.mAllocationAlignment),
+      mSlabBlocksOffset(rhs.mSlabBlocksOffset),
+      mIndexLinkNodeOffset(rhs.mIndexLinkNodeOffset),
+      mBlockStride(rhs.mBlockStride),
+      mBlocksPerSlab(rhs.mBlocksPerSlab),
+      mTotalAllocationSize(rhs.mTotalAllocationSize),
+      mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
+      mFullSlabs(std::move(rhs.mFullSlabs)),
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
+}
+
+SlabAllocatorImpl::~SlabAllocatorImpl() = default;
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::OffsetFrom(
+    IndexLinkNode* node,
+    std::make_signed_t<Index> offset) const {
+    return reinterpret_cast<IndexLinkNode*>(reinterpret_cast<char*>(node) +
+                                            static_cast<intptr_t>(mBlockStride) * offset);
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::NodeFromObject(void* object) const {
+    return reinterpret_cast<SlabAllocatorImpl::IndexLinkNode*>(static_cast<char*>(object) +
+                                                               mIndexLinkNodeOffset);
+}
+
+void* SlabAllocatorImpl::ObjectFromNode(IndexLinkNode* node) const {
+    return static_cast<void*>(reinterpret_cast<char*>(node) - mIndexLinkNodeOffset);
+}
+
+bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const {
+    char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset;
+    IndexLinkNode* firstNode = NodeFromObject(firstObjectPtr);
+    IndexLinkNode* lastNode = OffsetFrom(firstNode, mBlocksPerSlab - 1);
+    return node >= firstNode && node <= lastNode && node->index < mBlocksPerSlab;
+}
+
+void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const {
+    ASSERT(IsNodeInSlab(slab, node));
+
+    IndexLinkNode* head = slab->freeList;
+    if (head == nullptr) {
+        node->nextIndex = kInvalidIndex;
+    } else {
+        ASSERT(IsNodeInSlab(slab, head));
+        node->nextIndex = head->index;
+    }
+    slab->freeList = node;
+
+    ASSERT(slab->blocksInUse != 0);
+    slab->blocksInUse--;
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::PopFront(Slab* slab) const {
+    ASSERT(slab->freeList != nullptr);
+
+    IndexLinkNode* head = slab->freeList;
+    if (head->nextIndex == kInvalidIndex) {
+        slab->freeList = nullptr;
+    } else {
+        ASSERT(IsNodeInSlab(slab, head));
+        slab->freeList = OffsetFrom(head, head->nextIndex - head->index);
+        ASSERT(IsNodeInSlab(slab, slab->freeList));
+    }
+
+    ASSERT(slab->blocksInUse < mBlocksPerSlab);
+    slab->blocksInUse++;
+    return head;
+}
+
+void SlabAllocatorImpl::SentinelSlab::Prepend(SlabAllocatorImpl::Slab* slab) {
+    if (this->next != nullptr) {
+        this->next->prev = slab;
+    }
+    slab->prev = this;
+    slab->next = this->next;
+    this->next = slab;
+}
+
+void SlabAllocatorImpl::Slab::Splice() {
+    SlabAllocatorImpl::Slab* originalPrev = this->prev;
+    SlabAllocatorImpl::Slab* originalNext = this->next;
+
+    this->prev = nullptr;
+    this->next = nullptr;
+
+    ASSERT(originalPrev != nullptr);
+
+    // Set the originalNext's prev pointer.
+    if (originalNext != nullptr) {
+        originalNext->prev = originalPrev;
+    }
+
+    // Now, set the originalNext as the originalPrev's new next.
+    originalPrev->next = originalNext;
+}
+
+void* SlabAllocatorImpl::Allocate() {
+    if (mAvailableSlabs.next == nullptr) {
+        GetNewSlab();
+    }
+
+    Slab* slab = mAvailableSlabs.next;
+    IndexLinkNode* node = PopFront(slab);
+    ASSERT(node != nullptr);
+
+    // Move full slabs to a separate list, so allocate can always return quickly.
+    if (slab->blocksInUse == mBlocksPerSlab) {
+        slab->Splice();
+        mFullSlabs.Prepend(slab);
+    }
+
+    return ObjectFromNode(node);
+}
+
+void SlabAllocatorImpl::Deallocate(void* ptr) {
+    IndexLinkNode* node = NodeFromObject(ptr);
+
+    ASSERT(node->index < mBlocksPerSlab);
+    void* firstAllocation = ObjectFromNode(OffsetFrom(node, -node->index));
+    Slab* slab = reinterpret_cast<Slab*>(static_cast<char*>(firstAllocation) - mSlabBlocksOffset);
+    ASSERT(slab != nullptr);
+
+    bool slabWasFull = slab->blocksInUse == mBlocksPerSlab;
+
+    ASSERT(slab->blocksInUse != 0);
+    PushFront(slab, node);
+
+    if (slabWasFull) {
+        // Slab is in the full list. Move it to the recycled list.
+        ASSERT(slab->freeList != nullptr);
+        slab->Splice();
+        mRecycledSlabs.Prepend(slab);
+    }
+
+    // TODO(crbug.com/dawn/825): Occasionally prune slabs if |blocksInUse == 0|.
+    // Doing so eagerly hurts performance.
+}
+
+void SlabAllocatorImpl::GetNewSlab() {
+    // Should only be called when there are no available slabs.
+    ASSERT(mAvailableSlabs.next == nullptr);
+
+    if (mRecycledSlabs.next != nullptr) {
+        // If the recycled list is non-empty, swap their contents.
+        std::swap(mAvailableSlabs.next, mRecycledSlabs.next);
+
+        // We swapped the next pointers, so the prev pointer is wrong.
+        // Update it here.
+        mAvailableSlabs.next->prev = &mAvailableSlabs;
+        ASSERT(mRecycledSlabs.next == nullptr);
+        return;
+    }
+
+    // TODO(crbug.com/dawn/824): Use aligned_alloc when possible. It should be available with
+    // C++17 but on macOS it also requires macOS 10.15 to work.
+    char* allocation = new char[mTotalAllocationSize];
+    char* alignedPtr = AlignPtr(allocation, mAllocationAlignment);
+
+    char* dataStart = alignedPtr + mSlabBlocksOffset;
+
+    IndexLinkNode* node = NodeFromObject(dataStart);
+    for (uint32_t i = 0; i < mBlocksPerSlab; ++i) {
+        new (OffsetFrom(node, i)) IndexLinkNode(i, i + 1);
+    }
+
+    IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
+    lastNode->nextIndex = kInvalidIndex;
+
+    mAvailableSlabs.Prepend(new (alignedPtr) Slab(allocation, node));
+}
diff --git a/src/dawn/common/SlabAllocator.h b/src/dawn/common/SlabAllocator.h
new file mode 100644
index 0000000..58d2d94
--- /dev/null
+++ b/src/dawn/common/SlabAllocator.h
@@ -0,0 +1,184 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SLABALLOCATOR_H_
+#define COMMON_SLABALLOCATOR_H_
+
+#include "dawn/common/PlacementAllocated.h"
+
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
+// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
+// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
+// recycles memory from previous allocations, except multiple allocations are hosted contiguously in
+// one large slab.
+//
+// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
+// into an std::vector. To service an allocation request, the allocator only needs to know the first
+// currently available slab. There are three backing linked lists: AVAILABLE, FULL, and RECYCLED.
+// A slab that is AVAILABLE can be used to immediately service allocation requests. Once it has no
+// remaining space, it is moved to the FULL state. When a FULL slab sees any deallocations, it is
+// moved to the RECYCLED state. The RECYCLED state is separate from the AVAILABLE state so that
+// deallocations don't immediately prepend slabs to the AVAILABLE list, and change the current slab
+// servicing allocations. When the AVAILABLE list becomes empty is it swapped with the RECYCLED
+// list.
+//
+// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
+// plus the extra bytes a "block") used to specify the constant index of the block in its parent
+// slab, as well as the index of the next available block. So, following the block next-indices
+// forms a linked list of free blocks.
+//
+// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
+// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
+// next-indices are initialized to form the free-list of blocks.
+//
+// Allocation: When an object is allocated, if there is no space available in an existing slab, a
+// new slab is created (or an old slab is recycled). The first block of the slab is removed and
+// returned.
+//
+// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
+// because it stores the index of its own allocation. That block is then prepended to the slab's
+// free list.
+class SlabAllocatorImpl {
+  public:
+    // Allocations host their current index and the index of the next free block.
+    // Because this is an index, and not a byte offset, it can be much smaller than a size_t.
+    // TODO(crbug.com/dawn/825): Is uint8_t sufficient?
+    using Index = uint16_t;
+
+    SlabAllocatorImpl(SlabAllocatorImpl&& rhs);
+
+  protected:
+    // This is essentially a singly linked list using indices instead of pointers,
+    // so we store the index of "this" in |this->index|.
+    struct IndexLinkNode : PlacementAllocated {
+        IndexLinkNode(Index index, Index nextIndex);
+
+        const Index index;  // The index of this block in the slab.
+        Index nextIndex;    // The index of the next available block. kInvalidIndex, if none.
+    };
+
+    struct Slab : PlacementAllocated {
+        // A slab is placement-allocated into an aligned pointer from a separate allocation.
+        // Ownership of the allocation is transferred to the slab on creation.
+        // | ---------- allocation --------- |
+        // | pad | Slab | data ------------> |
+        Slab(char allocation[], IndexLinkNode* head);
+        Slab(Slab&& rhs);
+
+        void Splice();
+
+        char* allocation;
+        IndexLinkNode* freeList;
+        Slab* prev;
+        Slab* next;
+        Index blocksInUse;
+    };
+
+    SlabAllocatorImpl(Index blocksPerSlab, uint32_t objectSize, uint32_t objectAlignment);
+    ~SlabAllocatorImpl();
+
+    // Allocate a new block of memory.
+    void* Allocate();
+
+    // Deallocate a block of memory.
+    void Deallocate(void* ptr);
+
+  private:
+    // The maximum value is reserved to indicate the end of the list.
+    static Index kInvalidIndex;
+
+    // Get the IndexLinkNode |offset| slots away.
+    IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;
+
+    // Compute the pointer to the IndexLinkNode from an allocated object.
+    IndexLinkNode* NodeFromObject(void* object) const;
+
+    // Compute the pointer to the object from an IndexLinkNode.
+    void* ObjectFromNode(IndexLinkNode* node) const;
+
+    bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
+
+    // The Slab stores a linked-list of free allocations.
+    // PushFront/PopFront adds/removes an allocation from the free list.
+    void PushFront(Slab* slab, IndexLinkNode* node) const;
+    IndexLinkNode* PopFront(Slab* slab) const;
+
+    // Replace the current slab with a new one, and chain the old one off of it.
+    // Both slabs may still be used for for allocation/deallocation, but older slabs
+    // will be a little slower to get allocations from.
+    void GetNewSlab();
+
+    const uint32_t mAllocationAlignment;
+
+    // | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
+    // | -----------|                              mSlabBlocksOffset
+    // |            | ---------------------- |     mBlockStride
+    // |            | ----------|                  mIndexLinkNodeOffset
+    // | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)
+
+    // A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset| is
+    // the offset to the start of the aligned memory region.
+    const uint32_t mSlabBlocksOffset;
+
+    // The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
+    const uint32_t mIndexLinkNodeOffset;
+
+    // Because alignment of allocations may introduce padding, |mBlockStride| is the
+    // distance between aligned blocks of (Allocation + IndexLinkNode)
+    const uint32_t mBlockStride;
+
+    const Index mBlocksPerSlab;  // The total number of blocks in a slab.
+
+    const size_t mTotalAllocationSize;
+
+    struct SentinelSlab : Slab {
+        SentinelSlab();
+        ~SentinelSlab();
+
+        SentinelSlab(SentinelSlab&& rhs);
+
+        void Prepend(Slab* slab);
+    };
+
+    SentinelSlab mAvailableSlabs;  // Available slabs to service allocations.
+    SentinelSlab mFullSlabs;       // Full slabs. Stored here so we can skip checking them.
+    SentinelSlab mRecycledSlabs;   // Recycled slabs. Not immediately added to |mAvailableSlabs| so
+                                   // we don't thrash the current "active" slab.
+};
+
+template <typename T>
+class SlabAllocator : public SlabAllocatorImpl {
+  public:
+    SlabAllocator(size_t totalObjectBytes,
+                  uint32_t objectSize = sizeof(T),
+                  uint32_t objectAlignment = alignof(T))
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
+    }
+
+    template <typename... Args>
+    T* Allocate(Args&&... args) {
+        void* ptr = SlabAllocatorImpl::Allocate();
+        return new (ptr) T(std::forward<Args>(args)...);
+    }
+
+    void Deallocate(T* object) {
+        SlabAllocatorImpl::Deallocate(object);
+    }
+};
+
+#endif  // COMMON_SLABALLOCATOR_H_
diff --git a/src/dawn/common/StackContainer.h b/src/dawn/common/StackContainer.h
new file mode 100644
index 0000000..4de688f
--- /dev/null
+++ b/src/dawn/common/StackContainer.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a modified copy of Chromium's /src/base/containers/stack_container.h
+
+#ifndef COMMON_STACKCONTAINER_H_
+#define COMMON_STACKCONTAINER_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+#include <vector>
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory and overflows onto the heap. This stack buffer
+// would be allocated on the stack and allows us to avoid heap operations in
+// some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
+template <typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+  public:
+    typedef typename std::allocator<T>::pointer pointer;
+    typedef typename std::allocator<T>::size_type size_type;
+
+    // Backing store for the allocator. The container owner is responsible for
+    // maintaining this for as long as any containers using this allocator are
+    // live.
+    struct Source {
+        Source() : used_stack_buffer_(false) {
+        }
+
+        // Casts the buffer in its right type.
+        T* stack_buffer() {
+            return reinterpret_cast<T*>(stack_buffer_);
+        }
+        const T* stack_buffer() const {
+            return reinterpret_cast<const T*>(&stack_buffer_);
+        }
+
+        // The buffer itself. It is not of type T because we don't want the
+        // constructors and destructors to be automatically called. Define a POD
+        // buffer of the right size instead.
+        alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
+#if defined(DAWN_COMPILER_GCC) && !defined(__x86_64__) && !defined(__i386__)
+        static_assert(alignof(T) <= 16, "http://crbug.com/115612");
+#endif
+
+        // Set when the stack buffer is used for an allocation. We do not track
+        // how much of the buffer is used, only that somebody is using it.
+        bool used_stack_buffer_;
+    };
+
+    // Used by containers when they want to refer to an allocator of type U.
+    template <typename U>
+    struct rebind {
+        typedef StackAllocator<U, stack_capacity> other;
+    };
+
+    // For the straight up copy c-tor, we can share storage.
+    StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+        : std::allocator<T>(), source_(rhs.source_) {
+    }
+
+    // ISO C++ requires the following constructor to be defined,
+    // and std::vector in VC++2008SP1 Release fails with an error
+    // in the class _Container_base_aux_alloc_real (from <xutility>)
+    // if the constructor does not exist.
+    // For this constructor, we cannot share storage; there's
+    // no guarantee that the Source buffer of Ts is large enough
+    // for Us.
+    // TODO: If we were fancy pants, perhaps we could share storage
+    // iff sizeof(T) == sizeof(U).
+    template <typename U, size_t other_capacity>
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
+    }
+
+    // This constructor must exist. It creates a default allocator that doesn't
+    // actually have a stack buffer. glibc's std::string() will compare the
+    // current allocator against the default-constructed allocator, so this
+    // should be fast.
+    StackAllocator() : source_(nullptr) {
+    }
+
+    explicit StackAllocator(Source* source) : source_(source) {
+    }
+
+    // Actually do the allocation. Use the stack buffer if nobody has used it yet
+    // and the size requested fits. Otherwise, fall through to the standard
+    // allocator.
+    pointer allocate(size_type n) {
+        if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
+            source_->used_stack_buffer_ = true;
+            return source_->stack_buffer();
+        } else {
+            return std::allocator<T>::allocate(n);
+        }
+    }
+
+    // Free: when trying to free the stack buffer, just mark it as free. For
+    // non-stack-buffer pointers, just fall though to the standard allocator.
+    void deallocate(pointer p, size_type n) {
+        if (source_ && p == source_->stack_buffer())
+            source_->used_stack_buffer_ = false;
+        else
+            std::allocator<T>::deallocate(p, n);
+    }
+
+  private:
+    Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the vector is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// This will not work with std::string since some implementations allocate
+// more bytes than requested in calls to reserve(), forcing the allocation onto
+// the heap.  http://crbug.com/709273
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template <typename TContainerType, size_t stack_capacity>
+class StackContainer {
+  public:
+    typedef TContainerType ContainerType;
+    typedef typename ContainerType::value_type ContainedType;
+    typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+    // Allocator must be constructed before the container!
+    StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+        // Make the container use the stack allocation by reserving our buffer size
+        // before doing anything else.
+        container_.reserve(stack_capacity);
+    }
+
+    // Getters for the actual container.
+    //
+    // Danger: any copies of this made using the copy constructor must have
+    // shorter lifetimes than the source. The copy will share the same allocator
+    // and therefore the same stack buffer as the original. Use std::copy to
+    // copy into a "real" container for longer-lived objects.
+    ContainerType& container() {
+        return container_;
+    }
+    const ContainerType& container() const {
+        return container_;
+    }
+
+    // Support operator-> to get to the container. This allows nicer syntax like:
+    //   StackContainer<...> foo;
+    //   std::sort(foo->begin(), foo->end());
+    ContainerType* operator->() {
+        return &container_;
+    }
+    const ContainerType* operator->() const {
+        return &container_;
+    }
+
+    // Retrieves the stack source so that that unit tests can verify that the
+    // buffer is being used properly.
+    const typename Allocator::Source& stack_data() const {
+        return stack_data_;
+    }
+
+  protected:
+    typename Allocator::Source stack_data_;
+    Allocator allocator_;
+    ContainerType container_;
+
+  private:
+    StackContainer(const StackContainer& rhs) = delete;
+    StackContainer& operator=(const StackContainer& rhs) = delete;
+    StackContainer(StackContainer&& rhs) = delete;
+    StackContainer& operator=(StackContainer&& rhs) = delete;
+};
+
+// Range-based iteration support for StackContainer.
+template <typename TContainerType, size_t stack_capacity>
+auto begin(const StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(begin(stack_container.container())) {
+    return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(begin(stack_container.container())) {
+    return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(end(stack_container.container())) {
+    return end(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(end(stack_container.container())) {
+    return end(stack_container.container());
+}
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+//   StackVector<int, 16> foo;
+//   foo->push_back(22);  // we have overloaded operator->
+//   foo[0] = 10;         // as well as operator[]
+template <typename T, size_t stack_capacity>
+class StackVector
+    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
+  public:
+    StackVector()
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+    }
+
+    // We need to put this in STL containers sometimes, which requires a copy
+    // constructor. We can't call the regular copy constructor because that will
+    // take the stack buffer from the original. Here, we create an empty object
+    // and make a stack buffer of its own.
+    StackVector(const StackVector<T, stack_capacity>& other)
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+        this->container().assign(other->begin(), other->end());
+    }
+
+    StackVector<T, stack_capacity>& operator=(const StackVector<T, stack_capacity>& other) {
+        this->container().assign(other->begin(), other->end());
+        return *this;
+    }
+
+    // Vectors are commonly indexed, which isn't very convenient even with
+    // operator-> (using "->at()" does exception stuff we don't want).
+    T& operator[](size_t i) {
+        return this->container().operator[](i);
+    }
+    const T& operator[](size_t i) const {
+        return this->container().operator[](i);
+    }
+
+  private:
+    // StackVector(const StackVector& rhs) = delete;
+    // StackVector& operator=(const StackVector& rhs) = delete;
+    StackVector(StackVector&& rhs) = delete;
+    StackVector& operator=(StackVector&& rhs) = delete;
+};
+
+#endif  // COMMON_STACKCONTAINER_H_
diff --git a/src/dawn/common/SwapChainUtils.h b/src/dawn/common/SwapChainUtils.h
new file mode 100644
index 0000000..c1ad5f2
--- /dev/null
+++ b/src/dawn/common/SwapChainUtils.h
@@ -0,0 +1,40 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SWAPCHAINUTILS_H_
+#define COMMON_SWAPCHAINUTILS_H_
+
+#include "dawn/dawn_wsi.h"
+
+template <typename T>
+DawnSwapChainImplementation CreateSwapChainImplementation(T* swapChain) {
+    DawnSwapChainImplementation impl = {};
+    impl.userData = swapChain;
+    impl.Init = [](void* userData, void* wsiContext) {
+        auto* ctx = static_cast<typename T::WSIContext*>(wsiContext);
+        reinterpret_cast<T*>(userData)->Init(ctx);
+    };
+    impl.Destroy = [](void* userData) { delete reinterpret_cast<T*>(userData); };
+    impl.Configure = [](void* userData, WGPUTextureFormat format, WGPUTextureUsage allowedUsage,
+                        uint32_t width, uint32_t height) {
+        return static_cast<T*>(userData)->Configure(format, allowedUsage, width, height);
+    };
+    impl.GetNextTexture = [](void* userData, DawnSwapChainNextTexture* nextTexture) {
+        return static_cast<T*>(userData)->GetNextTexture(nextTexture);
+    };
+    impl.Present = [](void* userData) { return static_cast<T*>(userData)->Present(); };
+    return impl;
+}
+
+#endif  // COMMON_SWAPCHAINUTILS_H_
diff --git a/src/dawn/common/SystemUtils.cpp b/src/dawn/common/SystemUtils.cpp
new file mode 100644
index 0000000..a5ce0f1
--- /dev/null
+++ b/src/dawn/common/SystemUtils.cpp
@@ -0,0 +1,229 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SystemUtils.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include <Windows.h>
+#    include <vector>
+#elif defined(DAWN_PLATFORM_LINUX)
+#    include <dlfcn.h>
+#    include <limits.h>
+#    include <unistd.h>
+#    include <cstdlib>
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+#    include <dlfcn.h>
+#    include <mach-o/dyld.h>
+#    include <vector>
+#endif
+
+#include <array>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+const char* GetPathSeparator() {
+    return "\\";
+}
+
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
+    // First pass a size of 0 to get the size of variable value.
+    DWORD sizeWithNullTerminator = GetEnvironmentVariableA(variableName, nullptr, 0);
+    if (sizeWithNullTerminator == 0) {
+        DWORD err = GetLastError();
+        if (err != ERROR_ENVVAR_NOT_FOUND) {
+            dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
+        }
+        return std::make_pair(std::string(), false);
+    }
+
+    // Then get variable value with its actual size.
+    std::vector<char> buffer(sizeWithNullTerminator);
+    DWORD sizeStored =
+        GetEnvironmentVariableA(variableName, buffer.data(), static_cast<DWORD>(buffer.size()));
+    if (sizeStored + 1 != sizeWithNullTerminator) {
+        DWORD err = GetLastError();
+        if (err) {
+            dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
+        }
+        return std::make_pair(std::string(), false);
+    }
+    return std::make_pair(std::string(buffer.data(), sizeStored), true);
+}
+
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+    return SetEnvironmentVariableA(variableName, value) == TRUE;
+}
+#elif defined(DAWN_PLATFORM_POSIX)
+const char* GetPathSeparator() {
+    return "/";
+}
+
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
+    char* value = getenv(variableName);
+    return value == nullptr ? std::make_pair(std::string(), false)
+                            : std::make_pair(std::string(value), true);
+}
+
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+    if (value == nullptr) {
+        return unsetenv(variableName) == 0;
+    }
+    return setenv(variableName, value, 1) == 0;
+}
+#else
+#    error "Implement Get/SetEnvironmentVar for your platform."
+#endif
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+std::optional<std::string> GetHModulePath(HMODULE module) {
+    std::array<char, MAX_PATH> executableFileBuf;
+    DWORD executablePathLen = GetModuleFileNameA(nullptr, executableFileBuf.data(),
+                                                 static_cast<DWORD>(executableFileBuf.size()));
+    if (executablePathLen == 0) {
+        return {};
+    }
+    return executableFileBuf.data();
+}
+std::optional<std::string> GetExecutablePath() {
+    return GetHModulePath(nullptr);
+}
+#elif defined(DAWN_PLATFORM_LINUX)
+std::optional<std::string> GetExecutablePath() {
+    std::array<char, PATH_MAX> path;
+    ssize_t result = readlink("/proc/self/exe", path.data(), PATH_MAX - 1);
+    if (result < 0 || static_cast<size_t>(result) >= PATH_MAX - 1) {
+        return {};
+    }
+
+    path[result] = '\0';
+    return path.data();
+}
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+std::optional<std::string> GetExecutablePath() {
+    uint32_t size = 0;
+    _NSGetExecutablePath(nullptr, &size);
+
+    std::vector<char> buffer(size + 1);
+    if (_NSGetExecutablePath(buffer.data(), &size) != 0) {
+        return {};
+    }
+
+    buffer[size] = '\0';
+    return buffer.data();
+}
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+std::optional<std::string> GetExecutablePath() {
+    // TODO: Implement on Fuchsia
+    return {};
+}
+#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+std::optional<std::string> GetExecutablePath() {
+    return {};
+}
+#else
+#    error "Implement GetExecutablePath for your platform."
+#endif
+
+std::optional<std::string> GetExecutableDirectory() {
+    std::optional<std::string> exePath = GetExecutablePath();
+    if (!exePath) {
+        return {};
+    }
+    size_t lastPathSepLoc = exePath->find_last_of(GetPathSeparator());
+    if (lastPathSepLoc == std::string::npos) {
+        return {};
+    }
+    return exePath->substr(0, lastPathSepLoc + 1);
+}
+
+#if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+std::optional<std::string> GetModulePath() {
+    static int placeholderSymbol = 0;
+    Dl_info dlInfo;
+    if (dladdr(&placeholderSymbol, &dlInfo) == 0) {
+        return {};
+    }
+
+    std::array<char, PATH_MAX> absolutePath;
+    if (realpath(dlInfo.dli_fname, absolutePath.data()) == NULL) {
+        return {};
+    }
+    return absolutePath.data();
+}
+#elif defined(DAWN_PLATFORM_WINDOWS)
+std::optional<std::string> GetModulePath() {
+    static int placeholderSymbol = 0;
+    HMODULE module = nullptr;
+// GetModuleHandleEx is unavailable on UWP
+#    if defined(DAWN_IS_WINUWP)
+    return {};
+#    else
+    if (!GetModuleHandleExA(
+            GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+            reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
+        return {};
+    }
+#    endif
+    return GetHModulePath(module);
+}
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+std::optional<std::string> GetModulePath() {
+    return {};
+}
+#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+std::optional<std::string> GetModulePath() {
+    return {};
+}
+#else
+#    error "Implement GetModulePath for your platform."
+#endif
+
+std::optional<std::string> GetModuleDirectory() {
+    std::optional<std::string> modPath = GetModulePath();
+    if (!modPath) {
+        return {};
+    }
+    size_t lastPathSepLoc = modPath->find_last_of(GetPathSeparator());
+    if (lastPathSepLoc == std::string::npos) {
+        return {};
+    }
+    return modPath->substr(0, lastPathSepLoc + 1);
+}
+
+// ScopedEnvironmentVar
+
+ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
+    : mName(variableName),
+      mOriginalValue(GetEnvironmentVar(variableName)),
+      mIsSet(SetEnvironmentVar(variableName, value)) {
+}
+
+ScopedEnvironmentVar::~ScopedEnvironmentVar() {
+    if (mIsSet) {
+        bool success = SetEnvironmentVar(
+            mName.c_str(), mOriginalValue.second ? mOriginalValue.first.c_str() : nullptr);
+        // If we set the environment variable in the constructor, we should never fail restoring it.
+        ASSERT(success);
+    }
+}
+
+bool ScopedEnvironmentVar::Set(const char* variableName, const char* value) {
+    ASSERT(!mIsSet);
+    mName = variableName;
+    mOriginalValue = GetEnvironmentVar(variableName);
+    mIsSet = SetEnvironmentVar(variableName, value);
+    return mIsSet;
+}
diff --git a/src/dawn/common/SystemUtils.h b/src/dawn/common/SystemUtils.h
new file mode 100644
index 0000000..bb59966
--- /dev/null
+++ b/src/dawn/common/SystemUtils.h
@@ -0,0 +1,57 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SYSTEMUTILS_H_
+#define COMMON_SYSTEMUTILS_H_
+
+#include "dawn/common/Platform.h"
+
+#include <optional>
+#include <string>
+
+const char* GetPathSeparator();
+// Returns a pair of the environment variable's value, and a boolean indicating whether the variable
+// was present.
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName);
+bool SetEnvironmentVar(const char* variableName, const char* value);
+// Directories are always returned with a trailing path separator.
+// May return std::nullopt if the path is too long, there is no current
+// module (statically linked into executable), or the function is not
+// implemented on the platform.
+std::optional<std::string> GetExecutableDirectory();
+std::optional<std::string> GetModuleDirectory();
+
+#ifdef DAWN_PLATFORM_MACOS
+void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion = nullptr);
+bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion = 0);
+#endif
+
+class ScopedEnvironmentVar {
+  public:
+    ScopedEnvironmentVar() = default;
+    ScopedEnvironmentVar(const char* variableName, const char* value);
+    ~ScopedEnvironmentVar();
+
+    ScopedEnvironmentVar(const ScopedEnvironmentVar& rhs) = delete;
+    ScopedEnvironmentVar& operator=(const ScopedEnvironmentVar& rhs) = delete;
+
+    bool Set(const char* variableName, const char* value);
+
+  private:
+    std::string mName;
+    std::pair<std::string, bool> mOriginalValue;
+    bool mIsSet = false;
+};
+
+#endif  // COMMON_SYSTEMUTILS_H_
diff --git a/src/dawn/common/SystemUtils_mac.mm b/src/dawn/common/SystemUtils_mac.mm
new file mode 100644
index 0000000..b706c20
--- /dev/null
+++ b/src/dawn/common/SystemUtils_mac.mm
@@ -0,0 +1,33 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SystemUtils.h"
+
+#include "dawn/common/Assert.h"
+
+#import <Foundation/NSProcessInfo.h>
+
+void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion) {
+    NSOperatingSystemVersion version = [[NSProcessInfo processInfo] operatingSystemVersion];
+    ASSERT(majorVersion != nullptr);
+    *majorVersion = version.majorVersion;
+    if (minorVersion != nullptr) {
+        *minorVersion = version.minorVersion;
+    }
+}
+
+bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion) {
+    return
+        [NSProcessInfo.processInfo isOperatingSystemAtLeastVersion:{majorVersion, minorVersion, 0}];
+}
diff --git a/src/dawn/common/TypeTraits.h b/src/dawn/common/TypeTraits.h
new file mode 100644
index 0000000..3348b89
--- /dev/null
+++ b/src/dawn/common/TypeTraits.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_TYPETRAITS_H_
+#define COMMON_TYPETRAITS_H_
+
+#include <type_traits>
+
+template <typename LHS, typename RHS = LHS, typename T = void>
+struct HasEqualityOperator {
+    static constexpr const bool value = false;
+};
+
+template <typename LHS, typename RHS>
+struct HasEqualityOperator<
+    LHS,
+    RHS,
+    std::enable_if_t<
+        std::is_same<decltype(std::declval<LHS>() == std::declval<RHS>()), bool>::value>> {
+    static constexpr const bool value = true;
+};
+
+#endif  // COMMON_TYPE_TRAITS_H_
diff --git a/src/dawn/common/TypedInteger.h b/src/dawn/common/TypedInteger.h
new file mode 100644
index 0000000..6669d14
--- /dev/null
+++ b/src/dawn/common/TypedInteger.h
@@ -0,0 +1,262 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_TYPEDINTEGER_H_
+#define COMMON_TYPEDINTEGER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <limits>
+#include <type_traits>
+
+// TypedInteger is helper class that provides additional type safety in Debug.
+//  - Integers of different (Tag, BaseIntegerType) may not be used interoperably
+//  - Allows casts only to the underlying type.
+//  - Integers of the same (Tag, BaseIntegerType) may be compared or assigned.
+// This class helps ensure that the many types of indices in Dawn aren't mixed up and used
+// interchangably.
+// In Release builds, when DAWN_ENABLE_ASSERTS is not defined, TypedInteger is a passthrough
+// typedef of the underlying type.
+//
+// Example:
+//     using UintA = TypedInteger<struct TypeA, uint32_t>;
+//     using UintB = TypedInteger<struct TypeB, uint32_t>;
+//
+//  in Release:
+//     using UintA = uint32_t;
+//     using UintB = uint32_t;
+//
+//  in Debug:
+//     using UintA = detail::TypedIntegerImpl<struct TypeA, uint32_t>;
+//     using UintB = detail::TypedIntegerImpl<struct TypeB, uint32_t>;
+//
+//     Assignment, construction, comparison, and arithmetic with TypedIntegerImpl are allowed
+//     only for typed integers of exactly the same type. Further, they must be
+//     created / cast explicitly; there is no implicit conversion.
+//
+//     UintA a(2);
+//     uint32_t aValue = static_cast<uint32_t>(a);
+//
+namespace detail {
+    template <typename Tag, typename T>
+    class TypedIntegerImpl;
+}  // namespace detail
+
+template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
+#if defined(DAWN_ENABLE_ASSERTS)
+using TypedInteger = detail::TypedIntegerImpl<Tag, T>;
+#else
+using TypedInteger = T;
+#endif
+
+namespace detail {
+    template <typename Tag, typename T>
+    class alignas(T) TypedIntegerImpl {
+        static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+        T mValue;
+
+      public:
+        constexpr TypedIntegerImpl() : mValue(0) {
+            static_assert(alignof(TypedIntegerImpl) == alignof(T));
+            static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
+        }
+
+        // Construction from non-narrowing integral types.
+        template <typename I,
+                  typename = std::enable_if_t<
+                      std::is_integral<I>::value &&
+                      std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+                      std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+        explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
+        }
+
+        // Allow explicit casts only to the underlying type. If you're casting out of an
+        // TypedInteger, you should know what what you're doing, and exactly what type you
+        // expect.
+        explicit constexpr operator T() const {
+            return static_cast<T>(this->mValue);
+        }
+
+// Same-tag TypedInteger comparison operators
+#define TYPED_COMPARISON(op)                                        \
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
+        return mValue op rhs.mValue;                                \
+    }
+        TYPED_COMPARISON(<)
+        TYPED_COMPARISON(<=)
+        TYPED_COMPARISON(>)
+        TYPED_COMPARISON(>=)
+        TYPED_COMPARISON(==)
+        TYPED_COMPARISON(!=)
+#undef TYPED_COMPARISON
+
+        // Increment / decrement operators for for-loop iteration
+        constexpr TypedIntegerImpl& operator++() {
+            ASSERT(this->mValue < std::numeric_limits<T>::max());
+            ++this->mValue;
+            return *this;
+        }
+
+        constexpr TypedIntegerImpl operator++(int) {
+            TypedIntegerImpl ret = *this;
+
+            ASSERT(this->mValue < std::numeric_limits<T>::max());
+            ++this->mValue;
+            return ret;
+        }
+
+        constexpr TypedIntegerImpl& operator--() {
+            assert(this->mValue > std::numeric_limits<T>::min());
+            --this->mValue;
+            return *this;
+        }
+
+        constexpr TypedIntegerImpl operator--(int) {
+            TypedIntegerImpl ret = *this;
+
+            ASSERT(this->mValue > std::numeric_limits<T>::min());
+            --this->mValue;
+            return ret;
+        }
+
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
+        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            // Overflow would wrap around
+            ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
+            return lhs.mValue + rhs.mValue;
+        }
+
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
+        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            if (lhs.mValue > 0) {
+                // rhs is positive: |rhs| is at most the distance between max and |lhs|.
+                // rhs is negative: (positive + negative) won't overflow
+                ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
+            } else {
+                // rhs is postive: (negative + positive) won't underflow
+                // rhs is negative: |rhs| isn't less than the (negative) distance between min
+                // and |lhs|
+                ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
+            }
+            return lhs.mValue + rhs.mValue;
+        }
+
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
+        SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            // Overflow would wrap around
+            ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
+            return lhs.mValue - rhs.mValue;
+        }
+
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
+            TypedIntegerImpl<Tag, T> lhs,
+            TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            if (lhs.mValue > 0) {
+                // rhs is positive: positive minus positive won't overflow
+                // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
+                // and max.
+                ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
+            } else {
+                // rhs is positive: |rhs| is at most the distance between min and |lhs|
+                // rhs is negative: negative minus negative won't overflow
+                ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
+            }
+            return lhs.mValue - rhs.mValue;
+        }
+
+        template <typename T2 = T>
+        constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+            static_assert(std::is_same<T, T2>::value);
+            // The negation of the most negative value cannot be represented.
+            ASSERT(this->mValue != std::numeric_limits<T>::min());
+            return TypedIntegerImpl(-this->mValue);
+        }
+
+        constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
+            auto result = AddImpl(*this, rhs);
+            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
+            return TypedIntegerImpl(result);
+        }
+
+        constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
+            auto result = SubImpl(*this, rhs);
+            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
+            return TypedIntegerImpl(result);
+        }
+    };
+
+}  // namespace detail
+
+namespace std {
+
+    template <typename Tag, typename T>
+    class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+      public:
+        static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+        }
+        static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+        }
+    };
+
+}  // namespace std
+
+namespace ityp {
+
+    // These helpers below are provided since the default arithmetic operators for small integer
+    // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
+    // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
+    // ityp::Sub(a, b) instead.
+
+    template <typename Tag, typename T>
+    constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
+        return ::detail::TypedIntegerImpl<Tag, T>(
+            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
+    }
+
+    template <typename Tag, typename T>
+    constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
+        return ::detail::TypedIntegerImpl<Tag, T>(
+            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
+    }
+
+    template <typename T>
+    constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+        return static_cast<T>(lhs + rhs);
+    }
+
+    template <typename T>
+    constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+        return static_cast<T>(lhs - rhs);
+    }
+
+}  // namespace ityp
+
+#endif  // COMMON_TYPEDINTEGER_H_
diff --git a/src/dawn/common/UnderlyingType.h b/src/dawn/common/UnderlyingType.h
new file mode 100644
index 0000000..09c72c0
--- /dev/null
+++ b/src/dawn/common/UnderlyingType.h
@@ -0,0 +1,51 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_UNDERLYINGTYPE_H_
+#define COMMON_UNDERLYINGTYPE_H_
+
+#include <type_traits>
+
+// UnderlyingType is similar to std::underlying_type_t. It is a passthrough for already
+// integer types which simplifies getting the underlying primitive type for an arbitrary
+// template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
+// the wrapped integer type.
+namespace detail {
+    template <typename T, typename Enable = void>
+    struct UnderlyingTypeImpl;
+
+    template <typename I>
+    struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+        using type = I;
+    };
+
+    template <typename E>
+    struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+        using type = std::underlying_type_t<E>;
+    };
+
+    // Forward declare the TypedInteger impl.
+    template <typename Tag, typename T>
+    class TypedIntegerImpl;
+
+    template <typename Tag, typename I>
+    struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+        using type = typename UnderlyingTypeImpl<I>::type;
+    };
+}  // namespace detail
+
+template <typename T>
+using UnderlyingType = typename detail::UnderlyingTypeImpl<T>::type;
+
+#endif  // COMMON_UNDERLYINGTYPE_H_
diff --git a/src/dawn/common/WindowsUtils.cpp b/src/dawn/common/WindowsUtils.cpp
new file mode 100644
index 0000000..fd924f4
--- /dev/null
+++ b/src/dawn/common/WindowsUtils.cpp
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/WindowsUtils.h"
+
+#include "dawn/common/windows_with_undefs.h"
+
+#include <memory>
+
+std::string WCharToUTF8(const wchar_t* input) {
+    // The -1 argument asks WideCharToMultiByte to use the null terminator to know the size of
+    // input. It will return a size that includes the null terminator.
+    int requiredSize = WideCharToMultiByte(CP_UTF8, 0, input, -1, nullptr, 0, nullptr, nullptr);
+
+    std::string result;
+    result.resize(requiredSize - 1);
+    WideCharToMultiByte(CP_UTF8, 0, input, -1, result.data(), requiredSize, nullptr, nullptr);
+
+    return result;
+}
+
+std::wstring UTF8ToWStr(const char* input) {
+    // The -1 argument asks MultiByteToWideChar to use the null terminator to know the size of
+    // input. It will return a size that includes the null terminator.
+    int requiredSize = MultiByteToWideChar(CP_UTF8, 0, input, -1, nullptr, 0);
+
+    std::wstring result;
+    result.resize(requiredSize - 1);
+    MultiByteToWideChar(CP_UTF8, 0, input, -1, result.data(), requiredSize);
+
+    return result;
+}
diff --git a/src/dawn/common/WindowsUtils.h b/src/dawn/common/WindowsUtils.h
new file mode 100644
index 0000000..3ab916b
--- /dev/null
+++ b/src/dawn/common/WindowsUtils.h
@@ -0,0 +1,24 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_WINDOWSUTILS_H_
+#define COMMON_WINDOWSUTILS_H_
+
+#include <string>
+
+std::string WCharToUTF8(const wchar_t* input);
+
+std::wstring UTF8ToWStr(const char* input);
+
+#endif  // COMMON_WINDOWSUTILS_H_
diff --git a/src/dawn/common/ityp_array.h b/src/dawn/common/ityp_array.h
new file mode 100644
index 0000000..c7db71a
--- /dev/null
+++ b/src/dawn/common/ityp_array.h
@@ -0,0 +1,98 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_ARRAY_H_
+#define COMMON_ITYP_ARRAY_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <array>
+#include <cstddef>
+#include <type_traits>
+
+namespace ityp {
+
+    // ityp::array is a helper class that wraps std::array with the restriction that
+    // indices must be a particular type |Index|. Dawn uses multiple flat maps of
+    // index-->data, and this class helps ensure an indices cannot be passed interchangably
+    // to a flat map of a different type.
+    template <typename Index, typename Value, size_t Size>
+    class array : private std::array<Value, Size> {
+        using I = UnderlyingType<Index>;
+        using Base = std::array<Value, Size>;
+
+        static_assert(Size <= std::numeric_limits<I>::max());
+
+      public:
+        constexpr array() = default;
+
+        template <typename... Values>
+        constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
+        }
+
+        Value& operator[](Index i) {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::operator[](index);
+        }
+
+        constexpr const Value& operator[](Index i) const {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::operator[](index);
+        }
+
+        Value& at(Index i) {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::at(index);
+        }
+
+        constexpr const Value& at(Index i) const {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::at(index);
+        }
+
+        typename Base::iterator begin() noexcept {
+            return Base::begin();
+        }
+
+        typename Base::const_iterator begin() const noexcept {
+            return Base::begin();
+        }
+
+        typename Base::iterator end() noexcept {
+            return Base::end();
+        }
+
+        typename Base::const_iterator end() const noexcept {
+            return Base::end();
+        }
+
+        constexpr Index size() const {
+            return Index(I(Size));
+        }
+
+        using Base::back;
+        using Base::data;
+        using Base::empty;
+        using Base::fill;
+        using Base::front;
+    };
+
+}  // namespace ityp
+
+#endif  // COMMON_ITYP_ARRAY_H_
diff --git a/src/dawn/common/ityp_bitset.h b/src/dawn/common/ityp_bitset.h
new file mode 100644
index 0000000..9c27cfe
--- /dev/null
+++ b/src/dawn/common/ityp_bitset.h
@@ -0,0 +1,188 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_BITSET_H_
+#define COMMON_ITYP_BITSET_H_
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+namespace ityp {
+
+    // ityp::bitset is a helper class that wraps std::bitset with the restriction that
+    // indices must be a particular type |Index|.
+    template <typename Index, size_t N>
+    class bitset : private std::bitset<N> {
+        using I = UnderlyingType<Index>;
+        using Base = std::bitset<N>;
+
+        static_assert(sizeof(I) <= sizeof(size_t));
+
+        constexpr bitset(const Base& rhs) : Base(rhs) {
+        }
+
+      public:
+        using reference = typename Base::reference;
+
+        constexpr bitset() noexcept : Base() {
+        }
+
+        constexpr bitset(unsigned long long value) noexcept : Base(value) {
+        }
+
+        constexpr bool operator[](Index i) const {
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        typename Base::reference operator[](Index i) {
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        bool test(Index i) const {
+            return Base::test(static_cast<I>(i));
+        }
+
+        using Base::all;
+        using Base::any;
+        using Base::count;
+        using Base::none;
+        using Base::size;
+
+        bool operator==(const bitset& other) const noexcept {
+            return Base::operator==(static_cast<const Base&>(other));
+        }
+
+        bool operator!=(const bitset& other) const noexcept {
+            return Base::operator!=(static_cast<const Base&>(other));
+        }
+
+        bitset& operator&=(const bitset& other) noexcept {
+            return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+        }
+
+        bitset& operator|=(const bitset& other) noexcept {
+            return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+        }
+
+        bitset& operator^=(const bitset& other) noexcept {
+            return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+        }
+
+        bitset operator~() const noexcept {
+            return bitset(*this).flip();
+        }
+
+        bitset& set() noexcept {
+            return static_cast<bitset&>(Base::set());
+        }
+
+        bitset& set(Index i, bool value = true) {
+            return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+        }
+
+        bitset& reset() noexcept {
+            return static_cast<bitset&>(Base::reset());
+        }
+
+        bitset& reset(Index i) {
+            return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
+        }
+
+        bitset& flip() noexcept {
+            return static_cast<bitset&>(Base::flip());
+        }
+
+        bitset& flip(Index i) {
+            return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
+        }
+
+        using Base::to_string;
+        using Base::to_ullong;
+        using Base::to_ulong;
+
+        friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+            return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+        }
+
+        friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+            return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+        }
+
+        friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+            return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+        }
+
+        friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+            return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+        }
+
+        friend struct std::hash<bitset>;
+    };
+
+}  // namespace ityp
+
+// Assume we have bitset of at most 64 bits
+// Returns i which is the next integer of the index of the highest bit
+// i == 0 if there is no bit set to true
+// i == 1 if only the least significant bit (at index 0) is the bit set to true with the
+// highest index
+// ...
+// i == 64 if the most significant bit (at index 64) is the bit set to true with the highest
+// index
+template <typename Index, size_t N>
+Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
+    using I = UnderlyingType<Index>;
+#if defined(DAWN_COMPILER_MSVC)
+    if constexpr (N > 32) {
+#    if defined(DAWN_PLATFORM_64_BIT)
+        unsigned long firstBitIndex = 0ul;
+        unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());
+        if (ret == 0) {
+            return Index(static_cast<I>(0));
+        }
+        return Index(static_cast<I>(firstBitIndex + 1));
+#    else   // defined(DAWN_PLATFORM_64_BIT)
+        if (bitset.none()) {
+            return Index(static_cast<I>(0));
+        }
+        for (size_t i = 0u; i < N; i++) {
+            if (bitset.test(Index(static_cast<I>(N - 1 - i)))) {
+                return Index(static_cast<I>(N - i));
+            }
+        }
+        UNREACHABLE();
+#    endif  // defined(DAWN_PLATFORM_64_BIT)
+    } else {
+        unsigned long firstBitIndex = 0ul;
+        unsigned char ret = _BitScanReverse(&firstBitIndex, bitset.to_ulong());
+        if (ret == 0) {
+            return Index(static_cast<I>(0));
+        }
+        return Index(static_cast<I>(firstBitIndex + 1));
+    }
+#else   // defined(DAWN_COMPILER_MSVC)
+    if (bitset.none()) {
+        return Index(static_cast<I>(0));
+    }
+    if constexpr (N > 32) {
+        return Index(
+            static_cast<I>(64 - static_cast<uint32_t>(__builtin_clzll(bitset.to_ullong()))));
+    } else {
+        return Index(static_cast<I>(32 - static_cast<uint32_t>(__builtin_clz(bitset.to_ulong()))));
+    }
+#endif  // defined(DAWN_COMPILER_MSVC)
+}
+
+#endif  // COMMON_ITYP_BITSET_H_
diff --git a/src/dawn/common/ityp_span.h b/src/dawn/common/ityp_span.h
new file mode 100644
index 0000000..c73f983
--- /dev/null
+++ b/src/dawn/common/ityp_span.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_SPAN_H_
+#define COMMON_ITYP_SPAN_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <type_traits>
+
+namespace ityp {
+
+    // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+    // It stores the size and pointer to first element. It has the restriction that
+    // indices must be a particular type |Index|. This provides a type-safe way to index
+    // raw pointers.
+    template <typename Index, typename Value>
+    class span {
+        using I = UnderlyingType<Index>;
+
+      public:
+        constexpr span() : mData(nullptr), mSize(0) {
+        }
+        constexpr span(Value* data, Index size) : mData(data), mSize(size) {
+        }
+
+        constexpr Value& operator[](Index i) const {
+            ASSERT(i < mSize);
+            return mData[static_cast<I>(i)];
+        }
+
+        Value* data() noexcept {
+            return mData;
+        }
+
+        const Value* data() const noexcept {
+            return mData;
+        }
+
+        Value* begin() noexcept {
+            return mData;
+        }
+
+        const Value* begin() const noexcept {
+            return mData;
+        }
+
+        Value* end() noexcept {
+            return mData + static_cast<I>(mSize);
+        }
+
+        const Value* end() const noexcept {
+            return mData + static_cast<I>(mSize);
+        }
+
+        Value& front() {
+            ASSERT(mData != nullptr);
+            ASSERT(static_cast<I>(mSize) >= 0);
+            return *mData;
+        }
+
+        const Value& front() const {
+            ASSERT(mData != nullptr);
+            ASSERT(static_cast<I>(mSize) >= 0);
+            return *mData;
+        }
+
+        Value& back() {
+            ASSERT(mData != nullptr);
+            ASSERT(static_cast<I>(mSize) >= 0);
+            return *(mData + static_cast<I>(mSize) - 1);
+        }
+
+        const Value& back() const {
+            ASSERT(mData != nullptr);
+            ASSERT(static_cast<I>(mSize) >= 0);
+            return *(mData + static_cast<I>(mSize) - 1);
+        }
+
+        Index size() const {
+            return mSize;
+        }
+
+      private:
+        Value* mData;
+        Index mSize;
+    };
+
+}  // namespace ityp
+
+#endif  // COMMON_ITYP_SPAN_H_
diff --git a/src/dawn/common/ityp_stack_vec.h b/src/dawn/common/ityp_stack_vec.h
new file mode 100644
index 0000000..47c437e
--- /dev/null
+++ b/src/dawn/common/ityp_stack_vec.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_STACK_VEC_H_
+#define COMMON_ITYP_STACK_VEC_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/StackContainer.h"
+#include "dawn/common/UnderlyingType.h"
+
+namespace ityp {
+
+    template <typename Index, typename Value, size_t StaticCapacity>
+    class stack_vec : private StackVector<Value, StaticCapacity> {
+        using I = UnderlyingType<Index>;
+        using Base = StackVector<Value, StaticCapacity>;
+        using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+        static_assert(StaticCapacity <= std::numeric_limits<I>::max());
+
+      public:
+        stack_vec() : Base() {
+        }
+        stack_vec(Index size) : Base() {
+            this->container().resize(static_cast<I>(size));
+        }
+
+        Value& operator[](Index i) {
+            ASSERT(i < size());
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        constexpr const Value& operator[](Index i) const {
+            ASSERT(i < size());
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        void resize(Index size) {
+            this->container().resize(static_cast<I>(size));
+        }
+
+        void reserve(Index size) {
+            this->container().reserve(static_cast<I>(size));
+        }
+
+        Value* data() {
+            return this->container().data();
+        }
+
+        const Value* data() const {
+            return this->container().data();
+        }
+
+        typename VectorBase::iterator begin() noexcept {
+            return this->container().begin();
+        }
+
+        typename VectorBase::const_iterator begin() const noexcept {
+            return this->container().begin();
+        }
+
+        typename VectorBase::iterator end() noexcept {
+            return this->container().end();
+        }
+
+        typename VectorBase::const_iterator end() const noexcept {
+            return this->container().end();
+        }
+
+        typename VectorBase::reference front() {
+            return this->container().front();
+        }
+
+        typename VectorBase::const_reference front() const {
+            return this->container().front();
+        }
+
+        typename VectorBase::reference back() {
+            return this->container().back();
+        }
+
+        typename VectorBase::const_reference back() const {
+            return this->container().back();
+        }
+
+        Index size() const {
+            return Index(static_cast<I>(this->container().size()));
+        }
+    };
+
+}  // namespace ityp
+
+#endif  // COMMON_ITYP_STACK_VEC_H_
diff --git a/src/dawn/common/ityp_vector.h b/src/dawn/common/ityp_vector.h
new file mode 100644
index 0000000..9d83adf
--- /dev/null
+++ b/src/dawn/common/ityp_vector.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_VECTOR_H_
+#define COMMON_ITYP_VECTOR_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <type_traits>
+#include <vector>
+
+namespace ityp {
+
+    // ityp::vector is a helper class that wraps std::vector with the restriction that
+    // indices must be a particular type |Index|.
+    template <typename Index, typename Value>
+    class vector : public std::vector<Value> {
+        using I = UnderlyingType<Index>;
+        using Base = std::vector<Value>;
+
+      private:
+        // Disallow access to base constructors and untyped index/size-related operators.
+        using Base::Base;
+        using Base::operator=;
+        using Base::operator[];
+        using Base::at;
+        using Base::reserve;
+        using Base::resize;
+        using Base::size;
+
+      public:
+        vector() : Base() {
+        }
+
+        explicit vector(Index size) : Base(static_cast<I>(size)) {
+        }
+
+        vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
+        }
+
+        vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
+        }
+
+        vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
+        }
+
+        vector(std::initializer_list<Value> init) : Base(init) {
+        }
+
+        vector& operator=(const vector& rhs) {
+            Base::operator=(static_cast<const Base&>(rhs));
+            return *this;
+        }
+
+        vector& operator=(vector&& rhs) noexcept {
+            Base::operator=(static_cast<Base&&>(rhs));
+            return *this;
+        }
+
+        Value& operator[](Index i) {
+            ASSERT(i >= Index(0) && i < size());
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        constexpr const Value& operator[](Index i) const {
+            ASSERT(i >= Index(0) && i < size());
+            return Base::operator[](static_cast<I>(i));
+        }
+
+        Value& at(Index i) {
+            ASSERT(i >= Index(0) && i < size());
+            return Base::at(static_cast<I>(i));
+        }
+
+        constexpr const Value& at(Index i) const {
+            ASSERT(i >= Index(0) && i < size());
+            return Base::at(static_cast<I>(i));
+        }
+
+        constexpr Index size() const {
+            ASSERT(std::numeric_limits<I>::max() >= Base::size());
+            return Index(static_cast<I>(Base::size()));
+        }
+
+        void resize(Index size) {
+            Base::resize(static_cast<I>(size));
+        }
+
+        void reserve(Index size) {
+            Base::reserve(static_cast<I>(size));
+        }
+    };
+
+}  // namespace ityp
+
+#endif  // COMMON_ITYP_VECTOR_H_
diff --git a/src/dawn/common/vulkan_platform.h b/src/dawn/common/vulkan_platform.h
new file mode 100644
index 0000000..620034f
--- /dev/null
+++ b/src/dawn/common/vulkan_platform.h
@@ -0,0 +1,206 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_VULKANPLATFORM_H_
+#define COMMON_VULKANPLATFORM_H_
+
+#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
+#    error "vulkan_platform.h included without the Vulkan backend enabled"
+#endif
+#if defined(VULKAN_CORE_H_)
+#    error "vulkan.h included before vulkan_platform.h"
+#endif
+
+#include "dawn/common/Platform.h"
+
+#include <cstddef>
+#include <cstdint>
+
+// vulkan.h defines non-dispatchable handles to opaque pointers on 64bit architectures and uint64_t
+// on 32bit architectures. This causes a problem in 32bit where the handles cannot be used to
+// distinguish between overloads of the same function.
+// Change the definition of non-dispatchable handles to be opaque structures containing a uint64_t
+// and overload the comparison operators between themselves and VK_NULL_HANDLE (which will be
+// redefined to be nullptr). This keeps the type-safety of having the handles be different types
+// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
+
+#if defined(DAWN_PLATFORM_64_BIT)
+#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t from uint64_t
+// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+    return reinterpret_cast<T>(u64);
+}
+#elif defined(DAWN_PLATFORM_32_BIT)
+#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+    return u64;
+}
+#else
+#    error "Unsupported platform"
+#endif
+
+// Define a dummy Vulkan handle for use before we include vulkan.h
+DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
+
+// Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so
+// why bother with the wrapper struct? It turns out that on Linux Intel x86 alignof(uint64_t) is 8
+// but alignof(struct{uint64_t a;}) is 4. This is because this Intel ABI doesn't say anything about
+// double-word alignment so for historical reasons compilers violated the standard and use an
+// alignment of 4 for uint64_t (and double) inside structures.
+// See https://stackoverflow.com/questions/44877185
+// One way to get the alignment inside structures of a type is to look at the alignment of it
+// wrapped in a structure. Hence VkSameHandleNativeWrappe
+
+namespace dawn::native::vulkan {
+
+    namespace detail {
+        template <typename T>
+        struct WrapperStruct {
+            T member;
+        };
+
+        template <typename T>
+        static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+
+        static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+        static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+
+        // Simple handle types that supports "nullptr_t" as a 0 value.
+        template <typename Tag, typename HandleType>
+        class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+          public:
+            // Default constructor and assigning of VK_NULL_HANDLE
+            VkHandle() = default;
+            VkHandle(std::nullptr_t) {
+            }
+
+            // Use default copy constructor/assignment
+            VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+            VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+
+            // Comparisons between handles
+            bool operator==(VkHandle<Tag, HandleType> other) const {
+                return mHandle == other.mHandle;
+            }
+            bool operator!=(VkHandle<Tag, HandleType> other) const {
+                return mHandle != other.mHandle;
+            }
+
+            // Comparisons between handles and VK_NULL_HANDLE
+            bool operator==(std::nullptr_t) const {
+                return mHandle == 0;
+            }
+            bool operator!=(std::nullptr_t) const {
+                return mHandle != 0;
+            }
+
+            // Implicit conversion to real Vulkan types.
+            operator HandleType() const {
+                return GetHandle();
+            }
+
+            HandleType GetHandle() const {
+                return mHandle;
+            }
+
+            HandleType& operator*() {
+                return mHandle;
+            }
+
+            static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+                return VkHandle{handle};
+            }
+
+          private:
+            explicit VkHandle(HandleType handle) : mHandle(handle) {
+            }
+
+            HandleType mHandle = 0;
+        };
+    }  // namespace detail
+
+    static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+
+    template <typename Tag, typename HandleType>
+    HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+        return reinterpret_cast<HandleType*>(handle);
+    }
+
+}  // namespace dawn::native::vulkan
+
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object)                           \
+    DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object)                      \
+    namespace dawn::native::vulkan {                                        \
+        using object = detail::VkHandle<struct VkTag##object, ::object>;    \
+        static_assert(sizeof(object) == sizeof(uint64_t));                  \
+        static_assert(alignof(object) == detail::kUint64Alignment);         \
+        static_assert(sizeof(object) == sizeof(::object));                  \
+        static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
+    }  // namespace dawn::native::vulkan
+
+// Import additional parts of Vulkan that are supported on our architecture and preemptively include
+// headers that vulkan.h includes that we have "undefs" for. Note that some of the VK_USE_PLATFORM_*
+// defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with
+// CMake, hence they cannot be removed at the moment.
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    ifndef VK_USE_PLATFORM_WIN32_KHR
+#        define VK_USE_PLATFORM_WIN32_KHR
+#    endif
+#    include "dawn/common/windows_with_undefs.h"
+#endif  // DAWN_PLATFORM_WINDOWS
+
+#if defined(DAWN_USE_X11)
+#    define VK_USE_PLATFORM_XLIB_KHR
+#    ifndef VK_USE_PLATFORM_XCB_KHR
+#        define VK_USE_PLATFORM_XCB_KHR
+#    endif
+#    include "dawn/common/xlib_with_undefs.h"
+#endif  // defined(DAWN_USE_X11)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+#    ifndef VK_USE_PLATFORM_METAL_EXT
+#        define VK_USE_PLATFORM_METAL_EXT
+#    endif
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+#    ifndef VK_USE_PLATFORM_ANDROID_KHR
+#        define VK_USE_PLATFORM_ANDROID_KHR
+#    endif
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_PLATFORM_FUCHSIA)
+#    ifndef VK_USE_PLATFORM_FUCHSIA
+#        define VK_USE_PLATFORM_FUCHSIA
+#    endif
+#endif  // defined(DAWN_PLATFORM_FUCHSIA)
+
+// The actual inclusion of vulkan.h!
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+// Redefine VK_NULL_HANDLE for better type safety where possible.
+#undef VK_NULL_HANDLE
+#if defined(DAWN_PLATFORM_64_BIT)
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+#elif defined(DAWN_PLATFORM_32_BIT)
+static constexpr uint64_t VK_NULL_HANDLE = 0;
+#else
+#    error "Unsupported platform"
+#endif
+
+#endif  // COMMON_VULKANPLATFORM_H_
diff --git a/src/dawn/common/windows_with_undefs.h b/src/dawn/common/windows_with_undefs.h
new file mode 100644
index 0000000..686da9f
--- /dev/null
+++ b/src/dawn/common/windows_with_undefs.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_WINDOWS_WITH_UNDEFS_H_
+#define COMMON_WINDOWS_WITH_UNDEFS_H_
+
+#include "dawn/common/Platform.h"
+
+#if !defined(DAWN_PLATFORM_WINDOWS)
+#    error "windows_with_undefs.h included on non-Windows"
+#endif
+
+// This header includes <windows.h> but removes all the extra defines that conflict with identifiers
+// in internal code. It should never be included in something that is part of the public interface.
+#include <windows.h>
+
+// Macros defined for ANSI / Unicode support
+#undef CreateWindow
+#undef GetMessage
+
+// Macros defined to produce compiler intrinsics
+#undef MemoryBarrier
+
+// Macro defined as an alias of GetTickCount
+#undef GetCurrentTime
+
+#endif  // COMMON_WINDOWS_WITH_UNDEFS_H_
diff --git a/src/dawn/common/xlib_with_undefs.h b/src/dawn/common/xlib_with_undefs.h
new file mode 100644
index 0000000..7ac5a62
--- /dev/null
+++ b/src/dawn/common/xlib_with_undefs.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_XLIB_WITH_UNDEFS_H_
+#define COMMON_XLIB_WITH_UNDEFS_H_
+
+#include "dawn/common/Platform.h"
+
+#if !defined(DAWN_PLATFORM_LINUX)
+#    error "xlib_with_undefs.h included on non-Linux"
+#endif
+
+// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
+// identifiers in internal code. It should never be included in something that is part of the public
+// interface.
+#include <X11/Xlib.h>
+
+// Xlib-xcb.h technically includes Xlib.h but we separate the includes to make it more clear what
+// the problem is if one of these two includes fail.
+#include <X11/Xlib-xcb.h>
+
+#undef Success
+#undef None
+#undef Always
+#undef Bool
+
+using XErrorHandler = int (*)(Display*, XErrorEvent*);
+
+#endif  // COMMON_XLIB_WITH_UNDEFS_H_
diff --git a/src/dawn/fuzzers/BUILD.gn b/src/dawn/fuzzers/BUILD.gn
new file mode 100644
index 0000000..f7ea2a0
--- /dev/null
+++ b/src/dawn/fuzzers/BUILD.gn
@@ -0,0 +1,124 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/build.gni")
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+# We only have libfuzzer in Chromium builds but if we build fuzzer targets only
+# there, we would risk breaking fuzzer targets all the time when making changes
+# to Dawn. To avoid that, we make fuzzer targets compile in standalone builds
+# as well with a dawn_fuzzer_test target that acts like Chromium's fuzzer_test.
+#
+# The standalone fuzzer targets are able to run a single fuzzer input which
+# could help reproduce fuzzer crashes more easily because you don't need a
+# whole Chromium checkout.
+
+if (build_with_chromium) {
+  import("//testing/libfuzzer/fuzzer_test.gni")
+
+  # In Chromium build we just proxy everything to the real fuzzer_test
+  template("dawn_fuzzer_test") {
+    fuzzer_test(target_name) {
+      forward_variables_from(invoker, "*")
+    }
+  }
+} else {
+  import("//testing/test.gni")
+
+  # In standalone build we do something similar to fuzzer_test.
+  template("dawn_fuzzer_test") {
+    test(target_name) {
+      forward_variables_from(invoker,
+                             [
+                               "asan_options",
+                               "cflags",
+                               "cflags_cc",
+                               "check_includes",
+                               "defines",
+                               "deps",
+                               "include_dirs",
+                               "sources",
+                             ])
+
+      if (defined(asan_options)) {
+        not_needed([ "asan_options" ])
+      }
+
+      if (!defined(configs)) {
+        configs = []
+      }
+
+      # Weirdly fuzzer_test uses a special variable for additional configs.
+      if (defined(invoker.additional_configs)) {
+        configs += invoker.additional_configs
+      }
+
+      sources += [ "StandaloneFuzzerMain.cpp" ]
+    }
+  }
+}
+
+static_library("dawn_wire_server_fuzzer_common") {
+  sources = [
+    "DawnWireServerFuzzer.cpp",
+    "DawnWireServerFuzzer.h",
+  ]
+  public_deps = [
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire:static",
+  ]
+}
+
+dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
+  sources = [ "DawnWireServerAndFrontendFuzzer.cpp" ]
+
+  deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+  additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+}
+
+if (is_win) {
+  dawn_fuzzer_test("dawn_wire_server_and_d3d12_backend_fuzzer") {
+    sources = [ "DawnWireServerAndD3D12BackendFuzzer.cpp" ]
+
+    deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+    additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+  }
+}
+
+dawn_fuzzer_test("dawn_wire_server_and_vulkan_backend_fuzzer") {
+  sources = [ "DawnWireServerAndVulkanBackendFuzzer.cpp" ]
+
+  deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+  additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+}
+
+# A group target to build all the fuzzers
+group("fuzzers") {
+  testonly = true
+  deps = [
+    ":dawn_wire_server_and_frontend_fuzzer",
+    ":dawn_wire_server_and_vulkan_backend_fuzzer",
+  ]
+
+  if (is_win) {
+    deps += [ ":dawn_wire_server_and_d3d12_backend_fuzzer" ]
+  }
+}
diff --git a/src/dawn/fuzzers/DawnWireServerAndD3D12BackendFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerAndD3D12BackendFuzzer.cpp
new file mode 100644
index 0000000..2eff9b4
--- /dev/null
+++ b/src/dawn/fuzzers/DawnWireServerAndD3D12BackendFuzzer.cpp
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "DawnWireServerFuzzer.h"
+
+#include "dawn/native/DawnNative.h"
+#include "testing/libfuzzer/libfuzzer_exports.h"
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
+    return DawnWireServerFuzzer::Initialize(argc, argv);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    return DawnWireServerFuzzer::Run(
+        data, size,
+        [](dawn::native::Instance* instance) {
+            std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+
+            wgpu::Device device;
+            for (dawn::native::Adapter adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                if (properties.backendType == wgpu::BackendType::D3D12 &&
+                    properties.adapterType == wgpu::AdapterType::CPU) {
+                    device = wgpu::Device::Acquire(adapter.CreateDevice());
+                    break;
+                }
+            }
+            return device;
+        },
+        true /* supportsErrorInjection */);
+}
diff --git a/src/dawn/fuzzers/DawnWireServerAndFrontendFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerAndFrontendFuzzer.cpp
new file mode 100644
index 0000000..26e1cce
--- /dev/null
+++ b/src/dawn/fuzzers/DawnWireServerAndFrontendFuzzer.cpp
@@ -0,0 +1,46 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "DawnWireServerFuzzer.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/DawnNative.h"
+#include "testing/libfuzzer/libfuzzer_exports.h"
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
+    return DawnWireServerFuzzer::Initialize(argc, argv);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    return DawnWireServerFuzzer::Run(
+        data, size,
+        [](dawn::native::Instance* instance) {
+            std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+
+            wgpu::Device nullDevice;
+            for (dawn::native::Adapter adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                if (properties.backendType == wgpu::BackendType::Null) {
+                    nullDevice = wgpu::Device::Acquire(adapter.CreateDevice());
+                    break;
+                }
+            }
+
+            ASSERT(nullDevice.Get() != nullptr);
+            return nullDevice;
+        },
+        false /* supportsErrorInjection */);
+}
diff --git a/src/dawn/fuzzers/DawnWireServerAndVulkanBackendFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerAndVulkanBackendFuzzer.cpp
new file mode 100644
index 0000000..157ce01
--- /dev/null
+++ b/src/dawn/fuzzers/DawnWireServerAndVulkanBackendFuzzer.cpp
@@ -0,0 +1,44 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "DawnWireServerFuzzer.h"
+
+#include "dawn/native/DawnNative.h"
+#include "testing/libfuzzer/libfuzzer_exports.h"
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
+    return DawnWireServerFuzzer::Initialize(argc, argv);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    return DawnWireServerFuzzer::Run(
+        data, size,
+        [](dawn::native::Instance* instance) {
+            std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+
+            wgpu::Device device;
+            for (dawn::native::Adapter adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                if (properties.backendType == wgpu::BackendType::Vulkan &&
+                    properties.adapterType == wgpu::AdapterType::CPU) {
+                    device = wgpu::Device::Acquire(adapter.CreateDevice());
+                    break;
+                }
+            }
+            return device;
+        },
+        true /* supportsErrorInjection */);
+}
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
new file mode 100644
index 0000000..bf35518
--- /dev/null
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
@@ -0,0 +1,141 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "DawnWireServerFuzzer.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/webgpu_cpp.h"
+#include "dawn/wire/WireServer.h"
+
+#include <fstream>
+#include <vector>
+
+namespace {
+
+    class DevNull : public dawn::wire::CommandSerializer {
+      public:
+        size_t GetMaximumAllocationSize() const override {
+            // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
+            return 1024 * 1024 * 1024;
+        }
+        void* GetCmdSpace(size_t size) override {
+            if (size > buf.size()) {
+                buf.resize(size);
+            }
+            return buf.data();
+        }
+        bool Flush() override {
+            return true;
+        }
+
+      private:
+        std::vector<char> buf;
+    };
+
+    std::unique_ptr<dawn::native::Instance> sInstance;
+    WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
+
+    bool sCommandsComplete = false;
+
+    WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
+                                             WGPUSurface surface,
+                                             const WGPUSwapChainDescriptor*) {
+        WGPUSwapChainDescriptor desc = {};
+        // A 0 implementation will trigger a swapchain creation error.
+        desc.implementation = 0;
+        return sOriginalDeviceCreateSwapChain(device, surface, &desc);
+    }
+
+}  // namespace
+
+int DawnWireServerFuzzer::Initialize(int* argc, char*** argv) {
+    // TODO(crbug.com/1038952): The Instance must be static because destructing the vkInstance with
+    // Swiftshader crashes libFuzzer. When this is fixed, move this into Run so that error injection
+    // for adapter discovery can be fuzzed.
+    sInstance = std::make_unique<dawn::native::Instance>();
+    sInstance->DiscoverDefaultAdapters();
+
+    return 0;
+}
+
+int DawnWireServerFuzzer::Run(const uint8_t* data,
+                              size_t size,
+                              MakeDeviceFn MakeDevice,
+                              bool supportsErrorInjection) {
+    // We require at least the injected error index.
+    if (size < sizeof(uint64_t)) {
+        return 0;
+    }
+
+    // Get and consume the injected error index.
+    uint64_t injectedErrorIndex = *reinterpret_cast<const uint64_t*>(data);
+    data += sizeof(uint64_t);
+    size -= sizeof(uint64_t);
+
+    if (supportsErrorInjection) {
+        dawn::native::EnableErrorInjector();
+
+        // Clear the error injector since it has the previous run's call counts.
+        dawn::native::ClearErrorInjector();
+
+        dawn::native::InjectErrorAt(injectedErrorIndex);
+    }
+
+    DawnProcTable procs = dawn::native::GetProcs();
+
+    // Swapchains receive a pointer to an implementation. The fuzzer will pass garbage in so we
+    // intercept calls to create swapchains and make sure they always return error swapchains.
+    // This is ok for fuzzing because embedders of dawn_wire would always define their own
+    // swapchain handling.
+    sOriginalDeviceCreateSwapChain = procs.deviceCreateSwapChain;
+    procs.deviceCreateSwapChain = ErrorDeviceCreateSwapChain;
+
+    dawnProcSetProcs(&procs);
+
+    wgpu::Device device = MakeDevice(sInstance.get());
+    if (!device) {
+        // We should only ever fail device creation if an error was injected.
+        ASSERT(supportsErrorInjection);
+        return 0;
+    }
+
+    DevNull devNull;
+    dawn::wire::WireServerDescriptor serverDesc = {};
+    serverDesc.procs = &procs;
+    serverDesc.serializer = &devNull;
+
+    std::unique_ptr<dawn::wire::WireServer> wireServer(new dawn_wire::WireServer(serverDesc));
+    wireServer->InjectDevice(device.Get(), 1, 0);
+
+    wireServer->HandleCommands(reinterpret_cast<const char*>(data), size);
+
+    // Wait for all previous commands before destroying the server.
+    // TODO(enga): Improve this when we improve/finalize how processing events happens.
+    {
+        device.GetQueue().OnSubmittedWorkDone(
+            0u, [](WGPUQueueWorkDoneStatus, void*) { sCommandsComplete = true; }, nullptr);
+        while (!sCommandsComplete) {
+            device.Tick();
+            utils::USleep(100);
+        }
+    }
+
+    wireServer = nullptr;
+    return 0;
+}
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.h b/src/dawn/fuzzers/DawnWireServerFuzzer.h
new file mode 100644
index 0000000..83b6d3a
--- /dev/null
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.h
@@ -0,0 +1,34 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/webgpu_cpp.h"
+
+#include <cstdint>
+#include <functional>
+
+namespace dawn::native {
+
+    class Instance;
+
+}  // namespace dawn::native
+
+namespace DawnWireServerFuzzer {
+
+    using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
+
+    int Initialize(int* argc, char*** argv);
+
+    int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
+
+}  // namespace DawnWireServerFuzzer
diff --git a/src/dawn/fuzzers/StandaloneFuzzerMain.cpp b/src/dawn/fuzzers/StandaloneFuzzerMain.cpp
new file mode 100644
index 0000000..3341199
--- /dev/null
+++ b/src/dawn/fuzzers/StandaloneFuzzerMain.cpp
@@ -0,0 +1,68 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <vector>
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv);
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+
+int main(int argc, char** argv) {
+    if (LLVMFuzzerInitialize(&argc, &argv)) {
+        std::cerr << "Failed to initialize fuzzer target" << std::endl;
+        return 1;
+    }
+
+    if (argc != 2) {
+        std::cout << "Usage: <standalone reproducer> [options] FILE" << std::endl;
+        return 1;
+    }
+
+    std::cout << "WARNING: this is just a best-effort reproducer for fuzzer issues in standalone "
+              << "Dawn builds. For the real fuzzer, please build inside Chromium." << std::endl;
+
+    const char* filename = argv[1];
+    std::cout << "Reproducing using file: " << filename << std::endl;
+
+    std::vector<char> data;
+    {
+        FILE* file = fopen(filename, "rb");
+        if (!file) {
+            std::cerr << "Failed to open " << filename << std::endl;
+            return 1;
+        }
+
+        fseek(file, 0, SEEK_END);
+        long tellFileSize = ftell(file);
+        if (tellFileSize <= 0) {
+            std::cerr << "Input file of incorrect size: " << filename << std::endl;
+            return 1;
+        }
+        fseek(file, 0, SEEK_SET);
+
+        size_t fileSize = static_cast<size_t>(tellFileSize);
+        data.resize(fileSize);
+
+        size_t bytesRead = fread(data.data(), sizeof(char), fileSize, file);
+        fclose(file);
+        if (bytesRead != fileSize) {
+            std::cerr << "Failed to read " << filename << std::endl;
+            return 1;
+        }
+    }
+
+    return LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t*>(data.data()), data.size());
+}
diff --git a/src/dawn/native/Adapter.cpp b/src/dawn/native/Adapter.cpp
new file mode 100644
index 0000000..4c000ac
--- /dev/null
+++ b/src/dawn/native/Adapter.cpp
@@ -0,0 +1,227 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+    AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+        : mInstance(instance), mBackend(backend) {
+        mSupportedFeatures.EnableFeature(Feature::DawnNative);
+        mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+    }
+
+    MaybeError AdapterBase::Initialize() {
+        DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
+        DAWN_TRY_CONTEXT(
+            InitializeSupportedFeaturesImpl(),
+            "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+            "backend=%s type=%s)",
+            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+        DAWN_TRY_CONTEXT(
+            InitializeSupportedLimitsImpl(&mLimits),
+            "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+            "backend=%s type=%s)",
+            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+
+        // Enforce internal Dawn constants.
+        mLimits.v1.maxVertexBufferArrayStride =
+            std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
+        mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
+        mLimits.v1.maxVertexAttributes =
+            std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
+        mLimits.v1.maxVertexBuffers =
+            std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
+        mLimits.v1.maxInterStageShaderComponents =
+            std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
+        mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
+            mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+        mLimits.v1.maxSamplersPerShaderStage =
+            std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
+        mLimits.v1.maxStorageBuffersPerShaderStage =
+            std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
+        mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
+            mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+        mLimits.v1.maxUniformBuffersPerShaderStage =
+            std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
+        mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
+            std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
+                     kMaxDynamicUniformBuffersPerPipelineLayout);
+        mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
+            std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
+                     kMaxDynamicStorageBuffersPerPipelineLayout);
+
+        return {};
+    }
+
+    bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+        return GetLimits(limits);
+    }
+
+    void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+        properties->vendorID = mVendorId;
+        properties->deviceID = mDeviceId;
+        properties->name = mName.c_str();
+        properties->driverDescription = mDriverDescription.c_str();
+        properties->adapterType = mAdapterType;
+        properties->backendType = mBackend;
+    }
+
+    bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+        return mSupportedFeatures.IsEnabled(feature);
+    }
+
+    size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+        return mSupportedFeatures.EnumerateFeatures(features);
+    }
+
+    DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+        DeviceDescriptor defaultDesc = {};
+        if (descriptor == nullptr) {
+            descriptor = &defaultDesc;
+        }
+        auto result = CreateDeviceInternal(descriptor);
+        if (result.IsError()) {
+            mInstance->ConsumedError(result.AcquireError());
+            return nullptr;
+        }
+        return result.AcquireSuccess().Detach();
+    }
+
+    void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+                                       WGPURequestDeviceCallback callback,
+                                       void* userdata) {
+        static constexpr DeviceDescriptor kDefaultDescriptor = {};
+        if (descriptor == nullptr) {
+            descriptor = &kDefaultDescriptor;
+        }
+        auto result = CreateDeviceInternal(descriptor);
+
+        if (result.IsError()) {
+            std::unique_ptr<ErrorData> errorData = result.AcquireError();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPURequestDeviceStatus_Error, nullptr,
+                     errorData->GetFormattedMessage().c_str(), userdata);
+            return;
+        }
+
+        Ref<DeviceBase> device = result.AcquireSuccess();
+
+        WGPURequestDeviceStatus status =
+            device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(status, ToAPI(device.Detach()), nullptr, userdata);
+    }
+
+    uint32_t AdapterBase::GetVendorId() const {
+        return mVendorId;
+    }
+
+    uint32_t AdapterBase::GetDeviceId() const {
+        return mDeviceId;
+    }
+
+    wgpu::BackendType AdapterBase::GetBackendType() const {
+        return mBackend;
+    }
+
+    InstanceBase* AdapterBase::GetInstance() const {
+        return mInstance;
+    }
+
+    FeaturesSet AdapterBase::GetSupportedFeatures() const {
+        return mSupportedFeatures;
+    }
+
+    bool AdapterBase::SupportsAllRequiredFeatures(
+        const ityp::span<size_t, const wgpu::FeatureName>& features) const {
+        for (wgpu::FeatureName f : features) {
+            if (!mSupportedFeatures.IsEnabled(f)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+        WGPUDeviceProperties adapterProperties = {};
+        adapterProperties.deviceID = mDeviceId;
+        adapterProperties.vendorID = mVendorId;
+        adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+
+        mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+        // This is OK for now because there are no limit feature structs.
+        // If we add additional structs, the caller will need to provide memory
+        // to store them (ex. by calling GetLimits directly instead). Currently,
+        // we keep this function as it's only used internally in Chromium to
+        // send the adapter properties across the wire.
+        GetLimits(FromAPI(&adapterProperties.limits));
+        return adapterProperties;
+    }
+
+    bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+        ASSERT(limits != nullptr);
+        if (limits->nextInChain != nullptr) {
+            return false;
+        }
+        if (mUseTieredLimits) {
+            limits->limits = ApplyLimitTiers(mLimits.v1);
+        } else {
+            limits->limits = mLimits.v1;
+        }
+        return true;
+    }
+
+    ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+        const DeviceDescriptor* descriptor) {
+        ASSERT(descriptor != nullptr);
+
+        for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
+            wgpu::FeatureName f = descriptor->requiredFeatures[i];
+            DAWN_TRY(ValidateFeatureName(f));
+            DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
+                            "Requested feature %s is not supported.", f);
+        }
+
+        if (descriptor->requiredLimits != nullptr) {
+            DAWN_TRY_CONTEXT(
+                ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+                               descriptor->requiredLimits->limits),
+                "validating required limits");
+
+            DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+                            "nextInChain is not nullptr.");
+        }
+        return CreateDeviceImpl(descriptor);
+    }
+
+    void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+        mUseTieredLimits = useTieredLimits;
+    }
+
+    void AdapterBase::ResetInternalDeviceForTesting() {
+        mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
+    }
+
+    MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+        return DAWN_INTERNAL_ERROR(
+            "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Adapter.h b/src/dawn/native/Adapter.h
new file mode 100644
index 0000000..bd66c8b
--- /dev/null
+++ b/src/dawn/native/Adapter.h
@@ -0,0 +1,99 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ADAPTER_H_
+#define DAWNNATIVE_ADAPTER_H_
+
+#include "dawn/native/DawnNative.h"
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    class AdapterBase : public RefCounted {
+      public:
+        AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
+        virtual ~AdapterBase() = default;
+
+        MaybeError Initialize();
+
+        // WebGPU API
+        bool APIGetLimits(SupportedLimits* limits) const;
+        void APIGetProperties(AdapterProperties* properties) const;
+        bool APIHasFeature(wgpu::FeatureName feature) const;
+        size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+        void APIRequestDevice(const DeviceDescriptor* descriptor,
+                              WGPURequestDeviceCallback callback,
+                              void* userdata);
+        DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
+
+        uint32_t GetVendorId() const;
+        uint32_t GetDeviceId() const;
+        wgpu::BackendType GetBackendType() const;
+        InstanceBase* GetInstance() const;
+
+        void ResetInternalDeviceForTesting();
+
+        FeaturesSet GetSupportedFeatures() const;
+        bool SupportsAllRequiredFeatures(
+            const ityp::span<size_t, const wgpu::FeatureName>& features) const;
+        WGPUDeviceProperties GetAdapterProperties() const;
+
+        bool GetLimits(SupportedLimits* limits) const;
+
+        void SetUseTieredLimits(bool useTieredLimits);
+
+        virtual bool SupportsExternalImages() const = 0;
+
+      protected:
+        uint32_t mVendorId = 0xFFFFFFFF;
+        uint32_t mDeviceId = 0xFFFFFFFF;
+        std::string mName;
+        wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
+        std::string mDriverDescription;
+        FeaturesSet mSupportedFeatures;
+
+      private:
+        virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) = 0;
+
+        virtual MaybeError InitializeImpl() = 0;
+
+        // Check base WebGPU features and discover supported featurees.
+        virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
+
+        // Check base WebGPU limits and populate supported limits.
+        virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
+
+        ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
+
+        virtual MaybeError ResetInternalDeviceForTestingImpl();
+        InstanceBase* mInstance = nullptr;
+        wgpu::BackendType mBackend;
+        CombinedLimits mLimits;
+        bool mUseTieredLimits = false;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ADAPTER_H_
diff --git a/src/dawn/native/AsyncTask.cpp b/src/dawn/native/AsyncTask.cpp
new file mode 100644
index 0000000..a1e2948
--- /dev/null
+++ b/src/dawn/native/AsyncTask.cpp
@@ -0,0 +1,65 @@
+#include "dawn/native/AsyncTask.h"
+
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native {
+
+    AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+        : mWorkerTaskPool(workerTaskPool) {
+    }
+
+    void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+        // If these allocations becomes expensive, we can slab-allocate tasks.
+        Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
+        waitableTask->taskManager = this;
+        waitableTask->asyncTask = std::move(asyncTask);
+
+        {
+            // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
+            // and we may remove waitableTask objects from mPendingTasks in either main thread
+            // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
+            // protected by a mutex.
+            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+            mPendingTasks.emplace(waitableTask.Get(), waitableTask);
+        }
+
+        // Ref the task since it is accessed inside the worker function.
+        // The worker function will acquire and release the task upon completion.
+        waitableTask->Reference();
+        waitableTask->waitableEvent =
+            mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
+    }
+
+    void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+        std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+        auto iter = mPendingTasks.find(task);
+        if (iter != mPendingTasks.end()) {
+            mPendingTasks.erase(iter);
+        }
+    }
+
+    void AsyncTaskManager::WaitAllPendingTasks() {
+        std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+
+        {
+            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+            allPendingTasks.swap(mPendingTasks);
+        }
+
+        for (auto& [_, task] : allPendingTasks) {
+            task->waitableEvent->Wait();
+        }
+    }
+
+    bool AsyncTaskManager::HasPendingTasks() {
+        std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+        return !mPendingTasks.empty();
+    }
+
+    void AsyncTaskManager::DoWaitableTask(void* task) {
+        Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
+        waitableTask->asyncTask();
+        waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/AsyncTask.h b/src/dawn/native/AsyncTask.h
new file mode 100644
index 0000000..ca2edd0
--- /dev/null
+++ b/src/dawn/native/AsyncTask.h
@@ -0,0 +1,65 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ASYC_TASK_H_
+#define DAWNNATIVE_ASYC_TASK_H_
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include "dawn/common/RefCounted.h"
+
+namespace dawn::platform {
+    class WaitableEvent;
+    class WorkerTaskPool;
+}  // namespace dawn::platform
+
+namespace dawn::native {
+
+    // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+    // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+    // shutting down the device. RunNow() could be used for more advanced scenarios, for example
+    // always doing ShaderModule initial compilation asynchronously, but being able to steal the
+    // task if we need it for synchronous pipeline compilation.
+    using AsyncTask = std::function<void()>;
+
+    class AsyncTaskManager {
+      public:
+        explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
+
+        void PostTask(AsyncTask asyncTask);
+        void WaitAllPendingTasks();
+        bool HasPendingTasks();
+
+      private:
+        class WaitableTask : public RefCounted {
+          public:
+            AsyncTask asyncTask;
+            AsyncTaskManager* taskManager;
+            std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
+        };
+
+        static void DoWaitableTask(void* task);
+        void HandleTaskCompletion(WaitableTask* task);
+
+        std::mutex mPendingTasksMutex;
+        std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
+        dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+    };
+
+}  // namespace dawn::native
+
+#endif
diff --git a/src/dawn/native/AttachmentState.cpp b/src/dawn/native/AttachmentState.cpp
new file mode 100644
index 0000000..1e38d9d
--- /dev/null
+++ b/src/dawn/native/AttachmentState.cpp
@@ -0,0 +1,175 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/AttachmentState.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    AttachmentStateBlueprint::AttachmentStateBlueprint(
+        const RenderBundleEncoderDescriptor* descriptor)
+        : mSampleCount(descriptor->sampleCount) {
+        ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+        for (ColorAttachmentIndex i(uint8_t(0));
+             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
+            wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+            if (format != wgpu::TextureFormat::Undefined) {
+                mColorAttachmentsSet.set(i);
+                mColorFormats[i] = format;
+            }
+        }
+        mDepthStencilFormat = descriptor->depthStencilFormat;
+    }
+
+    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+        : mSampleCount(descriptor->multisample.count) {
+        if (descriptor->fragment != nullptr) {
+            ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
+            for (ColorAttachmentIndex i(uint8_t(0));
+                 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+                 ++i) {
+                wgpu::TextureFormat format =
+                    descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
+                if (format != wgpu::TextureFormat::Undefined) {
+                    mColorAttachmentsSet.set(i);
+                    mColorFormats[i] = format;
+                }
+            }
+        }
+        if (descriptor->depthStencil != nullptr) {
+            mDepthStencilFormat = descriptor->depthStencil->format;
+        }
+    }
+
+    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+        for (ColorAttachmentIndex i(uint8_t(0));
+             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
+             ++i) {
+            TextureViewBase* attachment =
+                descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+            if (attachment == nullptr) {
+                continue;
+            }
+            mColorAttachmentsSet.set(i);
+            mColorFormats[i] = attachment->GetFormat().format;
+            if (mSampleCount == 0) {
+                mSampleCount = attachment->GetTexture()->GetSampleCount();
+            } else {
+                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+            }
+        }
+        if (descriptor->depthStencilAttachment != nullptr) {
+            TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+            mDepthStencilFormat = attachment->GetFormat().format;
+            if (mSampleCount == 0) {
+                mSampleCount = attachment->GetTexture()->GetSampleCount();
+            } else {
+                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+            }
+        }
+        ASSERT(mSampleCount > 0);
+    }
+
+    AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
+        default;
+
+    size_t AttachmentStateBlueprint::HashFunc::operator()(
+        const AttachmentStateBlueprint* attachmentState) const {
+        size_t hash = 0;
+
+        // Hash color formats
+        HashCombine(&hash, attachmentState->mColorAttachmentsSet);
+        for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
+            HashCombine(&hash, attachmentState->mColorFormats[i]);
+        }
+
+        // Hash depth stencil attachment
+        HashCombine(&hash, attachmentState->mDepthStencilFormat);
+
+        // Hash sample count
+        HashCombine(&hash, attachmentState->mSampleCount);
+
+        return hash;
+    }
+
+    bool AttachmentStateBlueprint::EqualityFunc::operator()(
+        const AttachmentStateBlueprint* a,
+        const AttachmentStateBlueprint* b) const {
+        // Check set attachments
+        if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+            return false;
+        }
+
+        // Check color formats
+        for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
+            if (a->mColorFormats[i] != b->mColorFormats[i]) {
+                return false;
+            }
+        }
+
+        // Check depth stencil format
+        if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
+            return false;
+        }
+
+        // Check sample count
+        if (a->mSampleCount != b->mSampleCount) {
+            return false;
+        }
+
+        return true;
+    }
+
+    AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+        : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
+    }
+
+    AttachmentState::~AttachmentState() {
+        GetDevice()->UncacheAttachmentState(this);
+    }
+
+    size_t AttachmentState::ComputeContentHash() {
+        // TODO(dawn:549): skip this traversal and reuse the blueprint.
+        return AttachmentStateBlueprint::HashFunc()(this);
+    }
+
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+    AttachmentState::GetColorAttachmentsMask() const {
+        return mColorAttachmentsSet;
+    }
+
+    wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
+        ColorAttachmentIndex index) const {
+        ASSERT(mColorAttachmentsSet[index]);
+        return mColorFormats[index];
+    }
+
+    bool AttachmentState::HasDepthStencilAttachment() const {
+        return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
+    }
+
+    wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+        ASSERT(HasDepthStencilAttachment());
+        return mDepthStencilFormat;
+    }
+
+    uint32_t AttachmentState::GetSampleCount() const {
+        return mSampleCount;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/AttachmentState.h b/src/dawn/native/AttachmentState.h
new file mode 100644
index 0000000..21eff85
--- /dev/null
+++ b/src/dawn/native/AttachmentState.h
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ATTACHMENTSTATE_H_
+#define DAWNNATIVE_ATTACHMENTSTATE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+    // can be constructed by copying the blueprint state instead of traversing descriptors.
+    // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+    class AttachmentStateBlueprint {
+      public:
+        // Note: Descriptors must be validated before the AttachmentState is constructed.
+        explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
+        explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+        explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
+
+        AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
+
+        // Functors necessary for the unordered_set<AttachmentState*>-based cache.
+        struct HashFunc {
+            size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
+        };
+        struct EqualityFunc {
+            bool operator()(const AttachmentStateBlueprint* a,
+                            const AttachmentStateBlueprint* b) const;
+        };
+
+      protected:
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
+        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
+        // Default (texture format Undefined) indicates there is no depth stencil attachment.
+        wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
+        uint32_t mSampleCount = 0;
+    };
+
+    class AttachmentState final : public AttachmentStateBlueprint,
+                                  public ObjectBase,
+                                  public CachedObject {
+      public:
+        AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+        wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
+        bool HasDepthStencilAttachment() const;
+        wgpu::TextureFormat GetDepthStencilFormat() const;
+        uint32_t GetSampleCount() const;
+
+        size_t ComputeContentHash() override;
+
+      private:
+        ~AttachmentState() override;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ATTACHMENTSTATE_H_
diff --git a/src/dawn/native/BUILD.gn b/src/dawn/native/BUILD.gn
new file mode 100644
index 0000000..5d97a8e
--- /dev/null
+++ b/src/dawn/native/BUILD.gn
@@ -0,0 +1,773 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//build_overrides/build.gni")
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+# Import mac_deployment_target
+if (is_mac) {
+  if (dawn_has_build) {
+    import("//build/config/mac/mac_sdk.gni")
+  } else {
+    mac_deployment_target = "10.11.0"
+  }
+}
+
+# The VVLs are an optional dependency, only use it if the path has been set.
+enable_vulkan_validation_layers = dawn_enable_vulkan_validation_layers &&
+                                  dawn_vulkan_validation_layers_dir != ""
+if (enable_vulkan_validation_layers) {
+  import("//build_overrides/vulkan_validation_layers.gni")
+}
+
+# ANGLE is an optional dependency; only use it if the path has been set.
+use_angle = dawn_use_angle && defined(dawn_angle_dir)
+
+# Swiftshader is an optional dependency, only use it if the path has been set.
+use_swiftshader = dawn_use_swiftshader && dawn_swiftshader_dir != ""
+if (use_swiftshader) {
+  assert(dawn_enable_vulkan,
+         "dawn_use_swiftshader requires dawn_enable_vulkan=true")
+  import("${dawn_swiftshader_dir}/src/Vulkan/vulkan.gni")
+}
+
+# The Vulkan loader is an optional dependency, only use it if the path has been
+# set.
+if (dawn_enable_vulkan) {
+  enable_vulkan_loader =
+      dawn_enable_vulkan_loader && dawn_vulkan_loader_dir != ""
+}
+
+group("abseil") {
+  # When build_with_chromium=true we need to include "//third_party/abseil-cpp:absl" while
+  # it's beneficial to be more specific with standalone Dawn, especially when it comes to
+  # including it as a dependency in other projects (such as Skia).
+  if (build_with_chromium) {
+    public_deps = [ "$dawn_abseil_dir:absl" ]
+  } else {
+    public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:str_format" ]
+  }
+}
+
+config("internal") {
+  configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+  # Suppress warnings that Metal isn't in the deployment target of Chrome:
+  # initialization of the Metal backend is behind a IsMetalSupported check so
+  # Dawn won't call Metal functions on macOS 10.10.
+  # At the time this is written Chromium supports 10.10.0 and above, so if we
+  # aren't on 10.11 it means we are on 10.11 and above, and Metal is available.
+  # Skipping this check on 10.11 and above is important as it allows getting
+  # proper compilation warning when using 10.12 and above feature for example.
+  # TODO(crbug.com/1004024): Consider using API_AVAILABLE annotations on all
+  # metal code in dawn once crbug.com/1004024 is sorted out if Chromium still
+  # supports 10.10 then.
+  if (is_mac && mac_deployment_target == "10.10.0") {
+    cflags_objcc = [ "-Wno-unguarded-availability" ]
+  }
+}
+
+config("weak_framework") {
+  if (is_mac && dawn_enable_metal) {
+    weak_frameworks = [ "Metal.framework" ]
+  }
+}
+
+# Config that adds the @executable_path rpath if needed so that Swiftshader or the Vulkan loader are found.
+config("vulkan_rpath") {
+  if (is_mac && dawn_enable_vulkan &&
+      (use_swiftshader || enable_vulkan_loader)) {
+    ldflags = [
+      "-rpath",
+      "@executable_path/",
+    ]
+  }
+}
+
+dawn_json_generator("utils_gen") {
+  target = "native_utils"
+  outputs = [
+    "src/dawn/native/ChainUtils_autogen.h",
+    "src/dawn/native/ChainUtils_autogen.cpp",
+    "src/dawn/native/ProcTable.cpp",
+    "src/dawn/native/dawn_platform_autogen.h",
+    "src/dawn/native/wgpu_structs_autogen.h",
+    "src/dawn/native/wgpu_structs_autogen.cpp",
+    "src/dawn/native/ValidationUtils_autogen.h",
+    "src/dawn/native/ValidationUtils_autogen.cpp",
+    "src/dawn/native/webgpu_absl_format_autogen.h",
+    "src/dawn/native/webgpu_absl_format_autogen.cpp",
+    "src/dawn/native/ObjectType_autogen.h",
+    "src/dawn/native/ObjectType_autogen.cpp",
+  ]
+}
+
+if (dawn_enable_opengl) {
+  dawn_generator("opengl_loader_gen") {
+    script = "${dawn_root}/generator/opengl_loader_generator.py"
+    args = [
+      "--gl-xml",
+      rebase_path("${dawn_root}/third_party/khronos/gl.xml", root_build_dir),
+      "--supported-extensions",
+      rebase_path("opengl/supported_extensions.json", root_build_dir),
+    ]
+    outputs = [
+      "src/dawn/native/opengl/OpenGLFunctionsBase_autogen.cpp",
+      "src/dawn/native/opengl/OpenGLFunctionsBase_autogen.h",
+      "src/dawn/native/opengl/opengl_platform_autogen.h",
+    ]
+  }
+}
+
+# Public dawn native headers so they can be publicly visible for
+# dependencies of dawn native
+source_set("headers") {
+  public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+  all_dependent_configs = [ "${dawn_root}/include/dawn:public" ]
+  sources = [
+    "${dawn_root}/include/dawn/native/DawnNative.h",
+    "${dawn_root}/include/dawn/native/dawn_native_export.h",
+
+    # Include all backend's public headers so that dependencies can include
+    # them even when the backends are disabled.
+    "${dawn_root}/include/dawn/native/D3D12Backend.h",
+    "${dawn_root}/include/dawn/native/MetalBackend.h",
+    "${dawn_root}/include/dawn/native/NullBackend.h",
+    "${dawn_root}/include/dawn/native/OpenGLBackend.h",
+    "${dawn_root}/include/dawn/native/VulkanBackend.h",
+  ]
+}
+
+# The meat of the compilation for dawn native so that we can cheaply have
+# shared_library / static_library versions of it. It compiles all the files
+# except those that define exported symbols.
+source_set("sources") {
+  deps = [
+    ":headers",
+    ":utils_gen",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_spirv_tools_dir}:spvtools_opt",
+    "${dawn_spirv_tools_dir}:spvtools_val",
+    "${dawn_root}/src/tint:libtint",
+  ]
+  defines = []
+  libs = []
+  data_deps = []
+
+  configs += [ ":internal" ]
+
+  # Dependencies that are needed to compile dawn native entry points in
+  # FooBackend.cpp need to be public deps so they are propagated to the
+  # dawn native target
+  public_deps = [
+    ":abseil",
+    "${dawn_root}/src/dawn/platform",
+  ]
+
+  sources = get_target_outputs(":utils_gen")
+  sources += [
+    "Adapter.cpp",
+    "Adapter.h",
+    "AsyncTask.cpp",
+    "AsyncTask.h",
+    "AttachmentState.cpp",
+    "AttachmentState.h",
+    "BackendConnection.cpp",
+    "BackendConnection.h",
+    "BindGroup.cpp",
+    "BindGroup.h",
+    "BindGroupLayout.cpp",
+    "BindGroupLayout.h",
+    "BindGroupTracker.h",
+    "BindingInfo.cpp",
+    "BindingInfo.h",
+    "BuddyAllocator.cpp",
+    "BuddyAllocator.h",
+    "BuddyMemoryAllocator.cpp",
+    "BuddyMemoryAllocator.h",
+    "Buffer.cpp",
+    "Buffer.h",
+    "CacheKey.cpp",
+    "CacheKey.h",
+    "CachedObject.cpp",
+    "CachedObject.h",
+    "CallbackTaskManager.cpp",
+    "CallbackTaskManager.h",
+    "CommandAllocator.cpp",
+    "CommandAllocator.h",
+    "CommandBuffer.cpp",
+    "CommandBuffer.h",
+    "CommandBufferStateTracker.cpp",
+    "CommandBufferStateTracker.h",
+    "CommandEncoder.cpp",
+    "CommandEncoder.h",
+    "CommandValidation.cpp",
+    "CommandValidation.h",
+    "Commands.cpp",
+    "Commands.h",
+    "CompilationMessages.cpp",
+    "CompilationMessages.h",
+    "ComputePassEncoder.cpp",
+    "ComputePassEncoder.h",
+    "ComputePipeline.cpp",
+    "ComputePipeline.h",
+    "CopyTextureForBrowserHelper.cpp",
+    "CopyTextureForBrowserHelper.h",
+    "CreatePipelineAsyncTask.cpp",
+    "CreatePipelineAsyncTask.h",
+    "Device.cpp",
+    "Device.h",
+    "DynamicUploader.cpp",
+    "DynamicUploader.h",
+    "EncodingContext.cpp",
+    "EncodingContext.h",
+    "EnumClassBitmasks.h",
+    "EnumMaskIterator.h",
+    "Error.cpp",
+    "Error.h",
+    "ErrorData.cpp",
+    "ErrorData.h",
+    "ErrorInjector.cpp",
+    "ErrorInjector.h",
+    "ErrorScope.cpp",
+    "ErrorScope.h",
+    "ExternalTexture.cpp",
+    "ExternalTexture.h",
+    "Features.cpp",
+    "Features.h",
+    "Format.cpp",
+    "Format.h",
+    "Forward.h",
+    "IndirectDrawMetadata.cpp",
+    "IndirectDrawMetadata.h",
+    "IndirectDrawValidationEncoder.cpp",
+    "IndirectDrawValidationEncoder.h",
+    "Instance.cpp",
+    "Instance.h",
+    "IntegerTypes.h",
+    "InternalPipelineStore.cpp",
+    "InternalPipelineStore.h",
+    "Limits.cpp",
+    "Limits.h",
+    "ObjectBase.cpp",
+    "ObjectBase.h",
+    "ObjectContentHasher.cpp",
+    "ObjectContentHasher.h",
+    "PassResourceUsage.h",
+    "PassResourceUsageTracker.cpp",
+    "PassResourceUsageTracker.h",
+    "PerStage.cpp",
+    "PerStage.h",
+    "PersistentCache.cpp",
+    "PersistentCache.h",
+    "Pipeline.cpp",
+    "Pipeline.h",
+    "PipelineLayout.cpp",
+    "PipelineLayout.h",
+    "PooledResourceMemoryAllocator.cpp",
+    "PooledResourceMemoryAllocator.h",
+    "ProgrammableEncoder.cpp",
+    "ProgrammableEncoder.h",
+    "QueryHelper.cpp",
+    "QueryHelper.h",
+    "QuerySet.cpp",
+    "QuerySet.h",
+    "Queue.cpp",
+    "Queue.h",
+    "RenderBundle.cpp",
+    "RenderBundle.h",
+    "RenderBundleEncoder.cpp",
+    "RenderBundleEncoder.h",
+    "RenderEncoderBase.cpp",
+    "RenderEncoderBase.h",
+    "RenderPassEncoder.cpp",
+    "RenderPassEncoder.h",
+    "RenderPipeline.cpp",
+    "RenderPipeline.h",
+    "ResourceHeap.h",
+    "ResourceHeapAllocator.h",
+    "ResourceMemoryAllocation.cpp",
+    "ResourceMemoryAllocation.h",
+    "RingBufferAllocator.cpp",
+    "RingBufferAllocator.h",
+    "Sampler.cpp",
+    "Sampler.h",
+    "ScratchBuffer.cpp",
+    "ScratchBuffer.h",
+    "ShaderModule.cpp",
+    "ShaderModule.h",
+    "StagingBuffer.cpp",
+    "StagingBuffer.h",
+    "Subresource.cpp",
+    "Subresource.h",
+    "SubresourceStorage.h",
+    "Surface.cpp",
+    "Surface.h",
+    "SwapChain.cpp",
+    "SwapChain.h",
+    "Texture.cpp",
+    "Texture.h",
+    "TintUtils.cpp",
+    "TintUtils.h",
+    "ToBackend.h",
+    "Toggles.cpp",
+    "Toggles.h",
+    "VertexFormat.cpp",
+    "VertexFormat.h",
+    "dawn_platform.h",
+    "utils/WGPUHelpers.cpp",
+    "utils/WGPUHelpers.h",
+    "webgpu_absl_format.cpp",
+    "webgpu_absl_format.h",
+  ]
+
+  if (dawn_use_x11) {
+    libs += [ "X11" ]
+    sources += [
+      "XlibXcbFunctions.cpp",
+      "XlibXcbFunctions.h",
+    ]
+  }
+
+  # Only win32 app needs to link with user32.lib
+  # In UWP, all availiable APIs are defined in WindowsApp.lib
+  if (is_win && !dawn_is_winuwp) {
+    libs += [ "user32.lib" ]
+  }
+
+  if (dawn_is_winuwp && is_debug) {
+    # DXGIGetDebugInterface1 is defined in dxgi.lib
+    # But this API is tagged as a development-only capability
+    # which implies that linking to this function will cause
+    # the application to fail Windows store certification
+    # So we only link to it in debug build when compiling for UWP.
+    # In win32 we load dxgi.dll using LoadLibrary
+    # so no need for static linking.
+    libs += [ "dxgi.lib" ]
+  }
+
+  # TODO(dawn:766):
+  # Should link dxcompiler.lib and WinPixEventRuntime_UAP.lib in UWP
+  # Somehow use dxcompiler.lib makes CoreApp unable to activate
+  # WinPIX should be added as third party tools and linked statically
+
+  if (dawn_enable_d3d12) {
+    libs += [ "dxguid.lib" ]
+    sources += [
+      "d3d12/AdapterD3D12.cpp",
+      "d3d12/AdapterD3D12.h",
+      "d3d12/BackendD3D12.cpp",
+      "d3d12/BackendD3D12.h",
+      "d3d12/BindGroupD3D12.cpp",
+      "d3d12/BindGroupD3D12.h",
+      "d3d12/BindGroupLayoutD3D12.cpp",
+      "d3d12/BindGroupLayoutD3D12.h",
+      "d3d12/BufferD3D12.cpp",
+      "d3d12/BufferD3D12.h",
+      "d3d12/CPUDescriptorHeapAllocationD3D12.cpp",
+      "d3d12/CPUDescriptorHeapAllocationD3D12.h",
+      "d3d12/CommandAllocatorManager.cpp",
+      "d3d12/CommandAllocatorManager.h",
+      "d3d12/CommandBufferD3D12.cpp",
+      "d3d12/CommandBufferD3D12.h",
+      "d3d12/CommandRecordingContext.cpp",
+      "d3d12/CommandRecordingContext.h",
+      "d3d12/ComputePipelineD3D12.cpp",
+      "d3d12/ComputePipelineD3D12.h",
+      "d3d12/D3D11on12Util.cpp",
+      "d3d12/D3D11on12Util.h",
+      "d3d12/D3D12Error.cpp",
+      "d3d12/D3D12Error.h",
+      "d3d12/D3D12Info.cpp",
+      "d3d12/D3D12Info.h",
+      "d3d12/DeviceD3D12.cpp",
+      "d3d12/DeviceD3D12.h",
+      "d3d12/Forward.h",
+      "d3d12/GPUDescriptorHeapAllocationD3D12.cpp",
+      "d3d12/GPUDescriptorHeapAllocationD3D12.h",
+      "d3d12/HeapAllocatorD3D12.cpp",
+      "d3d12/HeapAllocatorD3D12.h",
+      "d3d12/HeapD3D12.cpp",
+      "d3d12/HeapD3D12.h",
+      "d3d12/IntegerTypes.h",
+      "d3d12/NativeSwapChainImplD3D12.cpp",
+      "d3d12/NativeSwapChainImplD3D12.h",
+      "d3d12/PageableD3D12.cpp",
+      "d3d12/PageableD3D12.h",
+      "d3d12/PipelineLayoutD3D12.cpp",
+      "d3d12/PipelineLayoutD3D12.h",
+      "d3d12/PlatformFunctions.cpp",
+      "d3d12/PlatformFunctions.h",
+      "d3d12/QuerySetD3D12.cpp",
+      "d3d12/QuerySetD3D12.h",
+      "d3d12/QueueD3D12.cpp",
+      "d3d12/QueueD3D12.h",
+      "d3d12/RenderPassBuilderD3D12.cpp",
+      "d3d12/RenderPassBuilderD3D12.h",
+      "d3d12/RenderPipelineD3D12.cpp",
+      "d3d12/RenderPipelineD3D12.h",
+      "d3d12/ResidencyManagerD3D12.cpp",
+      "d3d12/ResidencyManagerD3D12.h",
+      "d3d12/ResourceAllocatorManagerD3D12.cpp",
+      "d3d12/ResourceAllocatorManagerD3D12.h",
+      "d3d12/ResourceHeapAllocationD3D12.cpp",
+      "d3d12/ResourceHeapAllocationD3D12.h",
+      "d3d12/SamplerD3D12.cpp",
+      "d3d12/SamplerD3D12.h",
+      "d3d12/SamplerHeapCacheD3D12.cpp",
+      "d3d12/SamplerHeapCacheD3D12.h",
+      "d3d12/ShaderModuleD3D12.cpp",
+      "d3d12/ShaderModuleD3D12.h",
+      "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
+      "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
+      "d3d12/StagingBufferD3D12.cpp",
+      "d3d12/StagingBufferD3D12.h",
+      "d3d12/StagingDescriptorAllocatorD3D12.cpp",
+      "d3d12/StagingDescriptorAllocatorD3D12.h",
+      "d3d12/SwapChainD3D12.cpp",
+      "d3d12/SwapChainD3D12.h",
+      "d3d12/TextureCopySplitter.cpp",
+      "d3d12/TextureCopySplitter.h",
+      "d3d12/TextureD3D12.cpp",
+      "d3d12/TextureD3D12.h",
+      "d3d12/UtilsD3D12.cpp",
+      "d3d12/UtilsD3D12.h",
+      "d3d12/d3d12_platform.h",
+    ]
+  }
+
+  if (dawn_enable_metal) {
+    frameworks = [
+      "Cocoa.framework",
+      "IOKit.framework",
+      "IOSurface.framework",
+      "QuartzCore.framework",
+    ]
+    sources += [
+      "Surface_metal.mm",
+      "metal/BackendMTL.h",
+      "metal/BackendMTL.mm",
+      "metal/BindGroupLayoutMTL.h",
+      "metal/BindGroupLayoutMTL.mm",
+      "metal/BindGroupMTL.h",
+      "metal/BindGroupMTL.mm",
+      "metal/BufferMTL.h",
+      "metal/BufferMTL.mm",
+      "metal/CommandBufferMTL.h",
+      "metal/CommandBufferMTL.mm",
+      "metal/CommandRecordingContext.h",
+      "metal/CommandRecordingContext.mm",
+      "metal/ComputePipelineMTL.h",
+      "metal/ComputePipelineMTL.mm",
+      "metal/DeviceMTL.h",
+      "metal/DeviceMTL.mm",
+      "metal/Forward.h",
+      "metal/PipelineLayoutMTL.h",
+      "metal/PipelineLayoutMTL.mm",
+      "metal/QuerySetMTL.h",
+      "metal/QuerySetMTL.mm",
+      "metal/QueueMTL.h",
+      "metal/QueueMTL.mm",
+      "metal/RenderPipelineMTL.h",
+      "metal/RenderPipelineMTL.mm",
+      "metal/SamplerMTL.h",
+      "metal/SamplerMTL.mm",
+      "metal/ShaderModuleMTL.h",
+      "metal/ShaderModuleMTL.mm",
+      "metal/StagingBufferMTL.h",
+      "metal/StagingBufferMTL.mm",
+      "metal/SwapChainMTL.h",
+      "metal/SwapChainMTL.mm",
+      "metal/TextureMTL.h",
+      "metal/TextureMTL.mm",
+      "metal/UtilsMetal.h",
+      "metal/UtilsMetal.mm",
+    ]
+  }
+
+  if (dawn_enable_null) {
+    sources += [
+      "null/DeviceNull.cpp",
+      "null/DeviceNull.h",
+    ]
+  }
+
+  if (dawn_enable_opengl || dawn_enable_vulkan) {
+    sources += [
+      "SpirvValidation.cpp",
+      "SpirvValidation.h",
+    ]
+  }
+
+  if (dawn_enable_opengl) {
+    public_deps += [
+      ":opengl_loader_gen",
+      "${dawn_root}/third_party/khronos:khronos_platform",
+    ]
+    sources += get_target_outputs(":opengl_loader_gen")
+    sources += [
+      "opengl/BackendGL.cpp",
+      "opengl/BackendGL.h",
+      "opengl/BindGroupGL.cpp",
+      "opengl/BindGroupGL.h",
+      "opengl/BindGroupLayoutGL.cpp",
+      "opengl/BindGroupLayoutGL.h",
+      "opengl/BufferGL.cpp",
+      "opengl/BufferGL.h",
+      "opengl/CommandBufferGL.cpp",
+      "opengl/CommandBufferGL.h",
+      "opengl/ComputePipelineGL.cpp",
+      "opengl/ComputePipelineGL.h",
+      "opengl/DeviceGL.cpp",
+      "opengl/DeviceGL.h",
+      "opengl/Forward.h",
+      "opengl/GLFormat.cpp",
+      "opengl/GLFormat.h",
+      "opengl/NativeSwapChainImplGL.cpp",
+      "opengl/NativeSwapChainImplGL.h",
+      "opengl/OpenGLFunctions.cpp",
+      "opengl/OpenGLFunctions.h",
+      "opengl/OpenGLVersion.cpp",
+      "opengl/OpenGLVersion.h",
+      "opengl/PersistentPipelineStateGL.cpp",
+      "opengl/PersistentPipelineStateGL.h",
+      "opengl/PipelineGL.cpp",
+      "opengl/PipelineGL.h",
+      "opengl/PipelineLayoutGL.cpp",
+      "opengl/PipelineLayoutGL.h",
+      "opengl/QuerySetGL.cpp",
+      "opengl/QuerySetGL.h",
+      "opengl/QueueGL.cpp",
+      "opengl/QueueGL.h",
+      "opengl/RenderPipelineGL.cpp",
+      "opengl/RenderPipelineGL.h",
+      "opengl/SamplerGL.cpp",
+      "opengl/SamplerGL.h",
+      "opengl/ShaderModuleGL.cpp",
+      "opengl/ShaderModuleGL.h",
+      "opengl/SwapChainGL.cpp",
+      "opengl/SwapChainGL.h",
+      "opengl/TextureGL.cpp",
+      "opengl/TextureGL.h",
+      "opengl/UtilsGL.cpp",
+      "opengl/UtilsGL.h",
+      "opengl/opengl_platform.h",
+    ]
+  }
+
+  if (dawn_enable_vulkan) {
+    public_deps += [ "${dawn_vulkan_headers_dir}:vulkan_headers" ]
+    sources += [
+      "vulkan/AdapterVk.cpp",
+      "vulkan/AdapterVk.h",
+      "vulkan/BackendVk.cpp",
+      "vulkan/BackendVk.h",
+      "vulkan/BindGroupLayoutVk.cpp",
+      "vulkan/BindGroupLayoutVk.h",
+      "vulkan/BindGroupVk.cpp",
+      "vulkan/BindGroupVk.h",
+      "vulkan/BufferVk.cpp",
+      "vulkan/BufferVk.h",
+      "vulkan/CommandBufferVk.cpp",
+      "vulkan/CommandBufferVk.h",
+      "vulkan/CommandRecordingContext.h",
+      "vulkan/ComputePipelineVk.cpp",
+      "vulkan/ComputePipelineVk.h",
+      "vulkan/DescriptorSetAllocation.h",
+      "vulkan/DescriptorSetAllocator.cpp",
+      "vulkan/DescriptorSetAllocator.h",
+      "vulkan/DeviceVk.cpp",
+      "vulkan/DeviceVk.h",
+      "vulkan/ExternalHandle.h",
+      "vulkan/FencedDeleter.cpp",
+      "vulkan/FencedDeleter.h",
+      "vulkan/Forward.h",
+      "vulkan/NativeSwapChainImplVk.cpp",
+      "vulkan/NativeSwapChainImplVk.h",
+      "vulkan/PipelineLayoutVk.cpp",
+      "vulkan/PipelineLayoutVk.h",
+      "vulkan/QuerySetVk.cpp",
+      "vulkan/QuerySetVk.h",
+      "vulkan/QueueVk.cpp",
+      "vulkan/QueueVk.h",
+      "vulkan/RenderPassCache.cpp",
+      "vulkan/RenderPassCache.h",
+      "vulkan/RenderPipelineVk.cpp",
+      "vulkan/RenderPipelineVk.h",
+      "vulkan/ResourceHeapVk.cpp",
+      "vulkan/ResourceHeapVk.h",
+      "vulkan/ResourceMemoryAllocatorVk.cpp",
+      "vulkan/ResourceMemoryAllocatorVk.h",
+      "vulkan/SamplerVk.cpp",
+      "vulkan/SamplerVk.h",
+      "vulkan/ShaderModuleVk.cpp",
+      "vulkan/ShaderModuleVk.h",
+      "vulkan/StagingBufferVk.cpp",
+      "vulkan/StagingBufferVk.h",
+      "vulkan/SwapChainVk.cpp",
+      "vulkan/SwapChainVk.h",
+      "vulkan/TextureVk.cpp",
+      "vulkan/TextureVk.h",
+      "vulkan/UtilsVulkan.cpp",
+      "vulkan/UtilsVulkan.h",
+      "vulkan/VulkanError.cpp",
+      "vulkan/VulkanError.h",
+      "vulkan/VulkanExtensions.cpp",
+      "vulkan/VulkanExtensions.h",
+      "vulkan/VulkanFunctions.cpp",
+      "vulkan/VulkanFunctions.h",
+      "vulkan/VulkanInfo.cpp",
+      "vulkan/VulkanInfo.h",
+      "vulkan/external_memory/MemoryService.h",
+      "vulkan/external_semaphore/SemaphoreService.h",
+    ]
+
+    if (is_chromeos) {
+      sources += [
+        "vulkan/external_memory/MemoryServiceDmaBuf.cpp",
+        "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
+      ]
+      defines += [ "DAWN_USE_SYNC_FDS" ]
+    } else if (is_linux) {
+      sources += [
+        "vulkan/external_memory/MemoryServiceOpaqueFD.cpp",
+        "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
+      ]
+    } else if (is_fuchsia) {
+      sources += [
+        "vulkan/external_memory/MemoryServiceZirconHandle.cpp",
+        "vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp",
+      ]
+    } else {
+      sources += [
+        "vulkan/external_memory/MemoryServiceNull.cpp",
+        "vulkan/external_semaphore/SemaphoreServiceNull.cpp",
+      ]
+    }
+    if (build_with_chromium && is_fuchsia) {
+      # Necessary to ensure that the Vulkan libraries will be in the
+      # final Fuchsia package.
+      data_deps = [
+        "//third_party/fuchsia-sdk:vulkan_base",
+        "//third_party/fuchsia-sdk:vulkan_validation",
+
+        # NOTE: The line below is a work around for http://crbug.com/1001081
+        "//third_party/fuchsia-sdk/sdk:trace_engine",
+      ]
+    }
+    if (dawn_is_winuwp) {
+      defines += [ "DAWN_IS_WINUWP" ]
+    }
+    if (enable_vulkan_validation_layers) {
+      defines += [
+        "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS",
+        "DAWN_VK_DATA_DIR=\"$vulkan_data_subdir\"",
+      ]
+    }
+    if (enable_vulkan_loader) {
+      data_deps += [ "${dawn_vulkan_loader_dir}:libvulkan" ]
+    }
+    if (use_swiftshader) {
+      data_deps +=
+          [ "${dawn_swiftshader_dir}/src/Vulkan:swiftshader_libvulkan" ]
+      defines += [ "DAWN_ENABLE_SWIFTSHADER" ]
+    }
+  }
+
+  if (use_angle) {
+    data_deps += [
+      "${dawn_angle_dir}:libEGL",
+      "${dawn_angle_dir}:libGLESv2",
+    ]
+  }
+}
+
+# The static and shared libraries for dawn_native. Most of the files are
+# already compiled in dawn_native_sources, but we still need to compile
+# files defining exported symbols.
+dawn_component("native") {
+  DEFINE_PREFIX = "DAWN_NATIVE"
+
+  #Make headers publically visible
+  public_deps = [ ":headers" ]
+
+  deps = [
+    ":sources",
+    "${dawn_root}/src/dawn/common",
+  ]
+  sources = [ "DawnNative.cpp" ]
+  configs = [ ":internal" ]
+  public_configs = [
+    ":weak_framework",
+    ":vulkan_rpath",
+  ]
+
+  if (dawn_enable_d3d12) {
+    sources += [ "d3d12/D3D12Backend.cpp" ]
+  }
+  if (dawn_enable_metal) {
+    sources += [ "metal/MetalBackend.mm" ]
+  }
+  if (dawn_enable_null) {
+    sources += [ "null/NullBackend.cpp" ]
+  }
+  if (dawn_enable_opengl) {
+    sources += [ "opengl/OpenGLBackend.cpp" ]
+  }
+  if (dawn_enable_vulkan) {
+    sources += [ "vulkan/VulkanBackend.cpp" ]
+
+    if (enable_vulkan_validation_layers) {
+      data_deps =
+          [ "${dawn_vulkan_validation_layers_dir}:vulkan_validation_layers" ]
+      if (!is_android) {
+        data_deps +=
+            [ "${dawn_vulkan_validation_layers_dir}:vulkan_gen_json_files" ]
+      }
+    }
+  }
+}
+
+dawn_json_generator("webgpu_dawn_native_proc_gen") {
+  target = "webgpu_dawn_native_proc"
+  outputs = [ "src/dawn/native/webgpu_dawn_native_proc.cpp" ]
+}
+
+dawn_component("webgpu_dawn") {
+  # For a single library - build `webgpu_dawn_shared` with GN args:
+  #   dawn_complete_static_libs = true - to package a single lib
+  #
+  #   is_debug = false
+  #    - setting this to true makes library over 50Mb
+  #
+  #   use_custom_libcxx = false
+  #    - Otherwise, libc++ symbols may conflict if the
+  #      library is used outside of Chromium.
+  #
+  #   dawn_use_swiftshader = false
+  #   angle_enable_swiftshader = false
+  #    - SwiftShader can't be built without use_custom_libcxx.
+  #      It should be built separately.
+  DEFINE_PREFIX = "WGPU"
+
+  sources = get_target_outputs(":webgpu_dawn_native_proc_gen")
+  deps = [
+    ":static",
+    ":webgpu_dawn_native_proc_gen",
+  ]
+}
diff --git a/src/dawn/native/BackendConnection.cpp b/src/dawn/native/BackendConnection.cpp
new file mode 100644
index 0000000..abcc271
--- /dev/null
+++ b/src/dawn/native/BackendConnection.cpp
@@ -0,0 +1,36 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native {
+
+    BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+        : mInstance(instance), mType(type) {
+    }
+
+    wgpu::BackendType BackendConnection::GetType() const {
+        return mType;
+    }
+
+    InstanceBase* BackendConnection::GetInstance() const {
+        return mInstance;
+    }
+
+    ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* options) {
+        return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BackendConnection.h b/src/dawn/native/BackendConnection.h
new file mode 100644
index 0000000..2879fad
--- /dev/null
+++ b/src/dawn/native/BackendConnection.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BACKENDCONNECTION_H_
+#define DAWNNATIVE_BACKENDCONNECTION_H_
+
+#include "dawn/native/Adapter.h"
+#include "dawn/native/DawnNative.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+    // An common interface for all backends. Mostly used to create adapters for a particular
+    // backend.
+    class BackendConnection {
+      public:
+        BackendConnection(InstanceBase* instance, wgpu::BackendType type);
+        virtual ~BackendConnection() = default;
+
+        wgpu::BackendType GetType() const;
+        InstanceBase* GetInstance() const;
+
+        // Returns all the adapters for the system that can be created by the backend, without extra
+        // options (such as debug adapters, custom driver libraries, etc.)
+        virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
+
+        // Returns new adapters created with the backend-specific options.
+        virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* options);
+
+      private:
+        InstanceBase* mInstance = nullptr;
+        wgpu::BackendType mType;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BACKENDCONNECTION_H_
diff --git a/src/dawn/native/BindGroup.cpp b/src/dawn/native/BindGroup.cpp
new file mode 100644
index 0000000..503e613
--- /dev/null
+++ b/src/dawn/native/BindGroup.cpp
@@ -0,0 +1,545 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindGroup.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        // Helper functions to perform binding-type specific validation
+
+        MaybeError ValidateBufferBinding(const DeviceBase* device,
+                                         const BindGroupEntry& entry,
+                                         const BindingInfo& bindingInfo) {
+            DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
+
+            DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
+                            "Expected only buffer to be set for binding entry.");
+
+            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_TRY(device->ValidateObject(entry.buffer));
+
+            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+
+            wgpu::BufferUsage requiredUsage;
+            uint64_t maxBindingSize;
+            uint64_t requiredBindingAlignment;
+            switch (bindingInfo.buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    requiredUsage = wgpu::BufferUsage::Uniform;
+                    maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minUniformBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Storage:
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    requiredUsage = wgpu::BufferUsage::Storage;
+                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
+                    break;
+                case kInternalStorageBufferBinding:
+                    requiredUsage = kInternalStorageBuffer;
+                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
+            }
+
+            uint64_t bufferSize = entry.buffer->GetSize();
+
+            // Handle wgpu::WholeSize, avoiding overflows.
+            DAWN_INVALID_IF(entry.offset > bufferSize,
+                            "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+                            bufferSize, entry.buffer);
+
+            uint64_t bindingSize =
+                (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
+
+            DAWN_INVALID_IF(bindingSize > bufferSize,
+                            "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+                            bufferSize, entry.buffer);
+
+            DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
+
+            // Note that no overflow can happen because we already checked that
+            // bufferSize >= bindingSize
+            DAWN_INVALID_IF(
+                entry.offset > bufferSize - bindingSize,
+                "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+                entry.offset, bufferSize, bindingSize, entry.buffer);
+
+            DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+                            "Offset (%u) does not satisfy the minimum %s alignment (%u).",
+                            entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+
+            DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+                            "Binding usage (%s) of %s doesn't match expected usage (%s).",
+                            entry.buffer->GetUsage(), entry.buffer, requiredUsage);
+
+            DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+                            "Binding size (%u) is smaller than the minimum binding size (%u).",
+                            bindingSize, bindingInfo.buffer.minBindingSize);
+
+            DAWN_INVALID_IF(bindingSize > maxBindingSize,
+                            "Binding size (%u) is larger than the maximum binding size (%u).",
+                            bindingSize, maxBindingSize);
+
+            return {};
+        }
+
+        MaybeError ValidateTextureBinding(DeviceBase* device,
+                                          const BindGroupEntry& entry,
+                                          const BindingInfo& bindingInfo) {
+            DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
+
+            DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
+                            "Expected only textureView to be set for binding entry.");
+
+            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_TRY(device->ValidateObject(entry.textureView));
+
+            TextureViewBase* view = entry.textureView;
+
+            Aspect aspect = view->GetAspects();
+            DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
+                            view);
+
+            TextureBase* texture = view->GetTexture();
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Texture: {
+                    SampleTypeBit supportedTypes =
+                        texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
+                    SampleTypeBit requiredType =
+                        SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
+
+                    DAWN_INVALID_IF(
+                        !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+                        "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+                        texture->GetUsage(), texture);
+
+                    DAWN_INVALID_IF(
+                        texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+                        "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+                        texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
+
+                    DAWN_INVALID_IF(
+                        (supportedTypes & requiredType) == 0,
+                        "None of the supported sample types (%s) of %s match the expected sample "
+                        "types (%s).",
+                        supportedTypes, texture, requiredType);
+
+                    DAWN_INVALID_IF(
+                        entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                        entry.textureView->GetDimension(), entry.textureView,
+                        bindingInfo.texture.viewDimension);
+                    break;
+                }
+                case BindingInfoType::StorageTexture: {
+                    DAWN_INVALID_IF(
+                        !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+                        "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+                        texture->GetUsage(), texture);
+
+                    ASSERT(!texture->IsMultisampledTexture());
+
+                    DAWN_INVALID_IF(
+                        texture->GetFormat().format != bindingInfo.storageTexture.format,
+                        "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+                        texture, bindingInfo.storageTexture.format);
+
+                    DAWN_INVALID_IF(
+                        entry.textureView->GetDimension() !=
+                            bindingInfo.storageTexture.viewDimension,
+                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                        entry.textureView->GetDimension(), entry.textureView,
+                        bindingInfo.storageTexture.viewDimension);
+
+                    DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
+                                    "mipLevelCount (%u) of %s expected to be 1.",
+                                    entry.textureView->GetLevelCount(), entry.textureView);
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateSamplerBinding(const DeviceBase* device,
+                                          const BindGroupEntry& entry,
+                                          const BindingInfo& bindingInfo) {
+            DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
+
+            DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
+                            "Expected only sampler to be set for binding entry.");
+
+            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_TRY(device->ValidateObject(entry.sampler));
+
+            ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
+
+            switch (bindingInfo.sampler.type) {
+                case wgpu::SamplerBindingType::NonFiltering:
+                    DAWN_INVALID_IF(
+                        entry.sampler->IsFiltering(),
+                        "Filtering sampler %s is incompatible with non-filtering sampler "
+                        "binding.",
+                        entry.sampler);
+                    [[fallthrough]];
+                case wgpu::SamplerBindingType::Filtering:
+                    DAWN_INVALID_IF(
+                        entry.sampler->IsComparison(),
+                        "Comparison sampler %s is incompatible with non-comparison sampler "
+                        "binding.",
+                        entry.sampler);
+                    break;
+                case wgpu::SamplerBindingType::Comparison:
+                    DAWN_INVALID_IF(
+                        !entry.sampler->IsComparison(),
+                        "Non-comparison sampler %s is imcompatible with comparison sampler "
+                        "binding.",
+                        entry.sampler);
+                    break;
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateExternalTextureBinding(
+            const DeviceBase* device,
+            const BindGroupEntry& entry,
+            const ExternalTextureBindingEntry* externalTextureBindingEntry,
+            const ExternalTextureBindingExpansionMap& expansions) {
+            DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
+                            "Binding entry external texture not set.");
+
+            DAWN_INVALID_IF(
+                entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
+                "Expected only external texture to be set for binding entry.");
+
+            DAWN_INVALID_IF(
+                expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+                "External texture binding entry %u is not present in the bind group layout.",
+                entry.binding);
+
+            DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
+                                         wgpu::SType::ExternalTextureBindingEntry));
+
+            DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+
+            return {};
+        }
+
+    }  // anonymous namespace
+
+    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
+                                           const BindGroupDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+        DAWN_TRY(device->ValidateObject(descriptor->layout));
+
+        DAWN_INVALID_IF(
+            descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
+            "Number of entries (%u) did not match the number of entries (%u) specified in %s."
+            "\nExpected layout: %s",
+            descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
+            descriptor->layout, descriptor->layout->EntriesToString());
+
+        const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+        ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+
+        ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
+        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+            const BindGroupEntry& entry = descriptor->entries[i];
+
+            const auto& it = bindingMap.find(BindingNumber(entry.binding));
+            DAWN_INVALID_IF(it == bindingMap.end(),
+                            "In entries[%u], binding index %u not present in the bind group layout."
+                            "\nExpected layout: %s",
+                            i, entry.binding, descriptor->layout->EntriesToString());
+
+            BindingIndex bindingIndex = it->second;
+            ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+            DAWN_INVALID_IF(bindingsSet[bindingIndex],
+                            "In entries[%u], binding index %u already used by a previous entry", i,
+                            entry.binding);
+
+            bindingsSet.set(bindingIndex);
+
+            // Below this block we validate entries based on the bind group layout, in which
+            // external textures have been expanded into their underlying contents. For this reason
+            // we must identify external texture binding entries by checking the bind group entry
+            // itself.
+            // TODO:(dawn:1293): Store external textures in
+            // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
+            // be moved in the switch below.
+            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+            if (externalTextureBindingEntry != nullptr) {
+                DAWN_TRY(ValidateExternalTextureBinding(
+                    device, entry, externalTextureBindingEntry,
+                    descriptor->layout->GetExternalTextureBindingExpansionMap()));
+                continue;
+            }
+
+            const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+
+            // Perform binding-type specific validation.
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer:
+                    DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+                                     "validating entries[%u] as a Buffer."
+                                     "\nExpected entry layout: %s",
+                                     i, bindingInfo);
+                    break;
+                case BindingInfoType::Texture:
+                case BindingInfoType::StorageTexture:
+                    DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+                                     "validating entries[%u] as a Texture."
+                                     "\nExpected entry layout: %s",
+                                     i, bindingInfo);
+                    break;
+                case BindingInfoType::Sampler:
+                    DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+                                     "validating entries[%u] as a Sampler."
+                                     "\nExpected entry layout: %s",
+                                     i, bindingInfo);
+                    break;
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+
+        // This should always be true because
+        //  - numBindings has to match between the bind group and its layout.
+        //  - Each binding must be set at most once
+        //
+        // We don't validate the equality because it wouldn't be possible to cover it with a test.
+        ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+
+        return {};
+    }  // anonymous namespace
+
+    // BindGroup
+
+    BindGroupBase::BindGroupBase(DeviceBase* device,
+                                 const BindGroupDescriptor* descriptor,
+                                 void* bindingDataStart)
+        : ApiObjectBase(device, descriptor->label),
+          mLayout(descriptor->layout),
+          mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+        for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+            // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+            // This is to fill Ref<ObjectBase> holes with nullptrs.
+            new (&mBindingData.bindings[i]) Ref<ObjectBase>();
+        }
+
+        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+            const BindGroupEntry& entry = descriptor->entries[i];
+
+            BindingIndex bindingIndex =
+                descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
+            ASSERT(bindingIndex < mLayout->GetBindingCount());
+
+            // Only a single binding type should be set, so once we found it we can skip to the
+            // next loop iteration.
+
+            if (entry.buffer != nullptr) {
+                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+                mBindingData.bindings[bindingIndex] = entry.buffer;
+                mBindingData.bufferData[bindingIndex].offset = entry.offset;
+                uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
+                                          ? entry.buffer->GetSize() - entry.offset
+                                          : entry.size;
+                mBindingData.bufferData[bindingIndex].size = bufferSize;
+                continue;
+            }
+
+            if (entry.textureView != nullptr) {
+                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+                mBindingData.bindings[bindingIndex] = entry.textureView;
+                continue;
+            }
+
+            if (entry.sampler != nullptr) {
+                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+                mBindingData.bindings[bindingIndex] = entry.sampler;
+                continue;
+            }
+
+            // Here we unpack external texture bindings into multiple additional bindings for the
+            // external texture's contents. New binding locations previously determined in the bind
+            // group layout are created in this bind group and filled with the external texture's
+            // underlying resources.
+            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+            if (externalTextureBindingEntry != nullptr) {
+                mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
+
+                ExternalTextureBindingExpansionMap expansions =
+                    mLayout->GetExternalTextureBindingExpansionMap();
+                ExternalTextureBindingExpansionMap::iterator it =
+                    expansions.find(BindingNumber(entry.binding));
+
+                ASSERT(it != expansions.end());
+
+                BindingIndex plane0BindingIndex =
+                    descriptor->layout->GetBindingIndex(it->second.plane0);
+                BindingIndex plane1BindingIndex =
+                    descriptor->layout->GetBindingIndex(it->second.plane1);
+                BindingIndex paramsBindingIndex =
+                    descriptor->layout->GetBindingIndex(it->second.params);
+
+                ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
+
+                mBindingData.bindings[plane0BindingIndex] =
+                    externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
+
+                ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
+                mBindingData.bindings[plane1BindingIndex] =
+                    externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
+
+                ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
+                mBindingData.bindings[paramsBindingIndex] =
+                    externalTextureBindingEntry->externalTexture->GetParamsBuffer();
+                mBindingData.bufferData[paramsBindingIndex].offset = 0;
+                mBindingData.bufferData[paramsBindingIndex].size =
+                    sizeof(dawn_native::ExternalTextureParams);
+
+                continue;
+            }
+        }
+
+        uint32_t packedIdx = 0;
+        for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+             ++bindingIndex) {
+            if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
+                mBindingData.unverifiedBufferSizes[packedIdx] =
+                    mBindingData.bufferData[bindingIndex].size;
+                ++packedIdx;
+            }
+        }
+
+        TrackInDevice();
+    }
+
+    BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    BindGroupBase::~BindGroupBase() = default;
+
+    void BindGroupBase::DestroyImpl() {
+        if (mLayout != nullptr) {
+            ASSERT(!IsError());
+            for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+                mBindingData.bindings[i].~Ref<ObjectBase>();
+            }
+        }
+    }
+
+    void BindGroupBase::DeleteThis() {
+        // Add another ref to the layout so that if this is the last ref, the layout
+        // is destroyed after the bind group. The bind group is slab-allocated inside
+        // memory owned by the layout (except for the null backend).
+        Ref<BindGroupLayoutBase> layout = mLayout;
+        ApiObjectBase::DeleteThis();
+    }
+
+    BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mBindingData() {
+    }
+
+    // static
+    BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
+        return new BindGroupBase(device, ObjectBase::kError);
+    }
+
+    ObjectType BindGroupBase::GetType() const {
+        return ObjectType::BindGroup;
+    }
+
+    BindGroupLayoutBase* BindGroupBase::GetLayout() {
+        ASSERT(!IsError());
+        return mLayout.Get();
+    }
+
+    const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+        ASSERT(!IsError());
+        return mLayout.Get();
+    }
+
+    const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+        ASSERT(!IsError());
+        return mBindingData.unverifiedBufferSizes;
+    }
+
+    BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
+        ASSERT(!IsError());
+        ASSERT(bindingIndex < mLayout->GetBindingCount());
+        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
+        BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+        return {buffer, mBindingData.bufferData[bindingIndex].offset,
+                mBindingData.bufferData[bindingIndex].size};
+    }
+
+    SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
+        ASSERT(!IsError());
+        ASSERT(bindingIndex < mLayout->GetBindingCount());
+        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
+        return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
+    }
+
+    TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
+        ASSERT(!IsError());
+        ASSERT(bindingIndex < mLayout->GetBindingCount());
+        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
+               mLayout->GetBindingInfo(bindingIndex).bindingType ==
+                   BindingInfoType::StorageTexture);
+        return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
+    }
+
+    const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
+        return mBoundExternalTextures;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BindGroup.h b/src/dawn/native/BindGroup.h
new file mode 100644
index 0000000..7ba883a
--- /dev/null
+++ b/src/dawn/native/BindGroup.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUP_H_
+#define DAWNNATIVE_BINDGROUP_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
+                                           const BindGroupDescriptor* descriptor);
+
+    struct BufferBinding {
+        BufferBase* buffer;
+        uint64_t offset;
+        uint64_t size;
+    };
+
+    class BindGroupBase : public ApiObjectBase {
+      public:
+        static BindGroupBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        BindGroupLayoutBase* GetLayout();
+        const BindGroupLayoutBase* GetLayout() const;
+        BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+        SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
+        TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+        const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
+        const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
+
+      protected:
+        // To save memory, the size of a bind group is dynamically determined and the bind group is
+        // placement-allocated into memory big enough to hold the bind group with its
+        // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
+        // binding data should be passed as |bindingDataStart|.
+        BindGroupBase(DeviceBase* device,
+                      const BindGroupDescriptor* descriptor,
+                      void* bindingDataStart);
+
+        // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+        // be first in the allocation. The binding data is stored after the Derived class.
+        template <typename Derived>
+        BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+            : BindGroupBase(device,
+                            descriptor,
+                            AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+                                     descriptor->layout->GetBindingDataAlignment())) {
+            static_assert(std::is_base_of<BindGroupBase, Derived>::value);
+        }
+
+        // Constructor used only for mocking and testing.
+        BindGroupBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        ~BindGroupBase() override;
+
+      private:
+        BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+        void DeleteThis() override;
+
+        Ref<BindGroupLayoutBase> mLayout;
+        BindGroupLayoutBase::BindingDataPointers mBindingData;
+
+        // TODO:(dawn:1293): Store external textures in
+        // BindGroupLayoutBase::BindingDataPointers::bindings
+        std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BINDGROUP_H_
diff --git a/src/dawn/native/BindGroupLayout.cpp b/src/dawn/native/BindGroupLayout.cpp
new file mode 100644
index 0000000..201aecc
--- /dev/null
+++ b/src/dawn/native/BindGroupLayout.cpp
@@ -0,0 +1,676 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/BitSetIterator.h"
+
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace dawn::native {
+
+    namespace {
+        MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+                                                wgpu::TextureFormat storageTextureFormat) {
+            const Format* format = nullptr;
+            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+
+            ASSERT(format != nullptr);
+            DAWN_INVALID_IF(!format->supportsStorageUsage,
+                            "Texture format (%s) does not support storage textures.",
+                            storageTextureFormat);
+
+            return {};
+        }
+
+        MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
+            switch (dimension) {
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    return DAWN_FORMAT_VALIDATION_ERROR(
+                        "%s texture views cannot be used as storage textures.", dimension);
+
+                case wgpu::TextureViewDimension::e1D:
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e2DArray:
+                case wgpu::TextureViewDimension::e3D:
+                    return {};
+
+                case wgpu::TextureViewDimension::Undefined:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
+                                                const BindGroupLayoutEntry& entry,
+                                                bool allowInternalBinding) {
+            DAWN_TRY(ValidateShaderStage(entry.visibility));
+
+            int bindingMemberCount = 0;
+            BindingInfoType bindingType;
+            wgpu::ShaderStage allowedStages = kAllStages;
+
+            if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+                bindingMemberCount++;
+                bindingType = BindingInfoType::Buffer;
+                const BufferBindingLayout& buffer = entry.buffer;
+
+                // The kInternalStorageBufferBinding is used internally and not a value
+                // in wgpu::BufferBindingType.
+                if (buffer.type == kInternalStorageBufferBinding) {
+                    DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
+                } else {
+                    DAWN_TRY(ValidateBufferBindingType(buffer.type));
+                }
+
+                if (buffer.type == wgpu::BufferBindingType::Storage ||
+                    buffer.type == kInternalStorageBufferBinding) {
+                    allowedStages &= ~wgpu::ShaderStage::Vertex;
+                }
+            }
+
+            if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+                bindingMemberCount++;
+                bindingType = BindingInfoType::Sampler;
+                DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
+            }
+
+            if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+                bindingMemberCount++;
+                bindingType = BindingInfoType::Texture;
+                const TextureBindingLayout& texture = entry.texture;
+                DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
+
+                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+                wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
+                if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+                    DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
+                    viewDimension = texture.viewDimension;
+                }
+
+                DAWN_INVALID_IF(
+                    texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+                    "View dimension (%s) for a multisampled texture bindings was not %s.",
+                    viewDimension, wgpu::TextureViewDimension::e2D);
+            }
+
+            if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+                bindingMemberCount++;
+                bindingType = BindingInfoType::StorageTexture;
+                const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
+                DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
+                DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
+
+                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+                if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+                    DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
+                    DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
+                }
+
+                if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
+                    allowedStages &= ~wgpu::ShaderStage::Vertex;
+                }
+            }
+
+            const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+            if (externalTextureBindingLayout != nullptr) {
+                bindingMemberCount++;
+                bindingType = BindingInfoType::ExternalTexture;
+            }
+
+            DAWN_INVALID_IF(bindingMemberCount == 0,
+                            "BindGroupLayoutEntry had none of buffer, sampler, texture, "
+                            "storageTexture, or externalTexture set");
+
+            DAWN_INVALID_IF(bindingMemberCount != 1,
+                            "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
+                            "storageTexture, or externalTexture set");
+
+            DAWN_INVALID_IF(
+                !IsSubset(entry.visibility, allowedStages),
+                "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
+                bindingType, entry.visibility, allowedStages);
+
+            return {};
+        }
+
+        BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
+            uint32_t binding,
+            wgpu::ShaderStage visibility) {
+            BindGroupLayoutEntry entry;
+            entry.binding = binding;
+            entry.visibility = visibility;
+            entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+            entry.texture.multisampled = false;
+            entry.texture.sampleType = wgpu::TextureSampleType::Float;
+            return entry;
+        }
+
+        BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
+                                                                    wgpu::ShaderStage visibility) {
+            BindGroupLayoutEntry entry;
+            entry.binding = binding;
+            entry.visibility = visibility;
+            entry.buffer.hasDynamicOffset = false;
+            entry.buffer.type = wgpu::BufferBindingType::Uniform;
+            return entry;
+        }
+
+        std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
+            const BindGroupLayoutDescriptor* descriptor,
+            BindingCounts* bindingCounts,
+            ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
+            std::vector<BindGroupLayoutEntry> expandedOutput;
+
+            // When new bgl entries are created, we use binding numbers larger than
+            // kMaxBindingNumber to ensure there are no collisions.
+            uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
+            for (uint32_t i = 0; i < descriptor->entryCount; i++) {
+                const BindGroupLayoutEntry& entry = descriptor->entries[i];
+                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+                FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+                // External textures are expanded from a texture_external into two sampled texture
+                // bindings and one uniform buffer binding. The original binding number is used
+                // for the first sampled texture.
+                if (externalTextureBindingLayout != nullptr) {
+                    for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+                        // External textures are not fully implemented, which means that expanding
+                        // the external texture at this time will not occupy the same number of
+                        // binding slots as defined in the WebGPU specification. Here we prematurely
+                        // increment the binding counts for an additional sampled textures and a
+                        // sampler so that an external texture will occupy the correct number of
+                        // slots for correct validation of shader binding limits.
+                        // TODO:(dawn:1082): Consider removing this and instead making a change to
+                        // the validation.
+                        constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
+                        constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
+                        bindingCounts->perStage[stage].sampledTextureCount +=
+                            kUnimplementedSampledTexturesPerExternalTexture;
+                        bindingCounts->perStage[stage].samplerCount +=
+                            kUnimplementedSamplersPerExternalTexture;
+                    }
+
+                    dawn_native::ExternalTextureBindingExpansion bindingExpansion;
+
+                    BindGroupLayoutEntry plane0Entry =
+                        CreateSampledTextureBindingForExternalTexture(entry.binding,
+                                                                      entry.visibility);
+                    bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
+                    expandedOutput.push_back(plane0Entry);
+
+                    BindGroupLayoutEntry plane1Entry =
+                        CreateSampledTextureBindingForExternalTexture(
+                            nextOpenBindingNumberForNewEntry++, entry.visibility);
+                    bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
+                    expandedOutput.push_back(plane1Entry);
+
+                    BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
+                        nextOpenBindingNumberForNewEntry++, entry.visibility);
+                    bindingExpansion.params = BindingNumber(paramsEntry.binding);
+                    expandedOutput.push_back(paramsEntry);
+
+                    externalTextureBindingExpansions->insert(
+                        {BindingNumber(entry.binding), bindingExpansion});
+                } else {
+                    expandedOutput.push_back(entry);
+                }
+            }
+
+            return expandedOutput;
+        }
+    }  // anonymous namespace
+
+    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                                 const BindGroupLayoutDescriptor* descriptor,
+                                                 bool allowInternalBinding) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+        std::set<BindingNumber> bindingsSet;
+        BindingCounts bindingCounts = {};
+
+        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+            const BindGroupLayoutEntry& entry = descriptor->entries[i];
+            BindingNumber bindingNumber = BindingNumber(entry.binding);
+
+            DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+                            "Binding number (%u) exceeds the maximum binding number (%u).",
+                            uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+            DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
+                            "On entries[%u]: binding index (%u) was specified by a previous entry.",
+                            i, entry.binding);
+
+            DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
+                             "validating entries[%u]", i);
+
+            IncrementBindingCounts(&bindingCounts, entry);
+
+            bindingsSet.insert(bindingNumber);
+        }
+
+        DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
+
+        return {};
+    }
+
+    namespace {
+
+        bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+            if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
+                return true;
+            }
+
+            switch (a.bindingType) {
+                case BindingInfoType::Buffer:
+                    return a.buffer.type != b.buffer.type ||
+                           a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
+                           a.buffer.minBindingSize != b.buffer.minBindingSize;
+                case BindingInfoType::Sampler:
+                    return a.sampler.type != b.sampler.type;
+                case BindingInfoType::Texture:
+                    return a.texture.sampleType != b.texture.sampleType ||
+                           a.texture.viewDimension != b.texture.viewDimension ||
+                           a.texture.multisampled != b.texture.multisampled;
+                case BindingInfoType::StorageTexture:
+                    return a.storageTexture.access != b.storageTexture.access ||
+                           a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
+                           a.storageTexture.format != b.storageTexture.format;
+                case BindingInfoType::ExternalTexture:
+                    return false;
+            }
+            UNREACHABLE();
+        }
+
+        bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
+            return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+        }
+
+        bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
+            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+                return binding.buffer.hasDynamicOffset;
+            }
+            return false;
+        }
+
+        BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
+            BindingInfo bindingInfo;
+            bindingInfo.binding = BindingNumber(binding.binding);
+            bindingInfo.visibility = binding.visibility;
+
+            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+                bindingInfo.bindingType = BindingInfoType::Buffer;
+                bindingInfo.buffer = binding.buffer;
+            } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
+                bindingInfo.bindingType = BindingInfoType::Sampler;
+                bindingInfo.sampler = binding.sampler;
+            } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+                bindingInfo.bindingType = BindingInfoType::Texture;
+                bindingInfo.texture = binding.texture;
+
+                if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+                    bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+                }
+            } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+                bindingInfo.bindingType = BindingInfoType::StorageTexture;
+                bindingInfo.storageTexture = binding.storageTexture;
+
+                if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+                    bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
+                }
+            } else {
+                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+                FindInChain(binding.nextInChain, &externalTextureBindingLayout);
+                if (externalTextureBindingLayout != nullptr) {
+                    bindingInfo.bindingType = BindingInfoType::ExternalTexture;
+                }
+            }
+
+            return bindingInfo;
+        }
+
+        bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
+            const bool aIsBuffer = IsBufferBinding(a);
+            const bool bIsBuffer = IsBufferBinding(b);
+            if (aIsBuffer != bIsBuffer) {
+                // Always place buffers first.
+                return aIsBuffer;
+            }
+
+            if (aIsBuffer) {
+                bool aHasDynamicOffset = BindingHasDynamicOffset(a);
+                bool bHasDynamicOffset = BindingHasDynamicOffset(b);
+                ASSERT(bIsBuffer);
+                if (aHasDynamicOffset != bHasDynamicOffset) {
+                    // Buffers with dynamic offsets should come before those without.
+                    // This makes it easy to iterate over the dynamic buffer bindings
+                    // [0, dynamicBufferCount) during validation.
+                    return aHasDynamicOffset;
+                }
+                if (aHasDynamicOffset) {
+                    ASSERT(bHasDynamicOffset);
+                    ASSERT(a.binding != b.binding);
+                    // Above, we ensured that dynamic buffers are first. Now, ensure that
+                    // dynamic buffer bindings are in increasing order. This is because dynamic
+                    // buffer offsets are applied in increasing order of binding number.
+                    return a.binding < b.binding;
+                }
+            }
+
+            // This applies some defaults and gives us a single value to check for the binding type.
+            BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
+            BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
+
+            // Sort by type.
+            if (aInfo.bindingType != bInfo.bindingType) {
+                return aInfo.bindingType < bInfo.bindingType;
+            }
+
+            if (a.visibility != b.visibility) {
+                return a.visibility < b.visibility;
+            }
+
+            switch (aInfo.bindingType) {
+                case BindingInfoType::Buffer:
+                    if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
+                        return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
+                    }
+                    break;
+                case BindingInfoType::Sampler:
+                    if (aInfo.sampler.type != bInfo.sampler.type) {
+                        return aInfo.sampler.type < bInfo.sampler.type;
+                    }
+                    break;
+                case BindingInfoType::Texture:
+                    if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
+                        return aInfo.texture.multisampled < bInfo.texture.multisampled;
+                    }
+                    if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
+                        return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
+                    }
+                    if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
+                        return aInfo.texture.sampleType < bInfo.texture.sampleType;
+                    }
+                    break;
+                case BindingInfoType::StorageTexture:
+                    if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
+                        return aInfo.storageTexture.access < bInfo.storageTexture.access;
+                    }
+                    if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
+                        return aInfo.storageTexture.viewDimension <
+                               bInfo.storageTexture.viewDimension;
+                    }
+                    if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
+                        return aInfo.storageTexture.format < bInfo.storageTexture.format;
+                    }
+                    break;
+                case BindingInfoType::ExternalTexture:
+                    break;
+            }
+            return a.binding < b.binding;
+        }
+
+        // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+        // first.
+        bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+            BindingIndex lastBufferIndex{0};
+            BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+            for (BindingIndex i{0}; i < bindings.size(); ++i) {
+                if (bindings[i].bindingType == BindingInfoType::Buffer) {
+                    lastBufferIndex = std::max(i, lastBufferIndex);
+                } else {
+                    firstNonBufferIndex = std::min(i, firstNonBufferIndex);
+                }
+            }
+
+            // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+            // |firstNonBufferIndex| gets set to 0.
+            return firstNonBufferIndex >= lastBufferIndex;
+        }
+
+    }  // namespace
+
+    // BindGroupLayoutBase
+
+    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             PipelineCompatibilityToken pipelineCompatibilityToken,
+                                             ApiObjectBase::UntrackedByDeviceTag tag)
+        : ApiObjectBase(device, descriptor->label),
+          mPipelineCompatibilityToken(pipelineCompatibilityToken),
+          mUnexpandedBindingCount(descriptor->entryCount) {
+        std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
+            descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
+
+        std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
+
+        for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
+            const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
+
+            mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
+
+            if (IsBufferBinding(binding)) {
+                // Buffers must be contiguously packed at the start of the binding info.
+                ASSERT(GetBufferCount() == BindingIndex(i));
+            }
+            IncrementBindingCounts(&mBindingCounts, binding);
+
+            const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
+            ASSERT(inserted);
+        }
+        ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+        ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+    }
+
+    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
+        TrackInDevice();
+    }
+
+    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    BindGroupLayoutBase::~BindGroupLayoutBase() = default;
+
+    void BindGroupLayoutBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheBindGroupLayout(this);
+        }
+    }
+
+    // static
+    BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
+        return new BindGroupLayoutBase(device, ObjectBase::kError);
+    }
+
+    ObjectType BindGroupLayoutBase::GetType() const {
+        return ObjectType::BindGroupLayout;
+    }
+
+    const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+        ASSERT(!IsError());
+        return mBindingMap;
+    }
+
+    bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+        return mBindingMap.count(bindingNumber) != 0;
+    }
+
+    BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
+        ASSERT(!IsError());
+        const auto& it = mBindingMap.find(bindingNumber);
+        ASSERT(it != mBindingMap.end());
+        return it->second;
+    }
+
+    size_t BindGroupLayoutBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mPipelineCompatibilityToken);
+
+        // std::map is sorted by key, so two BGLs constructed in different orders
+        // will still record the same.
+        for (const auto [id, index] : mBindingMap) {
+            recorder.Record(id, index);
+
+            const BindingInfo& info = mBindingInfo[index];
+            recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
+                            info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
+                            info.texture.sampleType, info.texture.viewDimension,
+                            info.texture.multisampled, info.storageTexture.access,
+                            info.storageTexture.format, info.storageTexture.viewDimension);
+        }
+
+        return recorder.GetContentHash();
+    }
+
+    bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
+                                                       const BindGroupLayoutBase* b) const {
+        return a->IsLayoutEqual(b);
+    }
+
+    BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+        return mBindingInfo.size();
+    }
+
+    BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+        return BindingIndex(mBindingCounts.bufferCount);
+    }
+
+    BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
+        // This is a binding index because dynamic buffers are packed at the front of the binding
+        // info.
+        return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+                                         mBindingCounts.dynamicUniformBufferCount);
+    }
+
+    uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+        return mBindingCounts.unverifiedBufferCount;
+    }
+
+    uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
+        return mExternalTextureBindingExpansionMap.size();
+    }
+
+    const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+        return mBindingCounts;
+    }
+
+    const ExternalTextureBindingExpansionMap&
+    BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
+        return mExternalTextureBindingExpansionMap;
+    }
+
+    uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
+        return mUnexpandedBindingCount;
+    }
+
+    bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+                                            bool excludePipelineCompatibiltyToken) const {
+        if (!excludePipelineCompatibiltyToken &&
+            GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+            return false;
+        }
+        if (GetBindingCount() != other->GetBindingCount()) {
+            return false;
+        }
+        for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+            if (mBindingInfo[i] != other->mBindingInfo[i]) {
+                return false;
+            }
+        }
+        return mBindingMap == other->mBindingMap;
+    }
+
+    PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+        return mPipelineCompatibilityToken;
+    }
+
+    size_t BindGroupLayoutBase::GetBindingDataSize() const {
+        // | ------ buffer-specific ----------| ------------ object pointers -------------|
+        // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+        // Followed by:
+        // |---------buffer size array--------|
+        // |-uint64_t[mUnverifiedBufferCount]-|
+        size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
+        ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+        size_t bufferSizeArrayStart =
+            Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
+                  sizeof(uint64_t));
+        ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+        return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+    }
+
+    BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+        void* dataStart) const {
+        BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+        auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+        uint64_t* unverifiedBufferSizes = AlignPtr(
+            reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
+
+        ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+        ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+        ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
+
+        return {{bufferData, GetBufferCount()},
+                {bindings, GetBindingCount()},
+                {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+    }
+
+    bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
+        ASSERT(bindingIndex < GetBufferCount());
+        switch (GetBindingInfo(bindingIndex).buffer.type) {
+            case wgpu::BufferBindingType::Uniform:
+                return false;
+            case kInternalStorageBufferBinding:
+            case wgpu::BufferBindingType::Storage:
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+                return true;
+            case wgpu::BufferBindingType::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    std::string BindGroupLayoutBase::EntriesToString() const {
+        std::string entries = "[";
+        std::string sep = "";
+        const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
+        for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+            entries += absl::StrFormat("%s%s", sep, bindingInfo);
+            sep = ", ";
+        }
+        entries += "]";
+        return entries;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BindGroupLayout.h b/src/dawn/native/BindGroupLayout.h
new file mode 100644
index 0000000..5b91a2f
--- /dev/null
+++ b/src/dawn/native/BindGroupLayout.h
@@ -0,0 +1,170 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPLAYOUT_H_
+#define DAWNNATIVE_BINDGROUPLAYOUT_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+#include <map>
+
+namespace dawn::native {
+    // TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
+    struct ExternalTextureBindingExpansion {
+        BindingNumber plane0;
+        BindingNumber plane1;
+        BindingNumber params;
+    };
+
+    using ExternalTextureBindingExpansionMap =
+        std::map<BindingNumber, ExternalTextureBindingExpansion>;
+
+    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                                 const BindGroupLayoutDescriptor* descriptor,
+                                                 bool allowInternalBinding = false);
+
+    // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+    // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+    // into a packed range of |BindingIndex| integers.
+    class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+      public:
+        BindGroupLayoutBase(DeviceBase* device,
+                            const BindGroupLayoutDescriptor* descriptor,
+                            PipelineCompatibilityToken pipelineCompatibilityToken,
+                            ApiObjectBase::UntrackedByDeviceTag tag);
+        BindGroupLayoutBase(DeviceBase* device,
+                            const BindGroupLayoutDescriptor* descriptor,
+                            PipelineCompatibilityToken pipelineCompatibilityToken);
+        ~BindGroupLayoutBase() override;
+
+        static BindGroupLayoutBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        // A map from the BindingNumber to its packed BindingIndex.
+        using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+        const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+            ASSERT(!IsError());
+            ASSERT(bindingIndex < mBindingInfo.size());
+            return mBindingInfo[bindingIndex];
+        }
+        const BindingMap& GetBindingMap() const;
+        bool HasBinding(BindingNumber bindingNumber) const;
+        BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
+
+        // Functions necessary for the unordered_set<BGLBase*>-based cache.
+        size_t ComputeContentHash() override;
+
+        struct EqualityFunc {
+            bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
+        };
+
+        BindingIndex GetBindingCount() const;
+        // Returns |BindingIndex| because buffers are packed at the front.
+        BindingIndex GetBufferCount() const;
+        // Returns |BindingIndex| because dynamic buffers are packed at the front.
+        BindingIndex GetDynamicBufferCount() const;
+        uint32_t GetUnverifiedBufferCount() const;
+
+        // Used to get counts and validate them in pipeline layout creation. Other getters
+        // should be used to get typed integer counts.
+        const BindingCounts& GetBindingCountInfo() const;
+
+        uint32_t GetExternalTextureBindingCount() const;
+
+        // Used to specify unpacked external texture binding slots when transforming shader modules.
+        const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
+
+        uint32_t GetUnexpandedBindingCount() const;
+
+        // Tests that the BindingInfo of two bind groups are equal,
+        // ignoring their compatibility groups.
+        bool IsLayoutEqual(const BindGroupLayoutBase* other,
+                           bool excludePipelineCompatibiltyToken = false) const;
+        PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
+
+        struct BufferBindingData {
+            uint64_t offset;
+            uint64_t size;
+        };
+
+        struct BindingDataPointers {
+            ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+            ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+            ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
+        };
+
+        // Compute the amount of space / alignment required to store bindings for a bind group of
+        // this layout.
+        size_t GetBindingDataSize() const;
+        static constexpr size_t GetBindingDataAlignment() {
+            static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
+            return alignof(BufferBindingData);
+        }
+
+        BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+        bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
+
+        // Returns a detailed string representation of the layout entries for use in error messages.
+        std::string EntriesToString() const;
+
+      protected:
+        // Constructor used only for mocking and testing.
+        BindGroupLayoutBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        template <typename BindGroup>
+        SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
+            return SlabAllocator<BindGroup>(
+                size,  // bytes
+                Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
+                std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
+            );
+        }
+
+      private:
+        BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        BindingCounts mBindingCounts = {};
+        ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
+
+        // Map from BindGroupLayoutEntry.binding to packed indices.
+        BindingMap mBindingMap;
+
+        ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+
+        // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+        const PipelineCompatibilityToken mPipelineCompatibilityToken =
+            PipelineCompatibilityToken(0);
+
+        uint32_t mUnexpandedBindingCount;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BINDGROUPLAYOUT_H_
diff --git a/src/dawn/native/BindGroupTracker.h b/src/dawn/native/BindGroupTracker.h
new file mode 100644
index 0000000..72d0cf4
--- /dev/null
+++ b/src/dawn/native/BindGroupTracker.h
@@ -0,0 +1,142 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPTRACKER_H_
+#define DAWNNATIVE_BINDGROUPTRACKER_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/PipelineLayout.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+    // pipeline state or it changes.
+    // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+    // in other backends.
+    template <bool CanInheritBindGroups, typename DynamicOffset>
+    class BindGroupTrackerBase {
+      public:
+        void OnSetBindGroup(BindGroupIndex index,
+                            BindGroupBase* bindGroup,
+                            uint32_t dynamicOffsetCount,
+                            uint32_t* dynamicOffsets) {
+            ASSERT(index < kMaxBindGroupsTyped);
+
+            if (mBindGroupLayoutsMask[index]) {
+                // It is okay to only dirty bind groups that are used by the current pipeline
+                // layout. If the pipeline layout changes, then the bind groups it uses will
+                // become dirty.
+
+                if (mBindGroups[index] != bindGroup) {
+                    mDirtyBindGroups.set(index);
+                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+                }
+
+                if (dynamicOffsetCount > 0) {
+                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+                }
+            }
+
+            mBindGroups[index] = bindGroup;
+            mDynamicOffsetCounts[index] = dynamicOffsetCount;
+            SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+        }
+
+        void OnSetPipeline(PipelineBase* pipeline) {
+            mPipelineLayout = pipeline->GetLayout();
+        }
+
+      protected:
+        // The Derived class should call this before it applies bind groups.
+        void BeforeApply() {
+            if (mLastAppliedPipelineLayout == mPipelineLayout) {
+                return;
+            }
+
+            // Use the bind group layout mask to avoid marking unused bind groups as dirty.
+            mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+            // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+            // the first |k| matching bind groups may be inherited.
+            if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+                // Dirty bind groups that cannot be inherited.
+                BindGroupLayoutMask dirtiedGroups =
+                    ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+                mDirtyBindGroups |= dirtiedGroups;
+                mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+                // Clear any bind groups not in the mask.
+                mDirtyBindGroups &= mBindGroupLayoutsMask;
+                mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+            } else {
+                mDirtyBindGroups = mBindGroupLayoutsMask;
+                mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
+            }
+        }
+
+        // The Derived class should call this after it applies bind groups.
+        void AfterApply() {
+            // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+            // will be dirtied again by the next pipeline change.
+            mDirtyBindGroups.reset();
+            mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+            // Keep track of the last applied pipeline layout. This allows us to avoid computing
+            // the intersection of the dirty bind groups and bind group layout mask in next Draw
+            // or Dispatch (which is very hot code) until the layout is changed again.
+            mLastAppliedPipelineLayout = mPipelineLayout;
+        }
+
+        BindGroupLayoutMask mDirtyBindGroups = 0;
+        BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+        BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+        ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+        ityp::array<BindGroupIndex,
+                    std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+                    kMaxBindGroups>
+            mDynamicOffsets = {};
+
+        // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+        // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+        // to the bind group bindings.
+        PipelineLayoutBase* mPipelineLayout = nullptr;
+        PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+      private:
+        // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+        // in other backends.
+        static void SetDynamicOffsets(uint64_t* data,
+                                      uint32_t dynamicOffsetCount,
+                                      uint32_t* dynamicOffsets) {
+            for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+                data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
+            }
+        }
+
+        static void SetDynamicOffsets(uint32_t* data,
+                                      uint32_t dynamicOffsetCount,
+                                      uint32_t* dynamicOffsets) {
+            memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+        }
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BINDGROUPTRACKER_H_
diff --git a/src/dawn/native/BindingInfo.cpp b/src/dawn/native/BindingInfo.cpp
new file mode 100644
index 0000000..009735c
--- /dev/null
+++ b/src/dawn/native/BindingInfo.cpp
@@ -0,0 +1,195 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindingInfo.h"
+
+#include "dawn/native/ChainUtils_autogen.h"
+
+namespace dawn::native {
+
+    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+        bindingCounts->totalCount += 1;
+
+        uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+
+        if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+            ++bindingCounts->bufferCount;
+            const BufferBindingLayout& buffer = entry.buffer;
+
+            if (buffer.minBindingSize == 0) {
+                ++bindingCounts->unverifiedBufferCount;
+            }
+
+            switch (buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    if (buffer.hasDynamicOffset) {
+                        ++bindingCounts->dynamicUniformBufferCount;
+                    }
+                    perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+                    break;
+
+                case wgpu::BufferBindingType::Storage:
+                case kInternalStorageBufferBinding:
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    if (buffer.hasDynamicOffset) {
+                        ++bindingCounts->dynamicStorageBufferCount;
+                    }
+                    perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+                    break;
+
+                case wgpu::BufferBindingType::Undefined:
+                    // Can't get here due to the enclosing if statement.
+                    UNREACHABLE();
+                    break;
+            }
+        } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+            perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+        } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+            perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+        } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+            perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+        } else {
+            const ExternalTextureBindingLayout* externalTextureBindingLayout;
+            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+            if (externalTextureBindingLayout != nullptr) {
+                perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
+            }
+        }
+
+        ASSERT(perStageBindingCountMember != nullptr);
+        for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+            ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+        }
+    }
+
+    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+        bindingCounts->totalCount += rhs.totalCount;
+        bindingCounts->bufferCount += rhs.bufferCount;
+        bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+        bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+        bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+        for (SingleShaderStage stage : IterateStages(kAllStages)) {
+            bindingCounts->perStage[stage].sampledTextureCount +=
+                rhs.perStage[stage].sampledTextureCount;
+            bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+            bindingCounts->perStage[stage].storageBufferCount +=
+                rhs.perStage[stage].storageBufferCount;
+            bindingCounts->perStage[stage].storageTextureCount +=
+                rhs.perStage[stage].storageTextureCount;
+            bindingCounts->perStage[stage].uniformBufferCount +=
+                rhs.perStage[stage].uniformBufferCount;
+            bindingCounts->perStage[stage].externalTextureCount +=
+                rhs.perStage[stage].externalTextureCount;
+        }
+    }
+
+    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+        DAWN_INVALID_IF(
+            bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
+            "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+            "limit (%u).",
+            bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+
+        DAWN_INVALID_IF(
+            bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
+            "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+            "limit (%u).",
+            bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+
+        for (SingleShaderStage stage : IterateStages(kAllStages)) {
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].sampledTextureCount >
+                    kMaxSampledTexturesPerShaderStage,
+                "The number of sampled textures (%u) in the %s stage exceeds the maximum "
+                "per-stage limit (%u).",
+                bindingCounts.perStage[stage].sampledTextureCount, stage,
+                kMaxSampledTexturesPerShaderStage);
+
+            // The per-stage number of external textures is bound by the maximum sampled textures
+            // per stage.
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].externalTextureCount >
+                    kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+                "The number of external textures (%u) in the %s stage exceeds the maximum "
+                "per-stage limit (%u).",
+                bindingCounts.perStage[stage].externalTextureCount, stage,
+                kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].sampledTextureCount +
+                        (bindingCounts.perStage[stage].externalTextureCount *
+                         kSampledTexturesPerExternalTexture) >
+                    kMaxSampledTexturesPerShaderStage,
+                "The combination of sampled textures (%u) and external textures (%u) in the %s "
+                "stage exceeds the maximum per-stage limit (%u).",
+                bindingCounts.perStage[stage].sampledTextureCount,
+                bindingCounts.perStage[stage].externalTextureCount, stage,
+                kMaxSampledTexturesPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
+                "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
+                "(%u).",
+                bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].samplerCount +
+                        (bindingCounts.perStage[stage].externalTextureCount *
+                         kSamplersPerExternalTexture) >
+                    kMaxSamplersPerShaderStage,
+                "The combination of samplers (%u) and external textures (%u) in the %s stage "
+                "exceeds the maximum per-stage limit (%u).",
+                bindingCounts.perStage[stage].samplerCount,
+                bindingCounts.perStage[stage].externalTextureCount, stage,
+                kMaxSamplersPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
+                "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
+                "limit (%u).",
+                bindingCounts.perStage[stage].storageBufferCount, stage,
+                kMaxStorageBuffersPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].storageTextureCount >
+                    kMaxStorageTexturesPerShaderStage,
+                "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
+                "limit (%u).",
+                bindingCounts.perStage[stage].storageTextureCount, stage,
+                kMaxStorageTexturesPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
+                "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
+                "limit (%u).",
+                bindingCounts.perStage[stage].uniformBufferCount, stage,
+                kMaxUniformBuffersPerShaderStage);
+
+            DAWN_INVALID_IF(
+                bindingCounts.perStage[stage].uniformBufferCount +
+                        (bindingCounts.perStage[stage].externalTextureCount *
+                         kUniformsPerExternalTexture) >
+                    kMaxUniformBuffersPerShaderStage,
+                "The combination of uniform buffers (%u) and external textures (%u) in the %s "
+                "stage exceeds the maximum per-stage limit (%u).",
+                bindingCounts.perStage[stage].uniformBufferCount,
+                bindingCounts.perStage[stage].externalTextureCount, stage,
+                kMaxUniformBuffersPerShaderStage);
+        }
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BindingInfo.h b/src/dawn/native/BindingInfo.h
new file mode 100644
index 0000000..027ce52
--- /dev/null
+++ b/src/dawn/native/BindingInfo.h
@@ -0,0 +1,98 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDINGINFO_H_
+#define DAWNNATIVE_BINDINGINFO_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PerStage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+
+    // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+    static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+        kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+
+    static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+        BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+
+    // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+    // API. There should never be more bindings than the max per stage, for each stage.
+    static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+        3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+             kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+             kMaxUniformBuffersPerShaderStage);
+
+    static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+        BindingIndex(kMaxBindingsPerPipelineLayout);
+
+    // TODO(enga): Figure out a good number for this.
+    static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+
+    enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
+
+    struct BindingInfo {
+        BindingNumber binding;
+        wgpu::ShaderStage visibility;
+
+        BindingInfoType bindingType;
+
+        // TODO(dawn:527): These four values could be made into a union.
+        BufferBindingLayout buffer;
+        SamplerBindingLayout sampler;
+        TextureBindingLayout texture;
+        StorageTextureBindingLayout storageTexture;
+    };
+
+    struct BindingSlot {
+        BindGroupIndex group;
+        BindingNumber binding;
+    };
+
+    struct PerStageBindingCounts {
+        uint32_t sampledTextureCount;
+        uint32_t samplerCount;
+        uint32_t storageBufferCount;
+        uint32_t storageTextureCount;
+        uint32_t uniformBufferCount;
+        uint32_t externalTextureCount;
+    };
+
+    struct BindingCounts {
+        uint32_t totalCount;
+        uint32_t bufferCount;
+        uint32_t unverifiedBufferCount;  // Buffers with minimum buffer size unspecified
+        uint32_t dynamicUniformBufferCount;
+        uint32_t dynamicStorageBufferCount;
+        PerStage<PerStageBindingCounts> perStage;
+    };
+
+    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+
+    // For buffer size validation
+    using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BINDINGINFO_H_
diff --git a/src/dawn/native/BuddyAllocator.cpp b/src/dawn/native/BuddyAllocator.cpp
new file mode 100644
index 0000000..76d7a65
--- /dev/null
+++ b/src/dawn/native/BuddyAllocator.cpp
@@ -0,0 +1,264 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BuddyAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+namespace dawn::native {
+
+    BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+        ASSERT(IsPowerOfTwo(maxSize));
+
+        mFreeLists.resize(Log2(mMaxBlockSize) + 1);
+
+        // Insert the level0 free block.
+        mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
+        mFreeLists[0] = {mRoot};
+    }
+
+    BuddyAllocator::~BuddyAllocator() {
+        if (mRoot) {
+            DeleteBlock(mRoot);
+        }
+    }
+
+    uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
+        return ComputeNumOfFreeBlocks(mRoot);
+    }
+
+    uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
+        if (block->mState == BlockState::Free) {
+            return 1;
+        } else if (block->mState == BlockState::Split) {
+            return ComputeNumOfFreeBlocks(block->split.pLeft) +
+                   ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
+        }
+        return 0;
+    }
+
+    uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
+        // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
+        // However, mFreeList zero-indexed by level.
+        // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
+        return Log2(mMaxBlockSize) - Log2(blockSize);
+    }
+
+    uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
+                                                     uint64_t alignment) const {
+        ASSERT(IsPowerOfTwo(alignment));
+        // The current level is the level that corresponds to the allocation size. The free list may
+        // not contain a block at that level until a larger one gets allocated (and splits).
+        // Continue to go up the tree until such a larger block exists.
+        //
+        // Even if the block exists at the level, it cannot be used if it's offset is unaligned.
+        // When the alignment is also a power-of-two, we simply use the next free block whose size
+        // is greater than or equal to the alignment value.
+        //
+        //  After one 8-byte allocation:
+        //
+        //  Level          --------------------------------
+        //      0       32 |               S              |
+        //                 --------------------------------
+        //      1       16 |       S       |       F2     |       S - split
+        //                 --------------------------------       F - free
+        //      2       8  |   Aa  |   F1  |              |       A - allocated
+        //                 --------------------------------
+        //
+        //  Allocate(size=8, alignment=8) will be satisfied by using F1.
+        //  Allocate(size=8, alignment=4) will be satified by using F1.
+        //  Allocate(size=8, alignment=16) will be satisified by using F2.
+        //
+        for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
+            size_t currLevel = allocationBlockLevel - ii;
+            BuddyBlock* freeBlock = mFreeLists[currLevel].head;
+            if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
+                return currLevel;
+            }
+        }
+        return kInvalidOffset;  // No free block exists at any level.
+    }
+
+    // Inserts existing free block into the free-list.
+    // Called by allocate upon splitting to insert a child block into a free-list.
+    // Note: Always insert into the head of the free-list. As when a larger free block at a lower
+    // level was split, there were no smaller free blocks at a higher level to allocate.
+    void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
+        ASSERT(block->mState == BlockState::Free);
+
+        // Inserted block is now the front (no prev).
+        block->free.pPrev = nullptr;
+
+        // Old head is now the inserted block's next.
+        block->free.pNext = mFreeLists[level].head;
+
+        // Block already in HEAD position (ex. right child was inserted first).
+        if (mFreeLists[level].head != nullptr) {
+            // Old head's previous is the inserted block.
+            mFreeLists[level].head->free.pPrev = block;
+        }
+
+        mFreeLists[level].head = block;
+    }
+
+    void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
+        ASSERT(block->mState == BlockState::Free);
+
+        if (mFreeLists[level].head == block) {
+            // Block is in HEAD position.
+            mFreeLists[level].head = mFreeLists[level].head->free.pNext;
+        } else {
+            // Block is after HEAD position.
+            BuddyBlock* pPrev = block->free.pPrev;
+            BuddyBlock* pNext = block->free.pNext;
+
+            ASSERT(pPrev != nullptr);
+            ASSERT(pPrev->mState == BlockState::Free);
+
+            pPrev->free.pNext = pNext;
+
+            if (pNext != nullptr) {
+                ASSERT(pNext->mState == BlockState::Free);
+                pNext->free.pPrev = pPrev;
+            }
+        }
+    }
+
+    uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
+        if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
+            return kInvalidOffset;
+        }
+
+        // Compute the level
+        const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
+
+        ASSERT(allocationSizeToLevel < mFreeLists.size());
+
+        uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
+
+        // Error when no free blocks exist (allocator is full)
+        if (currBlockLevel == kInvalidOffset) {
+            return kInvalidOffset;
+        }
+
+        // Split free blocks level-by-level.
+        // Terminate when the current block level is equal to the computed level of the requested
+        // allocation.
+        BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
+
+        for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
+            ASSERT(currBlock->mState == BlockState::Free);
+
+            // Remove curr block (about to be split).
+            RemoveFreeBlock(currBlock, currBlockLevel);
+
+            // Create two free child blocks (the buddies).
+            const uint64_t nextLevelSize = currBlock->mSize / 2;
+            BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
+            BuddyBlock* rightChildBlock =
+                new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
+
+            // Remember the parent to merge these back upon de-allocation.
+            rightChildBlock->pParent = currBlock;
+            leftChildBlock->pParent = currBlock;
+
+            // Make them buddies.
+            leftChildBlock->pBuddy = rightChildBlock;
+            rightChildBlock->pBuddy = leftChildBlock;
+
+            // Insert the children back into the free list into the next level.
+            // The free list does not require a specific order. However, an order is specified as
+            // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
+            InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
+            InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
+
+            // Curr block is now split.
+            currBlock->mState = BlockState::Split;
+            currBlock->split.pLeft = leftChildBlock;
+
+            // Decend down into the next level.
+            currBlock = leftChildBlock;
+        }
+
+        // Remove curr block from free-list (now allocated).
+        RemoveFreeBlock(currBlock, currBlockLevel);
+        currBlock->mState = BlockState::Allocated;
+
+        return currBlock->mOffset;
+    }
+
+    void BuddyAllocator::Deallocate(uint64_t offset) {
+        BuddyBlock* curr = mRoot;
+
+        // TODO(crbug.com/dawn/827): Optimize de-allocation.
+        // Passing allocationSize directly will avoid the following level-by-level search;
+        // however, it requires the size information to be stored outside the allocator.
+
+        // Search for the free block node that corresponds to the block offset.
+        size_t currBlockLevel = 0;
+        while (curr->mState == BlockState::Split) {
+            if (offset < curr->split.pLeft->pBuddy->mOffset) {
+                curr = curr->split.pLeft;
+            } else {
+                curr = curr->split.pLeft->pBuddy;
+            }
+
+            currBlockLevel++;
+        }
+
+        ASSERT(curr->mState == BlockState::Allocated);
+
+        // Ensure the block is at the correct level
+        ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
+
+        // Mark curr free so we can merge.
+        curr->mState = BlockState::Free;
+
+        // Merge the buddies (LevelN-to-Level0).
+        while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
+            // Remove the buddy.
+            RemoveFreeBlock(curr->pBuddy, currBlockLevel);
+
+            BuddyBlock* parent = curr->pParent;
+
+            // The buddies were inserted in a specific order but
+            // could be deleted in any order.
+            DeleteBlock(curr->pBuddy);
+            DeleteBlock(curr);
+
+            // Parent is now free.
+            parent->mState = BlockState::Free;
+
+            // Ascend up to the next level (parent block).
+            curr = parent;
+            currBlockLevel--;
+        }
+
+        InsertFreeBlock(curr, currBlockLevel);
+    }
+
+    // Helper which deletes a block in the tree recursively (post-order).
+    void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
+        ASSERT(block != nullptr);
+
+        if (block->mState == BlockState::Split) {
+            // Delete the pair in same order we inserted.
+            DeleteBlock(block->split.pLeft->pBuddy);
+            DeleteBlock(block->split.pLeft);
+        }
+        delete block;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BuddyAllocator.h b/src/dawn/native/BuddyAllocator.h
new file mode 100644
index 0000000..31c8b0b
--- /dev/null
+++ b/src/dawn/native/BuddyAllocator.h
@@ -0,0 +1,117 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUDDYALLOCATOR_H_
+#define DAWNNATIVE_BUDDYALLOCATOR_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+namespace dawn::native {
+
+    // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
+    // Memory is split into halves until just large enough to fit to the request. This
+    // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
+    // returning the starting offset whose size is guaranteed to be greater than or equal to the
+    // allocation size. To deallocate, the same offset is used to find the corresponding block.
+    //
+    // Internally, it manages a free list to track free blocks in a full binary tree.
+    // Every index in the free list corresponds to a level in the tree. That level also determines
+    // the size of the block to be used to satisfy the request. The first level (index=0) represents
+    // the root whose size is also called the max block size.
+    //
+    class BuddyAllocator {
+      public:
+        BuddyAllocator(uint64_t maxSize);
+        ~BuddyAllocator();
+
+        // Required methods.
+        uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
+        void Deallocate(uint64_t offset);
+
+        // For testing purposes only.
+        uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+
+        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+      private:
+        uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
+        uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
+
+        enum class BlockState { Free, Split, Allocated };
+
+        struct BuddyBlock {
+            BuddyBlock(uint64_t size, uint64_t offset)
+                : mOffset(offset), mSize(size), mState(BlockState::Free) {
+                free.pPrev = nullptr;
+                free.pNext = nullptr;
+            }
+
+            uint64_t mOffset;
+            uint64_t mSize;
+
+            // Pointer to this block's buddy, iff parent is split.
+            // Used to quickly merge buddy blocks upon de-allocate.
+            BuddyBlock* pBuddy = nullptr;
+            BuddyBlock* pParent = nullptr;
+
+            // Track whether this block has been split or not.
+            BlockState mState;
+
+            struct FreeLinks {
+                BuddyBlock* pPrev;
+                BuddyBlock* pNext;
+            };
+
+            struct SplitLink {
+                BuddyBlock* pLeft;
+            };
+
+            union {
+                // Used upon allocation.
+                // Avoids searching for the next free block.
+                FreeLinks free;
+
+                // Used upon de-allocation.
+                // Had this block split upon allocation, it and it's buddy is to be deleted.
+                SplitLink split;
+            };
+        };
+
+        void InsertFreeBlock(BuddyBlock* block, size_t level);
+        void RemoveFreeBlock(BuddyBlock* block, size_t level);
+        void DeleteBlock(BuddyBlock* block);
+
+        uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
+
+        // Keep track the head and tail (for faster insertion/removal).
+        struct BlockList {
+            BuddyBlock* head = nullptr;  // First free block in level.
+            // TODO(crbug.com/dawn/827): Track the tail.
+        };
+
+        BuddyBlock* mRoot = nullptr;  // Used to deallocate non-free blocks.
+
+        uint64_t mMaxBlockSize = 0;
+
+        // List of linked-lists of free blocks where the index is a level that
+        // corresponds to a power-of-two sized block.
+        std::vector<BlockList> mFreeLists;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BUDDYALLOCATOR_H_
diff --git a/src/dawn/native/BuddyMemoryAllocator.cpp b/src/dawn/native/BuddyMemoryAllocator.cpp
new file mode 100644
index 0000000..faee03e
--- /dev/null
+++ b/src/dawn/native/BuddyMemoryAllocator.cpp
@@ -0,0 +1,120 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BuddyMemoryAllocator.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+
+namespace dawn::native {
+
+    BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+                                               uint64_t memoryBlockSize,
+                                               ResourceHeapAllocator* heapAllocator)
+        : mMemoryBlockSize(memoryBlockSize),
+          mBuddyBlockAllocator(maxSystemSize),
+          mHeapAllocator(heapAllocator) {
+        ASSERT(memoryBlockSize <= maxSystemSize);
+        ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+        ASSERT(maxSystemSize % mMemoryBlockSize == 0);
+
+        mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+    }
+
+    uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+        ASSERT(offset != BuddyAllocator::kInvalidOffset);
+        return offset / mMemoryBlockSize;
+    }
+
+    ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+                                                                           uint64_t alignment) {
+        ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+
+        if (allocationSize == 0) {
+            return std::move(invalidAllocation);
+        }
+
+        // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+        if (allocationSize > mMemoryBlockSize) {
+            return std::move(invalidAllocation);
+        }
+
+        // Round allocation size to nearest power-of-two.
+        allocationSize = NextPowerOfTwo(allocationSize);
+
+        // Allocation cannot exceed the memory size.
+        if (allocationSize > mMemoryBlockSize) {
+            return std::move(invalidAllocation);
+        }
+
+        // Attempt to sub-allocate a block of the requested size.
+        const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+        if (blockOffset == BuddyAllocator::kInvalidOffset) {
+            return std::move(invalidAllocation);
+        }
+
+        const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+            // Transfer ownership to this allocator
+            std::unique_ptr<ResourceHeapBase> memory;
+            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
+            mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+        }
+
+        mTrackedSubAllocations[memoryIndex].refcount++;
+
+        AllocationInfo info;
+        info.mBlockOffset = blockOffset;
+        info.mMethod = AllocationMethod::kSubAllocated;
+
+        // Allocation offset is always local to the memory.
+        const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
+
+        return ResourceMemoryAllocation{
+            info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+    }
+
+    void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+        const AllocationInfo info = allocation.GetInfo();
+
+        ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+
+        const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+
+        ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+        mTrackedSubAllocations[memoryIndex].refcount--;
+
+        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+            mHeapAllocator->DeallocateResourceHeap(
+                std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+        }
+
+        mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+    }
+
+    uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+        return mMemoryBlockSize;
+    }
+
+    uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+        uint64_t count = 0;
+        for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+            if (allocation.refcount > 0) {
+                count++;
+            }
+        }
+        return count;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/BuddyMemoryAllocator.h b/src/dawn/native/BuddyMemoryAllocator.h
new file mode 100644
index 0000000..7fcfe71
--- /dev/null
+++ b/src/dawn/native/BuddyMemoryAllocator.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+#define DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+
+#include "dawn/native/BuddyAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::native {
+
+    class ResourceHeapAllocator;
+
+    // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+    // memory created by MemoryAllocator clients. It creates a very large buddy system
+    // where backing device memory blocks equal a specified level in the system.
+    //
+    // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+    // memory index and should the memory not exist, it is created. If two sub-allocations share the
+    // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+    // release the other prematurely.
+    //
+    // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+    // It should also outlive all the resources that are in the buddy allocator.
+    class BuddyMemoryAllocator {
+      public:
+        BuddyMemoryAllocator(uint64_t maxSystemSize,
+                             uint64_t memoryBlockSize,
+                             ResourceHeapAllocator* heapAllocator);
+        ~BuddyMemoryAllocator() = default;
+
+        ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
+                                                         uint64_t alignment);
+        void Deallocate(const ResourceMemoryAllocation& allocation);
+
+        uint64_t GetMemoryBlockSize() const;
+
+        // For testing purposes.
+        uint64_t ComputeTotalNumOfHeapsForTesting() const;
+
+      private:
+        uint64_t GetMemoryIndex(uint64_t offset) const;
+
+        uint64_t mMemoryBlockSize = 0;
+
+        BuddyAllocator mBuddyBlockAllocator;
+        ResourceHeapAllocator* mHeapAllocator;
+
+        struct TrackedSubAllocations {
+            size_t refcount = 0;
+            std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
+        };
+
+        std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
diff --git a/src/dawn/native/Buffer.cpp b/src/dawn/native/Buffer.cpp
new file mode 100644
index 0000000..f324597
--- /dev/null
+++ b/src/dawn/native/Buffer.cpp
@@ -0,0 +1,562 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Assert.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cstdio>
+#include <cstring>
+#include <utility>
+
+namespace dawn::native {
+
+    namespace {
+        struct MapRequestTask : QueueBase::TaskInFlight {
+            MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
+                : buffer(std::move(buffer)), id(id) {
+            }
+            void Finish() override {
+                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
+            }
+            void HandleDeviceLoss() override {
+                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
+            }
+            ~MapRequestTask() override = default;
+
+          private:
+            Ref<BufferBase> buffer;
+            MapRequestID id;
+        };
+
+        class ErrorBuffer final : public BufferBase {
+          public:
+            ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+                : BufferBase(device, descriptor, ObjectBase::kError) {
+                if (descriptor->mappedAtCreation) {
+                    // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
+                    // is invalid, and on 32bit systems we should avoid a narrowing conversion that
+                    // would make size = 1 << 32 + 1 allocate one byte.
+                    bool isValidSize =
+                        descriptor->size != 0 &&
+                        descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
+
+                    if (isValidSize) {
+                        mFakeMappedData =
+                            std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
+                    }
+                    // Since error buffers in this case may allocate memory, we need to track them
+                    // for destruction on the device.
+                    TrackInDevice();
+                }
+            }
+
+          private:
+            bool IsCPUWritableAtCreation() const override {
+                UNREACHABLE();
+            }
+
+            MaybeError MapAtCreationImpl() override {
+                UNREACHABLE();
+            }
+
+            MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+                UNREACHABLE();
+            }
+
+            void* GetMappedPointerImpl() override {
+                return mFakeMappedData.get();
+            }
+
+            void UnmapImpl() override {
+                mFakeMappedData.reset();
+            }
+
+            std::unique_ptr<uint8_t[]> mFakeMappedData;
+        };
+
+    }  // anonymous namespace
+
+    MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+        DAWN_TRY(ValidateBufferUsage(descriptor->usage));
+
+        wgpu::BufferUsage usage = descriptor->usage;
+
+        DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
+
+        const wgpu::BufferUsage kMapWriteAllowedUsages =
+            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+        DAWN_INVALID_IF(
+            usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+            "usage is %s.",
+            usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
+
+        const wgpu::BufferUsage kMapReadAllowedUsages =
+            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+        DAWN_INVALID_IF(
+            usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+            "usage is %s.",
+            usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
+
+        DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+                        "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+                        descriptor->size);
+
+        return {};
+    }
+
+    // Buffer
+
+    BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
+        : ApiObjectBase(device, descriptor->label),
+          mSize(descriptor->size),
+          mUsage(descriptor->usage),
+          mState(BufferState::Unmapped) {
+        // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+        // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
+        if (mUsage & wgpu::BufferUsage::Storage) {
+            mUsage |= kReadOnlyStorageBuffer;
+        }
+
+        // The query resolve buffer need to be used as a storage buffer in the internal compute
+        // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
+        // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
+        // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
+        // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
+        // as storage buffer if it's created without Storage usage.
+        if (mUsage & wgpu::BufferUsage::QueryResolve) {
+            mUsage |= kInternalStorageBuffer;
+        }
+
+        // We also add internal storage usage for Indirect buffers for some transformations before
+        // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
+        // D3D12), since these transformations involve binding them as storage buffers for use in a
+        // compute pass.
+        if (mUsage & wgpu::BufferUsage::Indirect) {
+            mUsage |= kInternalStorageBuffer;
+        }
+
+        TrackInDevice();
+    }
+
+    BufferBase::BufferBase(DeviceBase* device,
+                           const BufferDescriptor* descriptor,
+                           ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+        if (descriptor->mappedAtCreation) {
+            mState = BufferState::MappedAtCreation;
+            mMapOffset = 0;
+            mMapSize = mSize;
+        }
+    }
+
+    BufferBase::BufferBase(DeviceBase* device, BufferState state)
+        : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
+        TrackInDevice();
+    }
+
+    BufferBase::~BufferBase() {
+        ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+    }
+
+    void BufferBase::DestroyImpl() {
+        if (mState == BufferState::Mapped) {
+            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+        } else if (mState == BufferState::MappedAtCreation) {
+            if (mStagingBuffer != nullptr) {
+                mStagingBuffer.reset();
+            } else if (mSize != 0) {
+                UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+            }
+        }
+        mState = BufferState::Destroyed;
+    }
+
+    // static
+    BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+        return new ErrorBuffer(device, descriptor);
+    }
+
+    ObjectType BufferBase::GetType() const {
+        return ObjectType::Buffer;
+    }
+
+    uint64_t BufferBase::GetSize() const {
+        ASSERT(!IsError());
+        return mSize;
+    }
+
+    uint64_t BufferBase::GetAllocatedSize() const {
+        ASSERT(!IsError());
+        // The backend must initialize this value.
+        ASSERT(mAllocatedSize != 0);
+        return mAllocatedSize;
+    }
+
+    wgpu::BufferUsage BufferBase::GetUsage() const {
+        ASSERT(!IsError());
+        return mUsage;
+    }
+
+    MaybeError BufferBase::MapAtCreation() {
+        DAWN_TRY(MapAtCreationInternal());
+
+        void* ptr;
+        size_t size;
+        if (mSize == 0) {
+            return {};
+        } else if (mStagingBuffer) {
+            // If there is a staging buffer for initialization, clear its contents directly.
+            // It should be exactly as large as the buffer allocation.
+            ptr = mStagingBuffer->GetMappedPointer();
+            size = mStagingBuffer->GetSize();
+            ASSERT(size == GetAllocatedSize());
+        } else {
+            // Otherwise, the buffer is directly mappable on the CPU.
+            ptr = GetMappedPointerImpl();
+            size = GetAllocatedSize();
+        }
+
+        DeviceBase* device = GetDevice();
+        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            memset(ptr, uint8_t(0u), size);
+            SetIsDataInitialized();
+            device->IncrementLazyClearCountForTesting();
+        } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            memset(ptr, uint8_t(1u), size);
+        }
+
+        return {};
+    }
+
+    MaybeError BufferBase::MapAtCreationInternal() {
+        ASSERT(!IsError());
+        mMapOffset = 0;
+        mMapSize = mSize;
+
+        // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
+        // Skip handling 0-sized buffers so we don't try to map them in the backend.
+        if (mSize != 0) {
+            // Mappable buffers don't use a staging buffer and are just as if mapped through
+            // MapAsync.
+            if (IsCPUWritableAtCreation()) {
+                DAWN_TRY(MapAtCreationImpl());
+            } else {
+                // If any of these fail, the buffer will be deleted and replaced with an error
+                // buffer. The staging buffer is used to return mappable data to inititalize the
+                // buffer contents. Allocate one as large as the real buffer size so that every byte
+                // is initialized.
+                // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
+                // buffer so we don't create many small buffers.
+                DAWN_TRY_ASSIGN(mStagingBuffer,
+                                GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
+            }
+        }
+
+        // Only set the state to mapped at creation if we did no fail any point in this helper.
+        // Otherwise, if we override the default unmapped state before succeeding to create a
+        // staging buffer, we will have issues when we try to destroy the buffer.
+        mState = BufferState::MappedAtCreation;
+        return {};
+    }
+
+    MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
+        ASSERT(!IsError());
+
+        switch (mState) {
+            case BufferState::Destroyed:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
+            case BufferState::Mapped:
+            case BufferState::MappedAtCreation:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
+            case BufferState::Unmapped:
+                return {};
+        }
+        UNREACHABLE();
+    }
+
+    void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+        ASSERT(!IsError());
+        if (mMapCallback != nullptr && mapID == mLastMapID) {
+            // Tag the callback as fired before firing it, otherwise it could fire a second time if
+            // for example buffer.Unmap() is called inside the application-provided callback.
+            WGPUBufferMapCallback callback = mMapCallback;
+            mMapCallback = nullptr;
+
+            if (GetDevice()->IsLost()) {
+                callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+            } else {
+                callback(status, mMapUserdata);
+            }
+        }
+    }
+
+    void BufferBase::APIMapAsync(wgpu::MapMode mode,
+                                 size_t offset,
+                                 size_t size,
+                                 WGPUBufferMapCallback callback,
+                                 void* userdata) {
+        // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+        // possible to default the function argument (because there is the callback later in the
+        // argument list)
+        if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
+            size = mSize - offset;
+        }
+
+        WGPUBufferMapAsyncStatus status;
+        if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+                                       "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
+                                       size)) {
+            if (callback) {
+                callback(status, userdata);
+            }
+            return;
+        }
+        ASSERT(!IsError());
+
+        mLastMapID++;
+        mMapMode = mode;
+        mMapOffset = offset;
+        mMapSize = size;
+        mMapCallback = callback;
+        mMapUserdata = userdata;
+        mState = BufferState::Mapped;
+
+        if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+            CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
+            return;
+        }
+        std::unique_ptr<MapRequestTask> request =
+            std::make_unique<MapRequestTask>(this, mLastMapID);
+        GetDevice()->GetQueue()->TrackTask(std::move(request),
+                                           GetDevice()->GetPendingCommandSerial());
+    }
+
+    void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+        return GetMappedRange(offset, size, true);
+    }
+
+    const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+        return GetMappedRange(offset, size, false);
+    }
+
+    void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
+        if (!CanGetMappedRange(writable, offset, size)) {
+            return nullptr;
+        }
+
+        if (mStagingBuffer != nullptr) {
+            return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+        }
+        if (mSize == 0) {
+            return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+        }
+        uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
+        return start == nullptr ? nullptr : start + offset;
+    }
+
+    void BufferBase::APIDestroy() {
+        Destroy();
+    }
+
+    MaybeError BufferBase::CopyFromStagingBuffer() {
+        ASSERT(mStagingBuffer);
+        if (mSize == 0) {
+            // Staging buffer is not created if zero size.
+            ASSERT(mStagingBuffer == nullptr);
+            return {};
+        }
+
+        DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
+                                                      GetAllocatedSize()));
+
+        DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
+        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+        return {};
+    }
+
+    void BufferBase::APIUnmap() {
+        if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
+            return;
+        }
+        Unmap();
+    }
+
+    void BufferBase::Unmap() {
+        UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+    }
+
+    void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
+        if (mState == BufferState::Mapped) {
+            // A map request can only be called once, so this will fire only if the request wasn't
+            // completed before the Unmap.
+            // Callbacks are not fired if there is no callback registered, so this is correct for
+            // mappedAtCreation = true.
+            CallMapCallback(mLastMapID, callbackStatus);
+            UnmapImpl();
+
+            mMapCallback = nullptr;
+            mMapUserdata = 0;
+        } else if (mState == BufferState::MappedAtCreation) {
+            if (mStagingBuffer != nullptr) {
+                GetDevice()->ConsumedError(CopyFromStagingBuffer());
+            } else if (mSize != 0) {
+                UnmapImpl();
+            }
+        }
+
+        mState = BufferState::Unmapped;
+    }
+
+    MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+                                            size_t offset,
+                                            size_t size,
+                                            WGPUBufferMapAsyncStatus* status) const {
+        *status = WGPUBufferMapAsyncStatus_DeviceLost;
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+        *status = WGPUBufferMapAsyncStatus_Error;
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_INVALID_IF(uint64_t(offset) > mSize,
+                        "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
+                        this);
+
+        DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
+
+        DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
+                        "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+                        offset, size, mSize, this);
+
+        switch (mState) {
+            case BufferState::Mapped:
+            case BufferState::MappedAtCreation:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
+            case BufferState::Destroyed:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+            case BufferState::Unmapped:
+                break;
+        }
+
+        bool isReadMode = mode & wgpu::MapMode::Read;
+        bool isWriteMode = mode & wgpu::MapMode::Write;
+        DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+                        wgpu::MapMode::Write, wgpu::MapMode::Read);
+
+        if (mode & wgpu::MapMode::Read) {
+            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+                            "The buffer usages (%s) do not contain %s.", mUsage,
+                            wgpu::BufferUsage::MapRead);
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+                            "The buffer usages (%s) do not contain %s.", mUsage,
+                            wgpu::BufferUsage::MapWrite);
+        }
+
+        *status = WGPUBufferMapAsyncStatus_Success;
+        return {};
+    }
+
+    bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+        if (offset % 8 != 0 || size % 4 != 0) {
+            return false;
+        }
+
+        if (size > mMapSize || offset < mMapOffset) {
+            return false;
+        }
+
+        size_t offsetInMappedRange = offset - mMapOffset;
+        if (offsetInMappedRange > mMapSize - size) {
+            return false;
+        }
+
+        // Note that:
+        //
+        //   - We don't check that the device is alive because the application can ask for the
+        //     mapped pointer before it knows, and even Dawn knows, that the device was lost, and
+        //     still needs to work properly.
+        //   - We don't check that the object is alive because we need to return mapped pointers
+        //     for error buffers too.
+
+        switch (mState) {
+            // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
+            case BufferState::MappedAtCreation:
+                return true;
+
+            case BufferState::Mapped:
+                ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
+                       bool(mMapMode & wgpu::MapMode::Write));
+                return !writable || (mMapMode & wgpu::MapMode::Write);
+
+            case BufferState::Unmapped:
+            case BufferState::Destroyed:
+                return false;
+        }
+        UNREACHABLE();
+    }
+
+    MaybeError BufferBase::ValidateUnmap() const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+        switch (mState) {
+            case BufferState::Mapped:
+            case BufferState::MappedAtCreation:
+                // A buffer may be in the Mapped state if it was created with mappedAtCreation
+                // even if it did not have a mappable usage.
+                return {};
+            case BufferState::Unmapped:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
+            case BufferState::Destroyed:
+                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+        }
+        UNREACHABLE();
+    }
+
+    void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+        CallMapCallback(mapID, status);
+    }
+
+    bool BufferBase::NeedsInitialization() const {
+        return !mIsDataInitialized &&
+               GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+    }
+
+    bool BufferBase::IsDataInitialized() const {
+        return mIsDataInitialized;
+    }
+
+    void BufferBase::SetIsDataInitialized() {
+        mIsDataInitialized = true;
+    }
+
+    bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+        return offset == 0 && size == GetSize();
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Buffer.h b/src/dawn/native/Buffer.h
new file mode 100644
index 0000000..2a9759f
--- /dev/null
+++ b/src/dawn/native/Buffer.h
@@ -0,0 +1,135 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUFFER_H_
+#define DAWNNATIVE_BUFFER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+    struct CopyTextureToBufferCmd;
+
+    enum class MapType : uint32_t;
+
+    MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
+
+    static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+        wgpu::BufferUsage::Indirect;
+
+    static constexpr wgpu::BufferUsage kMappableBufferUsages =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+
+    class BufferBase : public ApiObjectBase {
+      public:
+        enum class BufferState {
+            Unmapped,
+            Mapped,
+            MappedAtCreation,
+            Destroyed,
+        };
+        BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
+
+        static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+
+        ObjectType GetType() const override;
+
+        uint64_t GetSize() const;
+        uint64_t GetAllocatedSize() const;
+        wgpu::BufferUsage GetUsage() const;
+
+        MaybeError MapAtCreation();
+        void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+        MaybeError ValidateCanUseOnQueueNow() const;
+
+        bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+        bool NeedsInitialization() const;
+        bool IsDataInitialized() const;
+        void SetIsDataInitialized();
+
+        void* GetMappedRange(size_t offset, size_t size, bool writable = true);
+        void Unmap();
+
+        // Dawn API
+        void APIMapAsync(wgpu::MapMode mode,
+                         size_t offset,
+                         size_t size,
+                         WGPUBufferMapCallback callback,
+                         void* userdata);
+        void* APIGetMappedRange(size_t offset, size_t size);
+        const void* APIGetConstMappedRange(size_t offset, size_t size);
+        void APIUnmap();
+        void APIDestroy();
+
+      protected:
+        BufferBase(DeviceBase* device,
+                   const BufferDescriptor* descriptor,
+                   ObjectBase::ErrorTag tag);
+
+        // Constructor used only for mocking and testing.
+        BufferBase(DeviceBase* device, BufferState state);
+        void DestroyImpl() override;
+
+        ~BufferBase() override;
+
+        MaybeError MapAtCreationInternal();
+
+        uint64_t mAllocatedSize = 0;
+
+      private:
+        virtual MaybeError MapAtCreationImpl() = 0;
+        virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
+        virtual void UnmapImpl() = 0;
+        virtual void* GetMappedPointerImpl() = 0;
+
+        virtual bool IsCPUWritableAtCreation() const = 0;
+        MaybeError CopyFromStagingBuffer();
+        void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+        MaybeError ValidateMapAsync(wgpu::MapMode mode,
+                                    size_t offset,
+                                    size_t size,
+                                    WGPUBufferMapAsyncStatus* status) const;
+        MaybeError ValidateUnmap() const;
+        bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
+        void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+
+        uint64_t mSize = 0;
+        wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+        BufferState mState;
+        bool mIsDataInitialized = false;
+
+        std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
+        WGPUBufferMapCallback mMapCallback = nullptr;
+        void* mMapUserdata = 0;
+        MapRequestID mLastMapID = MapRequestID(0);
+        wgpu::MapMode mMapMode = wgpu::MapMode::None;
+        size_t mMapOffset = 0;
+        size_t mMapSize = 0;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_BUFFER_H_
diff --git a/src/dawn/native/CMakeLists.txt b/src/dawn/native/CMakeLists.txt
new file mode 100644
index 0000000..90610a4
--- /dev/null
+++ b/src/dawn/native/CMakeLists.txt
@@ -0,0 +1,556 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+    TARGET "native_utils"
+    PRINT_NAME "Dawn native utilities"
+    RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
+)
+
+add_library(dawn_native ${DAWN_DUMMY_FILE})
+
+target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_native PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/native/DawnNative.h"
+    "${DAWN_INCLUDE_DIR}/dawn/native/dawn_native_export.h"
+    ${DAWN_NATIVE_UTILS_GEN_SOURCES}
+    "Adapter.cpp"
+    "Adapter.h"
+    "AsyncTask.cpp"
+    "AsyncTask.h"
+    "AttachmentState.cpp"
+    "AttachmentState.h"
+    "BackendConnection.cpp"
+    "BackendConnection.h"
+    "BindGroup.cpp"
+    "BindGroup.h"
+    "BindGroupLayout.cpp"
+    "BindGroupLayout.h"
+    "BindGroupTracker.h"
+    "BindingInfo.cpp"
+    "BindingInfo.h"
+    "BuddyAllocator.cpp"
+    "BuddyAllocator.h"
+    "BuddyMemoryAllocator.cpp"
+    "BuddyMemoryAllocator.h"
+    "Buffer.cpp"
+    "Buffer.h"
+    "CachedObject.cpp"
+    "CachedObject.h"
+    "CacheKey.cpp"
+    "CacheKey.h"
+    "CallbackTaskManager.cpp"
+    "CallbackTaskManager.h"
+    "CommandAllocator.cpp"
+    "CommandAllocator.h"
+    "CommandBuffer.cpp"
+    "CommandBuffer.h"
+    "CommandBufferStateTracker.cpp"
+    "CommandBufferStateTracker.h"
+    "CommandEncoder.cpp"
+    "CommandEncoder.h"
+    "CommandValidation.cpp"
+    "CommandValidation.h"
+    "Commands.cpp"
+    "Commands.h"
+    "CompilationMessages.cpp"
+    "CompilationMessages.h"
+    "ComputePassEncoder.cpp"
+    "ComputePassEncoder.h"
+    "ComputePipeline.cpp"
+    "ComputePipeline.h"
+    "CopyTextureForBrowserHelper.cpp"
+    "CopyTextureForBrowserHelper.h"
+    "CreatePipelineAsyncTask.cpp"
+    "CreatePipelineAsyncTask.h"
+    "Device.cpp"
+    "Device.h"
+    "DynamicUploader.cpp"
+    "DynamicUploader.h"
+    "EncodingContext.cpp"
+    "EncodingContext.h"
+    "EnumClassBitmasks.h"
+    "EnumMaskIterator.h"
+    "Error.cpp"
+    "Error.h"
+    "ErrorData.cpp"
+    "ErrorData.h"
+    "ErrorInjector.cpp"
+    "ErrorInjector.h"
+    "ErrorScope.cpp"
+    "ErrorScope.h"
+    "Features.cpp"
+    "Features.h"
+    "ExternalTexture.cpp"
+    "ExternalTexture.h"
+    "IndirectDrawMetadata.cpp"
+    "IndirectDrawMetadata.h"
+    "IndirectDrawValidationEncoder.cpp"
+    "IndirectDrawValidationEncoder.h"
+    "ObjectContentHasher.cpp"
+    "ObjectContentHasher.h"
+    "Format.cpp"
+    "Format.h"
+    "Forward.h"
+    "Instance.cpp"
+    "Instance.h"
+    "InternalPipelineStore.cpp"
+    "InternalPipelineStore.h"
+    "IntegerTypes.h"
+    "Limits.cpp"
+    "Limits.h"
+    "ObjectBase.cpp"
+    "ObjectBase.h"
+    "PassResourceUsage.h"
+    "PassResourceUsageTracker.cpp"
+    "PassResourceUsageTracker.h"
+    "PersistentCache.cpp"
+    "PersistentCache.h"
+    "PerStage.cpp"
+    "PerStage.h"
+    "Pipeline.cpp"
+    "Pipeline.h"
+    "PipelineLayout.cpp"
+    "PipelineLayout.h"
+    "PooledResourceMemoryAllocator.cpp"
+    "PooledResourceMemoryAllocator.h"
+    "ProgrammableEncoder.cpp"
+    "ProgrammableEncoder.h"
+    "QueryHelper.cpp"
+    "QueryHelper.h"
+    "QuerySet.cpp"
+    "QuerySet.h"
+    "Queue.cpp"
+    "Queue.h"
+    "RenderBundle.cpp"
+    "RenderBundle.h"
+    "RenderBundleEncoder.cpp"
+    "RenderBundleEncoder.h"
+    "RenderEncoderBase.cpp"
+    "RenderEncoderBase.h"
+    "RenderPassEncoder.cpp"
+    "RenderPassEncoder.h"
+    "RenderPipeline.cpp"
+    "RenderPipeline.h"
+    "ResourceHeap.h"
+    "ResourceHeapAllocator.h"
+    "ResourceMemoryAllocation.cpp"
+    "ResourceMemoryAllocation.h"
+    "RingBufferAllocator.cpp"
+    "RingBufferAllocator.h"
+    "Sampler.cpp"
+    "Sampler.h"
+    "ScratchBuffer.cpp"
+    "ScratchBuffer.h"
+    "ShaderModule.cpp"
+    "ShaderModule.h"
+    "StagingBuffer.cpp"
+    "StagingBuffer.h"
+    "Subresource.cpp"
+    "Subresource.h"
+    "SubresourceStorage.h"
+    "Surface.cpp"
+    "Surface.h"
+    "SwapChain.cpp"
+    "SwapChain.h"
+    "Texture.cpp"
+    "Texture.h"
+    "TintUtils.cpp"
+    "TintUtils.h"
+    "ToBackend.h"
+    "Toggles.cpp"
+    "Toggles.h"
+    "VertexFormat.cpp"
+    "VertexFormat.h"
+    "dawn_platform.h"
+    "webgpu_absl_format.cpp"
+    "webgpu_absl_format.h"
+    "utils/WGPUHelpers.cpp"
+    "utils/WGPUHelpers.h"
+)
+target_link_libraries(dawn_native
+    PUBLIC dawncpp_headers
+    PRIVATE dawn_common
+            dawn_platform
+            dawn_internal_config
+            libtint
+            SPIRV-Tools-opt
+            absl_strings
+            absl_str_format_internal
+)
+
+target_include_directories(dawn_native PRIVATE ${DAWN_ABSEIL_DIR})
+
+if (DAWN_USE_X11)
+    find_package(X11 REQUIRED)
+    target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
+    target_include_directories(dawn_native PRIVATE ${X11_INCLUDE_DIR})
+    target_sources(dawn_native PRIVATE
+        "XlibXcbFunctions.cpp"
+        "XlibXcbFunctions.h"
+    )
+endif()
+
+# Only win32 app needs to link with user32.lib
+# In UWP, all availiable APIs are defined in WindowsApp.lib
+# and is automatically linked when WINDOWS_STORE set
+if (WIN32 AND NOT WINDOWS_STORE)
+    target_link_libraries(dawn_native PRIVATE user32.lib)
+endif()
+
+# DXGIGetDebugInterface1 is defined in dxgi.lib
+# But this API is tagged as a development-only capability
+# which implies that linking to this function will cause
+# the application to fail Windows store certification
+# So we only link to it in debug build when compiling for UWP.
+# In win32 we load dxgi.dll using LoadLibrary
+# so no need for static linking.
+if (WINDOWS_STORE)
+    target_link_libraries(dawn_native PRIVATE debug dxgi.lib)
+endif()
+
+if (DAWN_ENABLE_D3D12)
+    target_sources(dawn_native PRIVATE
+        "${DAWN_INCLUDE_DIR}/dawn/native/D3D12Backend.h"
+        "d3d12/AdapterD3D12.cpp"
+        "d3d12/AdapterD3D12.h"
+        "d3d12/BackendD3D12.cpp"
+        "d3d12/BackendD3D12.h"
+        "d3d12/BindGroupD3D12.cpp"
+        "d3d12/BindGroupD3D12.h"
+        "d3d12/BindGroupLayoutD3D12.cpp"
+        "d3d12/BindGroupLayoutD3D12.h"
+        "d3d12/BufferD3D12.cpp"
+        "d3d12/BufferD3D12.h"
+        "d3d12/CPUDescriptorHeapAllocationD3D12.cpp"
+        "d3d12/CPUDescriptorHeapAllocationD3D12.h"
+        "d3d12/CommandAllocatorManager.cpp"
+        "d3d12/CommandAllocatorManager.h"
+        "d3d12/CommandBufferD3D12.cpp"
+        "d3d12/CommandBufferD3D12.h"
+        "d3d12/CommandRecordingContext.cpp"
+        "d3d12/CommandRecordingContext.h"
+        "d3d12/ComputePipelineD3D12.cpp"
+        "d3d12/ComputePipelineD3D12.h"
+        "d3d12/D3D11on12Util.cpp"
+        "d3d12/D3D11on12Util.h"
+        "d3d12/D3D12Error.cpp"
+        "d3d12/D3D12Error.h"
+        "d3d12/D3D12Info.cpp"
+        "d3d12/D3D12Info.h"
+        "d3d12/DeviceD3D12.cpp"
+        "d3d12/DeviceD3D12.h"
+        "d3d12/Forward.h"
+        "d3d12/GPUDescriptorHeapAllocationD3D12.cpp"
+        "d3d12/GPUDescriptorHeapAllocationD3D12.h"
+        "d3d12/HeapAllocatorD3D12.cpp"
+        "d3d12/HeapAllocatorD3D12.h"
+        "d3d12/HeapD3D12.cpp"
+        "d3d12/HeapD3D12.h"
+        "d3d12/IntegerTypes.h"
+        "d3d12/NativeSwapChainImplD3D12.cpp"
+        "d3d12/NativeSwapChainImplD3D12.h"
+        "d3d12/PageableD3D12.cpp"
+        "d3d12/PageableD3D12.h"
+        "d3d12/PipelineLayoutD3D12.cpp"
+        "d3d12/PipelineLayoutD3D12.h"
+        "d3d12/PlatformFunctions.cpp"
+        "d3d12/PlatformFunctions.h"
+        "d3d12/QuerySetD3D12.cpp"
+        "d3d12/QuerySetD3D12.h"
+        "d3d12/QueueD3D12.cpp"
+        "d3d12/QueueD3D12.h"
+        "d3d12/RenderPassBuilderD3D12.cpp"
+        "d3d12/RenderPassBuilderD3D12.h"
+        "d3d12/RenderPipelineD3D12.cpp"
+        "d3d12/RenderPipelineD3D12.h"
+        "d3d12/ResidencyManagerD3D12.cpp"
+        "d3d12/ResidencyManagerD3D12.h"
+        "d3d12/ResourceAllocatorManagerD3D12.cpp"
+        "d3d12/ResourceAllocatorManagerD3D12.h"
+        "d3d12/ResourceHeapAllocationD3D12.cpp"
+        "d3d12/ResourceHeapAllocationD3D12.h"
+        "d3d12/SamplerD3D12.cpp"
+        "d3d12/SamplerD3D12.h"
+        "d3d12/SamplerHeapCacheD3D12.cpp"
+        "d3d12/SamplerHeapCacheD3D12.h"
+        "d3d12/ShaderModuleD3D12.cpp"
+        "d3d12/ShaderModuleD3D12.h"
+        "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp"
+        "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+        "d3d12/StagingBufferD3D12.cpp"
+        "d3d12/StagingBufferD3D12.h"
+        "d3d12/StagingDescriptorAllocatorD3D12.cpp"
+        "d3d12/StagingDescriptorAllocatorD3D12.h"
+        "d3d12/SwapChainD3D12.cpp"
+        "d3d12/SwapChainD3D12.h"
+        "d3d12/TextureCopySplitter.cpp"
+        "d3d12/TextureCopySplitter.h"
+        "d3d12/TextureD3D12.cpp"
+        "d3d12/TextureD3D12.h"
+        "d3d12/UtilsD3D12.cpp"
+        "d3d12/UtilsD3D12.h"
+        "d3d12/d3d12_platform.h"
+    )
+    target_link_libraries(dawn_native PRIVATE dxguid.lib)
+endif()
+
+if (DAWN_ENABLE_METAL)
+    target_sources(dawn_native PRIVATE
+        "${DAWN_INCLUDE_DIR}/dawn/native/MetalBackend.h"
+        "Surface_metal.mm"
+        "metal/BackendMTL.h"
+        "metal/BackendMTL.mm"
+        "metal/BindGroupLayoutMTL.h"
+        "metal/BindGroupLayoutMTL.mm"
+        "metal/BindGroupMTL.h"
+        "metal/BindGroupMTL.mm"
+        "metal/BufferMTL.h"
+        "metal/BufferMTL.mm"
+        "metal/CommandBufferMTL.h"
+        "metal/CommandBufferMTL.mm"
+        "metal/CommandRecordingContext.h"
+        "metal/CommandRecordingContext.mm"
+        "metal/ComputePipelineMTL.h"
+        "metal/ComputePipelineMTL.mm"
+        "metal/DeviceMTL.h"
+        "metal/DeviceMTL.mm"
+        "metal/Forward.h"
+        "metal/PipelineLayoutMTL.h"
+        "metal/PipelineLayoutMTL.mm"
+        "metal/QueueMTL.h"
+        "metal/QueueMTL.mm"
+        "metal/QuerySetMTL.h"
+        "metal/QuerySetMTL.mm"
+        "metal/RenderPipelineMTL.h"
+        "metal/RenderPipelineMTL.mm"
+        "metal/SamplerMTL.h"
+        "metal/SamplerMTL.mm"
+        "metal/ShaderModuleMTL.h"
+        "metal/ShaderModuleMTL.mm"
+        "metal/StagingBufferMTL.h"
+        "metal/StagingBufferMTL.mm"
+        "metal/SwapChainMTL.h"
+        "metal/SwapChainMTL.mm"
+        "metal/TextureMTL.h"
+        "metal/TextureMTL.mm"
+        "metal/UtilsMetal.h"
+        "metal/UtilsMetal.mm"
+    )
+    target_link_libraries(dawn_native PRIVATE
+        "-framework Cocoa"
+        "-framework IOKit"
+        "-framework IOSurface"
+        "-framework QuartzCore"
+        "-framework Metal"
+    )
+endif()
+
+if (DAWN_ENABLE_NULL)
+    target_sources(dawn_native PRIVATE
+        "${DAWN_INCLUDE_DIR}/dawn/native/NullBackend.h"
+        "null/DeviceNull.cpp"
+        "null/DeviceNull.h"
+    )
+endif()
+
+if (DAWN_ENABLE_OPENGL OR DAWN_ENABLE_VULKAN)
+    target_sources(dawn_native PRIVATE
+        "SpirvValidation.cpp"
+        "SpirvValidation.h"
+    )
+endif()
+
+if (DAWN_ENABLE_OPENGL)
+    DawnGenerator(
+        SCRIPT "${Dawn_SOURCE_DIR}/generator/opengl_loader_generator.py"
+        PRINT_NAME "OpenGL function loader"
+        ARGS "--gl-xml"
+             "${Dawn_SOURCE_DIR}/third_party/khronos/gl.xml"
+             "--supported-extensions"
+             "${Dawn_SOURCE_DIR}/src/dawn/native/opengl/supported_extensions.json"
+        RESULT_VARIABLE "DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES"
+    )
+
+    target_sources(dawn_native PRIVATE
+        "${DAWN_INCLUDE_DIR}/dawn/native/OpenGLBackend.h"
+        ${DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES}
+        "opengl/BackendGL.cpp"
+        "opengl/BackendGL.h"
+        "opengl/BindGroupGL.cpp"
+        "opengl/BindGroupGL.h"
+        "opengl/BindGroupLayoutGL.cpp"
+        "opengl/BindGroupLayoutGL.h"
+        "opengl/BufferGL.cpp"
+        "opengl/BufferGL.h"
+        "opengl/CommandBufferGL.cpp"
+        "opengl/CommandBufferGL.h"
+        "opengl/ComputePipelineGL.cpp"
+        "opengl/ComputePipelineGL.h"
+        "opengl/DeviceGL.cpp"
+        "opengl/DeviceGL.h"
+        "opengl/Forward.h"
+        "opengl/GLFormat.cpp"
+        "opengl/GLFormat.h"
+        "opengl/NativeSwapChainImplGL.cpp"
+        "opengl/NativeSwapChainImplGL.h"
+        "opengl/OpenGLFunctions.cpp"
+        "opengl/OpenGLFunctions.h"
+        "opengl/OpenGLVersion.cpp"
+        "opengl/OpenGLVersion.h"
+        "opengl/PersistentPipelineStateGL.cpp"
+        "opengl/PersistentPipelineStateGL.h"
+        "opengl/PipelineGL.cpp"
+        "opengl/PipelineGL.h"
+        "opengl/PipelineLayoutGL.cpp"
+        "opengl/PipelineLayoutGL.h"
+        "opengl/QuerySetGL.cpp"
+        "opengl/QuerySetGL.h"
+        "opengl/QueueGL.cpp"
+        "opengl/QueueGL.h"
+        "opengl/RenderPipelineGL.cpp"
+        "opengl/RenderPipelineGL.h"
+        "opengl/SamplerGL.cpp"
+        "opengl/SamplerGL.h"
+        "opengl/ShaderModuleGL.cpp"
+        "opengl/ShaderModuleGL.h"
+        "opengl/SwapChainGL.cpp"
+        "opengl/SwapChainGL.h"
+        "opengl/TextureGL.cpp"
+        "opengl/TextureGL.h"
+        "opengl/UtilsGL.cpp"
+        "opengl/UtilsGL.h"
+        "opengl/opengl_platform.h"
+    )
+
+    target_link_libraries(dawn_native PRIVATE dawn_khronos_platform)
+endif()
+
+if (DAWN_ENABLE_VULKAN)
+    target_sources(dawn_native PRIVATE
+        "${DAWN_INCLUDE_DIR}/dawn/native/VulkanBackend.h"
+        "vulkan/AdapterVk.cpp"
+        "vulkan/AdapterVk.h"
+        "vulkan/BackendVk.cpp"
+        "vulkan/BackendVk.h"
+        "vulkan/BindGroupLayoutVk.cpp"
+        "vulkan/BindGroupLayoutVk.h"
+        "vulkan/BindGroupVk.cpp"
+        "vulkan/BindGroupVk.h"
+        "vulkan/BufferVk.cpp"
+        "vulkan/BufferVk.h"
+        "vulkan/CommandBufferVk.cpp"
+        "vulkan/CommandBufferVk.h"
+        "vulkan/CommandRecordingContext.h"
+        "vulkan/ComputePipelineVk.cpp"
+        "vulkan/ComputePipelineVk.h"
+        "vulkan/DescriptorSetAllocation.h"
+        "vulkan/DescriptorSetAllocator.cpp"
+        "vulkan/DescriptorSetAllocator.h"
+        "vulkan/DeviceVk.cpp"
+        "vulkan/DeviceVk.h"
+        "vulkan/ExternalHandle.h"
+        "vulkan/FencedDeleter.cpp"
+        "vulkan/FencedDeleter.h"
+        "vulkan/Forward.h"
+        "vulkan/NativeSwapChainImplVk.cpp"
+        "vulkan/NativeSwapChainImplVk.h"
+        "vulkan/PipelineLayoutVk.cpp"
+        "vulkan/PipelineLayoutVk.h"
+        "vulkan/QuerySetVk.cpp"
+        "vulkan/QuerySetVk.h"
+        "vulkan/QueueVk.cpp"
+        "vulkan/QueueVk.h"
+        "vulkan/RenderPassCache.cpp"
+        "vulkan/RenderPassCache.h"
+        "vulkan/RenderPipelineVk.cpp"
+        "vulkan/RenderPipelineVk.h"
+        "vulkan/ResourceHeapVk.cpp"
+        "vulkan/ResourceHeapVk.h"
+        "vulkan/ResourceMemoryAllocatorVk.cpp"
+        "vulkan/ResourceMemoryAllocatorVk.h"
+        "vulkan/SamplerVk.cpp"
+        "vulkan/SamplerVk.h"
+        "vulkan/ShaderModuleVk.cpp"
+        "vulkan/ShaderModuleVk.h"
+        "vulkan/StagingBufferVk.cpp"
+        "vulkan/StagingBufferVk.h"
+        "vulkan/SwapChainVk.cpp"
+        "vulkan/SwapChainVk.h"
+        "vulkan/TextureVk.cpp"
+        "vulkan/TextureVk.h"
+        "vulkan/UtilsVulkan.cpp"
+        "vulkan/UtilsVulkan.h"
+        "vulkan/VulkanError.cpp"
+        "vulkan/VulkanError.h"
+        "vulkan/VulkanExtensions.cpp"
+        "vulkan/VulkanExtensions.h"
+        "vulkan/VulkanFunctions.cpp"
+        "vulkan/VulkanFunctions.h"
+        "vulkan/VulkanInfo.cpp"
+        "vulkan/VulkanInfo.h"
+        "vulkan/external_memory/MemoryService.h"
+        "vulkan/external_semaphore/SemaphoreService.h"
+    )
+
+    target_link_libraries(dawn_native PUBLIC Vulkan-Headers)
+
+    if (UNIX AND NOT APPLE)
+        target_sources(dawn_native PRIVATE
+            "vulkan/external_memory/MemoryServiceOpaqueFD.cpp"
+            "vulkan/external_semaphore/SemaphoreServiceFD.cpp"
+        )
+    else()
+        target_sources(dawn_native PRIVATE
+            "vulkan/external_memory/MemoryServiceNull.cpp"
+            "vulkan/external_semaphore/SemaphoreServiceNull.cpp"
+        )
+    endif()
+endif()
+
+# TODO how to do the component build in CMake?
+target_sources(dawn_native PRIVATE "DawnNative.cpp")
+if (DAWN_ENABLE_D3D12)
+    target_sources(dawn_native PRIVATE "d3d12/D3D12Backend.cpp")
+endif()
+if (DAWN_ENABLE_METAL)
+    target_sources(dawn_native PRIVATE "metal/MetalBackend.mm")
+endif()
+if (DAWN_ENABLE_NULL)
+    target_sources(dawn_native PRIVATE "null/NullBackend.cpp")
+endif()
+if (DAWN_ENABLE_OPENGL)
+    target_sources(dawn_native PRIVATE "opengl/OpenGLBackend.cpp")
+endif()
+if (DAWN_ENABLE_VULKAN)
+    target_sources(dawn_native PRIVATE "vulkan/VulkanBackend.cpp")
+endif()
+
+DawnJSONGenerator(
+    TARGET "webgpu_dawn_native_proc"
+    PRINT_NAME "Dawn native WebGPU procs"
+    RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
+)
+
+add_library(webgpu_dawn ${DAWN_DUMMY_FILE})
+target_link_libraries(webgpu_dawn PRIVATE dawn_native)
+target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(webgpu_dawn PRIVATE "WGPU_SHARED_LIBRARY")
+endif()
+target_sources(webgpu_dawn PRIVATE ${WEBGPU_DAWN_NATIVE_PROC_GEN})
diff --git a/src/dawn/native/CacheKey.cpp b/src/dawn/native/CacheKey.cpp
new file mode 100644
index 0000000..3495577
--- /dev/null
+++ b/src/dawn/native/CacheKey.cpp
@@ -0,0 +1,32 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CacheKey.h"
+
+namespace dawn::native {
+
+    template <>
+    void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
+        key->Record(static_cast<size_t>(t.length()));
+        key->insert(key->end(), t.begin(), t.end());
+    }
+
+    template <>
+    void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
+        // For nested cache keys, we do not record the length, and just copy the key so that it
+        // appears we just flatten the keys into a single key.
+        key->insert(key->end(), t.begin(), t.end());
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CacheKey.h b/src/dawn/native/CacheKey.h
new file mode 100644
index 0000000..ce21f6d
--- /dev/null
+++ b/src/dawn/native/CacheKey.h
@@ -0,0 +1,98 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CACHE_KEY_H_
+#define DAWNNATIVE_CACHE_KEY_H_
+
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+    // Forward declare CacheKey class because of co-dependency.
+    class CacheKey;
+
+    // Overridable serializer struct that should be implemented for cache key serializable
+    // types/classes.
+    template <typename T, typename SFINAE = void>
+    class CacheKeySerializer {
+      public:
+        static void Serialize(CacheKey* key, const T& t);
+    };
+
+    class CacheKey : public std::vector<uint8_t> {
+      public:
+        using std::vector<uint8_t>::vector;
+
+        template <typename T>
+        CacheKey& Record(const T& t) {
+            CacheKeySerializer<T>::Serialize(this, t);
+            return *this;
+        }
+        template <typename T, typename... Args>
+        CacheKey& Record(const T& t, const Args&... args) {
+            CacheKeySerializer<T>::Serialize(this, t);
+            return Record(args...);
+        }
+
+        // Records iterables by prepending the number of elements. Some common iterables are have a
+        // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
+        // strings and CacheKeys, but they fundamentally do the same as this function.
+        template <typename IterableT>
+        CacheKey& RecordIterable(const IterableT& iterable) {
+            // Always record the size of generic iterables as a size_t for now.
+            Record(static_cast<size_t>(iterable.size()));
+            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+                Record(*it);
+            }
+            return *this;
+        }
+        template <typename Ptr>
+        CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
+            Record(n);
+            for (size_t i = 0; i < n; ++i) {
+                Record(ptr[i]);
+            }
+            return *this;
+        }
+    };
+
+    // Specialized overload for fundamental types.
+    template <typename T>
+    class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
+      public:
+        static void Serialize(CacheKey* key, const T t) {
+            const char* it = reinterpret_cast<const char*>(&t);
+            key->insert(key->end(), it, (it + sizeof(T)));
+        }
+    };
+
+    // Specialized overload for string literals. Note we drop the null-terminator.
+    template <size_t N>
+    class CacheKeySerializer<char[N]> {
+      public:
+        static void Serialize(CacheKey* key, const char (&t)[N]) {
+            static_assert(N > 0);
+            key->Record(static_cast<size_t>(N));
+            key->insert(key->end(), t, t + N);
+        }
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_CACHE_KEY_H_
diff --git a/src/dawn/native/CachedObject.cpp b/src/dawn/native/CachedObject.cpp
new file mode 100644
index 0000000..e7e7cd8
--- /dev/null
+++ b/src/dawn/native/CachedObject.cpp
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CachedObject.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    bool CachedObject::IsCachedReference() const {
+        return mIsCachedReference;
+    }
+
+    void CachedObject::SetIsCachedReference() {
+        mIsCachedReference = true;
+    }
+
+    size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
+        return obj->GetContentHash();
+    }
+
+    size_t CachedObject::GetContentHash() const {
+        ASSERT(mIsContentHashInitialized);
+        return mContentHash;
+    }
+
+    void CachedObject::SetContentHash(size_t contentHash) {
+        ASSERT(!mIsContentHashInitialized);
+        mContentHash = contentHash;
+        mIsContentHashInitialized = true;
+    }
+
+    const CacheKey& CachedObject::GetCacheKey() const {
+        return mCacheKey;
+    }
+
+    CacheKey* CachedObject::GetCacheKey() {
+        return &mCacheKey;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CachedObject.h b/src/dawn/native/CachedObject.h
new file mode 100644
index 0000000..7d28ae8
--- /dev/null
+++ b/src/dawn/native/CachedObject.h
@@ -0,0 +1,65 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CACHED_OBJECT_H_
+#define DAWNNATIVE_CACHED_OBJECT_H_
+
+#include "dawn/native/CacheKey.h"
+#include "dawn/native/Forward.h"
+
+#include <cstddef>
+#include <string>
+
+namespace dawn::native {
+
+    // Some objects are cached so that instead of creating new duplicate objects,
+    // we increase the refcount of an existing object.
+    // When an object is successfully created, the device should call
+    // SetIsCachedReference() and insert the object into the cache.
+    class CachedObject {
+      public:
+        bool IsCachedReference() const;
+
+        // Functor necessary for the unordered_set<CachedObject*>-based cache.
+        struct HashFunc {
+            size_t operator()(const CachedObject* obj) const;
+        };
+
+        size_t GetContentHash() const;
+        void SetContentHash(size_t contentHash);
+
+        // Returns the cache key for the object only, i.e. without device/adapter information.
+        const CacheKey& GetCacheKey() const;
+
+      protected:
+        // Protected accessor for derived classes to access and modify the key.
+        CacheKey* GetCacheKey();
+
+      private:
+        friend class DeviceBase;
+        void SetIsCachedReference();
+
+        bool mIsCachedReference = false;
+
+        // Called by ObjectContentHasher upon creation to record the object.
+        virtual size_t ComputeContentHash() = 0;
+
+        size_t mContentHash = 0;
+        bool mIsContentHashInitialized = false;
+        CacheKey mCacheKey;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_CACHED_OBJECT_H_
diff --git a/src/dawn/native/CallbackTaskManager.cpp b/src/dawn/native/CallbackTaskManager.cpp
new file mode 100644
index 0000000..a8be5cc
--- /dev/null
+++ b/src/dawn/native/CallbackTaskManager.cpp
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CallbackTaskManager.h"
+
+namespace dawn::native {
+
+    bool CallbackTaskManager::IsEmpty() {
+        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+        return mCallbackTaskQueue.empty();
+    }
+
+    std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+
+        std::vector<std::unique_ptr<CallbackTask>> allTasks;
+        allTasks.swap(mCallbackTaskQueue);
+        return allTasks;
+    }
+
+    void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+        mCallbackTaskQueue.push_back(std::move(callbackTask));
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CallbackTaskManager.h b/src/dawn/native/CallbackTaskManager.h
new file mode 100644
index 0000000..37fddd4
--- /dev/null
+++ b/src/dawn/native/CallbackTaskManager.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+#define DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+
+#include <memory>
+#include <mutex>
+#include <vector>
+
+namespace dawn::native {
+
+    struct CallbackTask {
+      public:
+        virtual ~CallbackTask() = default;
+        virtual void Finish() = 0;
+        virtual void HandleShutDown() = 0;
+        virtual void HandleDeviceLoss() = 0;
+    };
+
+    class CallbackTaskManager {
+      public:
+        void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+        bool IsEmpty();
+        std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+
+      private:
+        std::mutex mCallbackTaskQueueMutex;
+        std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+    };
+
+}  // namespace dawn::native
+
+#endif
diff --git a/src/dawn/native/CommandAllocator.cpp b/src/dawn/native/CommandAllocator.cpp
new file mode 100644
index 0000000..5d36aad
--- /dev/null
+++ b/src/dawn/native/CommandAllocator.cpp
@@ -0,0 +1,228 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+#include <algorithm>
+#include <climits>
+#include <cstdlib>
+#include <utility>
+
+namespace dawn::native {
+
+    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
+
+    CommandIterator::CommandIterator() {
+        Reset();
+    }
+
+    CommandIterator::~CommandIterator() {
+        ASSERT(IsEmpty());
+    }
+
+    CommandIterator::CommandIterator(CommandIterator&& other) {
+        if (!other.IsEmpty()) {
+            mBlocks = std::move(other.mBlocks);
+            other.Reset();
+        }
+        Reset();
+    }
+
+    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
+        ASSERT(IsEmpty());
+        if (!other.IsEmpty()) {
+            mBlocks = std::move(other.mBlocks);
+            other.Reset();
+        }
+        Reset();
+        return *this;
+    }
+
+    CommandIterator::CommandIterator(CommandAllocator allocator)
+        : mBlocks(allocator.AcquireBlocks()) {
+        Reset();
+    }
+
+    void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
+        ASSERT(IsEmpty());
+        mBlocks.clear();
+        for (CommandAllocator& allocator : allocators) {
+            CommandBlocks blocks = allocator.AcquireBlocks();
+            if (!blocks.empty()) {
+                mBlocks.reserve(mBlocks.size() + blocks.size());
+                for (BlockDef& block : blocks) {
+                    mBlocks.push_back(std::move(block));
+                }
+            }
+        }
+        Reset();
+    }
+
+    bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+        mCurrentBlock++;
+        if (mCurrentBlock >= mBlocks.size()) {
+            Reset();
+            *commandId = detail::kEndOfBlock;
+            return false;
+        }
+        mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+        return NextCommandId(commandId);
+    }
+
+    void CommandIterator::Reset() {
+        mCurrentBlock = 0;
+
+        if (mBlocks.empty()) {
+            // This will case the first NextCommandId call to try to move to the next block and stop
+            // the iteration immediately, without special casing the initialization.
+            mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
+            mBlocks.emplace_back();
+            mBlocks[0].size = sizeof(mEndOfBlock);
+            mBlocks[0].block = mCurrentPtr;
+        } else {
+            mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
+        }
+    }
+
+    void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+        if (IsEmpty()) {
+            return;
+        }
+
+        for (BlockDef& block : mBlocks) {
+            free(block.block);
+        }
+        mBlocks.clear();
+        Reset();
+        ASSERT(IsEmpty());
+    }
+
+    bool CommandIterator::IsEmpty() const {
+        return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+    }
+
+    // Potential TODO(crbug.com/dawn/835):
+    //  - Host the size and pointer to next block in the block itself to avoid having an allocation
+    //    in the vector
+    //  - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
+    //    in Allocate
+    //  - Be able to optimize allocation to one block, for command buffers expected to live long to
+    //    avoid cache misses
+    //  - Better block allocation, maybe have Dawn API to say command buffer is going to have size
+    //    close to another
+
+    CommandAllocator::CommandAllocator() {
+        ResetPointers();
+    }
+
+    CommandAllocator::~CommandAllocator() {
+        Reset();
+    }
+
+    CommandAllocator::CommandAllocator(CommandAllocator&& other)
+        : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+        other.mBlocks.clear();
+        if (!other.IsEmpty()) {
+            mCurrentPtr = other.mCurrentPtr;
+            mEndPtr = other.mEndPtr;
+        } else {
+            ResetPointers();
+        }
+        other.Reset();
+    }
+
+    CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+        Reset();
+        if (!other.IsEmpty()) {
+            std::swap(mBlocks, other.mBlocks);
+            mLastAllocationSize = other.mLastAllocationSize;
+            mCurrentPtr = other.mCurrentPtr;
+            mEndPtr = other.mEndPtr;
+        }
+        other.Reset();
+        return *this;
+    }
+
+    void CommandAllocator::Reset() {
+        for (BlockDef& block : mBlocks) {
+            free(block.block);
+        }
+        mBlocks.clear();
+        mLastAllocationSize = kDefaultBaseAllocationSize;
+        ResetPointers();
+    }
+
+    bool CommandAllocator::IsEmpty() const {
+        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
+    }
+
+    CommandBlocks&& CommandAllocator::AcquireBlocks() {
+        ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
+        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+        ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+        *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
+
+        mCurrentPtr = nullptr;
+        mEndPtr = nullptr;
+        return std::move(mBlocks);
+    }
+
+    uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+                                                  size_t commandSize,
+                                                  size_t commandAlignment) {
+        // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+        // to move to the next one. kEndOfBlock on the last block means the end of the commands.
+        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+        *idAlloc = detail::kEndOfBlock;
+
+        // We'll request a block that can contain at least the command ID, the command and an
+        // additional ID to contain the kEndOfBlock tag.
+        size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+        // The computation of the request could overflow.
+        if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+            return nullptr;
+        }
+
+        if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+            return nullptr;
+        }
+        return Allocate(commandId, commandSize, commandAlignment);
+    }
+
+    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
+        // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
+        mLastAllocationSize =
+            std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
+
+        uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
+        if (DAWN_UNLIKELY(block == nullptr)) {
+            return false;
+        }
+
+        mBlocks.push_back({mLastAllocationSize, block});
+        mCurrentPtr = AlignPtr(block, alignof(uint32_t));
+        mEndPtr = block + mLastAllocationSize;
+        return true;
+    }
+
+    void CommandAllocator::ResetPointers() {
+        mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
+        mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CommandAllocator.h b/src/dawn/native/CommandAllocator.h
new file mode 100644
index 0000000..9d2b471
--- /dev/null
+++ b/src/dawn/native/CommandAllocator.h
@@ -0,0 +1,273 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMAND_ALLOCATOR_H_
+#define DAWNNATIVE_COMMAND_ALLOCATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/NonCopyable.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace dawn::native {
+
+    // Allocation for command buffers should be fast. To avoid doing an allocation per command
+    // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
+    // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
+    // so that iteration over the commands is easy.
+
+    // Usage of the allocator and iterator:
+    //     CommandAllocator allocator;
+    //     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
+    //     // Fill command
+    //     // Repeat allocation and filling commands
+    //
+    //     CommandIterator commands(allocator);
+    //     CommandType type;
+    //     while(commands.NextCommandId(&type)) {
+    //         switch(type) {
+    //              case CommandType::Draw:
+    //                  DrawCommand* draw = commands.NextCommand<DrawCommand>();
+    //                  // Do the draw
+    //                  break;
+    //              // other cases
+    //         }
+    //     }
+
+    // Note that you need to extract the commands from the CommandAllocator before destroying it
+    // and must tell the CommandIterator when the allocated commands have been processed for
+    // deletion.
+
+    // These are the lists of blocks, should not be used directly, only through CommandAllocator
+    // and CommandIterator
+    struct BlockDef {
+        size_t size;
+        uint8_t* block;
+    };
+    using CommandBlocks = std::vector<BlockDef>;
+
+    namespace detail {
+        constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+        constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+    }  // namespace detail
+
+    class CommandAllocator;
+
+    class CommandIterator : public NonCopyable {
+      public:
+        CommandIterator();
+        ~CommandIterator();
+
+        CommandIterator(CommandIterator&& other);
+        CommandIterator& operator=(CommandIterator&& other);
+
+        // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+        explicit CommandIterator(CommandAllocator allocator);
+
+        void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
+
+        template <typename E>
+        bool NextCommandId(E* commandId) {
+            return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+        }
+        template <typename T>
+        T* NextCommand() {
+            return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+        }
+        template <typename T>
+        T* NextData(size_t count) {
+            return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+        }
+
+        // Sets iterator to the beginning of the commands without emptying the list. This method can
+        // be used if iteration was stopped early and the iterator needs to be restarted.
+        void Reset();
+
+        // This method must to be called after commands have been deleted. This indicates that the
+        // commands have been submitted and they are no longer valid.
+        void MakeEmptyAsDataWasDestroyed();
+
+      private:
+        bool IsEmpty() const;
+
+        DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+            uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+            ASSERT(idPtr + sizeof(uint32_t) <=
+                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+            uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+            if (id != detail::kEndOfBlock) {
+                mCurrentPtr = idPtr + sizeof(uint32_t);
+                *commandId = id;
+                return true;
+            }
+            return NextCommandIdInNewBlock(commandId);
+        }
+
+        bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+        DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+            uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+            ASSERT(commandPtr + sizeof(commandSize) <=
+                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+            mCurrentPtr = commandPtr + commandSize;
+            return commandPtr;
+        }
+
+        DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+            uint32_t id;
+            bool hasId = NextCommandId(&id);
+            ASSERT(hasId);
+            ASSERT(id == detail::kAdditionalData);
+
+            return NextCommand(dataSize, dataAlignment);
+        }
+
+        CommandBlocks mBlocks;
+        uint8_t* mCurrentPtr = nullptr;
+        size_t mCurrentBlock = 0;
+        // Used to avoid a special case for empty iterators.
+        uint32_t mEndOfBlock = detail::kEndOfBlock;
+    };
+
+    class CommandAllocator : public NonCopyable {
+      public:
+        CommandAllocator();
+        ~CommandAllocator();
+
+        // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+        CommandAllocator(CommandAllocator&&);
+        CommandAllocator& operator=(CommandAllocator&&);
+
+        // Frees all blocks held by the allocator and restores it to its initial empty state.
+        void Reset();
+
+        bool IsEmpty() const;
+
+        template <typename T, typename E>
+        T* Allocate(E commandId) {
+            static_assert(sizeof(E) == sizeof(uint32_t));
+            static_assert(alignof(E) == alignof(uint32_t));
+            static_assert(alignof(T) <= kMaxSupportedAlignment);
+            T* result = reinterpret_cast<T*>(
+                Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
+            if (!result) {
+                return nullptr;
+            }
+            new (result) T;
+            return result;
+        }
+
+        template <typename T>
+        T* AllocateData(size_t count) {
+            static_assert(alignof(T) <= kMaxSupportedAlignment);
+            T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
+            if (!result) {
+                return nullptr;
+            }
+            for (size_t i = 0; i < count; i++) {
+                new (result + i) T;
+            }
+            return result;
+        }
+
+      private:
+        // This is used for some internal computations and can be any power of two as long as code
+        // using the CommandAllocator passes the static_asserts.
+        static constexpr size_t kMaxSupportedAlignment = 8;
+
+        // To avoid checking for overflows at every step of the computations we compute an upper
+        // bound of the space that will be needed in addition to the command data.
+        static constexpr size_t kWorstCaseAdditionalSize =
+            sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+
+        // The default value of mLastAllocationSize.
+        static constexpr size_t kDefaultBaseAllocationSize = 2048;
+
+        friend CommandIterator;
+        CommandBlocks&& AcquireBlocks();
+
+        DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
+                                            size_t commandSize,
+                                            size_t commandAlignment) {
+            ASSERT(mCurrentPtr != nullptr);
+            ASSERT(mEndPtr != nullptr);
+            ASSERT(commandId != detail::kEndOfBlock);
+
+            // It should always be possible to allocate one id, for kEndOfBlock tagging,
+            ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+            ASSERT(mEndPtr >= mCurrentPtr);
+            ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+
+            // The memory after the ID will contain the following:
+            //   - the current ID
+            //   - padding to align the command, maximum kMaxSupportedAlignment
+            //   - the command of size commandSize
+            //   - padding to align the next ID, maximum alignof(uint32_t)
+            //   - the next ID of size sizeof(uint32_t)
+
+            // This can't overflow because by construction mCurrentPtr always has space for the next
+            // ID.
+            size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+            // The good case were we have enough space for the command data and upper bound of the
+            // extra required space.
+            if ((remainingSize >= kWorstCaseAdditionalSize) &&
+                (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+                uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+                *idAlloc = commandId;
+
+                uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+                mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+                return commandAlloc;
+            }
+            return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+        }
+
+        uint8_t* AllocateInNewBlock(uint32_t commandId,
+                                    size_t commandSize,
+                                    size_t commandAlignment);
+
+        DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+            return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
+        }
+
+        bool GetNewBlock(size_t minimumSize);
+
+        void ResetPointers();
+
+        CommandBlocks mBlocks;
+        size_t mLastAllocationSize = kDefaultBaseAllocationSize;
+
+        // Data used for the block range at initialization so that the first call to Allocate sees
+        // there is not enough space and calls GetNewBlock. This avoids having to special case the
+        // initialization in Allocate.
+        uint32_t mDummyEnum[1] = {0};
+
+        // Pointers to the current range of allocation in the block. Guaranteed to allow for at
+        // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
+        // be written. Nullptr iff the blocks were moved out.
+        uint8_t* mCurrentPtr = nullptr;
+        uint8_t* mEndPtr = nullptr;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMAND_ALLOCATOR_H_
diff --git a/src/dawn/native/CommandBuffer.cpp b/src/dawn/native/CommandBuffer.cpp
new file mode 100644
index 0000000..f8c7836
--- /dev/null
+++ b/src/dawn/native/CommandBuffer.cpp
@@ -0,0 +1,245 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandBuffer.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor)
+        : ApiObjectBase(encoder->GetDevice(), descriptor->label),
+          mCommands(encoder->AcquireCommands()),
+          mResourceUsages(encoder->AcquireResourceUsages()) {
+        TrackInDevice();
+    }
+
+    CommandBufferBase::CommandBufferBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    // static
+    CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
+        return new CommandBufferBase(device, ObjectBase::kError);
+    }
+
+    ObjectType CommandBufferBase::GetType() const {
+        return ObjectType::CommandBuffer;
+    }
+
+    MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
+        ASSERT(!IsError());
+
+        DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
+        return {};
+    }
+
+    void CommandBufferBase::DestroyImpl() {
+        FreeCommands(&mCommands);
+        mResourceUsages = {};
+    }
+
+    const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
+        return mResourceUsages;
+    }
+
+    CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
+        return &mCommands;
+    }
+
+    bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+                                       const Extent3D copySize,
+                                       const uint32_t mipLevel) {
+        Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
+
+        switch (texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                return extent.width == copySize.width;
+            case wgpu::TextureDimension::e2D:
+                return extent.width == copySize.width && extent.height == copySize.height;
+            case wgpu::TextureDimension::e3D:
+                return extent.width == copySize.width && extent.height == copySize.height &&
+                       extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+        }
+    }
+
+    SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+                                                   const Extent3D& copySize) {
+        switch (copy.texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+                ASSERT(copy.mipLevel == 0);
+                return {copy.aspect, {0, 1}, {0, 1}};
+            case wgpu::TextureDimension::e2D:
+                return {
+                    copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
+            case wgpu::TextureDimension::e3D:
+                return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
+        }
+    }
+
+    void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
+        for (ColorAttachmentIndex i :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            auto& attachmentInfo = renderPass->colorAttachments[i];
+            TextureViewBase* view = attachmentInfo.view.Get();
+            bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+            ASSERT(view->GetLayerCount() == 1);
+            ASSERT(view->GetLevelCount() == 1);
+            SubresourceRange range = view->GetSubresourceRange();
+
+            // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
+            if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
+                !view->GetTexture()->IsSubresourceContentInitialized(range)) {
+                attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+                attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
+            }
+
+            if (hasResolveTarget) {
+                // We need to set the resolve target to initialized so that it does not get
+                // cleared later in the pipeline. The texture will be resolved from the
+                // source color attachment, which will be correctly initialized.
+                TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+                ASSERT(resolveView->GetLayerCount() == 1);
+                ASSERT(resolveView->GetLevelCount() == 1);
+                resolveView->GetTexture()->SetIsSubresourceContentInitialized(
+                    true, resolveView->GetSubresourceRange());
+            }
+
+            switch (attachmentInfo.storeOp) {
+                case wgpu::StoreOp::Store:
+                    view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
+                    break;
+
+                case wgpu::StoreOp::Discard:
+                    view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
+                    break;
+
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            auto& attachmentInfo = renderPass->depthStencilAttachment;
+            TextureViewBase* view = attachmentInfo.view.Get();
+            ASSERT(view->GetLayerCount() == 1);
+            ASSERT(view->GetLevelCount() == 1);
+            SubresourceRange range = view->GetSubresourceRange();
+
+            SubresourceRange depthRange = range;
+            depthRange.aspects = range.aspects & Aspect::Depth;
+
+            SubresourceRange stencilRange = range;
+            stencilRange.aspects = range.aspects & Aspect::Stencil;
+
+            // If the depth stencil texture has not been initialized, we want to use loadop
+            // clear to init the contents to 0's
+            if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
+                attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+                attachmentInfo.clearDepth = 0.0f;
+                attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+            }
+
+            if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
+                attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+                attachmentInfo.clearStencil = 0u;
+                attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+            }
+
+            view->GetTexture()->SetIsSubresourceContentInitialized(
+                attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
+
+            view->GetTexture()->SetIsSubresourceContentInitialized(
+                attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+        }
+    }
+
+    bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
+        ASSERT(copy != nullptr);
+
+        if (copy->destination.offset > 0) {
+            // The copy doesn't touch the start of the buffer.
+            return false;
+        }
+
+        const TextureBase* texture = copy->source.texture.Get();
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
+        const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
+        const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
+        const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
+        const bool multiRow = multiSlice || heightInBlocks > 1;
+
+        if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
+            // There are gaps between slices that aren't overwritten
+            return false;
+        }
+
+        const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
+        if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
+            // There are gaps between rows that aren't overwritten
+            return false;
+        }
+
+        // After the above checks, we're sure the copy has no gaps.
+        // Now, compute the total number of bytes written.
+        const uint64_t writtenBytes =
+            ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
+                                       copy->destination.rowsPerImage)
+                .AcquireSuccess();
+        if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
+            // The written bytes don't cover the whole buffer.
+            return false;
+        }
+
+        return true;
+    }
+
+    std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
+        const std::array<float, 4> outputValue = {
+            static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
+            static_cast<float>(color.a)};
+        return outputValue;
+    }
+    std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
+        const std::array<int32_t, 4> outputValue = {
+            static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
+            static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
+        return outputValue;
+    }
+
+    std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
+        const std::array<uint32_t, 4> outputValue = {
+            static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
+            static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
+        return outputValue;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CommandBuffer.h b/src/dawn/native/CommandBuffer.h
new file mode 100644
index 0000000..3d9d71a
--- /dev/null
+++ b/src/dawn/native/CommandBuffer.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDBUFFER_H_
+#define DAWNNATIVE_COMMANDBUFFER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    struct BeginRenderPassCmd;
+    struct CopyTextureToBufferCmd;
+    struct TextureCopy;
+
+    class CommandBufferBase : public ApiObjectBase {
+      public:
+        CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        static CommandBufferBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        MaybeError ValidateCanUseInSubmitNow() const;
+
+        const CommandBufferResourceUsage& GetResourceUsages() const;
+
+        CommandIterator* GetCommandIteratorForTesting();
+
+      protected:
+        // Constructor used only for mocking and testing.
+        CommandBufferBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        CommandIterator mCommands;
+
+      private:
+        CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        CommandBufferResourceUsage mResourceUsages;
+    };
+
+    bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+                                       const Extent3D copySize,
+                                       const uint32_t mipLevel);
+    SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+                                                   const Extent3D& copySize);
+
+    void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+
+    bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
+
+    std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
+    std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
+    std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMANDBUFFER_H_
diff --git a/src/dawn/native/CommandBufferStateTracker.cpp b/src/dawn/native/CommandBufferStateTracker.cpp
new file mode 100644
index 0000000..ee164c7
--- /dev/null
+++ b/src/dawn/native/CommandBufferStateTracker.cpp
@@ -0,0 +1,421 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandBufferStateTracker.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/RenderPipeline.h"
+
+// TODO(dawn:563): None of the error messages in this file include the buffer objects they are
+// validating against. It would be nice to improve that, but difficult to do without incurring
+// additional tracking costs.
+
+namespace dawn::native {
+
+    namespace {
+        bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
+                                     const std::vector<uint64_t>& pipelineMinBufferSizes) {
+            ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
+
+            for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
+                if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
+                    return false;
+                }
+            }
+
+            return true;
+        }
+    }  // namespace
+
+    enum ValidationAspect {
+        VALIDATION_ASPECT_PIPELINE,
+        VALIDATION_ASPECT_BIND_GROUPS,
+        VALIDATION_ASPECT_VERTEX_BUFFERS,
+        VALIDATION_ASPECT_INDEX_BUFFER,
+
+        VALIDATION_ASPECT_COUNT
+    };
+    static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
+
+    static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
+        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
+
+    static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
+        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+        1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
+
+    static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
+        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+        1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+    static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
+        1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
+        1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+    MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
+        return ValidateOperation(kDispatchAspects);
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateCanDraw() {
+        return ValidateOperation(kDrawAspects);
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
+        return ValidateOperation(kDrawIndexedAspects);
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
+        uint32_t vertexCount,
+        uint32_t firstVertex) {
+        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+            vertexBufferSlotsUsedAsVertexBuffer =
+                lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
+
+        for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
+            const VertexBufferInfo& vertexBuffer =
+                lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
+            uint64_t arrayStride = vertexBuffer.arrayStride;
+            uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
+
+            if (arrayStride == 0) {
+                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+                                "is smaller than the required size for all attributes (%u)",
+                                bufferSize, static_cast<uint8_t>(usedSlotVertex),
+                                vertexBuffer.usedBytesInStride);
+            } else {
+                uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
+                if (strideCount != 0u) {
+                    uint64_t requiredSize =
+                        (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+                    // firstVertex and vertexCount are in uint32_t,
+                    // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+                    // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+                    // sizeof(attribute.format)) with attribute.offset being no larger than
+                    // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+                    // overflows.
+                    DAWN_INVALID_IF(
+                        requiredSize > bufferSize,
+                        "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
+                        "the "
+                        "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+                        firstVertex, vertexCount, requiredSize, bufferSize,
+                        static_cast<uint8_t>(usedSlotVertex), arrayStride);
+                }
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
+        uint32_t instanceCount,
+        uint32_t firstInstance) {
+        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+            vertexBufferSlotsUsedAsInstanceBuffer =
+                lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
+
+        for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
+            const VertexBufferInfo& vertexBuffer =
+                lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
+            uint64_t arrayStride = vertexBuffer.arrayStride;
+            uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
+            if (arrayStride == 0) {
+                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+                                "is smaller than the required size for all attributes (%u)",
+                                bufferSize, static_cast<uint8_t>(usedSlotInstance),
+                                vertexBuffer.usedBytesInStride);
+            } else {
+                uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
+                if (strideCount != 0u) {
+                    uint64_t requiredSize =
+                        (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+                    // firstInstance and instanceCount are in uint32_t,
+                    // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+                    // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+                    // sizeof(attribute.format)) with attribute.offset being no larger than
+                    // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+                    // overflows.
+                    DAWN_INVALID_IF(
+                        requiredSize > bufferSize,
+                        "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
+                        "the "
+                        "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+                        firstInstance, instanceCount, requiredSize, bufferSize,
+                        static_cast<uint8_t>(usedSlotInstance), arrayStride);
+                }
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
+                                                                     uint32_t firstIndex) {
+        // Validate the range of index buffer
+        // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
+        // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
+        // uint64_t we avoid overflows.
+        DAWN_INVALID_IF(
+            (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
+                mIndexBufferSize,
+            "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
+            "(%u).",
+            firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
+        return {};
+    }
+
+    MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
+        // Fast return-true path if everything is good
+        ValidationAspects missingAspects = requiredAspects & ~mAspects;
+        if (missingAspects.none()) {
+            return {};
+        }
+
+        // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
+        // requires the pipeline to be set.
+        DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
+
+        RecomputeLazyAspects(missingAspects);
+
+        DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
+
+        return {};
+    }
+
+    void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
+        ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
+        ASSERT((aspects & ~kLazyAspects).none());
+
+        if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
+            bool matches = true;
+
+            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+                if (mBindgroups[i] == nullptr ||
+                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
+                    !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+                                             (*mMinBufferSizes)[i])) {
+                    matches = false;
+                    break;
+                }
+            }
+
+            if (matches) {
+                mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
+            }
+        }
+
+        if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
+            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+            const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
+                lastRenderPipeline->GetVertexBufferSlotsUsed();
+            if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
+                mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
+            }
+        }
+
+        if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
+            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+            if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
+                mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
+                mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
+            }
+        }
+    }
+
+    MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
+        if (!aspects.any()) {
+            return {};
+        }
+
+        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
+
+        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
+            DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
+
+            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+            wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
+
+            if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
+                DAWN_INVALID_IF(
+                    pipelineIndexFormat == wgpu::IndexFormat::Undefined,
+                    "%s has a strip primitive topology (%s) but a strip index format of %s, which "
+                    "prevents it for being used for indexed draw calls.",
+                    lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
+                    pipelineIndexFormat);
+
+                DAWN_INVALID_IF(
+                    mIndexFormat != pipelineIndexFormat,
+                    "Strip index format (%s) of %s does not match index buffer format (%s).",
+                    pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
+            }
+
+            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+            // It returns the first invalid state found. We shouldn't be able to reach this line
+            // because to have invalid aspects one of the above conditions must have failed earlier.
+            // If this is reached, make sure lazy aspects and the error checks above are consistent.
+            UNREACHABLE();
+            return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
+        }
+
+        // TODO(dawn:563): Indicate which slots were not set.
+        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
+                        "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
+
+        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
+            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+                ASSERT(HasPipeline());
+
+                DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
+                                static_cast<uint32_t>(i));
+
+                BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
+                BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
+
+                DAWN_INVALID_IF(
+                    requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
+                        currentBGL->GetPipelineCompatibilityToken() !=
+                            requiredBGL->GetPipelineCompatibilityToken(),
+                    "The current pipeline (%s) was created with a default layout, and is not "
+                    "compatible with the %s at index %u which uses a %s that was not created by "
+                    "the pipeline. Either use the bind group layout returned by calling "
+                    "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
+                    "provide an explicit pipeline layout when creating the pipeline.",
+                    mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
+                    static_cast<uint32_t>(i));
+
+                DAWN_INVALID_IF(
+                    requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
+                        currentBGL->GetPipelineCompatibilityToken() !=
+                            PipelineCompatibilityToken(0),
+                    "%s at index %u uses a %s which was created as part of the default layout for "
+                    "a different pipeline than the current one (%s), and as a result is not "
+                    "compatible. Use an explicit bind group layout when creating bind groups and "
+                    "an explicit pipeline layout when creating pipelines to share bind groups "
+                    "between pipelines.",
+                    mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
+
+                DAWN_INVALID_IF(
+                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
+                    "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
+                    "group %s at index %u.",
+                    requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
+                    static_cast<uint32_t>(i));
+
+                // TODO(dawn:563): Report the binding sizes and which ones are failing.
+                DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+                                                         (*mMinBufferSizes)[i]),
+                                "Binding sizes are too small for bind group %s at index %u",
+                                mBindgroups[i], static_cast<uint32_t>(i));
+            }
+
+            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+            // It returns the first invalid state found. We shouldn't be able to reach this line
+            // because to have invalid aspects one of the above conditions must have failed earlier.
+            // If this is reached, make sure lazy aspects and the error checks above are consistent.
+            UNREACHABLE();
+            return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
+        }
+
+        UNREACHABLE();
+    }
+
+    void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
+        SetPipelineCommon(pipeline);
+    }
+
+    void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
+        SetPipelineCommon(pipeline);
+    }
+
+    void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
+                                                 BindGroupBase* bindgroup,
+                                                 uint32_t dynamicOffsetCount,
+                                                 const uint32_t* dynamicOffsets) {
+        mBindgroups[index] = bindgroup;
+        mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
+        mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
+    }
+
+    void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
+        mIndexBufferSet = true;
+        mIndexFormat = format;
+        mIndexBufferSize = size;
+    }
+
+    void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
+        mVertexBufferSlotsUsed.set(slot);
+        mVertexBufferSizes[slot] = size;
+    }
+
+    void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
+        mLastPipeline = pipeline;
+        mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
+        mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
+
+        mAspects.set(VALIDATION_ASPECT_PIPELINE);
+
+        // Reset lazy aspects so they get recomputed on the next operation.
+        mAspects &= ~kLazyAspects;
+    }
+
+    BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
+        return mBindgroups[index];
+    }
+
+    const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
+        BindGroupIndex index) const {
+        return mDynamicOffsets[index];
+    }
+
+    bool CommandBufferStateTracker::HasPipeline() const {
+        return mLastPipeline != nullptr;
+    }
+
+    RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
+        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
+        return static_cast<RenderPipelineBase*>(mLastPipeline);
+    }
+
+    ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
+        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
+        return static_cast<ComputePipelineBase*>(mLastPipeline);
+    }
+
+    PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
+        return mLastPipelineLayout;
+    }
+
+    wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
+        return mIndexFormat;
+    }
+
+    uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
+        return mIndexBufferSize;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CommandBufferStateTracker.h b/src/dawn/native/CommandBufferStateTracker.h
new file mode 100644
index 0000000..b68e27a
--- /dev/null
+++ b/src/dawn/native/CommandBufferStateTracker.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
+#define DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+
+namespace dawn::native {
+
+    class CommandBufferStateTracker {
+      public:
+        // Non-state-modifying validation functions
+        MaybeError ValidateCanDispatch();
+        MaybeError ValidateCanDraw();
+        MaybeError ValidateCanDrawIndexed();
+        MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
+        MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
+                                                          uint32_t firstInstance);
+        MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
+
+        // State-modifying methods
+        void SetComputePipeline(ComputePipelineBase* pipeline);
+        void SetRenderPipeline(RenderPipelineBase* pipeline);
+        void SetBindGroup(BindGroupIndex index,
+                          BindGroupBase* bindgroup,
+                          uint32_t dynamicOffsetCount,
+                          const uint32_t* dynamicOffsets);
+        void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
+        void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
+
+        static constexpr size_t kNumAspects = 4;
+        using ValidationAspects = std::bitset<kNumAspects>;
+
+        BindGroupBase* GetBindGroup(BindGroupIndex index) const;
+        const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
+        bool HasPipeline() const;
+        RenderPipelineBase* GetRenderPipeline() const;
+        ComputePipelineBase* GetComputePipeline() const;
+        PipelineLayoutBase* GetPipelineLayout() const;
+        wgpu::IndexFormat GetIndexFormat() const;
+        uint64_t GetIndexBufferSize() const;
+
+      private:
+        MaybeError ValidateOperation(ValidationAspects requiredAspects);
+        void RecomputeLazyAspects(ValidationAspects aspects);
+        MaybeError CheckMissingAspects(ValidationAspects aspects);
+
+        void SetPipelineCommon(PipelineBase* pipeline);
+
+        ValidationAspects mAspects;
+
+        ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
+        ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
+        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+        bool mIndexBufferSet = false;
+        wgpu::IndexFormat mIndexFormat;
+        uint64_t mIndexBufferSize = 0;
+
+        ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
+
+        PipelineLayoutBase* mLastPipelineLayout = nullptr;
+        PipelineBase* mLastPipeline = nullptr;
+
+        const RequiredBufferSizes* mMinBufferSizes = nullptr;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
diff --git a/src/dawn/native/CommandEncoder.cpp b/src/dawn/native/CommandEncoder.cpp
new file mode 100644
index 0000000..7f516ab
--- /dev/null
+++ b/src/dawn/native/CommandEncoder.cpp
@@ -0,0 +1,1422 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandEncoder.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QueryHelper.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
+            return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
+                   !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
+        }
+
+        MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
+                                            uint64_t srcOffset,
+                                            uint64_t dstOffset) {
+            // Copy size must be a multiple of 4 bytes on macOS.
+            DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
+
+            // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
+            DAWN_INVALID_IF(
+                srcOffset % 4 != 0 || dstOffset % 4 != 0,
+                "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
+                srcOffset, dstOffset);
+
+            return {};
+        }
+
+        MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
+            DAWN_INVALID_IF(texture->GetSampleCount() > 1,
+                            "%s sample count (%u) is not 1 when copying to or from a buffer.",
+                            texture, texture->GetSampleCount());
+
+            return {};
+        }
+
+        MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
+                                                   const TexelBlockInfo& blockInfo,
+                                                   const bool hasDepthOrStencil) {
+            if (hasDepthOrStencil) {
+                // For depth-stencil texture, buffer offset must be a multiple of 4.
+                DAWN_INVALID_IF(layout.offset % 4 != 0,
+                                "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
+                                layout.offset);
+            } else {
+                DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
+                                "Offset (%u) is not a multiple of the texel block byte size (%u).",
+                                layout.offset, blockInfo.byteSize);
+            }
+            return {};
+        }
+
+        MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
+            const ImageCopyTexture& src) {
+            Aspect aspectUsed;
+            DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
+            if (aspectUsed == Aspect::Depth) {
+                switch (src.texture->GetFormat().format) {
+                    case wgpu::TextureFormat::Depth24Plus:
+                    case wgpu::TextureFormat::Depth24PlusStencil8:
+                    case wgpu::TextureFormat::Depth24UnormStencil8:
+                        return DAWN_FORMAT_VALIDATION_ERROR(
+                            "The depth aspect of %s format %s cannot be selected in a texture to "
+                            "buffer copy.",
+                            src.texture, src.texture->GetFormat().format);
+                    case wgpu::TextureFormat::Depth32Float:
+                    case wgpu::TextureFormat::Depth16Unorm:
+                    case wgpu::TextureFormat::Depth32FloatStencil8:
+                        break;
+
+                    default:
+                        UNREACHABLE();
+                }
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
+            // Currently we do not support layered rendering.
+            DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
+                            "The layer count (%u) of %s used as attachment is greater than 1.",
+                            attachment->GetLayerCount(), attachment);
+
+            DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
+                            "The mip level count (%u) of %s used as attachment is greater than 1.",
+                            attachment->GetLevelCount(), attachment);
+
+            return {};
+        }
+
+        MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
+                                               uint32_t* width,
+                                               uint32_t* height) {
+            const Extent3D& attachmentSize =
+                attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+
+            if (*width == 0) {
+                DAWN_ASSERT(*height == 0);
+                *width = attachmentSize.width;
+                *height = attachmentSize.height;
+                DAWN_ASSERT(*width != 0 && *height != 0);
+            } else {
+                DAWN_INVALID_IF(
+                    *width != attachmentSize.width || *height != attachmentSize.height,
+                    "Attachment %s size (width: %u, height: %u) does not match the size of the "
+                    "other attachments (width: %u, height: %u).",
+                    attachment, attachmentSize.width, attachmentSize.height, *width, *height);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
+                                                           uint32_t* sampleCount) {
+            if (*sampleCount == 0) {
+                *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
+                DAWN_ASSERT(*sampleCount != 0);
+            } else {
+                DAWN_INVALID_IF(
+                    *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
+                    "Color attachment %s sample count (%u) does not match the sample count of the "
+                    "other attachments (%u).",
+                    colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateResolveTarget(const DeviceBase* device,
+                                         const RenderPassColorAttachment& colorAttachment,
+                                         UsageValidationMode usageValidationMode) {
+            if (colorAttachment.resolveTarget == nullptr) {
+                return {};
+            }
+
+            const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
+            const TextureViewBase* attachment = colorAttachment.view;
+            DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
+            DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
+                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+            DAWN_INVALID_IF(
+                !attachment->GetTexture()->IsMultisampledTexture(),
+                "Cannot set %s as a resolve target when the color attachment %s has a sample "
+                "count of 1.",
+                resolveTarget, attachment);
+
+            DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
+                            "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
+                            resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
+
+            DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
+                            "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
+                            resolveTarget->GetLayerCount());
+
+            DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
+                            "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
+                            resolveTarget->GetLevelCount());
+
+            const Extent3D& colorTextureSize =
+                attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+            const Extent3D& resolveTextureSize =
+                resolveTarget->GetTexture()->GetMipLevelVirtualSize(
+                    resolveTarget->GetBaseMipLevel());
+            DAWN_INVALID_IF(
+                colorTextureSize.width != resolveTextureSize.width ||
+                    colorTextureSize.height != resolveTextureSize.height,
+                "The Resolve target %s size (width: %u, height: %u) does not match the color "
+                "attachment %s size (width: %u, height: %u).",
+                resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
+                colorTextureSize.width, colorTextureSize.height);
+
+            wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+            DAWN_INVALID_IF(
+                resolveTargetFormat != attachment->GetFormat().format,
+                "The resolve target %s format (%s) does not match the color attachment %s format "
+                "(%s).",
+                resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
+            DAWN_INVALID_IF(
+                !resolveTarget->GetFormat().supportsResolveTarget,
+                "The resolve target %s format (%s) does not support being used as resolve target.",
+                resolveTarget, resolveTargetFormat);
+
+            return {};
+        }
+
+        MaybeError ValidateRenderPassColorAttachment(
+            DeviceBase* device,
+            const RenderPassColorAttachment& colorAttachment,
+            uint32_t* width,
+            uint32_t* height,
+            uint32_t* sampleCount,
+            UsageValidationMode usageValidationMode) {
+            TextureViewBase* attachment = colorAttachment.view;
+            if (attachment == nullptr) {
+                return {};
+            }
+            DAWN_TRY(device->ValidateObject(attachment));
+            DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
+                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+            DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
+                                !attachment->GetFormat().isRenderable,
+                            "The color attachment %s format (%s) is not color renderable.",
+                            attachment, attachment->GetFormat().format);
+
+            DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
+            DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
+            DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined,
+                            "loadOp must be set.");
+            DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined,
+                            "storeOp must be set.");
+
+            // TODO(dawn:1269): Remove after the deprecation period.
+            bool useClearColor = HasDeprecatedColor(colorAttachment);
+            const dawn::native::Color& clearValue =
+                useClearColor ? colorAttachment.clearColor : colorAttachment.clearValue;
+            if (useClearColor) {
+                device->EmitDeprecationWarning(
+                    "clearColor is deprecated, prefer using clearValue instead.");
+            }
+
+            if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
+                DAWN_INVALID_IF(std::isnan(clearValue.r) || std::isnan(clearValue.g) ||
+                                    std::isnan(clearValue.b) || std::isnan(clearValue.a),
+                                "Color clear value (%s) contain a NaN.", &clearValue);
+            }
+
+            DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
+
+            DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
+
+            DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+            DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+            return {};
+        }
+
+        MaybeError ValidateRenderPassDepthStencilAttachment(
+            DeviceBase* device,
+            const RenderPassDepthStencilAttachment* depthStencilAttachment,
+            uint32_t* width,
+            uint32_t* height,
+            uint32_t* sampleCount,
+            UsageValidationMode usageValidationMode) {
+            DAWN_ASSERT(depthStencilAttachment != nullptr);
+
+            TextureViewBase* attachment = depthStencilAttachment->view;
+            DAWN_TRY(device->ValidateObject(attachment));
+            DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
+                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+            const Format& format = attachment->GetFormat();
+            DAWN_INVALID_IF(
+                !format.HasDepthOrStencil(),
+                "The depth stencil attachment %s format (%s) is not a depth stencil format.",
+                attachment, format.format);
+
+            DAWN_INVALID_IF(!format.isRenderable,
+                            "The depth stencil attachment %s format (%s) is not renderable.",
+                            attachment, format.format);
+
+            DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
+                            "The depth stencil attachment %s must encompass all aspects.",
+                            attachment);
+
+            DAWN_INVALID_IF(
+                attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
+                    depthStencilAttachment->depthReadOnly !=
+                        depthStencilAttachment->stencilReadOnly,
+                "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
+                "is 'all'.",
+                depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
+
+            // Read only, or depth doesn't exist.
+            if (depthStencilAttachment->depthReadOnly ||
+                !IsSubset(Aspect::Depth, attachment->GetAspects())) {
+                if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
+                    depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
+                    // TODO(dawn:1269): Remove this branch after the deprecation period.
+                    device->EmitDeprecationWarning(
+                        "Setting depthLoadOp and depthStoreOp when "
+                        "the attachment has no depth aspect or depthReadOnly is true is "
+                        "deprecated.");
+                } else {
+                    DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
+                                    "depthLoadOp (%s) must not be set if the attachment (%s) has "
+                                    "no depth aspect or depthReadOnly (%u) is true.",
+                                    depthStencilAttachment->depthLoadOp, attachment,
+                                    depthStencilAttachment->depthReadOnly);
+                    DAWN_INVALID_IF(
+                        depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
+                        "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
+                        "aspect or depthReadOnly (%u) is true.",
+                        depthStencilAttachment->depthStoreOp, attachment,
+                        depthStencilAttachment->depthReadOnly);
+                }
+            } else {
+                DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+                DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Undefined,
+                                "depthLoadOp must be set if the attachment (%s) has a depth aspect "
+                                "and depthReadOnly (%u) is false.",
+                                attachment, depthStencilAttachment->depthReadOnly);
+                DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+                DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Undefined,
+                                "depthStoreOp must be set if the attachment (%s) has a depth "
+                                "aspect and depthReadOnly (%u) is false.",
+                                attachment, depthStencilAttachment->depthReadOnly);
+            }
+
+            // Read only, or stencil doesn't exist.
+            if (depthStencilAttachment->stencilReadOnly ||
+                !IsSubset(Aspect::Stencil, attachment->GetAspects())) {
+                if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
+                    depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
+                    // TODO(dawn:1269): Remove this branch after the deprecation period.
+                    device->EmitDeprecationWarning(
+                        "Setting stencilLoadOp and stencilStoreOp when "
+                        "the attachment has no stencil aspect or stencilReadOnly is true is "
+                        "deprecated.");
+                } else {
+                    DAWN_INVALID_IF(
+                        depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
+                        "stencilLoadOp (%s) must not be set if the attachment (%s) has no stencil "
+                        "aspect or stencilReadOnly (%u) is true.",
+                        depthStencilAttachment->stencilLoadOp, attachment,
+                        depthStencilAttachment->stencilReadOnly);
+                    DAWN_INVALID_IF(
+                        depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
+                        "stencilStoreOp (%s) must not be set if the attachment (%s) has no stencil "
+                        "aspect or stencilReadOnly (%u) is true.",
+                        depthStencilAttachment->stencilStoreOp, attachment,
+                        depthStencilAttachment->stencilReadOnly);
+                }
+            } else {
+                DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+                DAWN_INVALID_IF(
+                    depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
+                    "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
+                    "aspect and stencilReadOnly (%u) is false.",
+                    depthStencilAttachment->stencilLoadOp, attachment,
+                    depthStencilAttachment->stencilReadOnly);
+                DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+                DAWN_INVALID_IF(
+                    depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
+                    "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
+                    "aspect and stencilReadOnly (%u) is false.",
+                    depthStencilAttachment->stencilStoreOp, attachment,
+                    depthStencilAttachment->stencilReadOnly);
+            }
+
+            if (!std::isnan(depthStencilAttachment->clearDepth)) {
+                // TODO(dawn:1269): Remove this branch after the deprecation period.
+                device->EmitDeprecationWarning(
+                    "clearDepth is deprecated, prefer depthClearValue instead.");
+            } else {
+                DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+                                    std::isnan(depthStencilAttachment->depthClearValue),
+                                "depthClearValue is NaN.");
+            }
+
+            // TODO(dawn:1269): Remove after the deprecation period.
+            if (depthStencilAttachment->stencilClearValue == 0 &&
+                depthStencilAttachment->clearStencil != 0) {
+                device->EmitDeprecationWarning(
+                    "clearStencil is deprecated, prefer stencilClearValue instead.");
+            }
+
+            // *sampleCount == 0 must only happen when there is no color attachment. In that case we
+            // do not need to validate the sample count of the depth stencil attachment.
+            const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
+            if (*sampleCount != 0) {
+                DAWN_INVALID_IF(
+                    depthStencilSampleCount != *sampleCount,
+                    "The depth stencil attachment %s sample count (%u) does not match the sample "
+                    "count of the other attachments (%u).",
+                    attachment, depthStencilSampleCount, *sampleCount);
+            } else {
+                *sampleCount = depthStencilSampleCount;
+            }
+
+            DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+            DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+            return {};
+        }
+
+        MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
+                                                const RenderPassDescriptor* descriptor,
+                                                uint32_t* width,
+                                                uint32_t* height,
+                                                uint32_t* sampleCount,
+                                                UsageValidationMode usageValidationMode) {
+            DAWN_INVALID_IF(
+                descriptor->colorAttachmentCount > kMaxColorAttachments,
+                "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
+                descriptor->colorAttachmentCount, kMaxColorAttachments);
+
+            bool isAllColorAttachmentNull = true;
+            for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
+                DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment(
+                                     device, descriptor->colorAttachments[i], width, height,
+                                     sampleCount, usageValidationMode),
+                                 "validating colorAttachments[%u].", i);
+                if (descriptor->colorAttachments[i].view) {
+                    isAllColorAttachmentNull = false;
+                }
+            }
+
+            if (descriptor->depthStencilAttachment != nullptr) {
+                DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
+                                     device, descriptor->depthStencilAttachment, width, height,
+                                     sampleCount, usageValidationMode),
+                                 "validating depthStencilAttachment.");
+            } else {
+                DAWN_INVALID_IF(
+                    isAllColorAttachmentNull,
+                    "No color or depthStencil attachments specified. At least one is required.");
+            }
+
+            if (descriptor->occlusionQuerySet != nullptr) {
+                DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
+
+                DAWN_INVALID_IF(
+                    descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
+                    "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
+                    descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
+            }
+
+            if (descriptor->timestampWriteCount > 0) {
+                DAWN_ASSERT(descriptor->timestampWrites != nullptr);
+
+                // Record the query set and query index used on render passes for validating query
+                // index overwrite. The TrackQueryAvailability of
+                // RenderPassResourceUsageTracker is not used here because the timestampWrites are
+                // not validated and encoded one by one, but encoded together after passing the
+                // validation.
+                QueryAvailabilityMap usedQueries;
+                for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+                    DAWN_ASSERT(querySet != nullptr);
+                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+                    DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
+                                     "validating querySet and queryIndex of timestampWrites[%u].",
+                                     i);
+                    DAWN_TRY_CONTEXT(ValidateRenderPassTimestampLocation(
+                                         descriptor->timestampWrites[i].location),
+                                     "validating location of timestampWrites[%u].", i);
+
+                    auto checkIt = usedQueries.find(querySet);
+                    DAWN_INVALID_IF(checkIt != usedQueries.end() && checkIt->second[queryIndex],
+                                    "Query index %u of %s is written to twice in a render pass.",
+                                    queryIndex, querySet);
+
+                    // Gets the iterator for that querySet or create a new vector of bool set to
+                    // false if the querySet wasn't registered.
+                    auto addIt = usedQueries.emplace(querySet, querySet->GetQueryCount()).first;
+                    addIt->second[queryIndex] = true;
+                }
+            }
+
+            DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
+                                descriptor->depthStencilAttachment == nullptr,
+                            "Render pass has no attachments.");
+
+            return {};
+        }
+
+        MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
+                                                 const ComputePassDescriptor* descriptor) {
+            if (descriptor == nullptr) {
+                return {};
+            }
+
+            if (descriptor->timestampWriteCount > 0) {
+                DAWN_ASSERT(descriptor->timestampWrites != nullptr);
+
+                for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+                    DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
+                    DAWN_TRY_CONTEXT(
+                        ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
+                                               descriptor->timestampWrites[i].queryIndex),
+                        "validating querySet and queryIndex of timestampWrites[%u].", i);
+                    DAWN_TRY_CONTEXT(ValidateComputePassTimestampLocation(
+                                         descriptor->timestampWrites[i].location),
+                                     "validating location of timestampWrites[%u].", i);
+                }
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
+                                           uint32_t firstQuery,
+                                           uint32_t queryCount,
+                                           const BufferBase* destination,
+                                           uint64_t destinationOffset) {
+            DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
+                            "First query (%u) exceeds the number of queries (%u) in %s.",
+                            firstQuery, querySet->GetQueryCount(), querySet);
+
+            DAWN_INVALID_IF(
+                queryCount > querySet->GetQueryCount() - firstQuery,
+                "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
+                "(%u) in %s.",
+                firstQuery, queryCount, querySet->GetQueryCount(), querySet);
+
+            DAWN_INVALID_IF(destinationOffset % 256 != 0,
+                            "The destination buffer %s offset (%u) is not a multiple of 256.",
+                            destination, destinationOffset);
+
+            uint64_t bufferSize = destination->GetSize();
+            // The destination buffer must have enough storage, from destination offset, to contain
+            // the result of resolved queries
+            bool fitsInBuffer = destinationOffset <= bufferSize &&
+                                (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
+                                 (bufferSize - destinationOffset));
+            DAWN_INVALID_IF(
+                !fitsInBuffer,
+                "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
+                querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
+                bufferSize, destinationOffset);
+
+            return {};
+        }
+
+        MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
+                                                           QuerySetBase* querySet,
+                                                           uint32_t firstQuery,
+                                                           uint32_t queryCount,
+                                                           BufferBase* destination,
+                                                           uint64_t destinationOffset) {
+            DeviceBase* device = encoder->GetDevice();
+
+            // The availability got from query set is a reference to vector<bool>, need to covert
+            // bool to uint32_t due to a user input in pipeline must not contain a bool type in
+            // WGSL.
+            std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
+                                               querySet->GetQueryAvailability().end()};
+
+            // Timestamp availability storage buffer
+            BufferDescriptor availabilityDesc = {};
+            availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+            availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
+            Ref<BufferBase> availabilityBuffer;
+            DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
+
+            DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
+                                                     availability.data(),
+                                                     availability.size() * sizeof(uint32_t)));
+
+            // Timestamp params uniform buffer
+            TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
+                                   device->GetTimestampPeriodInNS());
+
+            BufferDescriptor parmsDesc = {};
+            parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+            parmsDesc.size = sizeof(params);
+            Ref<BufferBase> paramsBuffer;
+            DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
+
+            DAWN_TRY(
+                device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
+
+            return EncodeConvertTimestampsToNanoseconds(
+                encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
+        }
+
+        bool IsReadOnlyDepthStencilAttachment(
+            const RenderPassDepthStencilAttachment* depthStencilAttachment) {
+            DAWN_ASSERT(depthStencilAttachment != nullptr);
+            Aspect aspects = depthStencilAttachment->view->GetAspects();
+            DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
+
+            if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
+                return false;
+            }
+            if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
+                return false;
+            }
+            return true;
+        }
+
+    }  // namespace
+
+    MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+                                                const CommandEncoderDescriptor* descriptor) {
+        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                     wgpu::SType::DawnEncoderInternalUsageDescriptor));
+
+        const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+        DAWN_INVALID_IF(internalUsageDesc != nullptr &&
+                            !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
+                        "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
+        return {};
+    }
+
+    // static
+    Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
+                                               const CommandEncoderDescriptor* descriptor) {
+        return AcquireRef(new CommandEncoder(device, descriptor));
+    }
+
+    // static
+    CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
+        return new CommandEncoder(device, ObjectBase::kError);
+    }
+
+    CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
+        : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
+        TrackInDevice();
+
+        const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+        if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
+            mUsageValidationMode = UsageValidationMode::Internal;
+        } else {
+            mUsageValidationMode = UsageValidationMode::Default;
+        }
+    }
+
+    CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag),
+          mEncodingContext(device, this),
+          mUsageValidationMode(UsageValidationMode::Default) {
+        mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
+    }
+
+    ObjectType CommandEncoder::GetType() const {
+        return ObjectType::CommandEncoder;
+    }
+
+    void CommandEncoder::DestroyImpl() {
+        mEncodingContext.Destroy();
+    }
+
+    CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
+        return CommandBufferResourceUsage{
+            mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
+            std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
+    }
+
+    CommandIterator CommandEncoder::AcquireCommands() {
+        return mEncodingContext.AcquireCommands();
+    }
+
+    void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
+        mUsedQuerySets.insert(querySet);
+    }
+
+    void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+        DAWN_ASSERT(querySet != nullptr);
+
+        if (GetDevice()->IsValidationEnabled()) {
+            TrackUsedQuerySet(querySet);
+        }
+
+        // Set the query at queryIndex to available for resolving in query set.
+        querySet->SetQueryAvailability(queryIndex, true);
+    }
+
+    // Implementation of the API's command recording methods
+
+    ComputePassEncoder* CommandEncoder::APIBeginComputePass(
+        const ComputePassDescriptor* descriptor) {
+        return BeginComputePass(descriptor).Detach();
+    }
+
+    Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(
+        const ComputePassDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+
+        std::vector<TimestampWrite> timestampWritesAtBeginning;
+        std::vector<TimestampWrite> timestampWritesAtEnd;
+        bool success = mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
+
+                BeginComputePassCmd* cmd =
+                    allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
+
+                if (descriptor == nullptr) {
+                    return {};
+                }
+
+                // Split the timestampWrites used in BeginComputePassCmd and EndComputePassCmd
+                for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+
+                    switch (descriptor->timestampWrites[i].location) {
+                        case wgpu::ComputePassTimestampLocation::Beginning:
+                            timestampWritesAtBeginning.push_back({querySet, queryIndex});
+                            break;
+                        case wgpu::ComputePassTimestampLocation::End:
+                            timestampWritesAtEnd.push_back({querySet, queryIndex});
+                            break;
+                        default:
+                            break;
+                    }
+
+                    TrackQueryAvailability(querySet, queryIndex);
+                }
+
+                cmd->timestampWrites = std::move(timestampWritesAtBeginning);
+
+                return {};
+            },
+            "encoding %s.BeginComputePass(%s).", this, descriptor);
+
+        if (success) {
+            const ComputePassDescriptor defaultDescriptor = {};
+            if (descriptor == nullptr) {
+                descriptor = &defaultDescriptor;
+            }
+
+            Ref<ComputePassEncoder> passEncoder = ComputePassEncoder::Create(
+                device, descriptor, this, &mEncodingContext, std::move(timestampWritesAtEnd));
+            mEncodingContext.EnterPass(passEncoder.Get());
+            return passEncoder;
+        }
+
+        return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
+    }
+
+    RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
+        return BeginRenderPass(descriptor).Detach();
+    }
+
+    Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+
+        RenderPassResourceUsageTracker usageTracker;
+
+        uint32_t width = 0;
+        uint32_t height = 0;
+        bool depthReadOnly = false;
+        bool stencilReadOnly = false;
+        Ref<AttachmentState> attachmentState;
+        std::vector<TimestampWrite> timestampWritesAtBeginning;
+        std::vector<TimestampWrite> timestampWritesAtEnd;
+        bool success = mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                uint32_t sampleCount = 0;
+
+                DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
+                                                      &sampleCount, mUsageValidationMode));
+
+                ASSERT(width > 0 && height > 0 && sampleCount > 0);
+
+                mEncodingContext.WillBeginRenderPass();
+                BeginRenderPassCmd* cmd =
+                    allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
+
+                cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
+                attachmentState = cmd->attachmentState;
+
+                // Split the timestampWrites used in BeginRenderPassCmd and EndRenderPassCmd
+                for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+
+                    switch (descriptor->timestampWrites[i].location) {
+                        case wgpu::RenderPassTimestampLocation::Beginning:
+                            timestampWritesAtBeginning.push_back({querySet, queryIndex});
+                            break;
+                        case wgpu::RenderPassTimestampLocation::End:
+                            timestampWritesAtEnd.push_back({querySet, queryIndex});
+                            break;
+                        default:
+                            break;
+                    }
+
+                    TrackQueryAvailability(querySet, queryIndex);
+                    // Track the query availability with true on render pass again for rewrite
+                    // validation and query reset on Vulkan
+                    usageTracker.TrackQueryAvailability(querySet, queryIndex);
+                }
+
+                for (ColorAttachmentIndex index :
+                     IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
+                    uint8_t i = static_cast<uint8_t>(index);
+                    TextureViewBase* view = descriptor->colorAttachments[i].view;
+                    TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
+
+                    cmd->colorAttachments[index].view = view;
+                    cmd->colorAttachments[index].resolveTarget = resolveTarget;
+                    cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
+                    cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
+
+                    cmd->colorAttachments[index].clearColor =
+                        HasDeprecatedColor(descriptor->colorAttachments[i])
+                            ? descriptor->colorAttachments[i].clearColor
+                            : descriptor->colorAttachments[i].clearValue;
+
+                    usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+
+                    if (resolveTarget != nullptr) {
+                        usageTracker.TextureViewUsedAs(resolveTarget,
+                                                       wgpu::TextureUsage::RenderAttachment);
+                    }
+                }
+
+                if (cmd->attachmentState->HasDepthStencilAttachment()) {
+                    TextureViewBase* view = descriptor->depthStencilAttachment->view;
+
+                    cmd->depthStencilAttachment.view = view;
+
+                    if (!std::isnan(descriptor->depthStencilAttachment->clearDepth)) {
+                        // TODO(dawn:1269): Remove this branch after the deprecation period.
+                        cmd->depthStencilAttachment.clearDepth =
+                            descriptor->depthStencilAttachment->clearDepth;
+                    } else {
+                        cmd->depthStencilAttachment.clearDepth =
+                            descriptor->depthStencilAttachment->depthClearValue;
+                    }
+
+                    if (descriptor->depthStencilAttachment->stencilClearValue == 0 &&
+                        descriptor->depthStencilAttachment->clearStencil != 0) {
+                        // TODO(dawn:1269): Remove this branch after the deprecation period.
+                        cmd->depthStencilAttachment.clearStencil =
+                            descriptor->depthStencilAttachment->clearStencil;
+                    } else {
+                        cmd->depthStencilAttachment.clearStencil =
+                            descriptor->depthStencilAttachment->stencilClearValue;
+                    }
+
+                    cmd->depthStencilAttachment.depthReadOnly =
+                        descriptor->depthStencilAttachment->depthReadOnly;
+                    cmd->depthStencilAttachment.stencilReadOnly =
+                        descriptor->depthStencilAttachment->stencilReadOnly;
+
+                    if (descriptor->depthStencilAttachment->depthReadOnly ||
+                        !IsSubset(Aspect::Depth,
+                                  descriptor->depthStencilAttachment->view->GetAspects())) {
+                        cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
+                        cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+                    } else {
+                        cmd->depthStencilAttachment.depthLoadOp =
+                            descriptor->depthStencilAttachment->depthLoadOp;
+                        cmd->depthStencilAttachment.depthStoreOp =
+                            descriptor->depthStencilAttachment->depthStoreOp;
+                    }
+
+                    if (descriptor->depthStencilAttachment->stencilReadOnly ||
+                        !IsSubset(Aspect::Stencil,
+                                  descriptor->depthStencilAttachment->view->GetAspects())) {
+                        cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
+                        cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
+                    } else {
+                        cmd->depthStencilAttachment.stencilLoadOp =
+                            descriptor->depthStencilAttachment->stencilLoadOp;
+                        cmd->depthStencilAttachment.stencilStoreOp =
+                            descriptor->depthStencilAttachment->stencilStoreOp;
+                    }
+
+                    if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
+                        usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
+                    } else {
+                        usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+                    }
+
+                    depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
+                    stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
+                }
+
+                cmd->width = width;
+                cmd->height = height;
+
+                cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
+
+                cmd->timestampWrites = std::move(timestampWritesAtBeginning);
+
+                return {};
+            },
+            "encoding %s.BeginRenderPass(%s).", this, descriptor);
+
+        if (success) {
+            Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
+                device, descriptor, this, &mEncodingContext, std::move(usageTracker),
+                std::move(attachmentState), std::move(timestampWritesAtEnd), width, height,
+                depthReadOnly, stencilReadOnly);
+            mEncodingContext.EnterPass(passEncoder.Get());
+            return passEncoder;
+        }
+
+        return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
+    }
+
+    void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(source));
+                    DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+                    DAWN_INVALID_IF(source == destination,
+                                    "Source and destination are the same buffer (%s).", source);
+
+                    DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
+                                     "validating source %s copy size.", source);
+                    DAWN_TRY_CONTEXT(
+                        ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+                        "validating destination %s copy size.", destination);
+                    DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
+
+                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
+                                     "validating source %s usage.", source);
+                    DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
+                                     "validating destination %s usage.", destination);
+
+                    mTopLevelBuffers.insert(source);
+                    mTopLevelBuffers.insert(destination);
+                }
+
+                CopyBufferToBufferCmd* copy =
+                    allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+                copy->source = source;
+                copy->sourceOffset = sourceOffset;
+                copy->destination = destination;
+                copy->destinationOffset = destinationOffset;
+                copy->size = size;
+
+                return {};
+            },
+            "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
+            destination, destinationOffset, size);
+    }
+
+    void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
+                                                const ImageCopyTexture* destination,
+                                                const Extent3D* copySize) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
+                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
+                                     "validating source %s usage.", source->buffer);
+
+                    DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
+                    DAWN_TRY_CONTEXT(
+                        ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                         mUsageValidationMode),
+                        "validating destination %s usage.", destination->texture);
+                    DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
+
+                    DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+                    // We validate texture copy range before validating linear texture data,
+                    // because in the latter we divide copyExtent.width by blockWidth and
+                    // copyExtent.height by blockHeight while the divisibility conditions are
+                    // checked in validating texture copy range.
+                    DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
+                }
+                const TexelBlockInfo& blockInfo =
+                    destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateLinearTextureCopyOffset(
+                        source->layout, blockInfo,
+                        destination->texture->GetFormat().HasDepthOrStencil()));
+                    DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
+                                                       blockInfo, *copySize));
+
+                    mTopLevelBuffers.insert(source->buffer);
+                    mTopLevelTextures.insert(destination->texture);
+                }
+
+                TextureDataLayout srcLayout = source->layout;
+                ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+
+                CopyBufferToTextureCmd* copy =
+                    allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
+                copy->source.buffer = source->buffer;
+                copy->source.offset = srcLayout.offset;
+                copy->source.bytesPerRow = srcLayout.bytesPerRow;
+                copy->source.rowsPerImage = srcLayout.rowsPerImage;
+                copy->destination.texture = destination->texture;
+                copy->destination.origin = destination->origin;
+                copy->destination.mipLevel = destination->mipLevel;
+                copy->destination.aspect =
+                    ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+                copy->copySize = *copySize;
+
+                return {};
+            },
+            "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
+            destination->texture, copySize);
+    }
+
+    void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
+                                                const ImageCopyBuffer* destination,
+                                                const Extent3D* copySize) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                                      mUsageValidationMode),
+                                     "validating source %s usage.", source->texture);
+                    DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
+                    DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
+
+                    DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
+                    DAWN_TRY_CONTEXT(
+                        ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+                        "validating destination %s usage.", destination->buffer);
+
+                    // We validate texture copy range before validating linear texture data,
+                    // because in the latter we divide copyExtent.width by blockWidth and
+                    // copyExtent.height by blockHeight while the divisibility conditions are
+                    // checked in validating texture copy range.
+                    DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+                }
+                const TexelBlockInfo& blockInfo =
+                    source->texture->GetFormat().GetAspectInfo(source->aspect).block;
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateLinearTextureCopyOffset(
+                        destination->layout, blockInfo,
+                        source->texture->GetFormat().HasDepthOrStencil()));
+                    DAWN_TRY(ValidateLinearTextureData(
+                        destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
+
+                    mTopLevelTextures.insert(source->texture);
+                    mTopLevelBuffers.insert(destination->buffer);
+                }
+
+                TextureDataLayout dstLayout = destination->layout;
+                ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+
+                CopyTextureToBufferCmd* copy =
+                    allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
+                copy->source.texture = source->texture;
+                copy->source.origin = source->origin;
+                copy->source.mipLevel = source->mipLevel;
+                copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+                copy->destination.buffer = destination->buffer;
+                copy->destination.offset = dstLayout.offset;
+                copy->destination.bytesPerRow = dstLayout.bytesPerRow;
+                copy->destination.rowsPerImage = dstLayout.rowsPerImage;
+                copy->copySize = *copySize;
+
+                return {};
+            },
+            "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
+            destination->buffer, copySize);
+    }
+
+    void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
+                                                 const ImageCopyTexture* destination,
+                                                 const Extent3D* copySize) {
+        APICopyTextureToTextureHelper<false>(source, destination, copySize);
+    }
+
+    void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+                                                         const ImageCopyTexture* destination,
+                                                         const Extent3D* copySize) {
+        APICopyTextureToTextureHelper<true>(source, destination, copySize);
+    }
+
+    template <bool Internal>
+    void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+                                                       const ImageCopyTexture* destination,
+                                                       const Extent3D* copySize) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(source->texture));
+                    DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+                    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
+                                     "validating source %s.", source->texture);
+                    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
+                                     "validating destination %s.", destination->texture);
+
+                    DAWN_TRY(
+                        ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+
+                    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
+                                     "validating source %s copy range.", source->texture);
+                    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
+                                     "validating source %s copy range.", destination->texture);
+
+                    // For internal usages (CopyToCopyInternal) we don't care if the user has added
+                    // CopySrc as a usage for this texture, but we will always add it internally.
+                    if (Internal) {
+                        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                                  UsageValidationMode::Internal));
+                        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                                  UsageValidationMode::Internal));
+                    } else {
+                        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                                  mUsageValidationMode));
+                        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                                  mUsageValidationMode));
+                    }
+
+                    mTopLevelTextures.insert(source->texture);
+                    mTopLevelTextures.insert(destination->texture);
+                }
+
+                CopyTextureToTextureCmd* copy =
+                    allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
+                copy->source.texture = source->texture;
+                copy->source.origin = source->origin;
+                copy->source.mipLevel = source->mipLevel;
+                copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+                copy->destination.texture = destination->texture;
+                copy->destination.origin = destination->origin;
+                copy->destination.mipLevel = destination->mipLevel;
+                copy->destination.aspect =
+                    ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+                copy->copySize = *copySize;
+
+                return {};
+            },
+            "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
+            destination->texture, copySize);
+    }
+
+    void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
+
+                    uint64_t bufferSize = buffer->GetSize();
+                    DAWN_INVALID_IF(offset > bufferSize,
+                                    "Buffer offset (%u) is larger than the size (%u) of %s.",
+                                    offset, bufferSize, buffer);
+
+                    uint64_t remainingSize = bufferSize - offset;
+                    if (size == wgpu::kWholeSize) {
+                        size = remainingSize;
+                    } else {
+                        DAWN_INVALID_IF(size > remainingSize,
+                                        "Buffer range (offset: %u, size: %u) doesn't fit in "
+                                        "the size (%u) of %s.",
+                                        offset, size, bufferSize, buffer);
+                    }
+
+                    DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
+                                     "validating buffer %s usage.", buffer);
+
+                    // Size must be a multiple of 4 bytes on macOS.
+                    DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
+                                    size);
+
+                    // Offset must be multiples of 4 bytes on macOS.
+                    DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
+                                    offset);
+
+                    mTopLevelBuffers.insert(buffer);
+                } else {
+                    if (size == wgpu::kWholeSize) {
+                        DAWN_ASSERT(buffer->GetSize() >= offset);
+                        size = buffer->GetSize() - offset;
+                    }
+                }
+
+                ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
+                cmd->buffer = buffer;
+                cmd->offset = offset;
+                cmd->size = size;
+
+                return {};
+            },
+            "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
+    }
+
+    void CommandEncoder::APIInjectValidationError(const char* message) {
+        if (mEncodingContext.CheckCurrentEncoder(this)) {
+            mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
+        }
+    }
+
+    void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                InsertDebugMarkerCmd* cmd =
+                    allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                return {};
+            },
+            "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+    }
+
+    void CommandEncoder::APIPopDebugGroup() {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_INVALID_IF(
+                        mDebugGroupStackSize == 0,
+                        "PopDebugGroup called when no debug groups are currently pushed.");
+                }
+                allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+                mDebugGroupStackSize--;
+                mEncodingContext.PopDebugGroupLabel();
+
+                return {};
+            },
+            "encoding %s.PopDebugGroup().", this);
+    }
+
+    void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                PushDebugGroupCmd* cmd =
+                    allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                mDebugGroupStackSize++;
+                mEncodingContext.PushDebugGroupLabel(groupLabel);
+
+                return {};
+            },
+            "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+    }
+
+    void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
+                                            uint32_t firstQuery,
+                                            uint32_t queryCount,
+                                            BufferBase* destination,
+                                            uint64_t destinationOffset) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(querySet));
+                    DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+                    DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+                                                     destinationOffset));
+
+                    DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+
+                    TrackUsedQuerySet(querySet);
+                    mTopLevelBuffers.insert(destination);
+                }
+
+                ResolveQuerySetCmd* cmd =
+                    allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+                cmd->querySet = querySet;
+                cmd->firstQuery = firstQuery;
+                cmd->queryCount = queryCount;
+                cmd->destination = destination;
+                cmd->destinationOffset = destinationOffset;
+
+                // Encode internal compute pipeline for timestamp query
+                if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
+                    !GetDevice()->IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+                    DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+                        this, querySet, firstQuery, queryCount, destination, destinationOffset));
+                }
+
+                return {};
+            },
+            "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
+            queryCount, destination, destinationOffset);
+    }
+
+    void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
+                                        uint64_t bufferOffset,
+                                        const uint8_t* data,
+                                        uint64_t size) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+                }
+
+                WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
+                cmd->buffer = buffer;
+                cmd->offset = bufferOffset;
+                cmd->size = size;
+
+                uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
+                memcpy(inlinedData, data, size);
+
+                mTopLevelBuffers.insert(buffer);
+
+                return {};
+            },
+            "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
+    }
+
+    void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+        mEncodingContext.TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (GetDevice()->IsValidationEnabled()) {
+                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+                }
+
+                TrackQueryAvailability(querySet, queryIndex);
+
+                WriteTimestampCmd* cmd =
+                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+                cmd->querySet = querySet;
+                cmd->queryIndex = queryIndex;
+
+                return {};
+            },
+            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+    }
+
+    CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
+        Ref<CommandBufferBase> commandBuffer;
+        if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
+            return CommandBufferBase::MakeError(GetDevice());
+        }
+        ASSERT(!IsError());
+        return commandBuffer.Detach();
+    }
+
+    ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
+        const CommandBufferDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+
+        // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+        // state of the encoding context. The internal state is set to finished, and subsequent
+        // calls to encode commands will generate errors.
+        DAWN_TRY(mEncodingContext.Finish());
+        DAWN_TRY(device->ValidateIsAlive());
+
+        if (device->IsValidationEnabled()) {
+            DAWN_TRY(ValidateFinish());
+        }
+
+        const CommandBufferDescriptor defaultDescriptor = {};
+        if (descriptor == nullptr) {
+            descriptor = &defaultDescriptor;
+        }
+
+        return device->CreateCommandBuffer(this, descriptor);
+    }
+
+    // Implementation of the command buffer validation that can be precomputed before submit
+    MaybeError CommandEncoder::ValidateFinish() const {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
+            DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
+                             "validating render pass usage.");
+        }
+
+        for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
+            for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
+                DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
+                                 "validating compute pass usage.");
+            }
+        }
+
+        DAWN_INVALID_IF(
+            mDebugGroupStackSize != 0,
+            "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
+            "calling Finish.",
+            mDebugGroupStackSize);
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CommandEncoder.h b/src/dawn/native/CommandEncoder.h
new file mode 100644
index 0000000..59e19c6
--- /dev/null
+++ b/src/dawn/native/CommandEncoder.h
@@ -0,0 +1,122 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDENCODER_H_
+#define DAWNNATIVE_COMMANDENCODER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/EncodingContext.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    enum class UsageValidationMode;
+
+    MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+                                                const CommandEncoderDescriptor* descriptor);
+
+    class CommandEncoder final : public ApiObjectBase {
+      public:
+        static Ref<CommandEncoder> Create(DeviceBase* device,
+                                          const CommandEncoderDescriptor* descriptor);
+        static CommandEncoder* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        CommandIterator AcquireCommands();
+        CommandBufferResourceUsage AcquireResourceUsages();
+
+        void TrackUsedQuerySet(QuerySetBase* querySet);
+        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+        // Dawn API
+        ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
+        RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
+
+        void APICopyBufferToBuffer(BufferBase* source,
+                                   uint64_t sourceOffset,
+                                   BufferBase* destination,
+                                   uint64_t destinationOffset,
+                                   uint64_t size);
+        void APICopyBufferToTexture(const ImageCopyBuffer* source,
+                                    const ImageCopyTexture* destination,
+                                    const Extent3D* copySize);
+        void APICopyTextureToBuffer(const ImageCopyTexture* source,
+                                    const ImageCopyBuffer* destination,
+                                    const Extent3D* copySize);
+        void APICopyTextureToTexture(const ImageCopyTexture* source,
+                                     const ImageCopyTexture* destination,
+                                     const Extent3D* copySize);
+        void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize);
+        void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
+
+        void APIInjectValidationError(const char* message);
+        void APIInsertDebugMarker(const char* groupLabel);
+        void APIPopDebugGroup();
+        void APIPushDebugGroup(const char* groupLabel);
+
+        void APIResolveQuerySet(QuerySetBase* querySet,
+                                uint32_t firstQuery,
+                                uint32_t queryCount,
+                                BufferBase* destination,
+                                uint64_t destinationOffset);
+        void APIWriteBuffer(BufferBase* buffer,
+                            uint64_t bufferOffset,
+                            const uint8_t* data,
+                            uint64_t size);
+        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+        CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
+
+        Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
+        Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
+        ResultOrError<Ref<CommandBufferBase>> Finish(
+            const CommandBufferDescriptor* descriptor = nullptr);
+
+      private:
+        CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+        CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        void DestroyImpl() override;
+
+        // Helper to be able to implement both APICopyTextureToTexture and
+        // APICopyTextureToTextureInternal. The only difference between both
+        // copies, is that the Internal one will also check internal usage.
+        template <bool Internal>
+        void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+                                           const ImageCopyTexture* destination,
+                                           const Extent3D* copySize);
+
+        MaybeError ValidateFinish() const;
+
+        EncodingContext mEncodingContext;
+        std::set<BufferBase*> mTopLevelBuffers;
+        std::set<TextureBase*> mTopLevelTextures;
+        std::set<QuerySetBase*> mUsedQuerySets;
+
+        uint64_t mDebugGroupStackSize = 0;
+
+        UsageValidationMode mUsageValidationMode;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMANDENCODER_H_
diff --git a/src/dawn/native/CommandValidation.cpp b/src/dawn/native/CommandValidation.cpp
new file mode 100644
index 0000000..44fbdf8
--- /dev/null
+++ b/src/dawn/native/CommandValidation.cpp
@@ -0,0 +1,496 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandValidation.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+    // Performs validation of the "synchronization scope" rules of WebGPU.
+    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
+        // Buffers can only be used as single-write or multiple read.
+        for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
+            const wgpu::BufferUsage usage = scope.bufferUsages[i];
+            bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
+            bool singleUse = wgpu::HasZeroOrOneBits(usage);
+
+            DAWN_INVALID_IF(!readOnly && !singleUse,
+                            "%s usage (%s) includes writable usage and another usage in the same "
+                            "synchronization scope.",
+                            scope.buffers[i], usage);
+        }
+
+        // Check that every single subresource is used as either a single-write usage or a
+        // combination of readonly usages.
+        for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
+            const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
+            MaybeError error = {};
+            textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+                bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
+                bool singleUse = wgpu::HasZeroOrOneBits(usage);
+                if (!readOnly && !singleUse && !error.IsError()) {
+                    error = DAWN_FORMAT_VALIDATION_ERROR(
+                        "%s usage (%s) includes writable usage and another usage in the same "
+                        "synchronization scope.",
+                        scope.textures[i], usage);
+                }
+            });
+            DAWN_TRY(std::move(error));
+        }
+        return {};
+    }
+
+    MaybeError ValidateTimestampQuery(const DeviceBase* device,
+                                      const QuerySetBase* querySet,
+                                      uint32_t queryIndex) {
+        DAWN_TRY(device->ValidateObject(querySet));
+
+        DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
+                        "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
+
+        DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
+                        "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
+                        querySet->GetQueryCount(), querySet);
+
+        return {};
+    }
+
+    MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                                   const BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   uint64_t size) {
+        DAWN_TRY(device->ValidateObject(buffer));
+
+        DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
+                        bufferOffset);
+
+        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
+
+        uint64_t bufferSize = buffer->GetSize();
+        DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
+                        "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
+                        bufferOffset, size, buffer, bufferSize);
+
+        DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
+                        "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
+                        wgpu::BufferUsage::CopyDst);
+
+        return {};
+    }
+
+    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
+        uint32_t maxStart = std::max(startA, startB);
+        uint32_t minStart = std::min(startA, startB);
+        return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
+               static_cast<uint64_t>(maxStart);
+    }
+
+    template <typename A, typename B>
+    DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
+        static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
+        static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
+        return uint64_t(a) * uint64_t(b);
+    }
+
+    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                       const Extent3D& copySize,
+                                                       uint32_t bytesPerRow,
+                                                       uint32_t rowsPerImage) {
+        ASSERT(copySize.width % blockInfo.width == 0);
+        ASSERT(copySize.height % blockInfo.height == 0);
+        uint32_t widthInBlocks = copySize.width / blockInfo.width;
+        uint32_t heightInBlocks = copySize.height / blockInfo.height;
+        uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
+
+        if (copySize.depthOrArrayLayers == 0) {
+            return 0;
+        }
+
+        // Check for potential overflows for the rest of the computations. We have the following
+        // inequalities:
+        //
+        //   bytesInLastRow <= bytesPerRow
+        //   heightInBlocks <= rowsPerImage
+        //
+        // So:
+        //
+        //   bytesInLastImage  = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
+        //                    <= bytesPerRow * heightInBlocks
+        //                    <= bytesPerRow * rowsPerImage
+        //                    <= bytesPerImage
+        //
+        // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
+        // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
+        ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
+                                                    rowsPerImage != wgpu::kCopyStrideUndefined));
+        uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+        DAWN_INVALID_IF(
+            bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+            "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
+            bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+            copySize.depthOrArrayLayers);
+
+        uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+        if (heightInBlocks > 0) {
+            ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
+            uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
+            requiredBytesInCopy += bytesInLastImage;
+        }
+        return requiredBytesInCopy;
+    }
+
+    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                            uint64_t offset,
+                                            uint64_t size) {
+        uint64_t bufferSize = buffer->GetSize();
+        bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
+        DAWN_INVALID_IF(!fitsInBuffer,
+                        "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
+                        size, buffer.Get(), bufferSize);
+
+        return {};
+    }
+
+    // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
+    // it.
+    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                              const TexelBlockInfo& blockInfo,
+                                              const Extent3D& copyExtent) {
+        ASSERT(layout != nullptr);
+        ASSERT(copyExtent.height % blockInfo.height == 0);
+        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+        if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
+            ASSERT(copyExtent.width % blockInfo.width == 0);
+            uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+            uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+            ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
+            layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
+        }
+        if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
+            ASSERT(copyExtent.depthOrArrayLayers <= 1);
+            layout->rowsPerImage = heightInBlocks;
+        }
+    }
+
+    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                         uint64_t byteSize,
+                                         const TexelBlockInfo& blockInfo,
+                                         const Extent3D& copyExtent) {
+        ASSERT(copyExtent.height % blockInfo.height == 0);
+        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+        // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
+        // validation message. Investigate ways to make it print as a more readable symbol.
+        DAWN_INVALID_IF(
+            copyExtent.depthOrArrayLayers > 1 &&
+                (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+                 layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+            "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
+            copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
+
+        DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
+                        "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
+                        heightInBlocks);
+
+        // Validation for other members in layout:
+        ASSERT(copyExtent.width % blockInfo.width == 0);
+        uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+        ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
+               std::numeric_limits<uint32_t>::max());
+        uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+        // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
+        // but they should get optimized out.
+        DAWN_INVALID_IF(
+            layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
+            "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
+            layout.bytesPerRow);
+
+        DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
+                            heightInBlocks > layout.rowsPerImage,
+                        "The height of each image in blocks (%u) is > rowsPerImage (%u).",
+                        heightInBlocks, layout.rowsPerImage);
+
+        // We compute required bytes in copy after validating texel block alignments
+        // because the divisibility conditions are necessary for the algorithm to be valid,
+        // also the bytesPerRow bound is necessary to avoid overflows.
+        uint64_t requiredBytesInCopy;
+        DAWN_TRY_ASSIGN(requiredBytesInCopy,
+                        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
+                                                   layout.rowsPerImage));
+
+        bool fitsInData =
+            layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+        DAWN_INVALID_IF(
+            !fitsInData,
+            "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
+            "offset (%u).",
+            requiredBytesInCopy, byteSize, layout.offset);
+
+        return {};
+    }
+
+    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                       const ImageCopyBuffer& imageCopyBuffer) {
+        DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
+        if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
+            DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
+                            "bytesPerRow (%u) is not a multiple of %u.",
+                            imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
+        }
+
+        return {};
+    }
+
+    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                        const ImageCopyTexture& textureCopy,
+                                        const Extent3D& copySize) {
+        const TextureBase* texture = textureCopy.texture;
+        DAWN_TRY(device->ValidateObject(texture));
+
+        DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
+                        "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
+                        textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
+
+        DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
+        DAWN_INVALID_IF(
+            SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+            "%s format (%s) does not have the selected aspect (%s).", texture,
+            texture->GetFormat().format, textureCopy.aspect);
+
+        if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
+            Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+            ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+            DAWN_INVALID_IF(
+                textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
+                    subresourceSize.width != copySize.width ||
+                    subresourceSize.height != copySize.height,
+                "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
+                "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
+                "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
+                &textureCopy.origin, &copySize, &subresourceSize, texture,
+                texture->GetFormat().format, texture->GetSampleCount());
+        }
+
+        return {};
+    }
+
+    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                        const ImageCopyTexture& textureCopy,
+                                        const Extent3D& copySize) {
+        const TextureBase* texture = textureCopy.texture;
+
+        // Validation for the copy being in-bounds:
+        Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+        // For 1D/2D textures, include the array layer as depth so it can be checked with other
+        // dimensions.
+        if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
+            mipSize.depthOrArrayLayers = texture->GetArrayLayers();
+        }
+        // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
+        // overflows.
+        DAWN_INVALID_IF(
+            static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+                    static_cast<uint64_t>(mipSize.width) ||
+                static_cast<uint64_t>(textureCopy.origin.y) +
+                        static_cast<uint64_t>(copySize.height) >
+                    static_cast<uint64_t>(mipSize.height) ||
+                static_cast<uint64_t>(textureCopy.origin.z) +
+                        static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+                    static_cast<uint64_t>(mipSize.depthOrArrayLayers),
+            "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
+            "size (%s).",
+            &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
+
+        // Validation for the texel block alignments:
+        const Format& format = textureCopy.texture->GetFormat();
+        if (format.isCompressed) {
+            const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
+            DAWN_INVALID_IF(
+                textureCopy.origin.x % blockInfo.width != 0,
+                "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
+                "width (%u).",
+                textureCopy.origin.x, blockInfo.width);
+            DAWN_INVALID_IF(
+                textureCopy.origin.y % blockInfo.height != 0,
+                "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
+                "height (%u).",
+                textureCopy.origin.y, blockInfo.height);
+            DAWN_INVALID_IF(
+                copySize.width % blockInfo.width != 0,
+                "copySize.width (%u) is not a multiple of compressed texture format block width "
+                "(%u).",
+                copySize.width, blockInfo.width);
+            DAWN_INVALID_IF(
+                copySize.height % blockInfo.height != 0,
+                "copySize.height (%u) is not a multiple of compressed texture format block "
+                "height (%u).",
+                copySize.height, blockInfo.height);
+        }
+
+        return {};
+    }
+
+    // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
+    // formats).
+    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
+        const Format& format = view.texture->GetFormat();
+        switch (view.aspect) {
+            case wgpu::TextureAspect::All: {
+                DAWN_INVALID_IF(
+                    !HasOneBit(format.aspects),
+                    "More than a single aspect (%s) is selected for multi-planar format (%s) in "
+                    "%s <-> linear data copy.",
+                    view.aspect, format.format, view.texture);
+
+                Aspect single = format.aspects;
+                return single;
+            }
+            case wgpu::TextureAspect::DepthOnly:
+                ASSERT(format.aspects & Aspect::Depth);
+                return Aspect::Depth;
+            case wgpu::TextureAspect::StencilOnly:
+                ASSERT(format.aspects & Aspect::Stencil);
+                return Aspect::Stencil;
+            case wgpu::TextureAspect::Plane0Only:
+            case wgpu::TextureAspect::Plane1Only:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
+        Aspect aspectUsed;
+        DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
+
+        const Format& format = dst.texture->GetFormat();
+        switch (format.format) {
+            case wgpu::TextureFormat::Depth16Unorm:
+                return {};
+            default:
+                DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
+                                "Cannot copy into the depth aspect of %s with format %s.",
+                                dst.texture, format.format);
+                break;
+        }
+
+        return {};
+    }
+
+    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                              const ImageCopyTexture& dst,
+                                                              const Extent3D& copySize) {
+        const uint32_t srcSamples = src.texture->GetSampleCount();
+        const uint32_t dstSamples = dst.texture->GetSampleCount();
+
+        DAWN_INVALID_IF(
+            srcSamples != dstSamples,
+            "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
+            src.texture, srcSamples, dst.texture, dstSamples);
+
+        // Metal cannot select a single aspect for texture-to-texture copies.
+        const Format& format = src.texture->GetFormat();
+        DAWN_INVALID_IF(
+            SelectFormatAspects(format, src.aspect) != format.aspects,
+            "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
+            src.texture, src.aspect, format.format);
+
+        DAWN_INVALID_IF(
+            SelectFormatAspects(format, dst.aspect) != format.aspects,
+            "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
+            "(%s).",
+            dst.texture, dst.aspect, format.format);
+
+        if (src.texture == dst.texture) {
+            switch (src.texture->GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
+                    return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
+
+                case wgpu::TextureDimension::e2D:
+                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
+                                        IsRangeOverlapped(src.origin.z, dst.origin.z,
+                                                          copySize.depthOrArrayLayers),
+                                    "Copy source and destination are overlapping layer ranges "
+                                    "([%u, %u) and [%u, %u)) of %s mip level %u",
+                                    src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
+                                    dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
+                                    src.texture, src.mipLevel);
+                    break;
+
+                case wgpu::TextureDimension::e3D:
+                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
+                                    "Copy is from %s mip level %u to itself.", src.texture,
+                                    src.mipLevel);
+                    break;
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                        const ImageCopyTexture& dst,
+                                                        const Extent3D& copySize) {
+        // Metal requires texture-to-texture copies happens between texture formats that equal to
+        // each other or only have diff on srgb-ness.
+        DAWN_INVALID_IF(
+            !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+            "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
+            src.texture, src.texture->GetFormat().format, dst.texture,
+            dst.texture->GetFormat().format);
+
+        return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+    }
+
+    MaybeError ValidateCanUseAs(const TextureBase* texture,
+                                wgpu::TextureUsage usage,
+                                UsageValidationMode mode) {
+        ASSERT(wgpu::HasZeroOrOneBits(usage));
+        switch (mode) {
+            case UsageValidationMode::Default:
+                DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
+                                texture, texture->GetUsage(), usage);
+                break;
+            case UsageValidationMode::Internal:
+                DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
+                                "%s internal usage (%s) doesn't include %s.", texture,
+                                texture->GetInternalUsage(), usage);
+                break;
+        }
+
+        return {};
+    }
+
+    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+        ASSERT(wgpu::HasZeroOrOneBits(usage));
+        DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
+                        buffer->GetUsage(), usage);
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CommandValidation.h b/src/dawn/native/CommandValidation.h
new file mode 100644
index 0000000..1cae7cc
--- /dev/null
+++ b/src/dawn/native/CommandValidation.h
@@ -0,0 +1,90 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDVALIDATION_H_
+#define DAWNNATIVE_COMMANDVALIDATION_H_
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Texture.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+    class QuerySetBase;
+    struct SyncScopeResourceUsage;
+    struct TexelBlockInfo;
+
+    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
+
+    MaybeError ValidateTimestampQuery(const DeviceBase* device,
+                                      const QuerySetBase* querySet,
+                                      uint32_t queryIndex);
+
+    MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                                   const BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   uint64_t size);
+
+    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                       const Extent3D& copySize,
+                                                       uint32_t bytesPerRow,
+                                                       uint32_t rowsPerImage);
+
+    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                              const TexelBlockInfo& blockInfo,
+                                              const Extent3D& copyExtent);
+    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                         uint64_t byteSize,
+                                         const TexelBlockInfo& blockInfo,
+                                         const Extent3D& copyExtent);
+    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                        const ImageCopyTexture& imageCopyTexture,
+                                        const Extent3D& copySize);
+    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
+
+    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                       const ImageCopyBuffer& imageCopyBuffer);
+    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                        const ImageCopyTexture& imageCopyTexture,
+                                        const Extent3D& copySize);
+
+    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                            uint64_t offset,
+                                            uint64_t size);
+
+    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+
+    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                              const ImageCopyTexture& dst,
+                                                              const Extent3D& copySize);
+    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                        const ImageCopyTexture& dst,
+                                                        const Extent3D& copySize);
+
+    enum class UsageValidationMode {
+        Default,
+        Internal,
+    };
+
+    MaybeError ValidateCanUseAs(const TextureBase* texture,
+                                wgpu::TextureUsage usage,
+                                UsageValidationMode mode);
+    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMANDVALIDATION_H_
diff --git a/src/dawn/native/Commands.cpp b/src/dawn/native/Commands.cpp
new file mode 100644
index 0000000..3337cbd
--- /dev/null
+++ b/src/dawn/native/Commands.cpp
@@ -0,0 +1,365 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Commands.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    void FreeCommands(CommandIterator* commands) {
+        commands->Reset();
+
+        Command type;
+        while (commands->NextCommandId(&type)) {
+            switch (type) {
+                case Command::BeginComputePass: {
+                    BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
+                    begin->~BeginComputePassCmd();
+                    break;
+                }
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
+                    begin->~BeginOcclusionQueryCmd();
+                    break;
+                }
+                case Command::BeginRenderPass: {
+                    BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
+                    begin->~BeginRenderPassCmd();
+                    break;
+                }
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
+                    copy->~CopyBufferToBufferCmd();
+                    break;
+                }
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
+                    copy->~CopyBufferToTextureCmd();
+                    break;
+                }
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
+                    copy->~CopyTextureToBufferCmd();
+                    break;
+                }
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        commands->NextCommand<CopyTextureToTextureCmd>();
+                    copy->~CopyTextureToTextureCmd();
+                    break;
+                }
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
+                    dispatch->~DispatchCmd();
+                    break;
+                }
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
+                    dispatch->~DispatchIndirectCmd();
+                    break;
+                }
+                case Command::Draw: {
+                    DrawCmd* draw = commands->NextCommand<DrawCmd>();
+                    draw->~DrawCmd();
+                    break;
+                }
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
+                    draw->~DrawIndexedCmd();
+                    break;
+                }
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
+                    draw->~DrawIndirectCmd();
+                    break;
+                }
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
+                    draw->~DrawIndexedIndirectCmd();
+                    break;
+                }
+                case Command::EndComputePass: {
+                    EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
+                    cmd->~EndComputePassCmd();
+                    break;
+                }
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
+                    cmd->~EndOcclusionQueryCmd();
+                    break;
+                }
+                case Command::EndRenderPass: {
+                    EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
+                    cmd->~EndRenderPassCmd();
+                    break;
+                }
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+                    for (size_t i = 0; i < cmd->count; ++i) {
+                        (&bundles[i])->~Ref<RenderBundleBase>();
+                    }
+                    cmd->~ExecuteBundlesCmd();
+                    break;
+                }
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
+                    cmd->~ClearBufferCmd();
+                    break;
+                }
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+                    commands->NextData<char>(cmd->length + 1);
+                    cmd->~InsertDebugMarkerCmd();
+                    break;
+                }
+                case Command::PopDebugGroup: {
+                    PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
+                    cmd->~PopDebugGroupCmd();
+                    break;
+                }
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+                    commands->NextData<char>(cmd->length + 1);
+                    cmd->~PushDebugGroupCmd();
+                    break;
+                }
+                case Command::ResolveQuerySet: {
+                    ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
+                    cmd->~ResolveQuerySetCmd();
+                    break;
+                }
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
+                    cmd->~SetComputePipelineCmd();
+                    break;
+                }
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
+                    cmd->~SetRenderPipelineCmd();
+                    break;
+                }
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
+                    cmd->~SetStencilReferenceCmd();
+                    break;
+                }
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
+                    cmd->~SetViewportCmd();
+                    break;
+                }
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
+                    cmd->~SetScissorRectCmd();
+                    break;
+                }
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
+                    cmd->~SetBlendConstantCmd();
+                    break;
+                }
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+                    if (cmd->dynamicOffsetCount > 0) {
+                        commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    cmd->~SetBindGroupCmd();
+                    break;
+                }
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
+                    cmd->~SetIndexBufferCmd();
+                    break;
+                }
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
+                    cmd->~SetVertexBufferCmd();
+                    break;
+                }
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
+                    commands->NextData<uint8_t>(write->size);
+                    write->~WriteBufferCmd();
+                    break;
+                }
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
+                    cmd->~WriteTimestampCmd();
+                    break;
+                }
+            }
+        }
+
+        commands->MakeEmptyAsDataWasDestroyed();
+    }
+
+    void SkipCommand(CommandIterator* commands, Command type) {
+        switch (type) {
+            case Command::BeginComputePass:
+                commands->NextCommand<BeginComputePassCmd>();
+                break;
+
+            case Command::BeginOcclusionQuery:
+                commands->NextCommand<BeginOcclusionQueryCmd>();
+                break;
+
+            case Command::BeginRenderPass:
+                commands->NextCommand<BeginRenderPassCmd>();
+                break;
+
+            case Command::CopyBufferToBuffer:
+                commands->NextCommand<CopyBufferToBufferCmd>();
+                break;
+
+            case Command::CopyBufferToTexture:
+                commands->NextCommand<CopyBufferToTextureCmd>();
+                break;
+
+            case Command::CopyTextureToBuffer:
+                commands->NextCommand<CopyTextureToBufferCmd>();
+                break;
+
+            case Command::CopyTextureToTexture:
+                commands->NextCommand<CopyTextureToTextureCmd>();
+                break;
+
+            case Command::Dispatch:
+                commands->NextCommand<DispatchCmd>();
+                break;
+
+            case Command::DispatchIndirect:
+                commands->NextCommand<DispatchIndirectCmd>();
+                break;
+
+            case Command::Draw:
+                commands->NextCommand<DrawCmd>();
+                break;
+
+            case Command::DrawIndexed:
+                commands->NextCommand<DrawIndexedCmd>();
+                break;
+
+            case Command::DrawIndirect:
+                commands->NextCommand<DrawIndirectCmd>();
+                break;
+
+            case Command::DrawIndexedIndirect:
+                commands->NextCommand<DrawIndexedIndirectCmd>();
+                break;
+
+            case Command::EndComputePass:
+                commands->NextCommand<EndComputePassCmd>();
+                break;
+
+            case Command::EndOcclusionQuery:
+                commands->NextCommand<EndOcclusionQueryCmd>();
+                break;
+
+            case Command::EndRenderPass:
+                commands->NextCommand<EndRenderPassCmd>();
+                break;
+
+            case Command::ExecuteBundles: {
+                auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+                commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+                break;
+            }
+
+            case Command::ClearBuffer:
+                commands->NextCommand<ClearBufferCmd>();
+                break;
+
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+                commands->NextData<char>(cmd->length + 1);
+                break;
+            }
+
+            case Command::PopDebugGroup:
+                commands->NextCommand<PopDebugGroupCmd>();
+                break;
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+                commands->NextData<char>(cmd->length + 1);
+                break;
+            }
+
+            case Command::ResolveQuerySet: {
+                commands->NextCommand<ResolveQuerySetCmd>();
+                break;
+            }
+
+            case Command::SetComputePipeline:
+                commands->NextCommand<SetComputePipelineCmd>();
+                break;
+
+            case Command::SetRenderPipeline:
+                commands->NextCommand<SetRenderPipelineCmd>();
+                break;
+
+            case Command::SetStencilReference:
+                commands->NextCommand<SetStencilReferenceCmd>();
+                break;
+
+            case Command::SetViewport:
+                commands->NextCommand<SetViewportCmd>();
+                break;
+
+            case Command::SetScissorRect:
+                commands->NextCommand<SetScissorRectCmd>();
+                break;
+
+            case Command::SetBlendConstant:
+                commands->NextCommand<SetBlendConstantCmd>();
+                break;
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+                if (cmd->dynamicOffsetCount > 0) {
+                    commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+                break;
+            }
+
+            case Command::SetIndexBuffer:
+                commands->NextCommand<SetIndexBufferCmd>();
+                break;
+
+            case Command::SetVertexBuffer: {
+                commands->NextCommand<SetVertexBufferCmd>();
+                break;
+            }
+
+            case Command::WriteBuffer:
+                commands->NextCommand<WriteBufferCmd>();
+                break;
+
+            case Command::WriteTimestamp: {
+                commands->NextCommand<WriteTimestampCmd>();
+                break;
+            }
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Commands.h b/src/dawn/native/Commands.h
new file mode 100644
index 0000000..3c2d8ab
--- /dev/null
+++ b/src/dawn/native/Commands.h
@@ -0,0 +1,302 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDS_H_
+#define DAWNNATIVE_COMMANDS_H_
+
+#include "dawn/common/Constants.h"
+
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    // Definition of the commands that are present in the CommandIterator given by the
+    // CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
+    // dependencies: Ref<Object> needs Object to be defined.
+
+    enum class Command {
+        BeginComputePass,
+        BeginOcclusionQuery,
+        BeginRenderPass,
+        ClearBuffer,
+        CopyBufferToBuffer,
+        CopyBufferToTexture,
+        CopyTextureToBuffer,
+        CopyTextureToTexture,
+        Dispatch,
+        DispatchIndirect,
+        Draw,
+        DrawIndexed,
+        DrawIndirect,
+        DrawIndexedIndirect,
+        EndComputePass,
+        EndOcclusionQuery,
+        EndRenderPass,
+        ExecuteBundles,
+        InsertDebugMarker,
+        PopDebugGroup,
+        PushDebugGroup,
+        ResolveQuerySet,
+        SetComputePipeline,
+        SetRenderPipeline,
+        SetStencilReference,
+        SetViewport,
+        SetScissorRect,
+        SetBlendConstant,
+        SetBindGroup,
+        SetIndexBuffer,
+        SetVertexBuffer,
+        WriteBuffer,
+        WriteTimestamp,
+    };
+
+    struct TimestampWrite {
+        Ref<QuerySetBase> querySet;
+        uint32_t queryIndex;
+    };
+
+    struct BeginComputePassCmd {
+        std::vector<TimestampWrite> timestampWrites;
+    };
+
+    struct BeginOcclusionQueryCmd {
+        Ref<QuerySetBase> querySet;
+        uint32_t queryIndex;
+    };
+
+    struct RenderPassColorAttachmentInfo {
+        Ref<TextureViewBase> view;
+        Ref<TextureViewBase> resolveTarget;
+        wgpu::LoadOp loadOp;
+        wgpu::StoreOp storeOp;
+        dawn::native::Color clearColor;
+    };
+
+    struct RenderPassDepthStencilAttachmentInfo {
+        Ref<TextureViewBase> view;
+        wgpu::LoadOp depthLoadOp;
+        wgpu::StoreOp depthStoreOp;
+        wgpu::LoadOp stencilLoadOp;
+        wgpu::StoreOp stencilStoreOp;
+        float clearDepth;
+        uint32_t clearStencil;
+        bool depthReadOnly;
+        bool stencilReadOnly;
+    };
+
+    struct BeginRenderPassCmd {
+        Ref<AttachmentState> attachmentState;
+        ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
+            colorAttachments;
+        RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
+
+        // Cache the width and height of all attachments for convenience
+        uint32_t width;
+        uint32_t height;
+
+        Ref<QuerySetBase> occlusionQuerySet;
+        std::vector<TimestampWrite> timestampWrites;
+    };
+
+    struct BufferCopy {
+        Ref<BufferBase> buffer;
+        uint64_t offset;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+    };
+
+    struct TextureCopy {
+        Ref<TextureBase> texture;
+        uint32_t mipLevel;
+        Origin3D origin;  // Texels / array layer
+        Aspect aspect;
+    };
+
+    struct CopyBufferToBufferCmd {
+        Ref<BufferBase> source;
+        uint64_t sourceOffset;
+        Ref<BufferBase> destination;
+        uint64_t destinationOffset;
+        uint64_t size;
+    };
+
+    struct CopyBufferToTextureCmd {
+        BufferCopy source;
+        TextureCopy destination;
+        Extent3D copySize;  // Texels
+    };
+
+    struct CopyTextureToBufferCmd {
+        TextureCopy source;
+        BufferCopy destination;
+        Extent3D copySize;  // Texels
+    };
+
+    struct CopyTextureToTextureCmd {
+        TextureCopy source;
+        TextureCopy destination;
+        Extent3D copySize;  // Texels
+    };
+
+    struct DispatchCmd {
+        uint32_t x;
+        uint32_t y;
+        uint32_t z;
+    };
+
+    struct DispatchIndirectCmd {
+        Ref<BufferBase> indirectBuffer;
+        uint64_t indirectOffset;
+    };
+
+    struct DrawCmd {
+        uint32_t vertexCount;
+        uint32_t instanceCount;
+        uint32_t firstVertex;
+        uint32_t firstInstance;
+    };
+
+    struct DrawIndexedCmd {
+        uint32_t indexCount;
+        uint32_t instanceCount;
+        uint32_t firstIndex;
+        int32_t baseVertex;
+        uint32_t firstInstance;
+    };
+
+    struct DrawIndirectCmd {
+        Ref<BufferBase> indirectBuffer;
+        uint64_t indirectOffset;
+    };
+
+    struct DrawIndexedIndirectCmd {
+        Ref<BufferBase> indirectBuffer;
+        uint64_t indirectOffset;
+    };
+
+    struct EndComputePassCmd {
+        std::vector<TimestampWrite> timestampWrites;
+    };
+
+    struct EndOcclusionQueryCmd {
+        Ref<QuerySetBase> querySet;
+        uint32_t queryIndex;
+    };
+
+    struct EndRenderPassCmd {
+        std::vector<TimestampWrite> timestampWrites;
+    };
+
+    struct ExecuteBundlesCmd {
+        uint32_t count;
+    };
+
+    struct ClearBufferCmd {
+        Ref<BufferBase> buffer;
+        uint64_t offset;
+        uint64_t size;
+    };
+
+    struct InsertDebugMarkerCmd {
+        uint32_t length;
+    };
+
+    struct PopDebugGroupCmd {};
+
+    struct PushDebugGroupCmd {
+        uint32_t length;
+    };
+
+    struct ResolveQuerySetCmd {
+        Ref<QuerySetBase> querySet;
+        uint32_t firstQuery;
+        uint32_t queryCount;
+        Ref<BufferBase> destination;
+        uint64_t destinationOffset;
+    };
+
+    struct SetComputePipelineCmd {
+        Ref<ComputePipelineBase> pipeline;
+    };
+
+    struct SetRenderPipelineCmd {
+        Ref<RenderPipelineBase> pipeline;
+    };
+
+    struct SetStencilReferenceCmd {
+        uint32_t reference;
+    };
+
+    struct SetViewportCmd {
+        float x, y, width, height, minDepth, maxDepth;
+    };
+
+    struct SetScissorRectCmd {
+        uint32_t x, y, width, height;
+    };
+
+    struct SetBlendConstantCmd {
+        Color color;
+    };
+
+    struct SetBindGroupCmd {
+        BindGroupIndex index;
+        Ref<BindGroupBase> group;
+        uint32_t dynamicOffsetCount;
+    };
+
+    struct SetIndexBufferCmd {
+        Ref<BufferBase> buffer;
+        wgpu::IndexFormat format;
+        uint64_t offset;
+        uint64_t size;
+    };
+
+    struct SetVertexBufferCmd {
+        VertexBufferSlot slot;
+        Ref<BufferBase> buffer;
+        uint64_t offset;
+        uint64_t size;
+    };
+
+    struct WriteBufferCmd {
+        Ref<BufferBase> buffer;
+        uint64_t offset;
+        uint64_t size;
+    };
+
+    struct WriteTimestampCmd {
+        Ref<QuerySetBase> querySet;
+        uint32_t queryIndex;
+    };
+
+    // This needs to be called before the CommandIterator is freed so that the Ref<> present in
+    // the commands have a chance to run their destructor and remove internal references.
+    class CommandIterator;
+    void FreeCommands(CommandIterator* commands);
+
+    // Helper function to allow skipping over a command when it is unimplemented, while still
+    // consuming the correct amount of data from the command iterator.
+    void SkipCommand(CommandIterator* commands, Command type);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMMANDS_H_
diff --git a/src/dawn/native/CompilationMessages.cpp b/src/dawn/native/CompilationMessages.cpp
new file mode 100644
index 0000000..47c3d0b
--- /dev/null
+++ b/src/dawn/native/CompilationMessages.cpp
@@ -0,0 +1,201 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CompilationMessages.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native {
+
+    namespace {
+
+        WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+            switch (severity) {
+                case tint::diag::Severity::Note:
+                    return WGPUCompilationMessageType_Info;
+                case tint::diag::Severity::Warning:
+                    return WGPUCompilationMessageType_Warning;
+                default:
+                    return WGPUCompilationMessageType_Error;
+            }
+        }
+
+    }  // anonymous namespace
+
+    OwnedCompilationMessages::OwnedCompilationMessages() {
+        mCompilationInfo.nextInChain = 0;
+        mCompilationInfo.messageCount = 0;
+        mCompilationInfo.messages = nullptr;
+    }
+
+    void OwnedCompilationMessages::AddMessageForTesting(std::string message,
+                                                        wgpu::CompilationMessageType type,
+                                                        uint64_t lineNum,
+                                                        uint64_t linePos,
+                                                        uint64_t offset,
+                                                        uint64_t length) {
+        // Cannot add messages after GetCompilationInfo has been called.
+        ASSERT(mCompilationInfo.messages == nullptr);
+
+        mMessageStrings.push_back(message);
+        mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
+                             lineNum, linePos, offset, length});
+    }
+
+    void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+        // Cannot add messages after GetCompilationInfo has been called.
+        ASSERT(mCompilationInfo.messages == nullptr);
+
+        // Tint line and column values are 1-based.
+        uint64_t lineNum = diagnostic.source.range.begin.line;
+        uint64_t linePos = diagnostic.source.range.begin.column;
+        // The offset is 0-based.
+        uint64_t offset = 0;
+        uint64_t length = 0;
+
+        if (lineNum && linePos && diagnostic.source.file) {
+            const auto& lines = diagnostic.source.file->content.lines;
+            size_t i = 0;
+            // To find the offset of the message position, loop through each of the first lineNum-1
+            // lines and add it's length (+1 to account for the line break) to the offset.
+            for (; i < lineNum - 1; ++i) {
+                offset += lines[i].length() + 1;
+            }
+
+            // If the end line is on a different line from the beginning line, add the length of the
+            // lines in between to the ending offset.
+            uint64_t endLineNum = diagnostic.source.range.end.line;
+            uint64_t endLinePos = diagnostic.source.range.end.column;
+
+            // If the range has a valid start but the end it not specified, clamp it to the start.
+            if (endLineNum == 0 || endLinePos == 0) {
+                endLineNum = lineNum;
+                endLinePos = linePos;
+            }
+
+            // Negative ranges aren't allowed
+            ASSERT(endLineNum >= lineNum);
+
+            uint64_t endOffset = offset;
+            for (; i < endLineNum - 1; ++i) {
+                endOffset += lines[i].length() + 1;
+            }
+
+            // Add the line positions to the offset and endOffset to get their final positions
+            // within the code string.
+            offset += linePos - 1;
+            endOffset += endLinePos - 1;
+
+            // Negative ranges aren't allowed
+            ASSERT(endOffset >= offset);
+
+            // The length of the message is the difference between the starting offset and the
+            // ending offset.
+            length = endOffset - offset;
+        }
+
+        if (diagnostic.code) {
+            mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
+        } else {
+            mMessageStrings.push_back(diagnostic.message);
+        }
+
+        mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
+                             lineNum, linePos, offset, length});
+    }
+
+    void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+        // Cannot add messages after GetCompilationInfo has been called.
+        ASSERT(mCompilationInfo.messages == nullptr);
+
+        for (const auto& diag : diagnostics) {
+            AddMessage(diag);
+        }
+
+        AddFormattedTintMessages(diagnostics);
+    }
+
+    void OwnedCompilationMessages::ClearMessages() {
+        // Cannot clear messages after GetCompilationInfo has been called.
+        ASSERT(mCompilationInfo.messages == nullptr);
+
+        mMessageStrings.clear();
+        mMessages.clear();
+    }
+
+    const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+        mCompilationInfo.messageCount = mMessages.size();
+        mCompilationInfo.messages = mMessages.data();
+
+        // Ensure every message points at the correct message string. Cannot do this earlier, since
+        // vector reallocations may move the pointers around.
+        for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
+            WGPUCompilationMessage& message = mMessages[i];
+            std::string& messageString = mMessageStrings[i];
+            message.message = messageString.c_str();
+        }
+
+        return &mCompilationInfo;
+    }
+
+    const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
+        return mFormattedTintMessages;
+    }
+
+    void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
+        tint::diag::List messageList;
+        size_t warningCount = 0;
+        size_t errorCount = 0;
+        for (auto& diag : diagnostics) {
+            switch (diag.severity) {
+                case (tint::diag::Severity::Fatal):
+                case (tint::diag::Severity::Error):
+                case (tint::diag::Severity::InternalCompilerError): {
+                    errorCount++;
+                    messageList.add(tint::diag::Diagnostic(diag));
+                    break;
+                }
+                case (tint::diag::Severity::Warning): {
+                    warningCount++;
+                    messageList.add(tint::diag::Diagnostic(diag));
+                    break;
+                }
+                default:
+                    break;
+            }
+        }
+        if (errorCount == 0 && warningCount == 0) {
+            return;
+        }
+        tint::diag::Formatter::Style style;
+        style.print_newline_at_end = false;
+        std::ostringstream t;
+        if (errorCount > 0) {
+            t << errorCount << " error(s) ";
+            if (warningCount > 0) {
+                t << "and ";
+            }
+        }
+        if (warningCount > 0) {
+            t << warningCount << " warning(s) ";
+        }
+        t << "generated while compiling the shader:" << std::endl
+          << tint::diag::Formatter{style}.format(messageList);
+        mFormattedTintMessages.push_back(t.str());
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CompilationMessages.h b/src/dawn/native/CompilationMessages.h
new file mode 100644
index 0000000..92e3346
--- /dev/null
+++ b/src/dawn/native/CompilationMessages.h
@@ -0,0 +1,62 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPILATIONMESSAGES_H_
+#define DAWNNATIVE_COMPILATIONMESSAGES_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/NonCopyable.h"
+
+#include <string>
+#include <vector>
+
+namespace tint::diag {
+    class Diagnostic;
+    class List;
+}  // namespace tint::diag
+
+namespace dawn::native {
+
+    class OwnedCompilationMessages : public NonCopyable {
+      public:
+        OwnedCompilationMessages();
+        ~OwnedCompilationMessages() = default;
+
+        void AddMessageForTesting(
+            std::string message,
+            wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
+            uint64_t lineNum = 0,
+            uint64_t linePos = 0,
+            uint64_t offset = 0,
+            uint64_t length = 0);
+        void AddMessages(const tint::diag::List& diagnostics);
+        void ClearMessages();
+
+        const WGPUCompilationInfo* GetCompilationInfo();
+        const std::vector<std::string>& GetFormattedTintMessages();
+
+      private:
+        void AddMessage(const tint::diag::Diagnostic& diagnostic);
+        void AddFormattedTintMessages(const tint::diag::List& diagnostics);
+
+        WGPUCompilationInfo mCompilationInfo;
+        std::vector<std::string> mMessageStrings;
+        std::vector<WGPUCompilationMessage> mMessages;
+        std::vector<std::string> mFormattedTintMessages;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMPILATIONMESSAGES_H_
diff --git a/src/dawn/native/ComputePassEncoder.cpp b/src/dawn/native/ComputePassEncoder.cpp
new file mode 100644
index 0000000..e825ef2
--- /dev/null
+++ b/src/dawn/native/ComputePassEncoder.cpp
@@ -0,0 +1,485 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ComputePassEncoder.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
+            DeviceBase* device) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+            if (store->dispatchIndirectValidationPipeline != nullptr) {
+                return store->dispatchIndirectValidationPipeline.Get();
+            }
+
+            // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
+            // shader in various failure modes.
+            // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
+            Ref<ShaderModuleBase> shaderModule;
+            DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
+                struct UniformParams {
+                    maxComputeWorkgroupsPerDimension: u32;
+                    clientOffsetInU32: u32;
+                    enableValidation: u32;
+                    duplicateNumWorkgroups: u32;
+                };
+
+                struct IndirectParams {
+                    data: array<u32>;
+                };
+
+                struct ValidatedParams {
+                    data: array<u32>;
+                };
+
+                @group(0) @binding(0) var<uniform> uniformParams: UniformParams;
+                @group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
+                @group(0) @binding(2) var<storage, write> validatedParams: ValidatedParams;
+
+                @stage(compute) @workgroup_size(1, 1, 1)
+                fn main() {
+                    for (var i = 0u; i < 3u; i = i + 1u) {
+                        var numWorkgroups = clientParams.data[uniformParams.clientOffsetInU32 + i];
+                        if (uniformParams.enableValidation > 0u &&
+                            numWorkgroups > uniformParams.maxComputeWorkgroupsPerDimension) {
+                            numWorkgroups = 0u;
+                        }
+                        validatedParams.data[i] = numWorkgroups;
+
+                        if (uniformParams.duplicateNumWorkgroups > 0u) {
+                             validatedParams.data[i + 3u] = numWorkgroups;
+                        }
+                    }
+                }
+            )"));
+
+            Ref<BindGroupLayoutBase> bindGroupLayout;
+            DAWN_TRY_ASSIGN(
+                bindGroupLayout,
+                utils::MakeBindGroupLayout(
+                    device,
+                    {
+                        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                        {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                        {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                    },
+                    /* allowInternalBinding */ true));
+
+            Ref<PipelineLayoutBase> pipelineLayout;
+            DAWN_TRY_ASSIGN(pipelineLayout,
+                            utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+            ComputePipelineDescriptor computePipelineDescriptor = {};
+            computePipelineDescriptor.layout = pipelineLayout.Get();
+            computePipelineDescriptor.compute.module = shaderModule.Get();
+            computePipelineDescriptor.compute.entryPoint = "main";
+
+            DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
+                            device->CreateComputePipeline(&computePipelineDescriptor));
+
+            return store->dispatchIndirectValidationPipeline.Get();
+        }
+
+    }  // namespace
+
+    ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+                                           const ComputePassDescriptor* descriptor,
+                                           CommandEncoder* commandEncoder,
+                                           EncodingContext* encodingContext,
+                                           std::vector<TimestampWrite> timestampWritesAtEnd)
+        : ProgrammableEncoder(device, descriptor->label, encodingContext),
+          mCommandEncoder(commandEncoder),
+          mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+        TrackInDevice();
+    }
+
+    // static
+    Ref<ComputePassEncoder> ComputePassEncoder::Create(
+        DeviceBase* device,
+        const ComputePassDescriptor* descriptor,
+        CommandEncoder* commandEncoder,
+        EncodingContext* encodingContext,
+        std::vector<TimestampWrite> timestampWritesAtEnd) {
+        return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder,
+                                                 encodingContext, std::move(timestampWritesAtEnd)));
+    }
+
+    ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+                                           CommandEncoder* commandEncoder,
+                                           EncodingContext* encodingContext,
+                                           ErrorTag errorTag)
+        : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
+    }
+
+    // static
+    Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
+                                                          CommandEncoder* commandEncoder,
+                                                          EncodingContext* encodingContext) {
+        return AcquireRef(
+            new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+    }
+
+    void ComputePassEncoder::DestroyImpl() {
+        // Ensure that the pass has exited. This is done for passes only since validation requires
+        // they exit before destruction while bundles do not.
+        mEncodingContext->EnsurePassExited(this);
+    }
+
+    ObjectType ComputePassEncoder::GetType() const {
+        return ObjectType::ComputePassEncoder;
+    }
+
+    void ComputePassEncoder::APIEnd() {
+        if (mEncodingContext->TryEncode(
+                this,
+                [&](CommandAllocator* allocator) -> MaybeError {
+                    if (IsValidationEnabled()) {
+                        DAWN_TRY(ValidateProgrammableEncoderEnd());
+                    }
+
+                    EndComputePassCmd* cmd =
+                        allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+                    // The query availability has already been updated at the beginning of compute
+                    // pass, and no need to do update here.
+                    cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
+
+                    return {};
+                },
+                "encoding %s.End().", this)) {
+            mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
+        }
+    }
+
+    void ComputePassEncoder::APIEndPass() {
+        GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+        APIEnd();
+    }
+
+    void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
+                                         uint32_t workgroupCountY,
+                                         uint32_t workgroupCountZ) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+                    uint32_t workgroupsPerDimension =
+                        GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+
+                    DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
+                                    "Dispatch workgroup count X (%u) exceeds max compute "
+                                    "workgroups per dimension (%u).",
+                                    workgroupCountX, workgroupsPerDimension);
+
+                    DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
+                                    "Dispatch workgroup count Y (%u) exceeds max compute "
+                                    "workgroups per dimension (%u).",
+                                    workgroupCountY, workgroupsPerDimension);
+
+                    DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
+                                    "Dispatch workgroup count Z (%u) exceeds max compute "
+                                    "workgroups per dimension (%u).",
+                                    workgroupCountZ, workgroupsPerDimension);
+                }
+
+                // Record the synchronization scope for Dispatch, which is just the current
+                // bindgroups.
+                AddDispatchSyncScope();
+
+                DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+                dispatch->x = workgroupCountX;
+                dispatch->y = workgroupCountY;
+                dispatch->z = workgroupCountZ;
+
+                return {};
+            },
+            "encoding %s.Dispatch(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
+            workgroupCountZ);
+    }
+
+    ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
+    ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
+                                                        uint64_t indirectOffset) {
+        DeviceBase* device = GetDevice();
+
+        const bool shouldDuplicateNumWorkgroups =
+            device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+                mCommandBufferState.GetComputePipeline());
+        if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
+            return std::make_pair(indirectBuffer, indirectOffset);
+        }
+
+        // Save the previous command buffer state so it can be restored after the
+        // validation inserts additional commands.
+        CommandBufferStateTracker previousState = mCommandBufferState;
+
+        auto* const store = device->GetInternalPipelineStore();
+
+        Ref<ComputePipelineBase> validationPipeline;
+        DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
+
+        Ref<BindGroupLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
+
+        uint32_t storageBufferOffsetAlignment =
+            device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+        // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
+        const uint32_t clientOffsetFromAlignedBoundary =
+            indirectOffset % storageBufferOffsetAlignment;
+        const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
+        const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
+
+        // Let the size of the binding be the additional offset, plus the size.
+        const uint64_t clientIndirectBindingSize =
+            kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
+
+        // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
+        // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
+        // non-host-shareable'.
+        struct UniformParams {
+            uint32_t maxComputeWorkgroupsPerDimension;
+            uint32_t clientOffsetInU32;
+            uint32_t enableValidation;
+            uint32_t duplicateNumWorkgroups;
+        };
+
+        // Create a uniform buffer to hold parameters for the shader.
+        Ref<BufferBase> uniformBuffer;
+        {
+            UniformParams params;
+            params.maxComputeWorkgroupsPerDimension =
+                device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+            params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
+            params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
+            params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
+
+            DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
+                                               device, wgpu::BufferUsage::Uniform, {params}));
+        }
+
+        // Reserve space in the scratch buffer to hold the validated indirect params.
+        ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
+        const uint64_t scratchBufferSize =
+            shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
+        DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
+        Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
+
+        Ref<BindGroupBase> validationBindGroup;
+        ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
+        DAWN_TRY_ASSIGN(validationBindGroup,
+                        utils::MakeBindGroup(device, layout,
+                                             {
+                                                 {0, uniformBuffer},
+                                                 {1, indirectBuffer, clientIndirectBindingOffset,
+                                                  clientIndirectBindingSize},
+                                                 {2, validatedIndirectBuffer, 0, scratchBufferSize},
+                                             }));
+
+        // Issue commands to validate the indirect buffer.
+        APISetPipeline(validationPipeline.Get());
+        APISetBindGroup(0, validationBindGroup.Get());
+        APIDispatch(1);
+
+        // Restore the state.
+        RestoreCommandBufferState(std::move(previousState));
+
+        // Return the new indirect buffer and indirect buffer offset.
+        return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+    }
+
+    void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
+                                                 uint64_t indirectOffset) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                    DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+                    DAWN_INVALID_IF(
+                        indirectOffset >= indirectBuffer->GetSize() ||
+                            indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
+                        "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
+                        "size (%u).",
+                        indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
+                }
+
+                SyncScopeUsageTracker scope;
+                scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+                mUsageTracker.AddReferencedBuffer(indirectBuffer);
+                // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
+                // is needed for correct usage validation even though it will only be bound for
+                // storage. This will unecessarily transition the |indirectBuffer| in
+                // the backend.
+
+                Ref<BufferBase> indirectBufferRef = indirectBuffer;
+
+                // Get applied indirect buffer with necessary changes on the original indirect
+                // buffer. For example,
+                // - Validate each indirect dispatch with a single dispatch to copy the indirect
+                //   buffer params into a scratch buffer if they're valid, and otherwise zero them
+                //   out.
+                // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
+                //   D3D12.
+                // - Directly return the original indirect dispatch buffer if we don't need any
+                //   transformations on it.
+                // We could consider moving the validation earlier in the pass after the last
+                // last point the indirect buffer was used with writable usage, as well as batch
+                // validation for multiple dispatches into one, but inserting commands at
+                // arbitrary points in the past is not possible right now.
+                DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
+                                TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
+
+                // If we have created a new scratch dispatch indirect buffer in
+                // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
+                if (indirectBufferRef.Get() != indirectBuffer) {
+                    // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
+                    // synchronization scope.
+                    scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
+                    mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
+                }
+
+                AddDispatchSyncScope(std::move(scope));
+
+                DispatchIndirectCmd* dispatch =
+                    allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
+                dispatch->indirectBuffer = std::move(indirectBufferRef);
+                dispatch->indirectOffset = indirectOffset;
+                return {};
+            },
+            "encoding %s.DispatchIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+    }
+
+    void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+                }
+
+                mCommandBufferState.SetComputePipeline(pipeline);
+
+                SetComputePipelineCmd* cmd =
+                    allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
+                cmd->pipeline = pipeline;
+
+                return {};
+            },
+            "encoding %s.SetPipeline(%s).", this, pipeline);
+    }
+
+    void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+                                             BindGroupBase* group,
+                                             uint32_t dynamicOffsetCount,
+                                             const uint32_t* dynamicOffsets) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                BindGroupIndex groupIndex(groupIndexIn);
+
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+                                                  dynamicOffsets));
+                }
+
+                mUsageTracker.AddResourcesReferencedByBindGroup(group);
+                RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+                                   dynamicOffsets);
+                mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
+                                                 dynamicOffsets);
+
+                return {};
+            },
+            "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
+            dynamicOffsetCount);
+    }
+
+    void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+                }
+
+                mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+
+                WriteTimestampCmd* cmd =
+                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+                cmd->querySet = querySet;
+                cmd->queryIndex = queryIndex;
+
+                return {};
+            },
+            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+    }
+
+    void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
+        PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
+        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
+        }
+        mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+    }
+
+    void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
+        // Encode commands for the backend to restore the pipeline and bind groups.
+        if (state.HasPipeline()) {
+            APISetPipeline(state.GetComputePipeline());
+        }
+        for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+            BindGroupBase* bg = state.GetBindGroup(i);
+            if (bg != nullptr) {
+                const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
+                if (offsets.empty()) {
+                    APISetBindGroup(static_cast<uint32_t>(i), bg);
+                } else {
+                    APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
+                }
+            }
+        }
+
+        // Restore the frontend state tracking information.
+        mCommandBufferState = std::move(state);
+    }
+
+    CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
+        return &mCommandBufferState;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ComputePassEncoder.h b/src/dawn/native/ComputePassEncoder.h
new file mode 100644
index 0000000..16dd11d
--- /dev/null
+++ b/src/dawn/native/ComputePassEncoder.h
@@ -0,0 +1,98 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPUTEPASSENCODER_H_
+#define DAWNNATIVE_COMPUTEPASSENCODER_H_
+
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/ProgrammableEncoder.h"
+
+namespace dawn::native {
+
+    class SyncScopeUsageTracker;
+
+    class ComputePassEncoder final : public ProgrammableEncoder {
+      public:
+        static Ref<ComputePassEncoder> Create(DeviceBase* device,
+                                              const ComputePassDescriptor* descriptor,
+                                              CommandEncoder* commandEncoder,
+                                              EncodingContext* encodingContext,
+                                              std::vector<TimestampWrite> timestampWritesAtEnd);
+        static Ref<ComputePassEncoder> MakeError(DeviceBase* device,
+                                                 CommandEncoder* commandEncoder,
+                                                 EncodingContext* encodingContext);
+
+        ObjectType GetType() const override;
+
+        void APIEnd();
+        void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
+
+        void APIDispatch(uint32_t workgroupCountX,
+                         uint32_t workgroupCountY = 1,
+                         uint32_t workgroupCountZ = 1);
+        void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+        void APISetPipeline(ComputePipelineBase* pipeline);
+
+        void APISetBindGroup(uint32_t groupIndex,
+                             BindGroupBase* group,
+                             uint32_t dynamicOffsetCount = 0,
+                             const uint32_t* dynamicOffsets = nullptr);
+
+        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+        CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
+        void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
+            RestoreCommandBufferState(std::move(state));
+        }
+
+      protected:
+        ComputePassEncoder(DeviceBase* device,
+                           const ComputePassDescriptor* descriptor,
+                           CommandEncoder* commandEncoder,
+                           EncodingContext* encodingContext,
+                           std::vector<TimestampWrite> timestampWritesAtEnd);
+        ComputePassEncoder(DeviceBase* device,
+                           CommandEncoder* commandEncoder,
+                           EncodingContext* encodingContext,
+                           ErrorTag errorTag);
+
+      private:
+        void DestroyImpl() override;
+
+        ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
+            Ref<BufferBase> indirectBuffer,
+            uint64_t indirectOffset);
+
+        void RestoreCommandBufferState(CommandBufferStateTracker state);
+
+        CommandBufferStateTracker mCommandBufferState;
+
+        // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
+        // records it in mUsageTracker.
+        void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
+        ComputePassResourceUsageTracker mUsageTracker;
+
+        // For render and compute passes, the encoding context is borrowed from the command encoder.
+        // Keep a reference to the encoder to make sure the context isn't freed.
+        Ref<CommandEncoder> mCommandEncoder;
+
+        std::vector<TimestampWrite> mTimestampWritesAtEnd;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMPUTEPASSENCODER_H_
diff --git a/src/dawn/native/ComputePipeline.cpp b/src/dawn/native/ComputePipeline.cpp
new file mode 100644
index 0000000..2de7f32
--- /dev/null
+++ b/src/dawn/native/ComputePipeline.cpp
@@ -0,0 +1,96 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+                                                 const ComputePipelineDescriptor* descriptor) {
+        if (descriptor->nextInChain != nullptr) {
+            return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
+        }
+
+        if (descriptor->layout != nullptr) {
+            DAWN_TRY(device->ValidateObject(descriptor->layout));
+        }
+
+        return ValidateProgrammableStage(
+            device, descriptor->compute.module, descriptor->compute.entryPoint,
+            descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
+            SingleShaderStage::Compute);
+    }
+
+    // ComputePipelineBase
+
+    ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
+                                             const ComputePipelineDescriptor* descriptor)
+        : PipelineBase(device,
+                       descriptor->layout,
+                       descriptor->label,
+                       {{SingleShaderStage::Compute, descriptor->compute.module,
+                         descriptor->compute.entryPoint, descriptor->compute.constantCount,
+                         descriptor->compute.constants}}) {
+        SetContentHash(ComputeContentHash());
+        TrackInDevice();
+    }
+
+    ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
+        TrackInDevice();
+    }
+
+    ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : PipelineBase(device, tag) {
+    }
+
+    ComputePipelineBase::~ComputePipelineBase() = default;
+
+    void ComputePipelineBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheComputePipeline(this);
+        }
+    }
+
+    // static
+    ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
+        class ErrorComputePipeline final : public ComputePipelineBase {
+          public:
+            ErrorComputePipeline(DeviceBase* device)
+                : ComputePipelineBase(device, ObjectBase::kError) {
+            }
+
+            MaybeError Initialize() override {
+                UNREACHABLE();
+                return {};
+            }
+        };
+
+        return new ErrorComputePipeline(device);
+    }
+
+    ObjectType ComputePipelineBase::GetType() const {
+        return ObjectType::ComputePipeline;
+    }
+
+    bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
+                                                       const ComputePipelineBase* b) const {
+        return PipelineBase::EqualForCache(a, b);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ComputePipeline.h b/src/dawn/native/ComputePipeline.h
new file mode 100644
index 0000000..1bd97d1
--- /dev/null
+++ b/src/dawn/native/ComputePipeline.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPUTEPIPELINE_H_
+#define DAWNNATIVE_COMPUTEPIPELINE_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/Pipeline.h"
+
+namespace dawn::native {
+
+    class DeviceBase;
+    struct EntryPointMetadata;
+
+    MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+                                                 const ComputePipelineDescriptor* descriptor);
+
+    class ComputePipelineBase : public PipelineBase {
+      public:
+        ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
+        ~ComputePipelineBase() override;
+
+        static ComputePipelineBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
+        struct EqualityFunc {
+            bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
+        };
+
+      protected:
+        // Constructor used only for mocking and testing.
+        ComputePipelineBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+      private:
+        ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COMPUTEPIPELINE_H_
diff --git a/src/dawn/native/CopyTextureForBrowserHelper.cpp b/src/dawn/native/CopyTextureForBrowserHelper.cpp
new file mode 100644
index 0000000..a72cedc
--- /dev/null
+++ b/src/dawn/native/CopyTextureForBrowserHelper.cpp
@@ -0,0 +1,604 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CopyTextureForBrowserHelper.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <unordered_set>
+
+namespace dawn::native {
+    namespace {
+
+        static const char sCopyTextureForBrowserShader[] = R"(
+            struct GammaTransferParams {
+                G: f32;
+                A: f32;
+                B: f32;
+                C: f32;
+                D: f32;
+                E: f32;
+                F: f32;
+                padding: u32;
+            };
+
+            struct Uniforms {                                            // offset   align   size
+                scale: vec2<f32>;                                        // 0        8       8
+                offset: vec2<f32>;                                       // 8        8       8
+                steps_mask: u32;                                         // 16       4       4
+                // implicit padding;                                     // 20               12
+                conversion_matrix: mat3x3<f32>;                          // 32       16      48
+                gamma_decoding_params: GammaTransferParams;              // 80       4       32
+                gamma_encoding_params: GammaTransferParams;              // 112      4       32
+                gamma_decoding_for_dst_srgb_params: GammaTransferParams; // 144      4       32
+            };
+
+            @binding(0) @group(0) var<uniform> uniforms : Uniforms;
+
+            struct VertexOutputs {
+                @location(0) texcoords : vec2<f32>;
+                @builtin(position) position : vec4<f32>;
+            };
+
+            // Chromium uses unified equation to construct gamma decoding function
+            // and gamma encoding function.
+            // The logic is:
+            //  if x < D
+            //      linear = C * x + F
+            //  nonlinear = pow(A * x + B, G) + E
+            // (https://source.chromium.org/chromium/chromium/src/+/main:ui/gfx/color_transform.cc;l=541)
+            // Expand the equation with sign() to make it handle all gamma conversions.
+            fn gamma_conversion(v: f32, params: GammaTransferParams) -> f32 {
+                // Linear part: C * x + F
+                if (abs(v) < params.D) {
+                    return sign(v) * (params.C * abs(v) + params.F);
+                }
+
+                // Gamma part: pow(A * x + B, G) + E
+                return sign(v) * (pow(params.A * abs(v) + params.B, params.G) + params.E);
+            }
+
+            @stage(vertex)
+            fn vs_main(
+                @builtin(vertex_index) VertexIndex : u32
+            ) -> VertexOutputs {
+                var texcoord = array<vec2<f32>, 3>(
+                    vec2<f32>(-0.5, 0.0),
+                    vec2<f32>( 1.5, 0.0),
+                    vec2<f32>( 0.5, 2.0));
+
+                var output : VertexOutputs;
+                output.position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
+
+                // Y component of scale is calculated by the copySizeHeight / textureHeight. Only
+                // flipY case can get negative number.
+                var flipY = uniforms.scale.y < 0.0;
+
+                // Texture coordinate takes top-left as origin point. We need to map the
+                // texture to triangle carefully.
+                if (flipY) {
+                    // We need to get the mirror positions(mirrored based on y = 0.5) on flip cases.
+                    // Adopt transform to src texture and then mapping it to triangle coord which
+                    // do a +1 shift on Y dimension will help us got that mirror position perfectly.
+                    output.texcoords = (texcoord[VertexIndex] * uniforms.scale + uniforms.offset) *
+                        vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
+                } else {
+                    // For the normal case, we need to get the exact position.
+                    // So mapping texture to triangle firstly then adopt the transform.
+                    output.texcoords = (texcoord[VertexIndex] *
+                        vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
+                        uniforms.scale + uniforms.offset;
+                }
+
+                return output;
+            }
+
+            @binding(1) @group(0) var mySampler: sampler;
+            @binding(2) @group(0) var myTexture: texture_2d<f32>;
+
+            @stage(fragment)
+            fn fs_main(
+                @location(0) texcoord : vec2<f32>
+            ) -> @location(0) vec4<f32> {
+                // Clamp the texcoord and discard the out-of-bound pixels.
+                var clampedTexcoord =
+                    clamp(texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
+                if (!all(clampedTexcoord == texcoord)) {
+                    discard;
+                }
+
+                // Swizzling of texture formats when sampling / rendering is handled by the
+                // hardware so we don't need special logic in this shader. This is covered by tests.
+                var color = textureSample(myTexture, mySampler, texcoord);
+
+                let kUnpremultiplyStep = 0x01u;
+                let kDecodeToLinearStep = 0x02u;
+                let kConvertToDstGamutStep = 0x04u;
+                let kEncodeToGammaStep = 0x08u;
+                let kPremultiplyStep = 0x10u;
+                let kDecodeForSrgbDstFormat = 0x20u;
+
+                // Unpremultiply step. Appling color space conversion op on premultiplied source texture
+                // also needs to unpremultiply first.
+                if (bool(uniforms.steps_mask & kUnpremultiplyStep)) {
+                    if (color.a != 0.0) {
+                        color = vec4<f32>(color.rgb / color.a, color.a);
+                    }
+                }
+
+                // Linearize the source color using the source color space’s
+                // transfer function if it is non-linear.
+                if (bool(uniforms.steps_mask & kDecodeToLinearStep)) {
+                    color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_decoding_params),
+                                      gamma_conversion(color.g, uniforms.gamma_decoding_params),
+                                      gamma_conversion(color.b, uniforms.gamma_decoding_params),
+                                      color.a);
+                }
+
+                // Convert unpremultiplied, linear source colors to the destination gamut by
+                // multiplying by a 3x3 matrix. Calculate transformFromXYZD50 * transformToXYZD50
+                // in CPU side and upload the final result in uniforms.
+                if (bool(uniforms.steps_mask & kConvertToDstGamutStep)) {
+                    color = vec4<f32>(uniforms.conversion_matrix * color.rgb, color.a);
+                }
+
+                // Encode that color using the inverse of the destination color
+                // space’s transfer function if it is non-linear.
+                if (bool(uniforms.steps_mask & kEncodeToGammaStep)) {
+                    color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_encoding_params),
+                                      gamma_conversion(color.g, uniforms.gamma_encoding_params),
+                                      gamma_conversion(color.b, uniforms.gamma_encoding_params),
+                                      color.a);
+                }
+
+                // Premultiply step.
+                if (bool(uniforms.steps_mask & kPremultiplyStep)) {
+                    color = vec4<f32>(color.rgb * color.a, color.a);
+                }
+
+                // Decode for copying from non-srgb formats to srgb formats
+                if (bool(uniforms.steps_mask & kDecodeForSrgbDstFormat)) {
+                    color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_decoding_for_dst_srgb_params),
+                                      gamma_conversion(color.g, uniforms.gamma_decoding_for_dst_srgb_params),
+                                      gamma_conversion(color.b, uniforms.gamma_decoding_for_dst_srgb_params),
+                                      color.a);
+                }
+
+                return color;
+            }
+        )";
+
+        // Follow the same order of skcms_TransferFunction
+        // https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
+        struct GammaTransferParams {
+            float G = 0.0;
+            float A = 0.0;
+            float B = 0.0;
+            float C = 0.0;
+            float D = 0.0;
+            float E = 0.0;
+            float F = 0.0;
+            uint32_t padding = 0;
+        };
+
+        struct Uniform {
+            float scaleX;
+            float scaleY;
+            float offsetX;
+            float offsetY;
+            uint32_t stepsMask = 0;
+            const std::array<uint32_t, 3> padding = {};  // 12 bytes padding
+            std::array<float, 12> conversionMatrix = {};
+            GammaTransferParams gammaDecodingParams = {};
+            GammaTransferParams gammaEncodingParams = {};
+            GammaTransferParams gammaDecodingForDstSrgbParams = {};
+        };
+        static_assert(sizeof(Uniform) == 176);
+
+        // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
+        // non-depth, non-stencil, non-compressed texture format pair copy.
+        MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
+                                                       const wgpu::TextureFormat dstFormat) {
+            switch (srcFormat) {
+                case wgpu::TextureFormat::BGRA8Unorm:
+                case wgpu::TextureFormat::RGBA8Unorm:
+                    break;
+                default:
+                    return DAWN_FORMAT_VALIDATION_ERROR(
+                        "Source texture format (%s) is not supported.", srcFormat);
+            }
+
+            switch (dstFormat) {
+                case wgpu::TextureFormat::R8Unorm:
+                case wgpu::TextureFormat::R16Float:
+                case wgpu::TextureFormat::R32Float:
+                case wgpu::TextureFormat::RG8Unorm:
+                case wgpu::TextureFormat::RG16Float:
+                case wgpu::TextureFormat::RG32Float:
+                case wgpu::TextureFormat::RGBA8Unorm:
+                case wgpu::TextureFormat::RGBA8UnormSrgb:
+                case wgpu::TextureFormat::BGRA8Unorm:
+                case wgpu::TextureFormat::BGRA8UnormSrgb:
+                case wgpu::TextureFormat::RGB10A2Unorm:
+                case wgpu::TextureFormat::RGBA16Float:
+                case wgpu::TextureFormat::RGBA32Float:
+                    break;
+                default:
+                    return DAWN_FORMAT_VALIDATION_ERROR(
+                        "Destination texture format (%s) is not supported.", dstFormat);
+            }
+
+            return {};
+        }
+
+        RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
+                                              wgpu::TextureFormat dstFormat) {
+            auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
+            if (pipeline != store->copyTextureForBrowserPipelines.end()) {
+                return pipeline->second.Get();
+            }
+            return nullptr;
+        }
+
+        ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
+            DeviceBase* device,
+            wgpu::TextureFormat dstFormat) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+            if (GetCachedPipeline(store, dstFormat) == nullptr) {
+                // Create vertex shader module if not cached before.
+                if (store->copyTextureForBrowser == nullptr) {
+                    DAWN_TRY_ASSIGN(
+                        store->copyTextureForBrowser,
+                        utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
+                }
+
+                ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
+
+                // Prepare vertex stage.
+                VertexState vertex = {};
+                vertex.module = shaderModule;
+                vertex.entryPoint = "vs_main";
+
+                // Prepare frgament stage.
+                FragmentState fragment = {};
+                fragment.module = shaderModule;
+                fragment.entryPoint = "fs_main";
+
+                // Prepare color state.
+                ColorTargetState target = {};
+                target.format = dstFormat;
+
+                // Create RenderPipeline.
+                RenderPipelineDescriptor renderPipelineDesc = {};
+
+                // Generate the layout based on shader modules.
+                renderPipelineDesc.layout = nullptr;
+
+                renderPipelineDesc.vertex = vertex;
+                renderPipelineDesc.fragment = &fragment;
+
+                renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+
+                fragment.targetCount = 1;
+                fragment.targets = &target;
+
+                Ref<RenderPipelineBase> pipeline;
+                DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
+                store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
+            }
+
+            return GetCachedPipeline(store, dstFormat);
+        }
+    }  // anonymous namespace
+
+    MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+                                             const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize,
+                                             const CopyTextureForBrowserOptions* options) {
+        DAWN_TRY(device->ValidateObject(source->texture));
+        DAWN_TRY(device->ValidateObject(destination->texture));
+
+        DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+                        "Source texture %s is destroyed.", source->texture);
+
+        DAWN_INVALID_IF(
+            destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+            "Destination texture %s is destroyed.", destination->texture);
+
+        DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
+                         "validating the ImageCopyTexture for the source");
+        DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
+                         "validating the ImageCopyTexture for the destination");
+
+        DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
+                         "validating that the copy fits in the source");
+        DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
+                         "validating that the copy fits in the destination");
+
+        DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
+
+        DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
+                        source->origin.z);
+        DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
+                        "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
+
+        DAWN_INVALID_IF(
+            source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
+            "The source texture sample count (%u) or the destination texture sample count (%u) is "
+            "not 1.",
+            source->texture->GetSampleCount(), destination->texture->GetSampleCount());
+
+        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                  UsageValidationMode::Default));
+        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
+                                  UsageValidationMode::Default));
+
+        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                  UsageValidationMode::Default));
+        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
+                                  UsageValidationMode::Default));
+
+        DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
+                                                     destination->texture->GetFormat().format));
+
+        DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
+
+        DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
+        DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
+
+        if (options->needsColorSpaceConversion) {
+            DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
+                            "srcTransferFunctionParameters is nullptr when doing color conversion");
+            DAWN_INVALID_IF(options->conversionMatrix == nullptr,
+                            "conversionMatrix is nullptr when doing color conversion");
+            DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
+                            "dstTransferFunctionParameters is nullptr when doing color conversion");
+        }
+        return {};
+    }
+
+    // Whether the format of dst texture of CopyTextureForBrowser() is srgb or non-srgb.
+    bool IsSrgbDstFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+                                       const ImageCopyTexture* source,
+                                       const ImageCopyTexture* destination,
+                                       const Extent3D* copySize,
+                                       const CopyTextureForBrowserOptions* options) {
+        // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
+        // copy to each other. This can be a potential fast path.
+
+        // Noop copy
+        if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
+            return {};
+        }
+
+        bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
+        RenderPipelineBase* pipeline;
+        DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
+                                      device, destination->texture->GetFormat().format));
+
+        // Prepare bind group layout.
+        Ref<BindGroupLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+        Extent3D srcTextureSize = source->texture->GetSize();
+
+        // Prepare binding 0 resource: uniform buffer.
+        Uniform uniformData = {
+            copySize->width / static_cast<float>(srcTextureSize.width),
+            copySize->height / static_cast<float>(srcTextureSize.height),  // scale
+            source->origin.x / static_cast<float>(srcTextureSize.width),
+            source->origin.y / static_cast<float>(srcTextureSize.height)  // offset
+        };
+
+        // Handle flipY. FlipY here means we flip the source texture firstly and then
+        // do copy. This helps on the case which source texture is flipped and the copy
+        // need to unpack the flip.
+        if (options->flipY) {
+            uniformData.scaleY *= -1.0;
+            uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
+        }
+
+        uint32_t stepsMask = 0u;
+
+        // Steps to do color space conversion
+        // From https://skia.org/docs/user/color/
+        // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
+        // management, and we need to divide it out if it’s multiplied in.
+        // - linearize the source color using the source color space’s transfer function
+        // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
+        // a 3x3 matrix.
+        // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
+        // - encode that color using the inverse of the destination color space’s transfer function.
+        // - premultiply by alpha if the destination is premultiplied.
+        // The reason to choose XYZ D50 as intermediate color space:
+        // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
+        // "Since the Lab TIFF specification, the ICC profile specification and
+        // Adobe Photoshop all use a D50"
+        constexpr uint32_t kUnpremultiplyStep = 0x01;
+        constexpr uint32_t kDecodeToLinearStep = 0x02;
+        constexpr uint32_t kConvertToDstGamutStep = 0x04;
+        constexpr uint32_t kEncodeToGammaStep = 0x08;
+        constexpr uint32_t kPremultiplyStep = 0x10;
+        constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
+
+        if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
+            if (options->needsColorSpaceConversion ||
+                options->srcAlphaMode != options->dstAlphaMode) {
+                stepsMask |= kUnpremultiplyStep;
+            }
+        }
+
+        if (options->needsColorSpaceConversion) {
+            stepsMask |= kDecodeToLinearStep;
+            const float* decodingParams = options->srcTransferFunctionParameters;
+
+            uniformData.gammaDecodingParams = {
+                decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
+                decodingParams[4], decodingParams[5], decodingParams[6]};
+
+            stepsMask |= kConvertToDstGamutStep;
+            const float* matrix = options->conversionMatrix;
+            uniformData.conversionMatrix = {{
+                matrix[0],
+                matrix[1],
+                matrix[2],
+                0.0,
+                matrix[3],
+                matrix[4],
+                matrix[5],
+                0.0,
+                matrix[6],
+                matrix[7],
+                matrix[8],
+                0.0,
+            }};
+
+            stepsMask |= kEncodeToGammaStep;
+            const float* encodingParams = options->dstTransferFunctionParameters;
+
+            uniformData.gammaEncodingParams = {
+                encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
+                encodingParams[4], encodingParams[5], encodingParams[6]};
+        }
+
+        if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
+            if (options->needsColorSpaceConversion ||
+                options->srcAlphaMode != options->dstAlphaMode) {
+                stepsMask |= kPremultiplyStep;
+            }
+        }
+
+        // Copy to *-srgb texture should keep the bytes exactly the same as copy
+        // to non-srgb texture. Add an extra decode-to-linear step so that after the
+        // sampler of *-srgb format texture applying encoding, the bytes keeps the same
+        // as non-srgb format texture.
+        // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format texture as
+        // source input. But above operation also valid for *-srgb format texture input and
+        // non-srgb format dst texture.
+        // TODO(crbug.com/dawn/1195): Reinterpret to non-srgb texture view on *-srgb texture
+        // and use it as render attachment when possible.
+        // TODO(crbug.com/dawn/1195): Opt the condition for this extra step. It is possible to
+        // bypass this extra step in some cases.
+        if (isSrgbDstFormat) {
+            stepsMask |= kDecodeForSrgbDstFormat;
+            // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+            // mathematics. Order: {G, A, B, C, D, E, F, }
+            uniformData.gammaDecodingForDstSrgbParams = {
+                2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
+        }
+
+        uniformData.stepsMask = stepsMask;
+
+        Ref<BufferBase> uniformBuffer;
+        DAWN_TRY_ASSIGN(
+            uniformBuffer,
+            utils::CreateBufferFromData(
+                device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
+
+        // Prepare binding 1 resource: sampler
+        // Use default configuration, filterMode set to Nearest for min and mag.
+        SamplerDescriptor samplerDesc = {};
+        Ref<SamplerBase> sampler;
+        DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
+
+        // Prepare binding 2 resource: sampled texture
+        TextureViewDescriptor srcTextureViewDesc = {};
+        srcTextureViewDesc.baseMipLevel = source->mipLevel;
+        srcTextureViewDesc.mipLevelCount = 1;
+        srcTextureViewDesc.arrayLayerCount = 1;
+        Ref<TextureViewBase> srcTextureView;
+        DAWN_TRY_ASSIGN(srcTextureView,
+                        device->CreateTextureView(source->texture, &srcTextureViewDesc));
+
+        // Create bind group after all binding entries are set.
+        Ref<BindGroupBase> bindGroup;
+        DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
+                                       device, layout,
+                                       {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+
+        // Create command encoder.
+        Ref<CommandEncoder> encoder;
+        DAWN_TRY_ASSIGN(encoder, device->CreateCommandEncoder());
+
+        // Prepare dst texture view as color Attachment.
+        TextureViewDescriptor dstTextureViewDesc;
+        dstTextureViewDesc.baseMipLevel = destination->mipLevel;
+        dstTextureViewDesc.mipLevelCount = 1;
+        dstTextureViewDesc.baseArrayLayer = destination->origin.z;
+        dstTextureViewDesc.arrayLayerCount = 1;
+        Ref<TextureViewBase> dstView;
+
+        DAWN_TRY_ASSIGN(dstView,
+                        device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+        // Prepare render pass color attachment descriptor.
+        RenderPassColorAttachment colorAttachmentDesc;
+
+        colorAttachmentDesc.view = dstView.Get();
+        colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
+        colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
+        colorAttachmentDesc.clearValue = {0.0, 0.0, 0.0, 1.0};
+
+        // Create render pass.
+        RenderPassDescriptor renderPassDesc;
+        renderPassDesc.colorAttachmentCount = 1;
+        renderPassDesc.colorAttachments = &colorAttachmentDesc;
+        Ref<RenderPassEncoder> passEncoder = encoder->BeginRenderPass(&renderPassDesc);
+
+        // Start pipeline  and encode commands to complete
+        // the copy from src texture to dst texture with transformation.
+        passEncoder->APISetPipeline(pipeline);
+        passEncoder->APISetBindGroup(0, bindGroup.Get());
+        passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
+                                    copySize->height, 0.0, 1.0);
+        passEncoder->APIDraw(3);
+        passEncoder->APIEnd();
+
+        // Finsh encoding.
+        Ref<CommandBufferBase> commandBuffer;
+        DAWN_TRY_ASSIGN(commandBuffer, encoder->Finish());
+        CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
+
+        // Submit command buffer.
+        device->GetQueue()->APISubmit(1, &submitCommandBuffer);
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/CopyTextureForBrowserHelper.h b/src/dawn/native/CopyTextureForBrowserHelper.h
new file mode 100644
index 0000000..de82f5f
--- /dev/null
+++ b/src/dawn/native/CopyTextureForBrowserHelper.h
@@ -0,0 +1,41 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
+#define DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+
+namespace dawn::native {
+    class DeviceBase;
+    struct Extent3D;
+    struct ImageCopyTexture;
+    struct CopyTextureForBrowserOptions;
+
+    MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+                                             const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize,
+                                             const CopyTextureForBrowserOptions* options);
+
+    MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+                                       const ImageCopyTexture* source,
+                                       const ImageCopyTexture* destination,
+                                       const Extent3D* copySize,
+                                       const CopyTextureForBrowserOptions* options);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
diff --git a/src/dawn/native/CreatePipelineAsyncTask.cpp b/src/dawn/native/CreatePipelineAsyncTask.cpp
new file mode 100644
index 0000000..92a3bdf
--- /dev/null
+++ b/src/dawn/native/CreatePipelineAsyncTask.cpp
@@ -0,0 +1,206 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+
+#include "dawn/native/AsyncTask.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native {
+
+    CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
+        std::string errorMessage,
+        void* userdata)
+        : mErrorMessage(errorMessage), mUserData(userdata) {
+    }
+
+    CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
+        Ref<ComputePipelineBase> pipeline,
+        std::string errorMessage,
+        WGPUCreateComputePipelineAsyncCallback callback,
+        void* userdata)
+        : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+          mPipeline(std::move(pipeline)),
+          mCreateComputePipelineAsyncCallback(callback) {
+    }
+
+    void CreateComputePipelineAsyncCallbackTask::Finish() {
+        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+        if (mPipeline.Get() != nullptr) {
+            mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                                ToAPI(mPipeline.Detach()), "", mUserData);
+        } else {
+            mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+                                                mErrorMessage.c_str(), mUserData);
+        }
+    }
+
+    void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
+        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                                            "Device destroyed before callback", mUserData);
+    }
+
+    void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
+        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                                            "Device lost before callback", mUserData);
+    }
+
+    CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
+        Ref<RenderPipelineBase> pipeline,
+        std::string errorMessage,
+        WGPUCreateRenderPipelineAsyncCallback callback,
+        void* userdata)
+        : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+          mPipeline(std::move(pipeline)),
+          mCreateRenderPipelineAsyncCallback(callback) {
+    }
+
+    void CreateRenderPipelineAsyncCallbackTask::Finish() {
+        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+        if (mPipeline.Get() != nullptr) {
+            mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                               ToAPI(mPipeline.Detach()), "", mUserData);
+        } else {
+            mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+                                               mErrorMessage.c_str(), mUserData);
+        }
+    }
+
+    void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
+        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                                           "Device destroyed before callback", mUserData);
+    }
+
+    void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
+        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                                           "Device lost before callback", mUserData);
+    }
+
+    CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
+        Ref<ComputePipelineBase> nonInitializedComputePipeline,
+        WGPUCreateComputePipelineAsyncCallback callback,
+        void* userdata)
+        : mComputePipeline(std::move(nonInitializedComputePipeline)),
+          mCallback(callback),
+          mUserdata(userdata) {
+        ASSERT(mComputePipeline != nullptr);
+    }
+
+    void CreateComputePipelineAsyncTask::Run() {
+        const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
+
+        DeviceBase* device = mComputePipeline->GetDevice();
+        TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
+                              "CreateComputePipelineAsyncTask::RunAsync", this, "label",
+                              eventLabel);
+        TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
+                     eventLabel);
+
+        MaybeError maybeError = mComputePipeline->Initialize();
+        std::string errorMessage;
+        if (maybeError.IsError()) {
+            mComputePipeline = nullptr;
+            errorMessage = maybeError.AcquireError()->GetMessage();
+        }
+
+        device->AddComputePipelineAsyncCallbackTask(mComputePipeline, errorMessage, mCallback,
+                                                    mUserdata);
+    }
+
+    void CreateComputePipelineAsyncTask::RunAsync(
+        std::unique_ptr<CreateComputePipelineAsyncTask> task) {
+        DeviceBase* device = task->mComputePipeline->GetDevice();
+
+        const char* eventLabel =
+            utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+
+        // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+        // since C++14:
+        // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+        auto asyncTask = [taskPtr = task.release()] {
+            std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
+            innnerTaskPtr->Run();
+        };
+
+        TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+                                "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
+                                eventLabel);
+        device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+    }
+
+    CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+        Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+        WGPUCreateRenderPipelineAsyncCallback callback,
+        void* userdata)
+        : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
+          mCallback(callback),
+          mUserdata(userdata) {
+        ASSERT(mRenderPipeline != nullptr);
+    }
+
+    void CreateRenderPipelineAsyncTask::Run() {
+        const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
+
+        DeviceBase* device = mRenderPipeline->GetDevice();
+        TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
+                              "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
+        TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
+                     eventLabel);
+
+        MaybeError maybeError = mRenderPipeline->Initialize();
+        std::string errorMessage;
+        if (maybeError.IsError()) {
+            mRenderPipeline = nullptr;
+            errorMessage = maybeError.AcquireError()->GetMessage();
+        }
+
+        device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback,
+                                                   mUserdata);
+    }
+
+    void CreateRenderPipelineAsyncTask::RunAsync(
+        std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+        DeviceBase* device = task->mRenderPipeline->GetDevice();
+
+        const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
+
+        // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+        // since C++14:
+        // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+        auto asyncTask = [taskPtr = task.release()] {
+            std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
+            innerTaskPtr->Run();
+        };
+
+        TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+                                "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
+                                eventLabel);
+        device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/CreatePipelineAsyncTask.h b/src/dawn/native/CreatePipelineAsyncTask.h
new file mode 100644
index 0000000..4b936cf
--- /dev/null
+++ b/src/dawn/native/CreatePipelineAsyncTask.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+#define DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/CallbackTaskManager.h"
+#include "dawn/native/Error.h"
+#include "dawn/webgpu.h"
+
+namespace dawn::native {
+
+    class ComputePipelineBase;
+    class DeviceBase;
+    class PipelineLayoutBase;
+    class RenderPipelineBase;
+    class ShaderModuleBase;
+    struct FlatComputePipelineDescriptor;
+
+    struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
+        CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
+
+      protected:
+        std::string mErrorMessage;
+        void* mUserData;
+    };
+
+    struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+        CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+                                               std::string errorMessage,
+                                               WGPUCreateComputePipelineAsyncCallback callback,
+                                               void* userdata);
+
+        void Finish() override;
+        void HandleShutDown() final;
+        void HandleDeviceLoss() final;
+
+      protected:
+        Ref<ComputePipelineBase> mPipeline;
+        WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
+    };
+
+    struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+        CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+                                              std::string errorMessage,
+                                              WGPUCreateRenderPipelineAsyncCallback callback,
+                                              void* userdata);
+
+        void Finish() override;
+        void HandleShutDown() final;
+        void HandleDeviceLoss() final;
+
+      protected:
+        Ref<RenderPipelineBase> mPipeline;
+        WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
+    };
+
+    // CreateComputePipelineAsyncTask defines all the inputs and outputs of
+    // CreateComputePipelineAsync() tasks, which are the same among all the backends.
+    class CreateComputePipelineAsyncTask {
+      public:
+        CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
+                                       WGPUCreateComputePipelineAsyncCallback callback,
+                                       void* userdata);
+
+        void Run();
+
+        static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
+
+      private:
+        Ref<ComputePipelineBase> mComputePipeline;
+        WGPUCreateComputePipelineAsyncCallback mCallback;
+        void* mUserdata;
+    };
+
+    // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
+    // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
+    class CreateRenderPipelineAsyncTask {
+      public:
+        CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+                                      WGPUCreateRenderPipelineAsyncCallback callback,
+                                      void* userdata);
+
+        void Run();
+
+        static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
+
+      private:
+        Ref<RenderPipelineBase> mRenderPipeline;
+        WGPUCreateRenderPipelineAsyncCallback mCallback;
+        void* mUserdata;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
diff --git a/src/dawn/native/DawnNative.cpp b/src/dawn/native/DawnNative.cpp
new file mode 100644
index 0000000..ca46df8
--- /dev/null
+++ b/src/dawn/native/DawnNative.cpp
@@ -0,0 +1,312 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/DawnNative.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Texture.h"
+#include "dawn/platform/DawnPlatform.h"
+
+// Contains the entry-points into dawn_native
+
+namespace dawn::native {
+
+    namespace {
+        struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
+            ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
+                dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
+                                      "WGPUDeviceDescriptor instead.";
+
+                DeviceDescriptor* desc = this;
+
+                if (deviceDescriptor != nullptr) {
+                    desc->nextInChain = &mTogglesDesc;
+                    mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
+                    mTogglesDesc.forceEnabledTogglesCount =
+                        deviceDescriptor->forceEnabledToggles.size();
+                    mTogglesDesc.forceDisabledToggles =
+                        deviceDescriptor->forceDisabledToggles.data();
+                    mTogglesDesc.forceDisabledTogglesCount =
+                        deviceDescriptor->forceDisabledToggles.size();
+
+                    desc->requiredLimits =
+                        reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
+
+                    FeaturesInfo featuresInfo;
+                    for (const char* featureStr : deviceDescriptor->requiredFeatures) {
+                        mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
+                    }
+                    desc->requiredFeatures = mRequiredFeatures.data();
+                    desc->requiredFeaturesCount = mRequiredFeatures.size();
+                }
+            }
+
+            DawnTogglesDeviceDescriptor mTogglesDesc = {};
+            std::vector<wgpu::FeatureName> mRequiredFeatures = {};
+        };
+    }  // namespace
+
+    const DawnProcTable& GetProcsAutogen();
+
+    const DawnProcTable& GetProcs() {
+        return GetProcsAutogen();
+    }
+
+    std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
+        return FromAPI(device)->GetTogglesUsed();
+    }
+
+    // Adapter
+
+    Adapter::Adapter() = default;
+
+    Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
+        if (mImpl != nullptr) {
+            mImpl->Reference();
+        }
+    }
+
+    Adapter::~Adapter() {
+        if (mImpl != nullptr) {
+            mImpl->Release();
+        }
+        mImpl = nullptr;
+    }
+
+    Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
+    }
+
+    Adapter& Adapter::operator=(const Adapter& other) {
+        if (this != &other) {
+            if (mImpl) {
+                mImpl->Release();
+            }
+            mImpl = other.mImpl;
+            if (mImpl) {
+                mImpl->Reference();
+            }
+        }
+        return *this;
+    }
+
+    void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
+        GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
+    }
+
+    void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+        mImpl->APIGetProperties(FromAPI(properties));
+    }
+
+    WGPUAdapter Adapter::Get() const {
+        return ToAPI(mImpl);
+    }
+
+    std::vector<const char*> Adapter::GetSupportedFeatures() const {
+        FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
+        return supportedFeaturesSet.GetEnabledFeatureNames();
+    }
+
+    WGPUDeviceProperties Adapter::GetAdapterProperties() const {
+        return mImpl->GetAdapterProperties();
+    }
+
+    bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+        return mImpl->GetLimits(FromAPI(limits));
+    }
+
+    void Adapter::SetUseTieredLimits(bool useTieredLimits) {
+        mImpl->SetUseTieredLimits(useTieredLimits);
+    }
+
+    bool Adapter::SupportsExternalImages() const {
+        return mImpl->SupportsExternalImages();
+    }
+
+    Adapter::operator bool() const {
+        return mImpl != nullptr;
+    }
+
+    WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
+        ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
+        return ToAPI(mImpl->APICreateDevice(&desc));
+    }
+
+    WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
+        return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
+    }
+
+    WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
+        return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
+    }
+
+    void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
+                                WGPURequestDeviceCallback callback,
+                                void* userdata) {
+        ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
+        mImpl->APIRequestDevice(&desc, callback, userdata);
+    }
+
+    void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+                                WGPURequestDeviceCallback callback,
+                                void* userdata) {
+        mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+                                userdata);
+    }
+
+    void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                                WGPURequestDeviceCallback callback,
+                                void* userdata) {
+        mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+                                userdata);
+    }
+
+    void Adapter::ResetInternalDeviceForTesting() {
+        mImpl->ResetInternalDeviceForTesting();
+    }
+
+    // AdapterDiscoverOptionsBase
+
+    AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
+        : backendType(type) {
+    }
+
+    // Instance
+
+    Instance::Instance(const WGPUInstanceDescriptor* desc)
+        : mImpl(InstanceBase::Create(reinterpret_cast<const InstanceDescriptor*>(desc))) {
+    }
+
+    Instance::~Instance() {
+        if (mImpl != nullptr) {
+            mImpl->Release();
+            mImpl = nullptr;
+        }
+    }
+
+    void Instance::DiscoverDefaultAdapters() {
+        mImpl->DiscoverDefaultAdapters();
+    }
+
+    bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+        return mImpl->DiscoverAdapters(options);
+    }
+
+    std::vector<Adapter> Instance::GetAdapters() const {
+        // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
+        std::vector<Adapter> adapters;
+        for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
+            adapters.push_back({adapter.Get()});
+        }
+        return adapters;
+    }
+
+    const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
+        return mImpl->GetToggleInfo(toggleName);
+    }
+
+    const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
+        return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
+    }
+
+    void Instance::EnableBackendValidation(bool enableBackendValidation) {
+        if (enableBackendValidation) {
+            mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
+        }
+    }
+
+    void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
+        mImpl->SetBackendValidationLevel(level);
+    }
+
+    void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+        mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
+    }
+
+    void Instance::SetPlatform(dawn::platform::Platform* platform) {
+        mImpl->SetPlatform(platform);
+    }
+
+    WGPUInstance Instance::Get() const {
+        return ToAPI(mImpl);
+    }
+
+    size_t GetLazyClearCountForTesting(WGPUDevice device) {
+        return FromAPI(device)->GetLazyClearCountForTesting();
+    }
+
+    size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
+        return FromAPI(device)->GetDeprecationWarningCountForTesting();
+    }
+
+    bool IsTextureSubresourceInitialized(WGPUTexture texture,
+                                         uint32_t baseMipLevel,
+                                         uint32_t levelCount,
+                                         uint32_t baseArrayLayer,
+                                         uint32_t layerCount,
+                                         WGPUTextureAspect cAspect) {
+        TextureBase* textureBase = FromAPI(texture);
+
+        Aspect aspect =
+            ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
+        SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
+        return textureBase->IsSubresourceContentInitialized(range);
+    }
+
+    std::vector<const char*> GetProcMapNamesForTestingInternal();
+
+    std::vector<const char*> GetProcMapNamesForTesting() {
+        return GetProcMapNamesForTestingInternal();
+    }
+
+    DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
+        return FromAPI(device)->APITick();
+    }
+
+    // ExternalImageDescriptor
+
+    ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
+    }
+
+    ExternalImageType ExternalImageDescriptor::GetType() const {
+        return mType;
+    }
+
+    // ExternalImageExportInfo
+
+    ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
+    }
+
+    ExternalImageType ExternalImageExportInfo::GetType() const {
+        return mType;
+    }
+
+    const char* GetObjectLabelForTesting(void* objectHandle) {
+        ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
+        return object->GetLabel().c_str();
+    }
+
+    uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
+        return FromAPI(buffer)->GetAllocatedSize();
+    }
+
+    bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
+        bool excludePipelineCompatibiltyToken = true;
+        return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Device.cpp b/src/dawn/native/Device.cpp
new file mode 100644
index 0000000..f79ead5
--- /dev/null
+++ b/src/dawn/native/Device.cpp
@@ -0,0 +1,1806 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Device.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/AsyncTask.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ErrorInjector.h"
+#include "dawn/native/ErrorScope.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PersistentCache.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderBundleEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/SwapChain.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <array>
+#include <mutex>
+#include <unordered_set>
+
+namespace dawn::native {
+
+    // DeviceBase sub-structures
+
+    // The caches are unordered_sets of pointers with special hash and compare functions
+    // to compare the value of the objects, instead of the pointers.
+    template <typename Object>
+    using ContentLessObjectCache =
+        std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
+
+    struct DeviceBase::Caches {
+        ~Caches() {
+            ASSERT(attachmentStates.empty());
+            ASSERT(bindGroupLayouts.empty());
+            ASSERT(computePipelines.empty());
+            ASSERT(pipelineLayouts.empty());
+            ASSERT(renderPipelines.empty());
+            ASSERT(samplers.empty());
+            ASSERT(shaderModules.empty());
+        }
+
+        ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
+        ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
+        ContentLessObjectCache<ComputePipelineBase> computePipelines;
+        ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
+        ContentLessObjectCache<RenderPipelineBase> renderPipelines;
+        ContentLessObjectCache<SamplerBase> samplers;
+        ContentLessObjectCache<ShaderModuleBase> shaderModules;
+    };
+
+    struct DeviceBase::DeprecationWarnings {
+        std::unordered_set<std::string> emitted;
+        size_t count = 0;
+    };
+
+    namespace {
+        struct LoggingCallbackTask : CallbackTask {
+          public:
+            LoggingCallbackTask() = delete;
+            LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
+                                WGPULoggingType loggingType,
+                                const char* message,
+                                void* userdata)
+                : mCallback(loggingCallback),
+                  mLoggingType(loggingType),
+                  mMessage(message),
+                  mUserdata(userdata) {
+                // Since the Finish() will be called in uncertain future in which time the message
+                // may already disposed, we must keep a local copy in the CallbackTask.
+            }
+
+            void Finish() override {
+                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+            }
+
+            void HandleShutDown() override {
+                // Do the logging anyway
+                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+            }
+
+            void HandleDeviceLoss() override {
+                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+            }
+
+          private:
+            // As all deferred callback tasks will be triggered before modifying the registered
+            // callback or shutting down, we are ensured that callback function and userdata pointer
+            // stored in tasks is valid when triggered.
+            wgpu::LoggingCallback mCallback;
+            WGPULoggingType mLoggingType;
+            std::string mMessage;
+            void* mUserdata;
+        };
+
+        ResultOrError<Ref<PipelineLayoutBase>>
+        ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+            DeviceBase* device,
+            const ComputePipelineDescriptor& descriptor,
+            ComputePipelineDescriptor* outDescriptor) {
+            Ref<PipelineLayoutBase> layoutRef;
+            *outDescriptor = descriptor;
+
+            if (outDescriptor->layout == nullptr) {
+                DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+                                               device, {{
+                                                           SingleShaderStage::Compute,
+                                                           outDescriptor->compute.module,
+                                                           outDescriptor->compute.entryPoint,
+                                                           outDescriptor->compute.constantCount,
+                                                           outDescriptor->compute.constants,
+                                                       }}));
+                outDescriptor->layout = layoutRef.Get();
+            }
+
+            return layoutRef;
+        }
+
+        ResultOrError<Ref<PipelineLayoutBase>>
+        ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+            DeviceBase* device,
+            const RenderPipelineDescriptor& descriptor,
+            RenderPipelineDescriptor* outDescriptor) {
+            Ref<PipelineLayoutBase> layoutRef;
+            *outDescriptor = descriptor;
+
+            if (descriptor.layout == nullptr) {
+                // Ref will keep the pipeline layout alive until the end of the function where
+                // the pipeline will take another reference.
+                DAWN_TRY_ASSIGN(layoutRef,
+                                PipelineLayoutBase::CreateDefault(
+                                    device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
+                outDescriptor->layout = layoutRef.Get();
+            }
+
+            return layoutRef;
+        }
+
+    }  // anonymous namespace
+
+    // DeviceBase
+
+    DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
+        : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
+        ASSERT(descriptor != nullptr);
+
+        const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &togglesDesc);
+        if (togglesDesc != nullptr) {
+            ApplyToggleOverrides(togglesDesc);
+        }
+        ApplyFeatures(descriptor);
+
+        const DawnCacheDeviceDescriptor* cacheDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &cacheDesc);
+        if (cacheDesc != nullptr) {
+            mCacheIsolationKey = cacheDesc->isolationKey;
+        }
+
+        if (descriptor->requiredLimits != nullptr) {
+            mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
+        } else {
+            GetDefaultLimits(&mLimits.v1);
+        }
+
+        mFormatTable = BuildFormatTable(this);
+        SetDefaultToggles();
+    }
+
+    DeviceBase::DeviceBase() : mState(State::Alive) {
+        mCaches = std::make_unique<DeviceBase::Caches>();
+    }
+
+    DeviceBase::~DeviceBase() {
+        // We need to explicitly release the Queue before we complete the destructor so that the
+        // Queue does not get destroyed after the Device.
+        mQueue = nullptr;
+    }
+
+    MaybeError DeviceBase::Initialize(QueueBase* defaultQueue) {
+        mQueue = AcquireRef(defaultQueue);
+
+#if defined(DAWN_ENABLE_ASSERTS)
+        mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
+            static bool calledOnce = false;
+            if (!calledOnce) {
+                calledOnce = true;
+                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+                                      "probably not intended. If you really want to ignore errors "
+                                      "and suppress this message, set the callback to null.";
+            }
+        };
+
+        mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+            static bool calledOnce = false;
+            if (!calledOnce) {
+                calledOnce = true;
+                dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+                                      "intended. If you really want to ignore device lost "
+                                      "and suppress this message, set the callback to null.";
+            }
+        };
+#endif  // DAWN_ENABLE_ASSERTS
+
+        mCaches = std::make_unique<DeviceBase::Caches>();
+        mErrorScopeStack = std::make_unique<ErrorScopeStack>();
+        mDynamicUploader = std::make_unique<DynamicUploader>(this);
+        mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
+        mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
+        mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
+        mPersistentCache = std::make_unique<PersistentCache>(this);
+
+        ASSERT(GetPlatform() != nullptr);
+        mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
+        mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
+
+        // Starting from now the backend can start doing reentrant calls so the device is marked as
+        // alive.
+        mState = State::Alive;
+
+        DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
+
+        // If dummy fragment shader module is needed, initialize it
+        if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+            // The empty fragment shader, used as a work around for vertex-only render pipeline
+            constexpr char kEmptyFragmentShader[] = R"(
+                @stage(fragment) fn fs_empty_main() {}
+            )";
+            ShaderModuleDescriptor descriptor;
+            ShaderModuleWGSLDescriptor wgslDesc;
+            wgslDesc.source = kEmptyFragmentShader;
+            descriptor.nextInChain = &wgslDesc;
+
+            DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
+                            CreateShaderModule(&descriptor));
+        }
+
+        return {};
+    }
+
+    void DeviceBase::DestroyObjects() {
+        // List of object types in reverse "dependency" order so we can iterate and delete the
+        // objects safely starting at leaf objects. We define dependent here such that if B has
+        // a ref to A, then B depends on A. We therefore try to destroy B before destroying A. Note
+        // that this only considers the immediate frontend dependencies, while backend objects could
+        // add complications and extra dependencies.
+        //
+        // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
+        // since AttachmentStates are cached by the device, objects that hold references to
+        // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
+        // can destroy the frontend cache.
+
+        // clang-format off
+        static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
+            ObjectType::ComputePassEncoder,
+            ObjectType::RenderPassEncoder,
+            ObjectType::RenderBundleEncoder,
+            ObjectType::RenderBundle,
+            ObjectType::CommandEncoder,
+            ObjectType::CommandBuffer,
+            ObjectType::RenderPipeline,
+            ObjectType::ComputePipeline,
+            ObjectType::PipelineLayout,
+            ObjectType::SwapChain,
+            ObjectType::BindGroup,
+            ObjectType::BindGroupLayout,
+            ObjectType::ShaderModule,
+            ObjectType::ExternalTexture,
+            ObjectType::TextureView,
+            ObjectType::Texture,
+            ObjectType::QuerySet,
+            ObjectType::Sampler,
+            ObjectType::Buffer,
+        };
+        // clang-format on
+
+        // We first move all objects out from the tracking list into a separate list so that we can
+        // avoid locking the same mutex twice. We can then iterate across the separate list to call
+        // the actual destroy function.
+        LinkedList<ApiObjectBase> objects;
+        for (ObjectType type : kObjectTypeDependencyOrder) {
+            ApiObjectList& objList = mObjectLists[type];
+            const std::lock_guard<std::mutex> lock(objList.mutex);
+            objList.objects.MoveInto(&objects);
+        }
+        for (LinkNode<ApiObjectBase>* node : objects) {
+            node->value()->Destroy();
+        }
+    }
+
+    void DeviceBase::Destroy() {
+        // Skip if we are already destroyed.
+        if (mState == State::Destroyed) {
+            return;
+        }
+
+        // Skip handling device facilities if they haven't even been created (or failed doing so)
+        if (mState != State::BeingCreated) {
+            // The device is being destroyed so it will be lost, call the application callback.
+            if (mDeviceLostCallback != nullptr) {
+                mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
+                                    mDeviceLostUserdata);
+                mDeviceLostCallback = nullptr;
+            }
+
+            // Call all the callbacks immediately as the device is about to shut down.
+            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+            mAsyncTaskManager->WaitAllPendingTasks();
+            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+                callbackTask->HandleShutDown();
+            }
+        }
+
+        // Disconnect the device, depending on which state we are currently in.
+        switch (mState) {
+            case State::BeingCreated:
+                // The GPU timeline was never started so we don't have to wait.
+                break;
+
+            case State::Alive:
+                // Alive is the only state which can have GPU work happening. Wait for all of it to
+                // complete before proceeding with destruction.
+                // Ignore errors so that we can continue with destruction
+                IgnoreErrors(WaitForIdleForDestruction());
+                AssumeCommandsComplete();
+                break;
+
+            case State::BeingDisconnected:
+                // Getting disconnected is a transient state happening in a single API call so there
+                // is always an external reference keeping the Device alive, which means the
+                // destructor cannot run while BeingDisconnected.
+                UNREACHABLE();
+                break;
+
+            case State::Disconnected:
+                break;
+
+            case State::Destroyed:
+                // If we are already destroyed we should've skipped this work entirely.
+                UNREACHABLE();
+                break;
+        }
+        ASSERT(mCompletedSerial == mLastSubmittedSerial);
+        ASSERT(mFutureSerial <= mCompletedSerial);
+
+        if (mState != State::BeingCreated) {
+            // The GPU timeline is finished.
+            // Finish destroying all objects owned by the device and tick the queue-related tasks
+            // since they should be complete. This must be done before DestroyImpl() it may
+            // relinquish resources that will be freed by backends in the DestroyImpl() call.
+            DestroyObjects();
+            mQueue->Tick(GetCompletedCommandSerial());
+            // Call TickImpl once last time to clean up resources
+            // Ignore errors so that we can continue with destruction
+            IgnoreErrors(TickImpl());
+        }
+
+        // At this point GPU operations are always finished, so we are in the disconnected state.
+        // Note that currently this state change is required because some of the backend
+        // implementations of DestroyImpl checks that we are disconnected before doing work.
+        mState = State::Disconnected;
+
+        mDynamicUploader = nullptr;
+        mCallbackTaskManager = nullptr;
+        mAsyncTaskManager = nullptr;
+        mPersistentCache = nullptr;
+        mEmptyBindGroupLayout = nullptr;
+        mInternalPipelineStore = nullptr;
+        mExternalTextureDummyView = nullptr;
+
+        AssumeCommandsComplete();
+
+        // Now that the GPU timeline is empty, destroy the backend device.
+        DestroyImpl();
+
+        mCaches = nullptr;
+        mState = State::Destroyed;
+    }
+
+    void DeviceBase::APIDestroy() {
+        Destroy();
+    }
+
+    void DeviceBase::HandleError(InternalErrorType type, const char* message) {
+        if (type == InternalErrorType::DeviceLost) {
+            mState = State::Disconnected;
+
+            // If the ErrorInjector is enabled, then the device loss might be fake and the device
+            // still be executing commands. Force a wait for idle in this case, with State being
+            // Disconnected so we can detect this case in WaitForIdleForDestruction.
+            if (ErrorInjectorEnabled()) {
+                IgnoreErrors(WaitForIdleForDestruction());
+            }
+
+            // A real device lost happened. Set the state to disconnected as the device cannot be
+            // used. Also tags all commands as completed since the device stopped running.
+            AssumeCommandsComplete();
+        } else if (type == InternalErrorType::Internal) {
+            // If we receive an internal error, assume the backend can't recover and proceed with
+            // device destruction. We first wait for all previous commands to be completed so that
+            // backend objects can be freed immediately, before handling the loss.
+
+            // Move away from the Alive state so that the application cannot use this device
+            // anymore.
+            // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
+            // threads in a multithreaded scenario?
+            mState = State::BeingDisconnected;
+
+            // Ignore errors so that we can continue with destruction
+            // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+            IgnoreErrors(WaitForIdleForDestruction());
+            IgnoreErrors(TickImpl());
+            AssumeCommandsComplete();
+            ASSERT(mFutureSerial <= mCompletedSerial);
+            mState = State::Disconnected;
+
+            // Now everything is as if the device was lost.
+            type = InternalErrorType::DeviceLost;
+        }
+
+        if (type == InternalErrorType::DeviceLost) {
+            // The device was lost, call the application callback.
+            if (mDeviceLostCallback != nullptr) {
+                mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
+                mDeviceLostCallback = nullptr;
+            }
+
+            mQueue->HandleDeviceLoss();
+
+            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+            mAsyncTaskManager->WaitAllPendingTasks();
+            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+                callbackTask->HandleDeviceLoss();
+            }
+
+            // Still forward device loss errors to the error scopes so they all reject.
+            mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+        } else {
+            // Pass the error to the error scope stack and call the uncaptured error callback
+            // if it isn't handled. DeviceLost is not handled here because it should be
+            // handled by the lost callback.
+            bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+            if (!captured && mUncapturedErrorCallback != nullptr) {
+                mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
+                                         mUncapturedErrorUserdata);
+            }
+        }
+    }
+
+    void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
+        ASSERT(error != nullptr);
+        HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+    }
+
+    void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mLoggingCallback = callback;
+        mLoggingUserdata = userdata;
+    }
+
+    void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mUncapturedErrorCallback = callback;
+        mUncapturedErrorUserdata = userdata;
+    }
+
+    void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mDeviceLostCallback = callback;
+        mDeviceLostUserdata = userdata;
+    }
+
+    void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
+        if (ConsumedError(ValidateErrorFilter(filter))) {
+            return;
+        }
+        mErrorScopeStack->Push(filter);
+    }
+
+    bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
+        // TODO(crbug.com/dawn/1324) Remove return and make function void when users are updated.
+        bool returnValue = true;
+        if (callback == nullptr) {
+            static wgpu::ErrorCallback defaultCallback = [](WGPUErrorType, char const*, void*) {};
+            callback = defaultCallback;
+        }
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        if (IsLost()) {
+            callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
+            return returnValue;
+        }
+        if (mErrorScopeStack->Empty()) {
+            callback(WGPUErrorType_Unknown, "No error scopes to pop", userdata);
+            return returnValue;
+        }
+        ErrorScope scope = mErrorScopeStack->Pop();
+        callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
+                 userdata);
+        return returnValue;
+    }
+
+    PersistentCache* DeviceBase::GetPersistentCache() {
+        ASSERT(mPersistentCache.get() != nullptr);
+        return mPersistentCache.get();
+    }
+
+    MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
+        ASSERT(object != nullptr);
+        DAWN_INVALID_IF(object->GetDevice() != this,
+                        "%s is associated with %s, and cannot be used with %s.", object,
+                        object->GetDevice(), this);
+
+        // TODO(dawn:563): Preserve labels for error objects.
+        DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
+
+        return {};
+    }
+
+    MaybeError DeviceBase::ValidateIsAlive() const {
+        DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
+        return {};
+    }
+
+    void DeviceBase::APILoseForTesting() {
+        if (mState != State::Alive) {
+            return;
+        }
+
+        HandleError(InternalErrorType::Internal, "Device lost for testing");
+    }
+
+    DeviceBase::State DeviceBase::GetState() const {
+        return mState;
+    }
+
+    bool DeviceBase::IsLost() const {
+        ASSERT(mState != State::BeingCreated);
+        return mState != State::Alive;
+    }
+
+    void DeviceBase::TrackObject(ApiObjectBase* object) {
+        ApiObjectList& objectList = mObjectLists[object->GetType()];
+        std::lock_guard<std::mutex> lock(objectList.mutex);
+        object->InsertBefore(objectList.objects.head());
+    }
+
+    std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
+        return &mObjectLists[type].mutex;
+    }
+
+    AdapterBase* DeviceBase::GetAdapter() const {
+        return mAdapter;
+    }
+
+    dawn::platform::Platform* DeviceBase::GetPlatform() const {
+        return GetAdapter()->GetInstance()->GetPlatform();
+    }
+
+    ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
+        return mCompletedSerial;
+    }
+
+    ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
+        return mLastSubmittedSerial;
+    }
+
+    ExecutionSerial DeviceBase::GetFutureSerial() const {
+        return mFutureSerial;
+    }
+
+    InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
+        return mInternalPipelineStore.get();
+    }
+
+    void DeviceBase::IncrementLastSubmittedCommandSerial() {
+        mLastSubmittedSerial++;
+    }
+
+    void DeviceBase::AssumeCommandsComplete() {
+        ExecutionSerial maxSerial =
+            ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
+        mLastSubmittedSerial = maxSerial;
+        mCompletedSerial = maxSerial;
+    }
+
+    bool DeviceBase::IsDeviceIdle() {
+        if (mAsyncTaskManager->HasPendingTasks()) {
+            return false;
+        }
+
+        ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
+        if (mCompletedSerial == maxSerial) {
+            return true;
+        }
+        return false;
+    }
+
+    ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
+        return mLastSubmittedSerial + ExecutionSerial(1);
+    }
+
+    void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
+        if (serial > mFutureSerial) {
+            mFutureSerial = serial;
+        }
+    }
+
+    MaybeError DeviceBase::CheckPassedSerials() {
+        ExecutionSerial completedSerial;
+        DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
+
+        ASSERT(completedSerial <= mLastSubmittedSerial);
+        // completedSerial should not be less than mCompletedSerial unless it is 0.
+        // It can be 0 when there's no fences to check.
+        ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
+
+        if (completedSerial > mCompletedSerial) {
+            mCompletedSerial = completedSerial;
+        }
+
+        return {};
+    }
+
+    ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
+        FormatIndex index = ComputeFormatIndex(format);
+        DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
+
+        const Format* internalFormat = &mFormatTable[index];
+        DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
+
+        return internalFormat;
+    }
+
+    const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
+        FormatIndex index = ComputeFormatIndex(format);
+        ASSERT(index < mFormatTable.size());
+        ASSERT(mFormatTable[index].isSupported);
+        return mFormatTable[index];
+    }
+
+    const Format& DeviceBase::GetValidInternalFormat(FormatIndex index) const {
+        ASSERT(index < mFormatTable.size());
+        ASSERT(mFormatTable[index].isSupported);
+        return mFormatTable[index];
+    }
+
+    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
+                                      ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<BindGroupLayoutBase> result;
+        auto iter = mCaches->bindGroupLayouts.find(&blueprint);
+        if (iter != mCaches->bindGroupLayouts.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result,
+                            CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->bindGroupLayouts.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Private function used at initialization
+    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
+        BindGroupLayoutDescriptor desc = {};
+        desc.entryCount = 0;
+        desc.entries = nullptr;
+
+        return GetOrCreateBindGroupLayout(&desc);
+    }
+
+    BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
+        ASSERT(mEmptyBindGroupLayout != nullptr);
+        return mEmptyBindGroupLayout.Get();
+    }
+
+    Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
+        ComputePipelineBase* uninitializedComputePipeline) {
+        Ref<ComputePipelineBase> cachedPipeline;
+        auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
+        if (iter != mCaches->computePipelines.end()) {
+            cachedPipeline = *iter;
+        }
+
+        return cachedPipeline;
+    }
+
+    Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
+        RenderPipelineBase* uninitializedRenderPipeline) {
+        Ref<RenderPipelineBase> cachedPipeline;
+        auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
+        if (iter != mCaches->renderPipelines.end()) {
+            cachedPipeline = *iter;
+        }
+        return cachedPipeline;
+    }
+
+    Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
+        Ref<ComputePipelineBase> computePipeline) {
+        auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
+        if (inserted) {
+            computePipeline->SetIsCachedReference();
+            return computePipeline;
+        } else {
+            return *cachedPipeline;
+        }
+    }
+
+    Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
+        Ref<RenderPipelineBase> renderPipeline) {
+        auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
+        if (inserted) {
+            renderPipeline->SetIsCachedReference();
+            return renderPipeline;
+        } else {
+            return *cachedPipeline;
+        }
+    }
+
+    void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->computePipelines.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    ResultOrError<Ref<TextureViewBase>>
+    DeviceBase::GetOrCreateDummyTextureViewForExternalTexture() {
+        if (!mExternalTextureDummyView.Get()) {
+            Ref<TextureBase> externalTextureDummy;
+            TextureDescriptor textureDesc;
+            textureDesc.dimension = wgpu::TextureDimension::e2D;
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            textureDesc.label = "Dawn_External_Texture_Dummy_Texture";
+            textureDesc.size = {1, 1, 1};
+            textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+            DAWN_TRY_ASSIGN(externalTextureDummy, CreateTexture(&textureDesc));
+
+            TextureViewDescriptor textureViewDesc;
+            textureViewDesc.arrayLayerCount = 1;
+            textureViewDesc.aspect = wgpu::TextureAspect::All;
+            textureViewDesc.baseArrayLayer = 0;
+            textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+            textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            textureViewDesc.label = "Dawn_External_Texture_Dummy_Texture_View";
+            textureViewDesc.mipLevelCount = 1;
+
+            DAWN_TRY_ASSIGN(mExternalTextureDummyView,
+                            CreateTextureView(externalTextureDummy.Get(), &textureViewDesc));
+        }
+
+        return mExternalTextureDummyView;
+    }
+
+    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor) {
+        PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<PipelineLayoutBase> result;
+        auto iter = mCaches->pipelineLayouts.find(&blueprint);
+        if (iter != mCaches->pipelineLayouts.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->pipelineLayouts.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->pipelineLayouts.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->renderPipelines.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
+        const SamplerDescriptor* descriptor) {
+        SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<SamplerBase> result;
+        auto iter = mCaches->samplers.find(&blueprint);
+        if (iter != mCaches->samplers.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->samplers.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    void DeviceBase::UncacheSampler(SamplerBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->samplers.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult,
+        OwnedCompilationMessages* compilationMessages) {
+        ASSERT(parseResult != nullptr);
+
+        ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<ShaderModuleBase> result;
+        auto iter = mCaches->shaderModules.find(&blueprint);
+        if (iter != mCaches->shaderModules.end()) {
+            result = *iter;
+        } else {
+            if (!parseResult->HasParsedShader()) {
+                // We skip the parse on creation if validation isn't enabled which let's us quickly
+                // lookup in the cache without validating and parsing. We need the parsed module
+                // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
+                // we can consider splitting it if additional validation is added.
+                ASSERT(!IsValidationEnabled());
+                DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
+                                                        compilationMessages));
+            }
+            DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->shaderModules.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->shaderModules.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        AttachmentStateBlueprint* blueprint) {
+        auto iter = mCaches->attachmentStates.find(blueprint);
+        if (iter != mCaches->attachmentStates.end()) {
+            return static_cast<AttachmentState*>(*iter);
+        }
+
+        Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
+        attachmentState->SetIsCachedReference();
+        attachmentState->SetContentHash(attachmentState->ComputeContentHash());
+        mCaches->attachmentStates.insert(attachmentState.Get());
+        return attachmentState;
+    }
+
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderBundleEncoderDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderPipelineDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderPassDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->attachmentStates.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Object creation API methods
+
+    BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
+        Ref<BindGroupBase> result;
+        if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
+                          this, descriptor)) {
+            return BindGroupBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor) {
+        Ref<BindGroupLayoutBase> result;
+        if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
+                          "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
+            return BindGroupLayoutBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
+        Ref<BufferBase> result = nullptr;
+        if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
+                          descriptor)) {
+            ASSERT(result == nullptr);
+            return BufferBase::MakeError(this, descriptor);
+        }
+        return result.Detach();
+    }
+    CommandEncoder* DeviceBase::APICreateCommandEncoder(
+        const CommandEncoderDescriptor* descriptor) {
+        Ref<CommandEncoder> result;
+        if (ConsumedError(CreateCommandEncoder(descriptor), &result,
+                          "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
+            return CommandEncoder::MakeError(this);
+        }
+        return result.Detach();
+    }
+    ComputePipelineBase* DeviceBase::APICreateComputePipeline(
+        const ComputePipelineDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<ComputePipelineBase> result;
+        if (ConsumedError(CreateComputePipeline(descriptor), &result,
+                          "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
+            return ComputePipelineBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                                   WGPUCreateComputePipelineAsyncCallback callback,
+                                                   void* userdata) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
+
+        // Call the callback directly when a validation error has been found in the front-end
+        // validations. If there is no error, then CreateComputePipelineAsync will call the
+        // callback.
+        if (maybeResult.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                     userdata);
+        }
+    }
+    PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor) {
+        Ref<PipelineLayoutBase> result;
+        if (ConsumedError(CreatePipelineLayout(descriptor), &result,
+                          "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
+            return PipelineLayoutBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
+        Ref<QuerySetBase> result;
+        if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
+                          this, descriptor)) {
+            return QuerySetBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
+        Ref<SamplerBase> result;
+        if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
+                          descriptor)) {
+            return SamplerBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                                  WGPUCreateRenderPipelineAsyncCallback callback,
+                                                  void* userdata) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+        // TODO(dawn:563): Add validation error context.
+        MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
+
+        // Call the callback directly when a validation error has been found in the front-end
+        // validations. If there is no error, then CreateRenderPipelineAsync will call the
+        // callback.
+        if (maybeResult.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                     userdata);
+        }
+    }
+    RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
+        const RenderBundleEncoderDescriptor* descriptor) {
+        Ref<RenderBundleEncoder> result;
+        if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
+                          "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
+            return RenderBundleEncoder::MakeError(this);
+        }
+        return result.Detach();
+    }
+    RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
+        const RenderPipelineDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<RenderPipelineBase> result;
+        if (ConsumedError(CreateRenderPipeline(descriptor), &result,
+                          "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
+            return RenderPipelineBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<ShaderModuleBase> result;
+        std::unique_ptr<OwnedCompilationMessages> compilationMessages(
+            std::make_unique<OwnedCompilationMessages>());
+        if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
+                          "calling %s.CreateShaderModule(%s).", this, descriptor)) {
+            DAWN_ASSERT(result == nullptr);
+            result = ShaderModuleBase::MakeError(this);
+        }
+        // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
+        // after all other operations are finished successfully.
+        result->InjectCompilationMessages(std::move(compilationMessages));
+
+        return result.Detach();
+    }
+    SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
+                                                  const SwapChainDescriptor* descriptor) {
+        Ref<SwapChainBase> result;
+        if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
+                          "calling %s.CreateSwapChain(%s).", this, descriptor)) {
+            return SwapChainBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
+        Ref<TextureBase> result;
+        if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
+                          descriptor)) {
+            return TextureBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+
+    // For Dawn Wire
+
+    BufferBase* DeviceBase::APICreateErrorBuffer() {
+        BufferDescriptor desc = {};
+        return BufferBase::MakeError(this, &desc);
+    }
+
+    // Other Device API methods
+
+    // Returns true if future ticking is needed.
+    bool DeviceBase::APITick() {
+        if (IsLost() || ConsumedError(Tick())) {
+            return false;
+        }
+        return !IsDeviceIdle();
+    }
+
+    MaybeError DeviceBase::Tick() {
+        DAWN_TRY(ValidateIsAlive());
+
+        // to avoid overly ticking, we only want to tick when:
+        // 1. the last submitted serial has moved beyond the completed serial
+        // 2. or the completed serial has not reached the future serial set by the trackers
+        if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
+            DAWN_TRY(CheckPassedSerials());
+            DAWN_TRY(TickImpl());
+
+            // There is no GPU work in flight, we need to move the serials forward so that
+            // so that CPU operations waiting on GPU completion can know they don't have to wait.
+            // AssumeCommandsComplete will assign the max serial we must tick to in order to
+            // fire the awaiting callbacks.
+            if (mCompletedSerial == mLastSubmittedSerial) {
+                AssumeCommandsComplete();
+            }
+
+            // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
+            // tick the dynamic uploader before the backend resource allocators. This would allow
+            // reclaiming resources one tick earlier.
+            mDynamicUploader->Deallocate(mCompletedSerial);
+            mQueue->Tick(mCompletedSerial);
+        }
+
+        // We have to check callback tasks in every Tick because it is not related to any global
+        // serials.
+        FlushCallbackTaskQueue();
+
+        return {};
+    }
+
+    QueueBase* DeviceBase::APIGetQueue() {
+        // Backends gave the primary queue during initialization.
+        ASSERT(mQueue != nullptr);
+
+        // Returns a new reference to the queue.
+        mQueue->Reference();
+        return mQueue.Get();
+    }
+
+    ExternalTextureBase* DeviceBase::APICreateExternalTexture(
+        const ExternalTextureDescriptor* descriptor) {
+        Ref<ExternalTextureBase> result = nullptr;
+        if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
+                          "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
+            return ExternalTextureBase::MakeError(this);
+        }
+
+        return result.Detach();
+    }
+
+    void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
+        ASSERT(deviceDescriptor);
+        ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
+            {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
+
+        for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
+            mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
+        }
+    }
+
+    bool DeviceBase::IsFeatureEnabled(Feature feature) const {
+        return mEnabledFeatures.IsEnabled(feature);
+    }
+
+    bool DeviceBase::IsValidationEnabled() const {
+        return !IsToggleEnabled(Toggle::SkipValidation);
+    }
+
+    bool DeviceBase::IsRobustnessEnabled() const {
+        return !IsToggleEnabled(Toggle::DisableRobustness);
+    }
+
+    size_t DeviceBase::GetLazyClearCountForTesting() {
+        return mLazyClearCountForTesting;
+    }
+
+    void DeviceBase::IncrementLazyClearCountForTesting() {
+        ++mLazyClearCountForTesting;
+    }
+
+    size_t DeviceBase::GetDeprecationWarningCountForTesting() {
+        return mDeprecationWarnings->count;
+    }
+
+    void DeviceBase::EmitDeprecationWarning(const char* warning) {
+        mDeprecationWarnings->count++;
+        if (mDeprecationWarnings->emitted.insert(warning).second) {
+            dawn::WarningLog() << warning;
+        }
+    }
+
+    void DeviceBase::EmitLog(const char* message) {
+        this->EmitLog(WGPULoggingType_Info, message);
+    }
+
+    void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
+        if (mLoggingCallback != nullptr) {
+            // Use the thread-safe CallbackTaskManager routine
+            std::unique_ptr<LoggingCallbackTask> callbackTask =
+                std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
+                                                      mLoggingUserdata);
+            mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+        }
+    }
+
+    bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
+        ASSERT(limits != nullptr);
+        if (limits->nextInChain != nullptr) {
+            return false;
+        }
+        limits->limits = mLimits.v1;
+        return true;
+    }
+
+    bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
+        return mEnabledFeatures.IsEnabled(feature);
+    }
+
+    size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+        return mEnabledFeatures.EnumerateFeatures(features);
+    }
+
+    void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
+        if (ConsumedError(ValidateErrorType(type))) {
+            return;
+        }
+
+        // This method should only be used to make error scope reject. For DeviceLost there is the
+        // LoseForTesting function that can be used instead.
+        if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
+            HandleError(InternalErrorType::Validation,
+                        "Invalid injected error, must be Validation or OutOfMemory");
+            return;
+        }
+
+        HandleError(FromWGPUErrorType(type), message);
+    }
+
+    QueueBase* DeviceBase::GetQueue() const {
+        return mQueue.Get();
+    }
+
+    // Implementation details of object creation
+
+    ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
+        const BindGroupDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
+                             "validating %s against %s", descriptor, descriptor->layout);
+        }
+        return CreateBindGroupImpl(descriptor);
+    }
+
+    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor,
+        bool allowInternalBinding) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(
+                ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+                "validating %s", descriptor);
+        }
+        return GetOrCreateBindGroupLayout(descriptor);
+    }
+
+    ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
+                             descriptor);
+        }
+
+        Ref<BufferBase> buffer;
+        DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+
+        if (descriptor->mappedAtCreation) {
+            DAWN_TRY(buffer->MapAtCreation());
+        }
+
+        return std::move(buffer);
+    }
+
+    ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
+        const ComputePipelineDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+        }
+
+        // Ref will keep the pipeline layout alive until the end of the function where
+        // the pipeline will take another reference.
+        Ref<PipelineLayoutBase> layoutRef;
+        ComputePipelineDescriptor appliedDescriptor;
+        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+                                       this, *descriptor, &appliedDescriptor));
+
+        Ref<ComputePipelineBase> uninitializedComputePipeline =
+            CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+        Ref<ComputePipelineBase> cachedComputePipeline =
+            GetCachedComputePipeline(uninitializedComputePipeline.Get());
+        if (cachedComputePipeline.Get() != nullptr) {
+            return cachedComputePipeline;
+        }
+
+        DAWN_TRY(uninitializedComputePipeline->Initialize());
+        return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+    }
+
+    ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
+        const CommandEncoderDescriptor* descriptor) {
+        const CommandEncoderDescriptor defaultDescriptor = {};
+        if (descriptor == nullptr) {
+            descriptor = &defaultDescriptor;
+        }
+
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
+        }
+        return CommandEncoder::Create(this, descriptor);
+    }
+
+    MaybeError DeviceBase::CreateComputePipelineAsync(
+        const ComputePipelineDescriptor* descriptor,
+        WGPUCreateComputePipelineAsyncCallback callback,
+        void* userdata) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+        }
+
+        Ref<PipelineLayoutBase> layoutRef;
+        ComputePipelineDescriptor appliedDescriptor;
+        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+                                       this, *descriptor, &appliedDescriptor));
+
+        Ref<ComputePipelineBase> uninitializedComputePipeline =
+            CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+
+        // Call the callback directly when we can get a cached compute pipeline object.
+        Ref<ComputePipelineBase> cachedComputePipeline =
+            GetCachedComputePipeline(uninitializedComputePipeline.Get());
+        if (cachedComputePipeline.Get() != nullptr) {
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
+                     "", userdata);
+        } else {
+            // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
+            // where the pipeline object may be initialized asynchronously and the result will be
+            // saved to mCreatePipelineAsyncTracker.
+            InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
+                                               userdata);
+        }
+
+        return {};
+    }
+
+    // This function is overwritten with the async version on the backends that supports
+    //  initializing compute pipelines asynchronously.
+    void DeviceBase::InitializeComputePipelineAsyncImpl(
+        Ref<ComputePipelineBase> computePipeline,
+        WGPUCreateComputePipelineAsyncCallback callback,
+        void* userdata) {
+        Ref<ComputePipelineBase> result;
+        std::string errorMessage;
+
+        MaybeError maybeError = computePipeline->Initialize();
+        if (maybeError.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+            errorMessage = error->GetMessage();
+        } else {
+            result = AddOrGetCachedComputePipeline(std::move(computePipeline));
+        }
+
+        std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
+            std::make_unique<CreateComputePipelineAsyncCallbackTask>(
+                std::move(result), errorMessage, callback, userdata);
+        mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+    }
+
+    // This function is overwritten with the async version on the backends
+    // that supports initializing render pipeline asynchronously
+    void DeviceBase::InitializeRenderPipelineAsyncImpl(
+        Ref<RenderPipelineBase> renderPipeline,
+        WGPUCreateRenderPipelineAsyncCallback callback,
+        void* userdata) {
+        Ref<RenderPipelineBase> result;
+        std::string errorMessage;
+
+        MaybeError maybeError = renderPipeline->Initialize();
+        if (maybeError.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+            errorMessage = error->GetMessage();
+        } else {
+            result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
+        }
+
+        std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+            std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
+                                                                    callback, userdata);
+        mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+    }
+
+    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+        }
+        return GetOrCreatePipelineLayout(descriptor);
+    }
+
+    ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
+        const ExternalTextureDescriptor* descriptor) {
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
+                             descriptor);
+        }
+
+        return ExternalTextureBase::Create(this, descriptor);
+    }
+
+    ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
+        const QuerySetDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
+                             descriptor);
+        }
+        return CreateQuerySetImpl(descriptor);
+    }
+
+    ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
+        const RenderBundleEncoderDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
+        }
+        return RenderBundleEncoder::Create(this, descriptor);
+    }
+
+    ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
+        const RenderPipelineDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+        }
+
+        // Ref will keep the pipeline layout alive until the end of the function where
+        // the pipeline will take another reference.
+        Ref<PipelineLayoutBase> layoutRef;
+        RenderPipelineDescriptor appliedDescriptor;
+        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+                                       this, *descriptor, &appliedDescriptor));
+
+        Ref<RenderPipelineBase> uninitializedRenderPipeline =
+            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+        Ref<RenderPipelineBase> cachedRenderPipeline =
+            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+        if (cachedRenderPipeline != nullptr) {
+            return cachedRenderPipeline;
+        }
+
+        DAWN_TRY(uninitializedRenderPipeline->Initialize());
+        return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+    }
+
+    MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                                     WGPUCreateRenderPipelineAsyncCallback callback,
+                                                     void* userdata) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+        }
+
+        // Ref will keep the pipeline layout alive until the end of the function where
+        // the pipeline will take another reference.
+        Ref<PipelineLayoutBase> layoutRef;
+        RenderPipelineDescriptor appliedDescriptor;
+        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+                                       this, *descriptor, &appliedDescriptor));
+
+        Ref<RenderPipelineBase> uninitializedRenderPipeline =
+            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+        // Call the callback directly when we can get a cached render pipeline object.
+        Ref<RenderPipelineBase> cachedRenderPipeline =
+            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+        if (cachedRenderPipeline != nullptr) {
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
+                     "", userdata);
+        } else {
+            // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
+            // where the pipeline object may be initialized asynchronously and the result will be
+            // saved to mCreatePipelineAsyncTracker.
+            InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
+                                              userdata);
+        }
+
+        return {};
+    }
+
+    ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
+        const SamplerDescriptor defaultDescriptor = {};
+        DAWN_TRY(ValidateIsAlive());
+        descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
+                             descriptor);
+        }
+        return GetOrCreateSampler(descriptor);
+    }
+
+    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
+        const ShaderModuleDescriptor* descriptor,
+        OwnedCompilationMessages* compilationMessages) {
+        DAWN_TRY(ValidateIsAlive());
+
+        // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
+        // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
+        // long as dawn_native don't use the compilationMessages of these internal shader modules.
+        ShaderModuleParseResult parseResult;
+
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(
+                ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
+                "validating %s", descriptor);
+        }
+
+        return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+    }
+
+    ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
+        Surface* surface,
+        const SwapChainDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
+                             "validating %s", descriptor);
+        }
+
+        // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
+        if (surface == nullptr) {
+            return CreateSwapChainImpl(descriptor);
+        } else {
+            ASSERT(descriptor->implementation == 0);
+
+            NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
+            ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
+                CreateSwapChainImpl(surface, previousSwapChain, descriptor);
+
+            if (previousSwapChain != nullptr) {
+                previousSwapChain->DetachFromSurface();
+            }
+
+            Ref<NewSwapChainBase> newSwapChain;
+            DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
+
+            newSwapChain->SetIsAttached();
+            surface->SetAttachedSwapChain(newSwapChain.Get());
+            return newSwapChain;
+        }
+    }
+
+    ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
+                             descriptor);
+        }
+        return CreateTextureImpl(descriptor);
+    }
+
+    ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        DAWN_TRY(ValidateIsAlive());
+        DAWN_TRY(ValidateObject(texture));
+        TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
+        if (IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
+                             "validating %s against %s.", &desc, texture);
+        }
+        return CreateTextureViewImpl(texture, &desc);
+    }
+
+    // Other implementation details
+
+    DynamicUploader* DeviceBase::GetDynamicUploader() const {
+        return mDynamicUploader.get();
+    }
+
+    // The Toggle device facility
+
+    std::vector<const char*> DeviceBase::GetTogglesUsed() const {
+        return mEnabledToggles.GetContainedToggleNames();
+    }
+
+    bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
+        return mEnabledToggles.Has(toggle);
+    }
+
+    void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
+        if (!mOverridenToggles.Has(toggle)) {
+            mEnabledToggles.Set(toggle, isEnabled);
+        }
+    }
+
+    void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
+        if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
+            dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
+                               << isEnabled << " when it was overriden to be " << !isEnabled;
+        }
+        mEnabledToggles.Set(toggle, isEnabled);
+    }
+
+    void DeviceBase::SetDefaultToggles() {
+        SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
+        SetToggle(Toggle::DisallowUnsafeAPIs, true);
+    }
+
+    void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
+        ASSERT(togglesDescriptor != nullptr);
+
+        for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
+            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+                togglesDescriptor->forceEnabledToggles[i]);
+            if (toggle != Toggle::InvalidEnum) {
+                mEnabledToggles.Set(toggle, true);
+                mOverridenToggles.Set(toggle, true);
+            }
+        }
+        for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
+            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+                togglesDescriptor->forceDisabledToggles[i]);
+            if (toggle != Toggle::InvalidEnum) {
+                mEnabledToggles.Set(toggle, false);
+                mOverridenToggles.Set(toggle, true);
+            }
+        }
+    }
+
+    void DeviceBase::FlushCallbackTaskQueue() {
+        if (!mCallbackTaskManager->IsEmpty()) {
+            // If a user calls Queue::Submit inside the callback, then the device will be ticked,
+            // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
+            // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
+            // update mCallbackTaskManager, then call all the callbacks.
+            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+                callbackTask->Finish();
+            }
+        }
+    }
+
+    const CombinedLimits& DeviceBase::GetLimits() const {
+        return mLimits;
+    }
+
+    AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
+        return mAsyncTaskManager.get();
+    }
+
+    CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
+        return mCallbackTaskManager.get();
+    }
+
+    dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
+        return mWorkerTaskPool.get();
+    }
+
+    void DeviceBase::AddComputePipelineAsyncCallbackTask(
+        Ref<ComputePipelineBase> pipeline,
+        std::string errorMessage,
+        WGPUCreateComputePipelineAsyncCallback callback,
+        void* userdata) {
+        // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
+        // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
+        struct CreateComputePipelineAsyncWaitableCallbackTask final
+            : CreateComputePipelineAsyncCallbackTask {
+            using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
+            void Finish() final {
+                // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
+                // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+                // thread-safe.
+                if (mPipeline.Get() != nullptr) {
+                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
+                }
+
+                CreateComputePipelineAsyncCallbackTask::Finish();
+            }
+        };
+
+        mCallbackTaskManager->AddCallbackTask(
+            std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
+                std::move(pipeline), errorMessage, callback, userdata));
+    }
+
+    void DeviceBase::AddRenderPipelineAsyncCallbackTask(
+        Ref<RenderPipelineBase> pipeline,
+        std::string errorMessage,
+        WGPUCreateRenderPipelineAsyncCallback callback,
+        void* userdata) {
+        // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
+        // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
+        struct CreateRenderPipelineAsyncWaitableCallbackTask final
+            : CreateRenderPipelineAsyncCallbackTask {
+            using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
+
+            void Finish() final {
+                // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
+                // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+                // thread-safe.
+                if (mPipeline.Get() != nullptr) {
+                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
+                }
+
+                CreateRenderPipelineAsyncCallbackTask::Finish();
+            }
+        };
+
+        mCallbackTaskManager->AddCallbackTask(
+            std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
+                std::move(pipeline), errorMessage, callback, userdata));
+    }
+
+    PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
+        return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
+    }
+
+    const std::string& DeviceBase::GetCacheIsolationKey() const {
+        return mCacheIsolationKey;
+    }
+
+    const std::string& DeviceBase::GetLabel() const {
+        return mLabel;
+    }
+
+    void DeviceBase::APISetLabel(const char* label) {
+        mLabel = label;
+        SetLabelImpl();
+    }
+
+    void DeviceBase::SetLabelImpl() {
+    }
+
+    bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+        ComputePipelineBase* computePipeline) const {
+        return false;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Device.h b/src/dawn/native/Device.h
new file mode 100644
index 0000000..db13e09
--- /dev/null
+++ b/src/dawn/native/Device.h
@@ -0,0 +1,555 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DEVICE_H_
+#define DAWNNATIVE_DEVICE_H_
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/Toggles.h"
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <mutex>
+#include <utility>
+
+namespace dawn::platform {
+    class WorkerTaskPool;
+}  // namespace dawn::platform
+
+namespace dawn::native {
+    class AdapterBase;
+    class AsyncTaskManager;
+    class AttachmentState;
+    class AttachmentStateBlueprint;
+    class BindGroupLayoutBase;
+    class CallbackTaskManager;
+    class DynamicUploader;
+    class ErrorScopeStack;
+    class ExternalTextureBase;
+    class OwnedCompilationMessages;
+    class PersistentCache;
+    class StagingBufferBase;
+    struct CallbackTask;
+    struct InternalPipelineStore;
+    struct ShaderModuleParseResult;
+
+    class DeviceBase : public RefCounted {
+      public:
+        DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
+        virtual ~DeviceBase();
+
+        void HandleError(InternalErrorType type, const char* message);
+
+        bool ConsumedError(MaybeError maybeError) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                ConsumeError(maybeError.AcquireError());
+                return true;
+            }
+            return false;
+        }
+
+        template <typename T>
+        bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
+            if (DAWN_UNLIKELY(resultOrError.IsError())) {
+                ConsumeError(resultOrError.AcquireError());
+                return true;
+            }
+            *result = resultOrError.AcquireSuccess();
+            return false;
+        }
+
+        template <typename... Args>
+        bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+                if (error->GetType() == InternalErrorType::Validation) {
+                    std::string out;
+                    absl::UntypedFormatSpec format(formatStr);
+                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                        error->AppendContext(std::move(out));
+                    } else {
+                        error->AppendContext(
+                            absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
+                    }
+                }
+                ConsumeError(std::move(error));
+                return true;
+            }
+            return false;
+        }
+
+        template <typename T, typename... Args>
+        bool ConsumedError(ResultOrError<T> resultOrError,
+                           T* result,
+                           const char* formatStr,
+                           const Args&... args) {
+            if (DAWN_UNLIKELY(resultOrError.IsError())) {
+                std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+                if (error->GetType() == InternalErrorType::Validation) {
+                    std::string out;
+                    absl::UntypedFormatSpec format(formatStr);
+                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                        error->AppendContext(std::move(out));
+                    } else {
+                        error->AppendContext(
+                            absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
+                    }
+                }
+                ConsumeError(std::move(error));
+                return true;
+            }
+            *result = resultOrError.AcquireSuccess();
+            return false;
+        }
+
+        MaybeError ValidateObject(const ApiObjectBase* object) const;
+
+        AdapterBase* GetAdapter() const;
+        dawn::platform::Platform* GetPlatform() const;
+
+        // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
+        // isn't a valid wgpu::TextureFormat or isn't supported by this device.
+        // The pointer returned has the same lifetime as the device.
+        ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
+
+        // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
+        // valid and supported.
+        // The reference returned has the same lifetime as the device.
+        const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
+        const Format& GetValidInternalFormat(FormatIndex formatIndex) const;
+
+        virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) = 0;
+
+        ExecutionSerial GetCompletedCommandSerial() const;
+        ExecutionSerial GetLastSubmittedCommandSerial() const;
+        ExecutionSerial GetFutureSerial() const;
+        ExecutionSerial GetPendingCommandSerial() const;
+
+        // Many Dawn objects are completely immutable once created which means that if two
+        // creations are given the same arguments, they can return the same object. Reusing
+        // objects will help make comparisons between objects by a single pointer comparison.
+        //
+        // Technically no object is immutable as they have a reference count, and an
+        // application with reference-counting issues could "see" that objects are reused.
+        // This is solved by automatic-reference counting, and also the fact that when using
+        // the client-server wire every creation will get a different proxy object, with a
+        // different reference count.
+        //
+        // When trying to create an object, we give both the descriptor and an example of what
+        // the created object will be, the "blueprint". The blueprint is just a FooBase object
+        // instead of a backend Foo object. If the blueprint doesn't match an object in the
+        // cache, then the descriptor is used to make a new object.
+        ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+        void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
+
+        BindGroupLayoutBase* GetEmptyBindGroupLayout();
+
+        void UncacheComputePipeline(ComputePipelineBase* obj);
+
+        ResultOrError<Ref<TextureViewBase>> GetOrCreateDummyTextureViewForExternalTexture();
+
+        ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
+            const PipelineLayoutDescriptor* descriptor);
+        void UncachePipelineLayout(PipelineLayoutBase* obj);
+
+        void UncacheRenderPipeline(RenderPipelineBase* obj);
+
+        ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+        void UncacheSampler(SamplerBase* obj);
+
+        ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult,
+            OwnedCompilationMessages* compilationMessages);
+        void UncacheShaderModule(ShaderModuleBase* obj);
+
+        Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
+        Ref<AttachmentState> GetOrCreateAttachmentState(
+            const RenderBundleEncoderDescriptor* descriptor);
+        Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
+        Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
+        void UncacheAttachmentState(AttachmentState* obj);
+
+        // Object creation methods that be used in a reentrant manner.
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
+            const BindGroupLayoutDescriptor* descriptor,
+            bool allowInternalBinding = false);
+        ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
+        ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
+            const CommandEncoderDescriptor* descriptor = nullptr);
+        ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
+            const ComputePipelineDescriptor* descriptor);
+        MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                              WGPUCreateComputePipelineAsyncCallback callback,
+                                              void* userdata);
+
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
+            const PipelineLayoutDescriptor* descriptor);
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
+        ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
+            const RenderBundleEncoderDescriptor* descriptor);
+        ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
+            const RenderPipelineDescriptor* descriptor);
+        MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                             WGPUCreateRenderPipelineAsyncCallback callback,
+                                             void* userdata);
+        ResultOrError<Ref<SamplerBase>> CreateSampler(
+            const SamplerDescriptor* descriptor = nullptr);
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
+            const ShaderModuleDescriptor* descriptor,
+            OwnedCompilationMessages* compilationMessages = nullptr);
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
+                                                          const SwapChainDescriptor* descriptor);
+        ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+        ResultOrError<Ref<TextureViewBase>> CreateTextureView(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor);
+
+        // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
+        BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
+        BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
+        BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
+        CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+        ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
+        PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+        QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
+        void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                           WGPUCreateComputePipelineAsyncCallback callback,
+                                           void* userdata);
+        void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                          WGPUCreateRenderPipelineAsyncCallback callback,
+                                          void* userdata);
+        RenderBundleEncoder* APICreateRenderBundleEncoder(
+            const RenderBundleEncoderDescriptor* descriptor);
+        RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
+        ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
+        SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
+        ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
+        SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
+        TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
+
+        InternalPipelineStore* GetInternalPipelineStore();
+
+        // For Dawn Wire
+        BufferBase* APICreateErrorBuffer();
+
+        QueueBase* APIGetQueue();
+
+        bool APIGetLimits(SupportedLimits* limits) const;
+        bool APIHasFeature(wgpu::FeatureName feature) const;
+        size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+        void APIInjectError(wgpu::ErrorType type, const char* message);
+        bool APITick();
+
+        void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
+        void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+        void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
+        void APIPushErrorScope(wgpu::ErrorFilter filter);
+        bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+
+        MaybeError ValidateIsAlive() const;
+
+        PersistentCache* GetPersistentCache();
+
+        virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
+            size_t size) = 0;
+        virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                                   uint64_t sourceOffset,
+                                                   BufferBase* destination,
+                                                   uint64_t destinationOffset,
+                                                   uint64_t size) = 0;
+        virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                    const TextureDataLayout& src,
+                                                    TextureCopy* dst,
+                                                    const Extent3D& copySizePixels) = 0;
+
+        DynamicUploader* GetDynamicUploader() const;
+
+        // The device state which is a combination of creation state and loss state.
+        //
+        //   - BeingCreated: the device didn't finish creation yet and the frontend cannot be used
+        //     (both for the application calling WebGPU, or re-entrant calls). No work exists on
+        //     the GPU timeline.
+        //   - Alive: the device is usable and might have work happening on the GPU timeline.
+        //   - BeingDisconnected: the device is no longer usable because we are waiting for all
+        //     work on the GPU timeline to finish. (this is to make validation prevent the
+        //     application from adding more work during the transition from Available to
+        //     Disconnected)
+        //   - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
+        //     structures can be safely destroyed without additional synchronization.
+        //   - Destroyed: the device is disconnected and resources have been reclaimed.
+        enum class State {
+            BeingCreated,
+            Alive,
+            BeingDisconnected,
+            Disconnected,
+            Destroyed,
+        };
+        State GetState() const;
+        bool IsLost() const;
+        void TrackObject(ApiObjectBase* object);
+        std::mutex* GetObjectListMutex(ObjectType type);
+
+        std::vector<const char*> GetTogglesUsed() const;
+        bool IsFeatureEnabled(Feature feature) const;
+        bool IsToggleEnabled(Toggle toggle) const;
+        bool IsValidationEnabled() const;
+        bool IsRobustnessEnabled() const;
+        size_t GetLazyClearCountForTesting();
+        void IncrementLazyClearCountForTesting();
+        size_t GetDeprecationWarningCountForTesting();
+        void EmitDeprecationWarning(const char* warning);
+        void EmitLog(const char* message);
+        void EmitLog(WGPULoggingType loggingType, const char* message);
+        void APILoseForTesting();
+        QueueBase* GetQueue() const;
+
+        // AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
+        // ticked in order to clean up all pending callback work or to execute asynchronous resource
+        // writes. It should be given the serial that a callback is tracked with, so that once that
+        // serial is completed, it can be resolved and cleaned up. This is so that when there is no
+        // gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
+        // still check if we have pending work to take care of, rather than hanging and never
+        // reaching the serial the work will be executed on.
+        void AddFutureSerial(ExecutionSerial serial);
+        // Check for passed fences and set the new completed serial
+        MaybeError CheckPassedSerials();
+
+        MaybeError Tick();
+
+        // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
+        // BackendMetadata that we can query from the device.
+        virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
+        virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
+
+        virtual float GetTimestampPeriodInNS() const = 0;
+
+        virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+            ComputePipelineBase* computePipeline) const;
+
+        const CombinedLimits& GetLimits() const;
+
+        AsyncTaskManager* GetAsyncTaskManager() const;
+        CallbackTaskManager* GetCallbackTaskManager() const;
+        dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
+
+        void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+                                                 std::string errorMessage,
+                                                 WGPUCreateComputePipelineAsyncCallback callback,
+                                                 void* userdata);
+        void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+                                                std::string errorMessage,
+                                                WGPUCreateRenderPipelineAsyncCallback callback,
+                                                void* userdata);
+
+        PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
+
+        const std::string& GetCacheIsolationKey() const;
+        const std::string& GetLabel() const;
+        void APISetLabel(const char* label);
+        void APIDestroy();
+
+      protected:
+        // Constructor used only for mocking and testing.
+        DeviceBase();
+
+        void SetToggle(Toggle toggle, bool isEnabled);
+        void ForceSetToggle(Toggle toggle, bool isEnabled);
+
+        MaybeError Initialize(QueueBase* defaultQueue);
+        void DestroyObjects();
+        void Destroy();
+
+        // Incrememt mLastSubmittedSerial when we submit the next serial
+        void IncrementLastSubmittedCommandSerial();
+
+      private:
+        virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
+        virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
+            const ExternalTextureDescriptor* descriptor);
+        virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) = 0;
+        virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) = 0;
+        // Note that previousSwapChain may be nullptr, or come from a different backend.
+        virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) = 0;
+        virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) = 0;
+        virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) = 0;
+        virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) = 0;
+        virtual void SetLabelImpl();
+
+        virtual MaybeError TickImpl() = 0;
+        void FlushCallbackTaskQueue();
+
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
+
+        Ref<ComputePipelineBase> GetCachedComputePipeline(
+            ComputePipelineBase* uninitializedComputePipeline);
+        Ref<RenderPipelineBase> GetCachedRenderPipeline(
+            RenderPipelineBase* uninitializedRenderPipeline);
+        Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
+            Ref<ComputePipelineBase> computePipeline);
+        Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
+            Ref<RenderPipelineBase> renderPipeline);
+        virtual void InitializeComputePipelineAsyncImpl(
+            Ref<ComputePipelineBase> computePipeline,
+            WGPUCreateComputePipelineAsyncCallback callback,
+            void* userdata);
+        virtual void InitializeRenderPipelineAsyncImpl(
+            Ref<RenderPipelineBase> renderPipeline,
+            WGPUCreateRenderPipelineAsyncCallback callback,
+            void* userdata);
+
+        void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
+        void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
+
+        void SetDefaultToggles();
+
+        void ConsumeError(std::unique_ptr<ErrorData> error);
+
+        // Each backend should implement to check their passed fences if there are any and return a
+        // completed serial. Return 0 should indicate no fences to check.
+        virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
+        // During shut down of device, some operations might have been started since the last submit
+        // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
+        // make all commands look completed.
+        void AssumeCommandsComplete();
+        bool IsDeviceIdle();
+
+        // mCompletedSerial tracks the last completed command serial that the fence has returned.
+        // mLastSubmittedSerial tracks the last submitted command serial.
+        // During device removal, the serials could be artificially incremented
+        // to make it appear as if commands have been compeleted. They can also be artificially
+        // incremented when no work is being done in the GPU so CPU operations don't have to wait on
+        // stale serials.
+        // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
+        // callbacks to fire
+        ExecutionSerial mCompletedSerial = ExecutionSerial(0);
+        ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
+        ExecutionSerial mFutureSerial = ExecutionSerial(0);
+
+        // DestroyImpl is used to clean up and release resources used by device, does not wait for
+        // GPU or check errors.
+        virtual void DestroyImpl() = 0;
+
+        // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
+        // destruction. This is only used when properly destructing the device. For a real
+        // device loss, this function doesn't need to be called since the driver already closed all
+        // resources.
+        virtual MaybeError WaitForIdleForDestruction() = 0;
+
+        wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
+        void* mUncapturedErrorUserdata = nullptr;
+
+        wgpu::LoggingCallback mLoggingCallback = nullptr;
+        void* mLoggingUserdata = nullptr;
+
+        wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
+        void* mDeviceLostUserdata = nullptr;
+
+        std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
+
+        // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
+        // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
+        // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
+        // Instance.
+        Ref<InstanceBase> mInstance;
+        AdapterBase* mAdapter = nullptr;
+
+        // The object caches aren't exposed in the header as they would require a lot of
+        // additional includes.
+        struct Caches;
+        std::unique_ptr<Caches> mCaches;
+
+        Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
+
+        Ref<TextureViewBase> mExternalTextureDummyView;
+
+        std::unique_ptr<DynamicUploader> mDynamicUploader;
+        std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
+        Ref<QueueBase> mQueue;
+
+        struct DeprecationWarnings;
+        std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
+
+        State mState = State::BeingCreated;
+
+        // Encompasses the mutex and the actual list that contains all live objects "owned" by the
+        // device.
+        struct ApiObjectList {
+            std::mutex mutex;
+            LinkedList<ApiObjectBase> objects;
+        };
+        PerObjectType<ApiObjectList> mObjectLists;
+
+        FormatTable mFormatTable;
+
+        TogglesSet mEnabledToggles;
+        TogglesSet mOverridenToggles;
+        size_t mLazyClearCountForTesting = 0;
+        std::atomic_uint64_t mNextPipelineCompatibilityToken;
+
+        CombinedLimits mLimits;
+        FeaturesSet mEnabledFeatures;
+
+        std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
+
+        std::unique_ptr<PersistentCache> mPersistentCache;
+
+        std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
+        std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
+        std::string mLabel;
+        std::string mCacheIsolationKey = "";
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_DEVICE_H_
diff --git a/src/dawn/native/DynamicUploader.cpp b/src/dawn/native/DynamicUploader.cpp
new file mode 100644
index 0000000..262c07d
--- /dev/null
+++ b/src/dawn/native/DynamicUploader.cpp
@@ -0,0 +1,129 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
+        mRingBuffers.emplace_back(
+            std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
+    }
+
+    void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
+        mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
+                                        mDevice->GetPendingCommandSerial());
+    }
+
+    ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
+                                                                  ExecutionSerial serial) {
+        // Disable further sub-allocation should the request be too large.
+        if (allocationSize > kRingBufferSize) {
+            std::unique_ptr<StagingBufferBase> stagingBuffer;
+            DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+
+            UploadHandle uploadHandle;
+            uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
+            uploadHandle.stagingBuffer = stagingBuffer.get();
+
+            ReleaseStagingBuffer(std::move(stagingBuffer));
+            return uploadHandle;
+        }
+
+        // Note: Validation ensures size is already aligned.
+        // First-fit: find next smallest buffer large enough to satisfy the allocation request.
+        RingBuffer* targetRingBuffer = mRingBuffers.back().get();
+        for (auto& ringBuffer : mRingBuffers) {
+            const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
+            // Prevent overflow.
+            ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
+            const uint64_t remainingSize =
+                ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
+            if (allocationSize <= remainingSize) {
+                targetRingBuffer = ringBuffer.get();
+                break;
+            }
+        }
+
+        uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
+        if (targetRingBuffer != nullptr) {
+            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+        }
+
+        // Upon failure, append a newly created ring buffer to fulfill the
+        // request.
+        if (startOffset == RingBufferAllocator::kInvalidOffset) {
+            mRingBuffers.emplace_back(
+                std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
+
+            targetRingBuffer = mRingBuffers.back().get();
+            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+        }
+
+        ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+
+        // Allocate the staging buffer backing the ringbuffer.
+        // Note: the first ringbuffer will be lazily created.
+        if (targetRingBuffer->mStagingBuffer == nullptr) {
+            std::unique_ptr<StagingBufferBase> stagingBuffer;
+            DAWN_TRY_ASSIGN(stagingBuffer,
+                            mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
+            targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
+        }
+
+        ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
+
+        UploadHandle uploadHandle;
+        uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
+        uploadHandle.mappedBuffer =
+            static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
+        uploadHandle.startOffset = startOffset;
+
+        return uploadHandle;
+    }
+
+    void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
+        // Reclaim memory within the ring buffers by ticking (or removing requests no longer
+        // in-flight).
+        for (size_t i = 0; i < mRingBuffers.size(); ++i) {
+            mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
+
+            // Never erase the last buffer as to prevent re-creating smaller buffers
+            // again. The last buffer is the largest.
+            if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
+                mRingBuffers.erase(mRingBuffers.begin() + i);
+            }
+        }
+        mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
+    }
+
+    // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
+    // when it's not necessary.
+    ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
+                                                          ExecutionSerial serial,
+                                                          uint64_t offsetAlignment) {
+        ASSERT(offsetAlignment > 0);
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+        uint64_t additionalOffset =
+            Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
+        uploadHandle.mappedBuffer =
+            static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+        uploadHandle.startOffset += additionalOffset;
+        return uploadHandle;
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/DynamicUploader.h b/src/dawn/native/DynamicUploader.h
new file mode 100644
index 0000000..fa3f80a
--- /dev/null
+++ b/src/dawn/native/DynamicUploader.h
@@ -0,0 +1,66 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DYNAMICUPLOADER_H_
+#define DAWNNATIVE_DYNAMICUPLOADER_H_
+
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/StagingBuffer.h"
+
+// DynamicUploader is the front-end implementation used to manage multiple ring buffers for upload
+// usage.
+namespace dawn::native {
+
+    struct UploadHandle {
+        uint8_t* mappedBuffer = nullptr;
+        uint64_t startOffset = 0;
+        StagingBufferBase* stagingBuffer = nullptr;
+    };
+
+    class DynamicUploader {
+      public:
+        DynamicUploader(DeviceBase* device);
+        ~DynamicUploader() = default;
+
+        // We add functions to Release StagingBuffers to the DynamicUploader as there's
+        // currently no place to track the allocated staging buffers such that they're freed after
+        // pending commands are finished. This should be changed when better resource allocation is
+        // implemented.
+        void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
+
+        ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
+                                             ExecutionSerial serial,
+                                             uint64_t offsetAlignment);
+        void Deallocate(ExecutionSerial lastCompletedSerial);
+
+      private:
+        static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
+
+        struct RingBuffer {
+            std::unique_ptr<StagingBufferBase> mStagingBuffer;
+            RingBufferAllocator mAllocator;
+        };
+
+        ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
+                                                     ExecutionSerial serial);
+
+        std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
+        SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
+        DeviceBase* mDevice;
+    };
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_DYNAMICUPLOADER_H_
diff --git a/src/dawn/native/EncodingContext.cpp b/src/dawn/native/EncodingContext.cpp
new file mode 100644
index 0000000..b9ba529
--- /dev/null
+++ b/src/dawn/native/EncodingContext.cpp
@@ -0,0 +1,217 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/EncodingContext.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+#include "dawn/native/RenderBundleEncoder.h"
+
+namespace dawn::native {
+
+    EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
+        : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
+    }
+
+    EncodingContext::~EncodingContext() {
+        Destroy();
+    }
+
+    void EncodingContext::Destroy() {
+        if (mDestroyed) {
+            return;
+        }
+        if (!mWereCommandsAcquired) {
+            FreeCommands(GetIterator());
+        }
+        // If we weren't already finished, then we want to handle an error here so that any calls
+        // to Finish after Destroy will return a meaningful error.
+        if (!IsFinished()) {
+            HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
+        }
+        mDestroyed = true;
+        mCurrentEncoder = nullptr;
+    }
+
+    CommandIterator EncodingContext::AcquireCommands() {
+        MoveToIterator();
+        ASSERT(!mWereCommandsAcquired);
+        mWereCommandsAcquired = true;
+        return std::move(mIterator);
+    }
+
+    CommandIterator* EncodingContext::GetIterator() {
+        MoveToIterator();
+        ASSERT(!mWereCommandsAcquired);
+        return &mIterator;
+    }
+
+    void EncodingContext::MoveToIterator() {
+        CommitCommands(std::move(mPendingCommands));
+        if (!mWasMovedToIterator) {
+            mIterator.AcquireCommandBlocks(std::move(mAllocators));
+            mWasMovedToIterator = true;
+        }
+    }
+
+    void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
+        // Append in reverse so that the most recently set debug group is printed first, like a
+        // call stack.
+        for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
+            error->AppendDebugGroup(*iter);
+        }
+
+        if (!IsFinished()) {
+            // Encoding should only generate validation errors.
+            ASSERT(error->GetType() == InternalErrorType::Validation);
+            // If the encoding context is not finished, errors are deferred until
+            // Finish() is called.
+            if (mError == nullptr) {
+                mError = std::move(error);
+            }
+        } else {
+            mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+        }
+    }
+
+    void EncodingContext::WillBeginRenderPass() {
+        ASSERT(mCurrentEncoder == mTopLevelEncoder);
+        if (mDevice->IsValidationEnabled()) {
+            // When validation is enabled, we are going to want to capture all commands encoded
+            // between and including BeginRenderPassCmd and EndRenderPassCmd, and defer their
+            // sequencing util after we have a chance to insert any necessary validation
+            // commands. To support this we commit any current commands now, so that the
+            // impending BeginRenderPassCmd starts in a fresh CommandAllocator.
+            CommitCommands(std::move(mPendingCommands));
+        }
+    }
+
+    void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
+        // Assert we're at the top level.
+        ASSERT(mCurrentEncoder == mTopLevelEncoder);
+        ASSERT(passEncoder != nullptr);
+
+        mCurrentEncoder = passEncoder;
+    }
+
+    MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
+                                               RenderPassResourceUsageTracker usageTracker,
+                                               CommandEncoder* commandEncoder,
+                                               IndirectDrawMetadata indirectDrawMetadata) {
+        ASSERT(mCurrentEncoder != mTopLevelEncoder);
+        ASSERT(mCurrentEncoder == passEncoder);
+
+        mCurrentEncoder = mTopLevelEncoder;
+
+        if (mDevice->IsValidationEnabled()) {
+            // With validation enabled, commands were committed just before BeginRenderPassCmd was
+            // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
+            // mPendingCommands contains only the commands from BeginRenderPassCmd to
+            // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
+            // the validation encoder a chance to insert its commands first.
+            CommandAllocator renderCommands = std::move(mPendingCommands);
+            DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
+                                                          &indirectDrawMetadata));
+            CommitCommands(std::move(mPendingCommands));
+            CommitCommands(std::move(renderCommands));
+        }
+
+        mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
+        return {};
+    }
+
+    void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
+                                          ComputePassResourceUsage usages) {
+        ASSERT(mCurrentEncoder != mTopLevelEncoder);
+        ASSERT(mCurrentEncoder == passEncoder);
+
+        mCurrentEncoder = mTopLevelEncoder;
+        mComputePassUsages.push_back(std::move(usages));
+    }
+
+    void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
+        if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
+            // The current pass encoder is being deleted. Implicitly end the pass with an error.
+            mCurrentEncoder = mTopLevelEncoder;
+            HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                "Command buffer recording ended before %s was ended.", passEncoder));
+        }
+    }
+
+    const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
+        ASSERT(!mWereRenderPassUsagesAcquired);
+        return mRenderPassUsages;
+    }
+
+    RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
+        ASSERT(!mWereRenderPassUsagesAcquired);
+        mWereRenderPassUsagesAcquired = true;
+        return std::move(mRenderPassUsages);
+    }
+
+    const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
+        ASSERT(!mWereComputePassUsagesAcquired);
+        return mComputePassUsages;
+    }
+
+    ComputePassUsages EncodingContext::AcquireComputePassUsages() {
+        ASSERT(!mWereComputePassUsagesAcquired);
+        mWereComputePassUsagesAcquired = true;
+        return std::move(mComputePassUsages);
+    }
+
+    void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
+        mDebugGroupLabels.emplace_back(groupLabel);
+    }
+
+    void EncodingContext::PopDebugGroupLabel() {
+        mDebugGroupLabels.pop_back();
+    }
+
+    MaybeError EncodingContext::Finish() {
+        DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
+
+        const ApiObjectBase* currentEncoder = mCurrentEncoder;
+        const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
+
+        // Even if finish validation fails, it is now invalid to call any encoding commands,
+        // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
+        // if Finish() has been called.
+        mCurrentEncoder = nullptr;
+        mTopLevelEncoder = nullptr;
+        CommitCommands(std::move(mPendingCommands));
+
+        if (mError != nullptr) {
+            return std::move(mError);
+        }
+        DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
+                        "Command buffer recording ended before %s was ended.", currentEncoder);
+        return {};
+    }
+
+    void EncodingContext::CommitCommands(CommandAllocator allocator) {
+        if (!allocator.IsEmpty()) {
+            mAllocators.push_back(std::move(allocator));
+        }
+    }
+
+    bool EncodingContext::IsFinished() const {
+        return mTopLevelEncoder == nullptr;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/EncodingContext.h b/src/dawn/native/EncodingContext.h
new file mode 100644
index 0000000..659a9a7
--- /dev/null
+++ b/src/dawn/native/EncodingContext.h
@@ -0,0 +1,182 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENCODINGCONTEXT_H_
+#define DAWNNATIVE_ENCODINGCONTEXT_H_
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    class CommandEncoder;
+    class DeviceBase;
+    class ApiObjectBase;
+
+    // Base class for allocating/iterating commands.
+    // It performs error tracking as well as encoding state for render/compute passes.
+    class EncodingContext {
+      public:
+        EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
+        ~EncodingContext();
+
+        // Marks the encoding context as destroyed so that any future encodes will fail, and all
+        // encoded commands are released.
+        void Destroy();
+
+        CommandIterator AcquireCommands();
+        CommandIterator* GetIterator();
+
+        // Functions to handle encoder errors
+        void HandleError(std::unique_ptr<ErrorData> error);
+
+        inline bool ConsumedError(MaybeError maybeError) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                HandleError(maybeError.AcquireError());
+                return true;
+            }
+            return false;
+        }
+
+        template <typename... Args>
+        inline bool ConsumedError(MaybeError maybeError,
+                                  const char* formatStr,
+                                  const Args&... args) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+                if (error->GetType() == InternalErrorType::Validation) {
+                    std::string out;
+                    absl::UntypedFormatSpec format(formatStr);
+                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                        error->AppendContext(std::move(out));
+                    } else {
+                        error->AppendContext(absl::StrFormat(
+                            "[Failed to format error message: \"%s\"].", formatStr));
+                    }
+                }
+                HandleError(std::move(error));
+                return true;
+            }
+            return false;
+        }
+
+        inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
+            if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
+                if (mDestroyed) {
+                    HandleError(
+                        DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
+                } else if (mCurrentEncoder != mTopLevelEncoder) {
+                    // The top level encoder was used when a pass encoder was current.
+                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                        "Command cannot be recorded while %s is active.", mCurrentEncoder));
+                } else {
+                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                        "Recording in an error or already ended %s.", encoder));
+                }
+                return false;
+            }
+            return true;
+        }
+
+        template <typename EncodeFunction>
+        inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
+            if (!CheckCurrentEncoder(encoder)) {
+                return false;
+            }
+            ASSERT(!mWasMovedToIterator);
+            return !ConsumedError(encodeFunction(&mPendingCommands));
+        }
+
+        template <typename EncodeFunction, typename... Args>
+        inline bool TryEncode(const ApiObjectBase* encoder,
+                              EncodeFunction&& encodeFunction,
+                              const char* formatStr,
+                              const Args&... args) {
+            if (!CheckCurrentEncoder(encoder)) {
+                return false;
+            }
+            ASSERT(!mWasMovedToIterator);
+            return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+        }
+
+        // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
+        // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
+        // failed validation before the BeginRenderPassCmd could be encoded.
+        void WillBeginRenderPass();
+
+        // Functions to set current encoder state
+        void EnterPass(const ApiObjectBase* passEncoder);
+        MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
+                                  RenderPassResourceUsageTracker usageTracker,
+                                  CommandEncoder* commandEncoder,
+                                  IndirectDrawMetadata indirectDrawMetadata);
+        void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
+        MaybeError Finish();
+
+        // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
+        // mCurrentEncoder.
+        void EnsurePassExited(const ApiObjectBase* passEncoder);
+
+        const RenderPassUsages& GetRenderPassUsages() const;
+        const ComputePassUsages& GetComputePassUsages() const;
+        RenderPassUsages AcquireRenderPassUsages();
+        ComputePassUsages AcquireComputePassUsages();
+
+        void PushDebugGroupLabel(const char* groupLabel);
+        void PopDebugGroupLabel();
+
+      private:
+        void CommitCommands(CommandAllocator allocator);
+
+        bool IsFinished() const;
+        void MoveToIterator();
+
+        DeviceBase* mDevice;
+
+        // There can only be two levels of encoders. Top-level and render/compute pass.
+        // The top level encoder is the encoder the EncodingContext is created with.
+        // It doubles as flag to check if encoding has been Finished.
+        const ApiObjectBase* mTopLevelEncoder;
+        // The current encoder must be the same as the encoder provided to TryEncode,
+        // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
+        // The current encoder changes with Enter/ExitPass which should be called by
+        // CommandEncoder::Begin/EndPass.
+        const ApiObjectBase* mCurrentEncoder;
+
+        RenderPassUsages mRenderPassUsages;
+        bool mWereRenderPassUsagesAcquired = false;
+        ComputePassUsages mComputePassUsages;
+        bool mWereComputePassUsagesAcquired = false;
+
+        CommandAllocator mPendingCommands;
+
+        std::vector<CommandAllocator> mAllocators;
+        CommandIterator mIterator;
+        bool mWasMovedToIterator = false;
+        bool mWereCommandsAcquired = false;
+        bool mDestroyed = false;
+
+        std::unique_ptr<ErrorData> mError;
+        std::vector<std::string> mDebugGroupLabels;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ENCODINGCONTEXT_H_
diff --git a/src/dawn/native/EnumClassBitmasks.h b/src/dawn/native/EnumClassBitmasks.h
new file mode 100644
index 0000000..671db23
--- /dev/null
+++ b/src/dawn/native/EnumClassBitmasks.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMCLASSBITMASK_H_
+#define DAWNNATIVE_ENUMCLASSBITMASK_H_
+
+#include "dawn/EnumClassBitmasks.h"
+
+namespace dawn::native {
+
+    // EnumClassBitmmasks is a helper in the dawn:: namespace.
+    // Re-export it in the dawn_native namespace.
+    DAWN_IMPORT_BITMASK_OPERATORS
+
+    // Specify this for usage with EnumMaskIterator
+    template <typename T>
+    struct EnumBitmaskSize {
+        static constexpr unsigned value = 0;
+    };
+
+    template <typename T>
+    constexpr bool HasOneBit(T value) {
+        return HasZeroOrOneBits(value) && value != T(0);
+    }
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ENUMCLASSBITMASK_H_
diff --git a/src/dawn/native/EnumMaskIterator.h b/src/dawn/native/EnumMaskIterator.h
new file mode 100644
index 0000000..6653ef4
--- /dev/null
+++ b/src/dawn/native/EnumMaskIterator.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMMASKITERATOR_H_
+#define DAWNNATIVE_ENUMMASKITERATOR_H_
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/EnumClassBitmasks.h"
+
+namespace dawn::native {
+
+    template <typename T>
+    class EnumMaskIterator final {
+        static constexpr size_t N = EnumBitmaskSize<T>::value;
+        static_assert(N > 0);
+
+        using U = std::underlying_type_t<T>;
+
+      public:
+        EnumMaskIterator(const T& mask) : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
+            // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
+            ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
+        }
+
+        class Iterator final {
+          public:
+            Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
+            }
+
+            Iterator& operator++() {
+                ++mIter;
+                return *this;
+            }
+
+            bool operator==(const Iterator& other) const {
+                return mIter == other.mIter;
+            }
+
+            bool operator!=(const Iterator& other) const {
+                return mIter != other.mIter;
+            }
+
+            T operator*() const {
+                U value = *mIter;
+                return static_cast<T>(U(1) << value);
+            }
+
+          private:
+            typename BitSetIterator<N, U>::Iterator mIter;
+        };
+
+        Iterator begin() const {
+            return Iterator(mBitSetIterator.begin());
+        }
+
+        Iterator end() const {
+            return Iterator(mBitSetIterator.end());
+        }
+
+      private:
+        BitSetIterator<N, U> mBitSetIterator;
+    };
+
+    template <typename T>
+    EnumMaskIterator<T> IterateEnumMask(const T& mask) {
+        return EnumMaskIterator<T>(mask);
+    }
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ENUMMASKITERATOR_H_
diff --git a/src/dawn/native/Error.cpp b/src/dawn/native/Error.cpp
new file mode 100644
index 0000000..d524a32
--- /dev/null
+++ b/src/dawn/native/Error.cpp
@@ -0,0 +1,64 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Error.h"
+
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    void IgnoreErrors(MaybeError maybeError) {
+        if (maybeError.IsError()) {
+            std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
+            // During shutdown and destruction, device lost errors can be ignored.
+            // We can also ignore other unexpected internal errors on shut down and treat it as
+            // device lost so that we can continue with destruction.
+            ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
+                   errorData->GetType() == InternalErrorType::Internal);
+        }
+    }
+
+    wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
+        switch (type) {
+            case InternalErrorType::Validation:
+                return wgpu::ErrorType::Validation;
+            case InternalErrorType::OutOfMemory:
+                return wgpu::ErrorType::OutOfMemory;
+
+            // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
+            // the device at the API level to be lost, so treat it like a DeviceLost error.
+            case InternalErrorType::Internal:
+            case InternalErrorType::DeviceLost:
+                return wgpu::ErrorType::DeviceLost;
+
+            default:
+                return wgpu::ErrorType::Unknown;
+        }
+    }
+
+    InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
+        switch (type) {
+            case wgpu::ErrorType::Validation:
+                return InternalErrorType::Validation;
+            case wgpu::ErrorType::OutOfMemory:
+                return InternalErrorType::OutOfMemory;
+            case wgpu::ErrorType::DeviceLost:
+                return InternalErrorType::DeviceLost;
+            default:
+                return InternalErrorType::Internal;
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Error.h b/src/dawn/native/Error.h
new file mode 100644
index 0000000..64c481f
--- /dev/null
+++ b/src/dawn/native/Error.h
@@ -0,0 +1,194 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERROR_H_
+#define DAWNNATIVE_ERROR_H_
+
+#include "absl/strings/str_format.h"
+#include "dawn/common/Result.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/webgpu_absl_format.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
+
+    // MaybeError and ResultOrError are meant to be used as return value for function that are not
+    // expected to, but might fail. The handling of error is potentially much slower than successes.
+    using MaybeError = Result<void, ErrorData>;
+
+    template <typename T>
+    using ResultOrError = Result<T, ErrorData>;
+
+    // Returning a success is done like so:
+    //   return {}; // for Error
+    //   return SomethingOfTypeT; // for ResultOrError<T>
+    //
+    // Returning an error is done via:
+    //   return DAWN_MAKE_ERROR(errorType, "My error message");
+    //
+    // but shorthand version for specific error types are preferred:
+    //   return DAWN_VALIDATION_ERROR("My error message");
+    //
+    // There are different types of errors that should be used for different purpose:
+    //
+    //   - Validation: these are errors that show the user did something bad, which causes the
+    //     whole call to be a no-op. It's most commonly found in the frontend but there can be some
+    //     backend specific validation in non-conformant backends too.
+    //
+    //   - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
+    //     This is similar to validation errors in that the call becomes a no-op and returns an
+    //     error object, but is reported separated from validation to the user.
+    //
+    //   - Device loss: the backend driver reported that the GPU has been lost, which means all
+    //     previous commands magically disappeared and the only thing left to do is clean up.
+    //     Note: Device loss should be used rarely and in most case you want to use Internal
+    //     instead.
+    //
+    //   - Internal: something happened that the backend didn't expect, and it doesn't know
+    //     how to recover from that situation. This causes the device to be lost, but is separate
+    //     from device loss, because the GPU execution is still happening so we need to clean up
+    //     more gracefully.
+    //
+    //   - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
+    //     more clarity.
+
+#define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
+    ::dawn::native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
+
+#define DAWN_VALIDATION_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Validation, MESSAGE)
+
+// TODO(dawn:563): Rename to DAWN_VALIDATION_ERROR once all message format strings have been
+// converted to constexpr.
+#define DAWN_FORMAT_VALIDATION_ERROR(...) \
+    DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__))
+
+#define DAWN_INVALID_IF(EXPR, ...)                                                           \
+    if (DAWN_UNLIKELY(EXPR)) {                                                               \
+        return DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__)); \
+    }                                                                                        \
+    for (;;)                                                                                 \
+    break
+
+// DAWN_DEVICE_LOST_ERROR means that there was a real unrecoverable native device lost error.
+// We can't even do a graceful shutdown because the Device is gone.
+#define DAWN_DEVICE_LOST_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::DeviceLost, MESSAGE)
+
+// DAWN_INTERNAL_ERROR means Dawn hit an unexpected error in the backend and should try to
+// gracefully shut down.
+#define DAWN_INTERNAL_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Internal, MESSAGE)
+
+#define DAWN_FORMAT_INTERNAL_ERROR(...) \
+    DAWN_MAKE_ERROR(InternalErrorType::Internal, absl::StrFormat(__VA_ARGS__))
+
+#define DAWN_UNIMPLEMENTED_ERROR(MESSAGE) \
+    DAWN_MAKE_ERROR(InternalErrorType::Internal, std::string("Unimplemented: ") + MESSAGE)
+
+// DAWN_OUT_OF_MEMORY_ERROR means we ran out of memory. It may be used as a signal internally in
+// Dawn to free up unused resources. Or, it may bubble up to the application to signal an allocation
+// was too large or they should free some existing resources.
+#define DAWN_OUT_OF_MEMORY_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::OutOfMemory, MESSAGE)
+
+#define DAWN_CONCAT1(x, y) x##y
+#define DAWN_CONCAT2(x, y) DAWN_CONCAT1(x, y)
+#define DAWN_LOCAL_VAR DAWN_CONCAT2(_localVar, __LINE__)
+
+    // When Errors aren't handled explicitly, calls to functions returning errors should be
+    // wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
+    // the current function.
+#define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
+
+#define DAWN_TRY_CONTEXT(EXPR, ...) \
+    DAWN_TRY_WITH_CLEANUP(EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
+
+#define DAWN_TRY_WITH_CLEANUP(EXPR, BODY)                                                     \
+    {                                                                                         \
+        auto DAWN_LOCAL_VAR = EXPR;                                                           \
+        if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) {                                        \
+            std::unique_ptr<::dawn::native::ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+            {BODY} /* comment to force the formatter to insert a newline */                   \
+            error->AppendBacktrace(__FILE__, __func__, __LINE__);                             \
+            return {std::move(error)};                                                        \
+        }                                                                                     \
+    }                                                                                         \
+    for (;;)                                                                                  \
+    break
+
+    // DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
+    // any, to VAR.
+#define DAWN_TRY_ASSIGN(VAR, EXPR) DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {})
+#define DAWN_TRY_ASSIGN_CONTEXT(VAR, EXPR, ...) \
+    DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
+
+    // Argument helpers are used to determine which macro implementations should be called when
+    // overloading with different number of variables.
+#define DAWN_ERROR_UNIMPLEMENTED_MACRO_(...) UNREACHABLE()
+#define DAWN_ERROR_GET_5TH_ARG_HELPER_(_1, _2, _3, _4, NAME, ...) NAME
+#define DAWN_ERROR_GET_5TH_ARG_(args) DAWN_ERROR_GET_5TH_ARG_HELPER_ args
+
+    // DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 version so that users can override the
+    // return value of the macro when necessary. This is particularly useful if the function
+    // calling the macro may want to return void instead of the error, i.e. in a test where we may
+    // just want to assert and fail if the assign cannot go through. In both the cleanup and return
+    // clauses, users can use the `error` variable to access the pointer to the acquired error.
+    //
+    // Example usages:
+    //     3 Argument Case:
+    //          Result res;
+    //          DAWN_TRY_ASSIGN_WITH_CLEANUP(
+    //              res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
+    //          );
+    //
+    //     4 Argument Case:
+    //          bool FunctionThatReturnsBool() {
+    //              DAWN_TRY_ASSIGN_WITH_CLEANUP(
+    //                  res, GetResultOrErrorFunction(),
+    //                  { AddAdditionalErrorInformation(error.get()); },
+    //                  false
+    //              );
+    //          }
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP(...)                                       \
+    DAWN_ERROR_GET_5TH_ARG_((__VA_ARGS__, DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_, \
+                             DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_,              \
+                             DAWN_ERROR_UNIMPLEMENTED_MACRO_))                  \
+    (__VA_ARGS__)
+
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_(VAR, EXPR, BODY) \
+    DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, std::move(error))
+
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, RET)            \
+    {                                                                         \
+        auto DAWN_LOCAL_VAR = EXPR;                                           \
+        if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) {                        \
+            std::unique_ptr<ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+            {BODY} /* comment to force the formatter to insert a newline */   \
+            error->AppendBacktrace(__FILE__, __func__, __LINE__);             \
+            return (RET);                                                     \
+        }                                                                     \
+        VAR = DAWN_LOCAL_VAR.AcquireSuccess();                                \
+    }                                                                         \
+    for (;;)                                                                  \
+    break
+
+    // Assert that errors are device loss so that we can continue with destruction
+    void IgnoreErrors(MaybeError maybeError);
+
+    wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
+    InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ERROR_H_
diff --git a/src/dawn/native/ErrorData.cpp b/src/dawn/native/ErrorData.cpp
new file mode 100644
index 0000000..863d20f
--- /dev/null
+++ b/src/dawn/native/ErrorData.cpp
@@ -0,0 +1,103 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorData.h"
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
+                                                 std::string message,
+                                                 const char* file,
+                                                 const char* function,
+                                                 int line) {
+        std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
+        error->AppendBacktrace(file, function, line);
+        return error;
+    }
+
+    ErrorData::ErrorData(InternalErrorType type, std::string message)
+        : mType(type), mMessage(std::move(message)) {
+    }
+
+    void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
+        BacktraceRecord record;
+        record.file = file;
+        record.function = function;
+        record.line = line;
+
+        mBacktrace.push_back(std::move(record));
+    }
+
+    void ErrorData::AppendContext(std::string context) {
+        mContexts.push_back(std::move(context));
+    }
+
+    void ErrorData::AppendDebugGroup(std::string label) {
+        mDebugGroups.push_back(std::move(label));
+    }
+
+    InternalErrorType ErrorData::GetType() const {
+        return mType;
+    }
+
+    const std::string& ErrorData::GetMessage() const {
+        return mMessage;
+    }
+
+    const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
+        return mBacktrace;
+    }
+
+    const std::vector<std::string>& ErrorData::GetContexts() const {
+        return mContexts;
+    }
+
+    const std::vector<std::string>& ErrorData::GetDebugGroups() const {
+        return mDebugGroups;
+    }
+
+    std::string ErrorData::GetFormattedMessage() const {
+        std::ostringstream ss;
+        ss << mMessage << "\n";
+
+        if (!mContexts.empty()) {
+            for (auto context : mContexts) {
+                ss << " - While " << context << "\n";
+            }
+        }
+
+        // For non-validation errors, or erros that lack a context include the
+        // stack trace for debugging purposes.
+        if (mContexts.empty() || mType != InternalErrorType::Validation) {
+            for (const auto& callsite : mBacktrace) {
+                ss << "    at " << callsite.function << " (" << callsite.file << ":"
+                   << callsite.line << ")\n";
+            }
+        }
+
+        if (!mDebugGroups.empty()) {
+            ss << "\nDebug group stack:\n";
+            for (auto label : mDebugGroups) {
+                ss << " > \"" << label << "\"\n";
+            }
+        }
+
+        return ss.str();
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ErrorData.h b/src/dawn/native/ErrorData.h
new file mode 100644
index 0000000..901c54f
--- /dev/null
+++ b/src/dawn/native/ErrorData.h
@@ -0,0 +1,70 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORDATA_H_
+#define DAWNNATIVE_ERRORDATA_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace wgpu {
+    enum class ErrorType : uint32_t;
+}
+
+namespace dawn {
+    using ErrorType = wgpu::ErrorType;
+}
+
+namespace dawn::native {
+    enum class InternalErrorType : uint32_t;
+
+    class [[nodiscard]] ErrorData {
+      public:
+        [[nodiscard]] static std::unique_ptr<ErrorData> Create(
+            InternalErrorType type, std::string message, const char* file, const char* function,
+            int line);
+        ErrorData(InternalErrorType type, std::string message);
+
+        struct BacktraceRecord {
+            const char* file;
+            const char* function;
+            int line;
+        };
+        void AppendBacktrace(const char* file, const char* function, int line);
+        void AppendContext(std::string context);
+        void AppendDebugGroup(std::string label);
+
+        InternalErrorType GetType() const;
+        const std::string& GetMessage() const;
+        const std::vector<BacktraceRecord>& GetBacktrace() const;
+        const std::vector<std::string>& GetContexts() const;
+        const std::vector<std::string>& GetDebugGroups() const;
+
+        std::string GetFormattedMessage() const;
+
+      private:
+        InternalErrorType mType;
+        std::string mMessage;
+        std::vector<BacktraceRecord> mBacktrace;
+        std::vector<std::string> mContexts;
+        std::vector<std::string> mDebugGroups;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ERRORDATA_H_
diff --git a/src/dawn/native/ErrorInjector.cpp b/src/dawn/native/ErrorInjector.cpp
new file mode 100644
index 0000000..af87498
--- /dev/null
+++ b/src/dawn/native/ErrorInjector.cpp
@@ -0,0 +1,70 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorInjector.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/DawnNative.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        bool sIsEnabled = false;
+        uint64_t sNextIndex = 0;
+        uint64_t sInjectedFailureIndex = 0;
+        bool sHasPendingInjectedError = false;
+
+    }  // anonymous namespace
+
+    void EnableErrorInjector() {
+        sIsEnabled = true;
+    }
+
+    void DisableErrorInjector() {
+        sIsEnabled = false;
+    }
+
+    void ClearErrorInjector() {
+        sNextIndex = 0;
+        sHasPendingInjectedError = false;
+    }
+
+    bool ErrorInjectorEnabled() {
+        return sIsEnabled;
+    }
+
+    uint64_t AcquireErrorInjectorCallCount() {
+        uint64_t count = sNextIndex;
+        ClearErrorInjector();
+        return count;
+    }
+
+    bool ShouldInjectError() {
+        uint64_t index = sNextIndex++;
+        if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
+            sHasPendingInjectedError = false;
+            return true;
+        }
+        return false;
+    }
+
+    void InjectErrorAt(uint64_t index) {
+        // Only one error can be injected at a time.
+        ASSERT(!sHasPendingInjectedError);
+        sInjectedFailureIndex = index;
+        sHasPendingInjectedError = true;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ErrorInjector.h b/src/dawn/native/ErrorInjector.h
new file mode 100644
index 0000000..ab41886
--- /dev/null
+++ b/src/dawn/native/ErrorInjector.h
@@ -0,0 +1,68 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORINJECTOR_H_
+#define DAWNNATIVE_ERRORINJECTOR_H_
+
+#include <stdint.h>
+#include <type_traits>
+
+namespace dawn::native {
+
+    template <typename ErrorType>
+    struct InjectedErrorResult {
+        ErrorType error;
+        bool injected;
+    };
+
+    bool ErrorInjectorEnabled();
+
+    bool ShouldInjectError();
+
+    template <typename ErrorType>
+    InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
+        return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+    }
+
+    template <typename ErrorType, typename... ErrorTypes>
+    InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
+        if (ShouldInjectError()) {
+            return InjectedErrorResult<ErrorType>{errorType, true};
+        }
+        return MaybeInjectError(errorTypes...);
+    }
+
+}  // namespace dawn::native
+
+#if defined(DAWN_ENABLE_ERROR_INJECTION)
+
+#    define INJECT_ERROR_OR_RUN(stmt, ...)                                                   \
+        [&]() {                                                                              \
+            if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) {                     \
+                /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
+                auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__);          \
+                if (injectedError.injected) {                                                \
+                    return injectedError.error;                                              \
+                }                                                                            \
+            }                                                                                \
+            return (stmt);                                                                   \
+        }()
+
+#else
+
+#    define INJECT_ERROR_OR_RUN(stmt, ...) stmt
+
+#endif
+
+#endif  // DAWNNATIVE_ERRORINJECTOR_H_
diff --git a/src/dawn/native/ErrorScope.cpp b/src/dawn/native/ErrorScope.cpp
new file mode 100644
index 0000000..06b7a95
--- /dev/null
+++ b/src/dawn/native/ErrorScope.cpp
@@ -0,0 +1,92 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorScope.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
+            switch (filter) {
+                case wgpu::ErrorFilter::Validation:
+                    return wgpu::ErrorType::Validation;
+                case wgpu::ErrorFilter::OutOfMemory:
+                    return wgpu::ErrorType::OutOfMemory;
+            }
+            UNREACHABLE();
+        }
+
+    }  // namespace
+
+    ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+        : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
+    }
+
+    wgpu::ErrorType ErrorScope::GetErrorType() const {
+        return mCapturedError;
+    }
+
+    const char* ErrorScope::GetErrorMessage() const {
+        return mErrorMessage.c_str();
+    }
+
+    void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
+        mScopes.push_back(ErrorScope(filter));
+    }
+
+    ErrorScope ErrorScopeStack::Pop() {
+        ASSERT(!mScopes.empty());
+        ErrorScope scope = std::move(mScopes.back());
+        mScopes.pop_back();
+        return scope;
+    }
+
+    bool ErrorScopeStack::Empty() const {
+        return mScopes.empty();
+    }
+
+    bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
+        for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
+            if (it->mMatchedErrorType != type) {
+                // Error filter does not match. Move on to the next scope.
+                continue;
+            }
+
+            // Filter matches.
+            // Record the error if the scope doesn't have one yet.
+            if (it->mCapturedError == wgpu::ErrorType::NoError) {
+                it->mCapturedError = type;
+                it->mErrorMessage = message;
+            }
+
+            if (type == wgpu::ErrorType::DeviceLost) {
+                if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
+                    // DeviceLost overrides any other error that is not a DeviceLost.
+                    it->mCapturedError = type;
+                    it->mErrorMessage = message;
+                }
+            } else {
+                // Errors that are not device lost are captured and stop propogating.
+                return true;
+            }
+        }
+
+        // The error was not captured.
+        return false;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ErrorScope.h b/src/dawn/native/ErrorScope.h
new file mode 100644
index 0000000..766a81e
--- /dev/null
+++ b/src/dawn/native/ErrorScope.h
@@ -0,0 +1,57 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORSCOPE_H_
+#define DAWNNATIVE_ERRORSCOPE_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+#include <vector>
+
+namespace dawn::native {
+
+    class ErrorScope {
+      public:
+        wgpu::ErrorType GetErrorType() const;
+        const char* GetErrorMessage() const;
+
+      private:
+        friend class ErrorScopeStack;
+        explicit ErrorScope(wgpu::ErrorFilter errorFilter);
+
+        wgpu::ErrorType mMatchedErrorType;
+        wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
+        std::string mErrorMessage = "";
+    };
+
+    class ErrorScopeStack {
+      public:
+        void Push(wgpu::ErrorFilter errorFilter);
+        ErrorScope Pop();
+
+        bool Empty() const;
+
+        // Pass an error to the scopes in the stack. Returns true if one of the scopes
+        // captured the error. Returns false if the error should be forwarded to the
+        // uncaptured error callback.
+        bool HandleError(wgpu::ErrorType type, const char* message);
+
+      private:
+        std::vector<ErrorScope> mScopes;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_ERRORSCOPE_H_
diff --git a/src/dawn/native/ExternalTexture.cpp b/src/dawn/native/ExternalTexture.cpp
new file mode 100644
index 0000000..1570825
--- /dev/null
+++ b/src/dawn/native/ExternalTexture.cpp
@@ -0,0 +1,212 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ExternalTexture.h"
+
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
+        DAWN_INVALID_IF(
+            (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
+            "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
+            textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
+
+        DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
+                        "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
+                        textureView->GetDimension());
+
+        DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
+                        "The external texture plane (%s) mip level count (%u) is not 1.",
+                        textureView, textureView->GetLevelCount());
+
+        DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
+                        "The external texture plane (%s) sample count (%u) is not one.",
+                        textureView, textureView->GetTexture()->GetSampleCount());
+
+        return {};
+    }
+
+    MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+                                                 const ExternalTextureDescriptor* descriptor) {
+        ASSERT(descriptor);
+        ASSERT(descriptor->plane0);
+
+        DAWN_TRY(device->ValidateObject(descriptor->plane0));
+
+        wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
+
+        if (descriptor->plane1) {
+            DAWN_INVALID_IF(
+                device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                "Bi-planar external textures are disabled until the implementation is completed.");
+
+            DAWN_INVALID_IF(descriptor->colorSpace != wgpu::PredefinedColorSpace::Srgb,
+                            "The specified color space (%s) is not %s.", descriptor->colorSpace,
+                            wgpu::PredefinedColorSpace::Srgb);
+
+            DAWN_TRY(device->ValidateObject(descriptor->plane1));
+            wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
+
+            DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
+                            "The bi-planar external texture plane (%s) format (%s) is not %s.",
+                            descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
+            DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
+                            "The bi-planar external texture plane (%s) format (%s) is not %s.",
+                            descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
+
+            DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+            DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
+        } else {
+            switch (plane0Format) {
+                case wgpu::TextureFormat::RGBA8Unorm:
+                case wgpu::TextureFormat::BGRA8Unorm:
+                case wgpu::TextureFormat::RGBA16Float:
+                    DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+                    break;
+                default:
+                    return DAWN_FORMAT_VALIDATION_ERROR(
+                        "The external texture plane (%s) format (%s) is not a supported format "
+                        "(%s, %s, %s).",
+                        descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
+            }
+        }
+
+        return {};
+    }
+
+    // static
+    ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
+        DeviceBase* device,
+        const ExternalTextureDescriptor* descriptor) {
+        Ref<ExternalTextureBase> externalTexture =
+            AcquireRef(new ExternalTextureBase(device, descriptor));
+        DAWN_TRY(externalTexture->Initialize(device, descriptor));
+        return std::move(externalTexture);
+    }
+
+    ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
+                                             const ExternalTextureDescriptor* descriptor)
+        : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
+        TrackInDevice();
+    }
+
+    ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
+        TrackInDevice();
+    }
+
+    ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    ExternalTextureBase::~ExternalTextureBase() = default;
+
+    MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
+                                               const ExternalTextureDescriptor* descriptor) {
+        // Store any passed in TextureViews associated with individual planes.
+        mTextureViews[0] = descriptor->plane0;
+
+        if (descriptor->plane1) {
+            mTextureViews[1] = descriptor->plane1;
+        } else {
+            DAWN_TRY_ASSIGN(mTextureViews[1],
+                            device->GetOrCreateDummyTextureViewForExternalTexture());
+        }
+
+        // We must create a buffer to store parameters needed by a shader that operates on this
+        // external texture.
+        BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(ExternalTextureParams);
+        bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+        bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
+
+        DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
+
+        // Dawn & Tint's YUV to RGB conversion implementation was inspired by the conversions found
+        // in libYUV. If this implementation needs expanded to support more colorspaces, this file
+        // is an excellent reference: chromium/src/third_party/libyuv/source/row_common.cc.
+        //
+        // The conversion from YUV to RGB looks like this:
+        // r = Y * 1.164          + V * vr
+        // g = Y * 1.164 - U * ug - V * vg
+        // b = Y * 1.164 + U * ub
+        //
+        // By changing the values of vr, vg, ub, and ug we can change the destination color space.
+        ExternalTextureParams params;
+        params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
+
+        switch (descriptor->colorSpace) {
+            case wgpu::PredefinedColorSpace::Srgb:
+                // Numbers derived from ITU-R recommendation for limited range BT.709
+                params.vr = 1.793;
+                params.vg = 0.392;
+                params.ub = 0.813;
+                params.ug = 2.017;
+                break;
+            case wgpu::PredefinedColorSpace::Undefined:
+                break;
+        }
+
+        DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
+                                                 sizeof(ExternalTextureParams)));
+
+        return {};
+    }
+
+    const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
+    ExternalTextureBase::GetTextureViews() const {
+        return mTextureViews;
+    }
+
+    MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
+        ASSERT(!IsError());
+        DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
+                        "Destroyed external texture %s is used in a submit.", this);
+        return {};
+    }
+
+    void ExternalTextureBase::APIDestroy() {
+        if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
+            return;
+        }
+        Destroy();
+    }
+
+    void ExternalTextureBase::DestroyImpl() {
+        mState = ExternalTextureState::Destroyed;
+    }
+
+    // static
+    ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
+        return new ExternalTextureBase(device, ObjectBase::kError);
+    }
+
+    BufferBase* ExternalTextureBase::GetParamsBuffer() const {
+        return mParamsBuffer.Get();
+    }
+
+    ObjectType ExternalTextureBase::GetType() const {
+        return ObjectType::ExternalTexture;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ExternalTexture.h b/src/dawn/native/ExternalTexture.h
new file mode 100644
index 0000000..e32b631
--- /dev/null
+++ b/src/dawn/native/ExternalTexture.h
@@ -0,0 +1,77 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_EXTERNALTEXTURE_H_
+#define DAWNNATIVE_EXTERNALTEXTURE_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    class TextureViewBase;
+
+    struct ExternalTextureParams {
+        uint32_t numPlanes;
+        float vr;
+        float vg;
+        float ub;
+        float ug;
+    };
+
+    MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+                                                 const ExternalTextureDescriptor* descriptor);
+
+    class ExternalTextureBase : public ApiObjectBase {
+      public:
+        static ResultOrError<Ref<ExternalTextureBase>> Create(
+            DeviceBase* device,
+            const ExternalTextureDescriptor* descriptor);
+
+        BufferBase* GetParamsBuffer() const;
+        const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
+        ObjectType GetType() const override;
+
+        MaybeError ValidateCanUseInSubmitNow() const;
+        static ExternalTextureBase* MakeError(DeviceBase* device);
+
+        void APIDestroy();
+
+      protected:
+        // Constructor used only for mocking and testing.
+        ExternalTextureBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        ~ExternalTextureBase() override;
+
+      private:
+        ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+
+        enum class ExternalTextureState { Alive, Destroyed };
+        ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+        MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+
+        Ref<TextureBase> mDummyTexture;
+        Ref<BufferBase> mParamsBuffer;
+        std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
+
+        ExternalTextureState mState;
+    };
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_EXTERNALTEXTURE_H_
diff --git a/src/dawn/native/Features.cpp b/src/dawn/native/Features.cpp
new file mode 100644
index 0000000..56a532c
--- /dev/null
+++ b/src/dawn/native/Features.cpp
@@ -0,0 +1,277 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Features.h"
+
+namespace dawn::native {
+    namespace {
+
+        struct FeatureEnumAndInfo {
+            Feature feature;
+            FeatureInfo info;
+            bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
+        };
+
+        using FeatureEnumAndInfoList =
+            std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
+
+        static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
+            {{Feature::TextureCompressionBC,
+              {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+              &WGPUDeviceProperties::textureCompressionBC},
+             {Feature::TextureCompressionETC2,
+              {"texture-compression-etc2",
+               "Support Ericsson Texture Compressed (ETC2/EAC) texture "
+               "formats",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+              &WGPUDeviceProperties::textureCompressionETC2},
+             {Feature::TextureCompressionASTC,
+              {"texture-compression-astc",
+               "Support Adaptable Scalable Texture Compressed (ASTC) "
+               "texture formats",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+              &WGPUDeviceProperties::textureCompressionASTC},
+             {Feature::ShaderFloat16,
+              {"shader-float16",
+               "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+              &WGPUDeviceProperties::shaderFloat16},
+             {Feature::PipelineStatisticsQuery,
+              {"pipeline-statistics-query", "Support Pipeline Statistics Query",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+              &WGPUDeviceProperties::pipelineStatisticsQuery},
+             {Feature::TimestampQuery,
+              {"timestamp-query", "Support Timestamp Query",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+              &WGPUDeviceProperties::timestampQuery},
+             {Feature::DepthClamping,
+              {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+              &WGPUDeviceProperties::depthClamping},
+             {Feature::Depth24UnormStencil8,
+              {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+              &WGPUDeviceProperties::depth24UnormStencil8},
+             {Feature::Depth32FloatStencil8,
+              {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+              &WGPUDeviceProperties::depth32FloatStencil8},
+             {Feature::DawnInternalUsages,
+              {"dawn-internal-usages",
+               "Add internal usages to resources to affect how the texture is allocated, but not "
+               "frontend validation. Other internal commands may access this usage.",
+               "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+               "dawn_internal_usages.md"},
+              &WGPUDeviceProperties::dawnInternalUsages},
+             {Feature::MultiPlanarFormats,
+              {"multiplanar-formats",
+               "Import and use multi-planar texture formats with per plane views",
+               "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
+              &WGPUDeviceProperties::multiPlanarFormats},
+             {Feature::DawnNative,
+              {"dawn-native", "WebGPU is running on top of dawn_native.",
+               "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+               "dawn_native.md"},
+              &WGPUDeviceProperties::dawnNative}}};
+
+        Feature FromAPIFeature(wgpu::FeatureName feature) {
+            switch (feature) {
+                case wgpu::FeatureName::Undefined:
+                    return Feature::InvalidEnum;
+
+                case wgpu::FeatureName::TimestampQuery:
+                    return Feature::TimestampQuery;
+                case wgpu::FeatureName::PipelineStatisticsQuery:
+                    return Feature::PipelineStatisticsQuery;
+                case wgpu::FeatureName::TextureCompressionBC:
+                    return Feature::TextureCompressionBC;
+                case wgpu::FeatureName::TextureCompressionETC2:
+                    return Feature::TextureCompressionETC2;
+                case wgpu::FeatureName::TextureCompressionASTC:
+                    return Feature::TextureCompressionASTC;
+                case wgpu::FeatureName::DepthClamping:
+                    return Feature::DepthClamping;
+                case wgpu::FeatureName::Depth24UnormStencil8:
+                    return Feature::Depth24UnormStencil8;
+                case wgpu::FeatureName::Depth32FloatStencil8:
+                    return Feature::Depth32FloatStencil8;
+                case wgpu::FeatureName::DawnShaderFloat16:
+                    return Feature::ShaderFloat16;
+                case wgpu::FeatureName::DawnInternalUsages:
+                    return Feature::DawnInternalUsages;
+                case wgpu::FeatureName::DawnMultiPlanarFormats:
+                    return Feature::MultiPlanarFormats;
+                case wgpu::FeatureName::DawnNative:
+                    return Feature::DawnNative;
+
+                case wgpu::FeatureName::IndirectFirstInstance:
+                    return Feature::InvalidEnum;
+            }
+            return Feature::InvalidEnum;
+        }
+
+        wgpu::FeatureName ToAPIFeature(Feature feature) {
+            switch (feature) {
+                case Feature::TextureCompressionBC:
+                    return wgpu::FeatureName::TextureCompressionBC;
+                case Feature::TextureCompressionETC2:
+                    return wgpu::FeatureName::TextureCompressionETC2;
+                case Feature::TextureCompressionASTC:
+                    return wgpu::FeatureName::TextureCompressionASTC;
+                case Feature::PipelineStatisticsQuery:
+                    return wgpu::FeatureName::PipelineStatisticsQuery;
+                case Feature::TimestampQuery:
+                    return wgpu::FeatureName::TimestampQuery;
+                case Feature::DepthClamping:
+                    return wgpu::FeatureName::DepthClamping;
+                case Feature::Depth24UnormStencil8:
+                    return wgpu::FeatureName::Depth24UnormStencil8;
+                case Feature::Depth32FloatStencil8:
+                    return wgpu::FeatureName::Depth32FloatStencil8;
+                case Feature::ShaderFloat16:
+                    return wgpu::FeatureName::DawnShaderFloat16;
+                case Feature::DawnInternalUsages:
+                    return wgpu::FeatureName::DawnInternalUsages;
+                case Feature::MultiPlanarFormats:
+                    return wgpu::FeatureName::DawnMultiPlanarFormats;
+                case Feature::DawnNative:
+                    return wgpu::FeatureName::DawnNative;
+
+                case Feature::EnumCount:
+                    UNREACHABLE();
+            }
+        }
+
+    }  // anonymous namespace
+
+    void FeaturesSet::EnableFeature(Feature feature) {
+        ASSERT(feature != Feature::InvalidEnum);
+        const size_t featureIndex = static_cast<size_t>(feature);
+        featuresBitSet.set(featureIndex);
+    }
+
+    void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
+        EnableFeature(FromAPIFeature(feature));
+    }
+
+    bool FeaturesSet::IsEnabled(Feature feature) const {
+        ASSERT(feature != Feature::InvalidEnum);
+        const size_t featureIndex = static_cast<size_t>(feature);
+        return featuresBitSet[featureIndex];
+    }
+
+    bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
+        Feature f = FromAPIFeature(feature);
+        return f != Feature::InvalidEnum && IsEnabled(f);
+    }
+
+    size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
+        for (uint32_t i : IterateBitSet(featuresBitSet)) {
+            wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
+            if (features != nullptr) {
+                *features = feature;
+                features += 1;
+            }
+        }
+        return featuresBitSet.count();
+    }
+
+    std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
+        std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
+
+        uint32_t index = 0;
+        for (uint32_t i : IterateBitSet(featuresBitSet)) {
+            Feature feature = static_cast<Feature>(i);
+            ASSERT(feature != Feature::InvalidEnum);
+
+            const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
+            ASSERT(featureNameAndInfo.feature == feature);
+
+            enabledFeatureNames[index] = featureNameAndInfo.info.name;
+            ++index;
+        }
+        return enabledFeatureNames;
+    }
+
+    void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+        ASSERT(properties != nullptr);
+
+        for (uint32_t i : IterateBitSet(featuresBitSet)) {
+            properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+        }
+    }
+
+    wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
+        ASSERT(feature != Feature::InvalidEnum);
+        return ToAPIFeature(feature);
+    }
+
+    FeaturesInfo::FeaturesInfo() {
+        for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
+            const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
+            ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
+            mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
+        }
+    }
+
+    const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
+        Feature f = FromAPIFeature(feature);
+        if (f == Feature::InvalidEnum) {
+            return nullptr;
+        }
+        return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
+    }
+
+    Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
+        ASSERT(featureName);
+
+        const auto& iter = mFeatureNameToEnumMap.find(featureName);
+        if (iter != mFeatureNameToEnumMap.cend()) {
+            return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
+        }
+
+        // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
+        constexpr std::array<std::pair<const char*, const char*>, 6>
+            kReplacementsForDeprecatedNames = {{
+                {"texture_compression_bc", "texture-compression-bc"},
+                {"depth_clamping", "depth-clamping"},
+                {"pipeline_statistics_query", "pipeline-statistics-query"},
+                {"shader_float16", "shader-float16"},
+                {"timestamp_query", "timestamp-query"},
+                {"multiplanar_formats", "multiplanar-formats"},
+            }};
+        for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
+            if (strcmp(featureName, name) == 0) {
+                return FeatureNameToEnum(replacement);
+            }
+        }
+
+        return Feature::InvalidEnum;
+    }
+
+    wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
+        Feature f = FeatureNameToEnum(featureName);
+        if (f != Feature::InvalidEnum) {
+            return ToAPIFeature(f);
+        }
+        // Pass something invalid.
+        return static_cast<wgpu::FeatureName>(-1);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Features.h b/src/dawn/native/Features.h
new file mode 100644
index 0000000..de75e99
--- /dev/null
+++ b/src/dawn/native/Features.h
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FEATURES_H_
+#define DAWNNATIVE_FEATURES_H_
+
+#include <bitset>
+#include <unordered_map>
+#include <vector>
+
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+
+namespace dawn::native {
+
+    enum class Feature {
+        TextureCompressionBC,
+        TextureCompressionETC2,
+        TextureCompressionASTC,
+        ShaderFloat16,
+        PipelineStatisticsQuery,
+        TimestampQuery,
+        DepthClamping,
+        Depth24UnormStencil8,
+        Depth32FloatStencil8,
+
+        // Dawn-specific
+        DawnInternalUsages,
+        MultiPlanarFormats,
+        DawnNative,
+
+        EnumCount,
+        InvalidEnum = EnumCount,
+        FeatureMin = TextureCompressionBC,
+    };
+
+    // A wrapper of the bitset to store if an feature is enabled or not. This wrapper provides the
+    // convenience to convert the enums of enum class Feature to the indices of a bitset.
+    struct FeaturesSet {
+        std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
+
+        void EnableFeature(Feature feature);
+        void EnableFeature(wgpu::FeatureName feature);
+        bool IsEnabled(Feature feature) const;
+        bool IsEnabled(wgpu::FeatureName feature) const;
+        // Returns |count|, the number of features. Writes out all |count| values if |features| is
+        // non-null.
+        size_t EnumerateFeatures(wgpu::FeatureName* features) const;
+        std::vector<const char*> GetEnabledFeatureNames() const;
+        void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
+    };
+
+    wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
+
+    class FeaturesInfo {
+      public:
+        FeaturesInfo();
+
+        // Used to query the details of an feature. Return nullptr if featureName is not a valid
+        // name of an feature supported in Dawn
+        const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
+        Feature FeatureNameToEnum(const char* featureName) const;
+        wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
+
+      private:
+        std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_FEATURES_H_
diff --git a/src/dawn/native/Format.cpp b/src/dawn/native/Format.cpp
new file mode 100644
index 0000000..c201729
--- /dev/null
+++ b/src/dawn/native/Format.cpp
@@ -0,0 +1,492 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Format.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Texture.h"
+
+#include <bitset>
+
+namespace dawn::native {
+
+    // Format
+
+    // TODO(dawn:527): Remove when unused.
+    SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
+        switch (type) {
+            case wgpu::TextureComponentType::Float:
+                return SampleTypeBit::Float;
+            case wgpu::TextureComponentType::Sint:
+                return SampleTypeBit::Sint;
+            case wgpu::TextureComponentType::Uint:
+                return SampleTypeBit::Uint;
+            case wgpu::TextureComponentType::DepthComparison:
+                return SampleTypeBit::Depth;
+        }
+        UNREACHABLE();
+    }
+
+    SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
+        switch (sampleType) {
+            case wgpu::TextureSampleType::Float:
+            case wgpu::TextureSampleType::UnfilterableFloat:
+            case wgpu::TextureSampleType::Sint:
+            case wgpu::TextureSampleType::Uint:
+            case wgpu::TextureSampleType::Depth:
+            case wgpu::TextureSampleType::Undefined:
+                // When the compiler complains that you need to add a case statement here, please
+                // also add a corresponding static assert below!
+                break;
+        }
+
+        static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
+        if (sampleType == wgpu::TextureSampleType::Undefined) {
+            return SampleTypeBit::None;
+        }
+
+        // Check that SampleTypeBit bits are in the same position / order as the respective
+        // wgpu::TextureSampleType value.
+        static_assert(SampleTypeBit::Float ==
+                      static_cast<SampleTypeBit>(
+                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
+        static_assert(
+            SampleTypeBit::UnfilterableFloat ==
+            static_cast<SampleTypeBit>(
+                1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
+        static_assert(SampleTypeBit::Uint ==
+                      static_cast<SampleTypeBit>(
+                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
+        static_assert(SampleTypeBit::Sint ==
+                      static_cast<SampleTypeBit>(
+                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
+        static_assert(SampleTypeBit::Depth ==
+                      static_cast<SampleTypeBit>(
+                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
+        return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
+    }
+
+    bool Format::IsColor() const {
+        return aspects == Aspect::Color;
+    }
+
+    bool Format::HasDepth() const {
+        return (aspects & Aspect::Depth) != 0;
+    }
+
+    bool Format::HasStencil() const {
+        return (aspects & Aspect::Stencil) != 0;
+    }
+
+    bool Format::HasDepthOrStencil() const {
+        return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
+    }
+
+    bool Format::IsMultiPlanar() const {
+        return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
+    }
+
+    bool Format::CopyCompatibleWith(const Format& format) const {
+        // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+        return baseFormat == format.baseFormat;
+    }
+
+    bool Format::ViewCompatibleWith(const Format& format) const {
+        // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+        return baseFormat == format.baseFormat;
+    }
+
+    const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
+        return GetAspectInfo(SelectFormatAspects(*this, aspect));
+    }
+
+    const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
+        ASSERT(HasOneBit(aspect));
+        ASSERT(aspects & aspect);
+        const size_t aspectIndex = GetAspectIndex(aspect);
+        ASSERT(aspectIndex < GetAspectCount(aspects));
+        return aspectInfo[aspectIndex];
+    }
+
+    FormatIndex Format::GetIndex() const {
+        return ComputeFormatIndex(format);
+    }
+
+    // FormatSet implementation
+
+    bool FormatSet::operator[](const Format& format) const {
+        return Base::operator[](format.GetIndex());
+    }
+
+    typename std::bitset<kKnownFormatCount>::reference FormatSet::operator[](const Format& format) {
+        return Base::operator[](format.GetIndex());
+    }
+
+    // Implementation details of the format table of the DeviceBase
+
+    // For the enum for formats are packed but this might change when we have a broader feature
+    // mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
+    FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
+        // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
+        // of the range of the FormatTable.
+        static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
+                      kKnownFormatCount);
+        return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
+    }
+
+    FormatTable BuildFormatTable(const DeviceBase* device) {
+        FormatTable table;
+        FormatSet formatsSet;
+
+        static constexpr SampleTypeBit kAnyFloat =
+            SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+
+        auto AddFormat = [&table, &formatsSet](Format format) {
+            FormatIndex index = ComputeFormatIndex(format.format);
+            ASSERT(index < table.size());
+
+            // This checks that each format is set at most once, the first part of checking that all
+            // formats are set exactly once.
+            ASSERT(!formatsSet[index]);
+
+            // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
+            // ASSERT isn't true, then additional validation on bytesPerRow must be added.
+            const bool hasMultipleAspects = !HasOneBit(format.aspects);
+            ASSERT(hasMultipleAspects ||
+                   (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
+
+            table[index] = format;
+            formatsSet.set(index);
+        };
+
+        auto AddColorFormat =
+            [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
+                         bool supportsMultisample, bool supportsResolveTarget, uint32_t byteSize,
+                         SampleTypeBit sampleTypes, uint8_t componentCount,
+                         wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+                Format internalFormat;
+                internalFormat.format = format;
+                internalFormat.isRenderable = renderable;
+                internalFormat.isCompressed = false;
+                internalFormat.isSupported = true;
+                internalFormat.supportsStorageUsage = supportsStorageUsage;
+
+                if (supportsMultisample) {
+                    ASSERT(renderable);
+                }
+                internalFormat.supportsMultisample = supportsMultisample;
+                internalFormat.supportsResolveTarget = supportsResolveTarget;
+                internalFormat.aspects = Aspect::Color;
+                internalFormat.componentCount = componentCount;
+
+                // Default baseFormat of each color formats should be themselves.
+                if (baseFormat == wgpu::TextureFormat::Undefined) {
+                    internalFormat.baseFormat = format;
+                } else {
+                    internalFormat.baseFormat = baseFormat;
+                }
+
+                AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+                firstAspect->block.byteSize = byteSize;
+                firstAspect->block.width = 1;
+                firstAspect->block.height = 1;
+                if (HasOneBit(sampleTypes)) {
+                    switch (sampleTypes) {
+                        case SampleTypeBit::Float:
+                        case SampleTypeBit::UnfilterableFloat:
+                            firstAspect->baseType = wgpu::TextureComponentType::Float;
+                            break;
+                        case SampleTypeBit::Sint:
+                            firstAspect->baseType = wgpu::TextureComponentType::Sint;
+                            break;
+                        case SampleTypeBit::Uint:
+                            firstAspect->baseType = wgpu::TextureComponentType::Uint;
+                            break;
+                        default:
+                            UNREACHABLE();
+                    }
+                } else {
+                    ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
+                    firstAspect->baseType = wgpu::TextureComponentType::Float;
+                }
+                firstAspect->supportedSampleTypes = sampleTypes;
+                firstAspect->format = format;
+                AddFormat(internalFormat);
+            };
+
+        auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
+                                           bool isSupported) {
+            Format internalFormat;
+            internalFormat.format = format;
+            internalFormat.baseFormat = format;
+            internalFormat.isRenderable = true;
+            internalFormat.isCompressed = false;
+            internalFormat.isSupported = isSupported;
+            internalFormat.supportsStorageUsage = false;
+            internalFormat.supportsMultisample = true;
+            internalFormat.supportsResolveTarget = false;
+            internalFormat.aspects = Aspect::Depth;
+            internalFormat.componentCount = 1;
+
+            AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+            firstAspect->block.byteSize = byteSize;
+            firstAspect->block.width = 1;
+            firstAspect->block.height = 1;
+            firstAspect->baseType = wgpu::TextureComponentType::Float;
+            firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+            firstAspect->format = format;
+            AddFormat(internalFormat);
+        };
+
+        auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
+            Format internalFormat;
+            internalFormat.format = format;
+            internalFormat.baseFormat = format;
+            internalFormat.isRenderable = true;
+            internalFormat.isCompressed = false;
+            internalFormat.isSupported = isSupported;
+            internalFormat.supportsStorageUsage = false;
+            internalFormat.supportsMultisample = true;
+            internalFormat.supportsResolveTarget = false;
+            internalFormat.aspects = Aspect::Stencil;
+            internalFormat.componentCount = 1;
+
+            // Duplicate the data for the stencil aspect in both the first and second aspect info.
+            //  - aspectInfo[0] is used by AddMultiAspectFormat to copy the info for the whole
+            //    stencil8 aspect of depth-stencil8 formats.
+            //  - aspectInfo[1] is the actual info used in the rest of Dawn since
+            //    GetAspectIndex(Aspect::Stencil) is 1.
+            ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+
+            internalFormat.aspectInfo[0].block.byteSize = 1;
+            internalFormat.aspectInfo[0].block.width = 1;
+            internalFormat.aspectInfo[0].block.height = 1;
+            internalFormat.aspectInfo[0].baseType = wgpu::TextureComponentType::Uint;
+            internalFormat.aspectInfo[0].supportedSampleTypes = SampleTypeBit::Uint;
+            internalFormat.aspectInfo[0].format = format;
+
+            internalFormat.aspectInfo[1] = internalFormat.aspectInfo[0];
+
+            AddFormat(internalFormat);
+        };
+
+        auto AddCompressedFormat =
+            [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
+                         uint32_t height, bool isSupported, uint8_t componentCount,
+                         wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+                Format internalFormat;
+                internalFormat.format = format;
+                internalFormat.isRenderable = false;
+                internalFormat.isCompressed = true;
+                internalFormat.isSupported = isSupported;
+                internalFormat.supportsStorageUsage = false;
+                internalFormat.supportsMultisample = false;
+                internalFormat.supportsResolveTarget = false;
+                internalFormat.aspects = Aspect::Color;
+                internalFormat.componentCount = componentCount;
+
+                // Default baseFormat of each compressed formats should be themselves.
+                if (baseFormat == wgpu::TextureFormat::Undefined) {
+                    internalFormat.baseFormat = format;
+                } else {
+                    internalFormat.baseFormat = baseFormat;
+                }
+
+                AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+                firstAspect->block.byteSize = byteSize;
+                firstAspect->block.width = width;
+                firstAspect->block.height = height;
+                firstAspect->baseType = wgpu::TextureComponentType::Float;
+                firstAspect->supportedSampleTypes = kAnyFloat;
+                firstAspect->format = format;
+                AddFormat(internalFormat);
+            };
+
+        auto AddMultiAspectFormat =
+            [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+                                 wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+                                 bool isRenderable, bool isSupported, bool supportsMultisample,
+                                 uint8_t componentCount) {
+                Format internalFormat;
+                internalFormat.format = format;
+                internalFormat.baseFormat = format;
+                internalFormat.isRenderable = isRenderable;
+                internalFormat.isCompressed = false;
+                internalFormat.isSupported = isSupported;
+                internalFormat.supportsStorageUsage = false;
+                internalFormat.supportsMultisample = supportsMultisample;
+                internalFormat.supportsResolveTarget = false;
+                internalFormat.aspects = aspects;
+                internalFormat.componentCount = componentCount;
+
+                // Multi aspect formats just copy information about single-aspect formats. This
+                // means that the single-plane formats must have been added before multi-aspect
+                // ones. (it is ASSERTed below).
+                const FormatIndex firstFormatIndex = ComputeFormatIndex(firstFormat);
+                const FormatIndex secondFormatIndex = ComputeFormatIndex(secondFormat);
+
+                ASSERT(table[firstFormatIndex].aspectInfo[0].format !=
+                       wgpu::TextureFormat::Undefined);
+                ASSERT(table[secondFormatIndex].aspectInfo[0].format !=
+                       wgpu::TextureFormat::Undefined);
+
+                internalFormat.aspectInfo[0] = table[firstFormatIndex].aspectInfo[0];
+                internalFormat.aspectInfo[1] = table[secondFormatIndex].aspectInfo[0];
+
+                AddFormat(internalFormat);
+            };
+
+        // clang-format off
+        // 1 byte color formats
+        AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, true, true, 1, kAnyFloat, 1);
+        AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, false, false, 1, kAnyFloat, 1);
+        AddColorFormat(wgpu::TextureFormat::R8Uint, true, false, true, false, 1, SampleTypeBit::Uint, 1);
+        AddColorFormat(wgpu::TextureFormat::R8Sint, true, false, true, false, 1, SampleTypeBit::Sint, 1);
+
+        // 2 bytes color formats
+        AddColorFormat(wgpu::TextureFormat::R16Uint, true, false, true, false, 2, SampleTypeBit::Uint, 1);
+        AddColorFormat(wgpu::TextureFormat::R16Sint, true, false, true, false, 2, SampleTypeBit::Sint, 1);
+        AddColorFormat(wgpu::TextureFormat::R16Float, true, false, true, true, 2, kAnyFloat, 1);
+        AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, false, true, true, 2, kAnyFloat, 2);
+        AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, false, false, false, 2, kAnyFloat, 2);
+        AddColorFormat(wgpu::TextureFormat::RG8Uint, true, false, true, false, 2, SampleTypeBit::Uint, 2);
+        AddColorFormat(wgpu::TextureFormat::RG8Sint, true, false, true, false, 2, SampleTypeBit::Sint, 2);
+
+        // 4 bytes color formats
+        AddColorFormat(wgpu::TextureFormat::R32Uint, true, true, false, false, 4, SampleTypeBit::Uint, 1);
+        AddColorFormat(wgpu::TextureFormat::R32Sint, true, true, false, false, 4, SampleTypeBit::Sint, 1);
+        AddColorFormat(wgpu::TextureFormat::R32Float, true, true, true, false, 4, SampleTypeBit::UnfilterableFloat, 1);
+        AddColorFormat(wgpu::TextureFormat::RG16Uint, true, false, true, false, 4, SampleTypeBit::Uint, 2);
+        AddColorFormat(wgpu::TextureFormat::RG16Sint, true, false, true, false, 4, SampleTypeBit::Sint, 2);
+        AddColorFormat(wgpu::TextureFormat::RG16Float, true, false, true, true, 4, kAnyFloat, 2);
+        AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, true, true, true, 4, kAnyFloat, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, false, true, true, 4, kAnyFloat, 4, wgpu::TextureFormat::RGBA8Unorm);
+        AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, true, false, false, 4, kAnyFloat, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, true, true, false, 4, SampleTypeBit::Uint, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, true, true, false, 4, SampleTypeBit::Sint, 4);
+        AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, false, true, true, 4, kAnyFloat, 4);
+        AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, false, true, true, 4, kAnyFloat, 4, wgpu::TextureFormat::BGRA8Unorm);
+        AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, false, true, true, 4, kAnyFloat, 4);
+
+        AddColorFormat(wgpu::TextureFormat::RG11B10Ufloat, false, false, false, false, 4, kAnyFloat, 3);
+        AddColorFormat(wgpu::TextureFormat::RGB9E5Ufloat, false, false, false, false, 4, kAnyFloat, 3);
+
+        // 8 bytes color formats
+        AddColorFormat(wgpu::TextureFormat::RG32Uint, true, true, false, false, 8, SampleTypeBit::Uint, 2);
+        AddColorFormat(wgpu::TextureFormat::RG32Sint, true, true, false, false, 8, SampleTypeBit::Sint, 2);
+        AddColorFormat(wgpu::TextureFormat::RG32Float, true, true, false, false, 8, SampleTypeBit::UnfilterableFloat, 2);
+        AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, true, true, false, 8, SampleTypeBit::Uint, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, true, true, false, 8, SampleTypeBit::Sint, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, true, true, true, 8, kAnyFloat, 4);
+
+        // 16 bytes color formats
+        AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, true, false, false, 16, SampleTypeBit::Uint, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, true, false, false, 16, SampleTypeBit::Sint, 4);
+        AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, true, false, false, 16, SampleTypeBit::UnfilterableFloat, 4);
+
+        // Depth-stencil formats
+        AddStencilFormat(wgpu::TextureFormat::Stencil8, true);
+        AddDepthFormat(wgpu::TextureFormat::Depth16Unorm, 2, true);
+        // TODO(crbug.com/dawn/843): This is 4 because we read this to perform zero initialization,
+        // and textures are always use depth32float. We should improve this to be more robust. Perhaps,
+        // using 0 here to mean "unsized" and adding a backend-specific query for the block size.
+        AddDepthFormat(wgpu::TextureFormat::Depth24Plus, 4, true);
+        AddMultiAspectFormat(wgpu::TextureFormat::Depth24PlusStencil8,
+                              Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, true, true, 2);
+        bool isD24S8Supported = device->IsFeatureEnabled(Feature::Depth24UnormStencil8);
+        AddMultiAspectFormat(wgpu::TextureFormat::Depth24UnormStencil8,
+                              Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, isD24S8Supported, true, 2);
+        AddDepthFormat(wgpu::TextureFormat::Depth32Float, 4, true);
+        bool isD32S8Supported = device->IsFeatureEnabled(Feature::Depth32FloatStencil8);
+        AddMultiAspectFormat(wgpu::TextureFormat::Depth32FloatStencil8,
+                              Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Stencil8, true, isD32S8Supported, true, 2);
+
+        // BC compressed formats
+        bool isBCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionBC);
+        AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC1RGBAUnorm);
+        AddCompressedFormat(wgpu::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported, 1);
+        AddCompressedFormat(wgpu::TextureFormat::BC4RUnorm, 8, 4, 4, isBCFormatSupported, 1);
+        AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC2RGBAUnorm);
+        AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC3RGBAUnorm);
+        AddCompressedFormat(wgpu::TextureFormat::BC5RGSnorm, 16, 4, 4, isBCFormatSupported, 2);
+        AddCompressedFormat(wgpu::TextureFormat::BC5RGUnorm, 16, 4, 4, isBCFormatSupported, 2);
+        AddCompressedFormat(wgpu::TextureFormat::BC6HRGBFloat, 16, 4, 4, isBCFormatSupported, 3);
+        AddCompressedFormat(wgpu::TextureFormat::BC6HRGBUfloat, 16, 4, 4, isBCFormatSupported, 3);
+        AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC7RGBAUnorm);
+
+        // ETC2/EAC compressed formats
+        bool isETC2FormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionETC2);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8Unorm, 8, 4, 4, isETC2FormatSupported, 3);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8UnormSrgb, 8, 4, 4, isETC2FormatSupported, 3, wgpu::TextureFormat::ETC2RGB8Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1Unorm, 8, 4, 4, isETC2FormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1UnormSrgb, 8, 4, 4, isETC2FormatSupported, 4, wgpu::TextureFormat::ETC2RGB8A1Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8Unorm, 16, 4, 4, isETC2FormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8UnormSrgb, 16, 4, 4, isETC2FormatSupported, 4, wgpu::TextureFormat::ETC2RGBA8Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::EACR11Unorm, 8, 4, 4, isETC2FormatSupported, 1);
+        AddCompressedFormat(wgpu::TextureFormat::EACR11Snorm, 8, 4, 4, isETC2FormatSupported, 1);
+        AddCompressedFormat(wgpu::TextureFormat::EACRG11Unorm, 16, 4, 4, isETC2FormatSupported, 2);
+        AddCompressedFormat(wgpu::TextureFormat::EACRG11Snorm, 16, 4, 4, isETC2FormatSupported, 2);
+
+        // ASTC compressed formats
+        bool isASTCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionASTC);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC4x4Unorm, 16, 4, 4, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC4x4UnormSrgb, 16, 4, 4, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC4x4Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC5x4Unorm, 16, 5, 4, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC5x4UnormSrgb, 16, 5, 4, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC5x4Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC5x5Unorm, 16, 5, 5, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC5x5UnormSrgb, 16, 5, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC5x5Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC6x5Unorm, 16, 6, 5, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC6x5UnormSrgb, 16, 6, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC6x5Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC6x6Unorm, 16, 6, 6, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC6x6UnormSrgb, 16, 6, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC6x6Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x5Unorm, 16, 8, 5, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x5UnormSrgb, 16, 8, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x5Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x6Unorm, 16, 8, 6, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x6UnormSrgb, 16, 8, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x6Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x8Unorm, 16, 8, 8, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC8x8UnormSrgb, 16, 8, 8, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x8Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x5Unorm, 16, 10, 5, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x5UnormSrgb, 16, 10, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x5Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x6Unorm, 16, 10, 6, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x6UnormSrgb, 16, 10, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x6Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x8Unorm, 16, 10, 8, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x8UnormSrgb, 16, 10, 8, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x8Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x10Unorm, 16, 10, 10, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC10x10UnormSrgb, 16, 10, 10, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x10Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC12x10Unorm, 16, 12, 10, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC12x10UnormSrgb, 16, 12, 10, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC12x10Unorm);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC12x12Unorm, 16, 12, 12, isASTCFormatSupported, 4);
+        AddCompressedFormat(wgpu::TextureFormat::ASTC12x12UnormSrgb, 16, 12, 12, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC12x12Unorm);
+
+        // multi-planar formats
+        const bool isMultiPlanarFormatSupported = device->IsFeatureEnabled(Feature::MultiPlanarFormats);
+        AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
+            wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, false, 3);
+
+        // clang-format on
+
+        // This checks that each format is set at least once, the second part of checking that all
+        // formats are checked exactly once.
+        ASSERT(formatsSet.all());
+
+        return table;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Format.h b/src/dawn/native/Format.h
new file mode 100644
index 0000000..457c6cb
--- /dev/null
+++ b/src/dawn/native/Format.h
@@ -0,0 +1,173 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FORMAT_H_
+#define DAWNNATIVE_FORMAT_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/EnumClassBitmasks.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+
+// About multi-planar formats.
+//
+// Dawn supports additional multi-planar formats when the multiplanar-formats extension is enabled.
+// When enabled, Dawn treats planar data as sub-resources (ie. 1 sub-resource == 1 view == 1 plane).
+// A multi-planar format name encodes the channel mapping and order of planes. For example,
+// R8BG8Biplanar420Unorm is YUV 4:2:0 where Plane 0 = R8, and Plane 1 = BG8.
+//
+// Requirements:
+// * Plane aspects cannot be combined with color, depth, or stencil aspects.
+// * Only compatible multi-planar formats of planes can be used with multi-planar texture
+// formats.
+// * Can't access multiple planes without creating per plane views (no color conversion).
+// * Multi-planar format cannot be written or read without a per plane view.
+//
+// TODO(dawn:551): Consider moving this comment.
+
+namespace dawn::native {
+
+    enum class Aspect : uint8_t;
+    class DeviceBase;
+
+    // This mirrors wgpu::TextureSampleType as a bitmask instead.
+    enum class SampleTypeBit : uint8_t {
+        None = 0x0,
+        Float = 0x1,
+        UnfilterableFloat = 0x2,
+        Depth = 0x4,
+        Sint = 0x8,
+        Uint = 0x10,
+    };
+
+    // Converts an wgpu::TextureComponentType to its bitmask representation.
+    SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
+    // Converts an wgpu::TextureSampleType to its bitmask representation.
+    SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
+
+    struct TexelBlockInfo {
+        uint32_t byteSize;
+        uint32_t width;
+        uint32_t height;
+    };
+
+    struct AspectInfo {
+        TexelBlockInfo block;
+        // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
+        // an internal Dawn enum.
+        wgpu::TextureComponentType baseType;
+        SampleTypeBit supportedSampleTypes;
+        wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+    };
+
+    // The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
+    // exact number of known format.
+    static constexpr uint32_t kKnownFormatCount = 96;
+
+    using FormatIndex = TypedInteger<struct FormatIndexT, uint32_t>;
+
+    struct Format;
+    using FormatTable = ityp::array<FormatIndex, Format, kKnownFormatCount>;
+
+    // A wgpu::TextureFormat along with all the information about it necessary for validation.
+    struct Format {
+        wgpu::TextureFormat format;
+
+        // TODO(crbug.com/dawn/1332): These members could be stored in a Format capability matrix.
+        bool isRenderable;
+        bool isCompressed;
+        // A format can be known but not supported because it is part of a disabled extension.
+        bool isSupported;
+        bool supportsStorageUsage;
+        bool supportsMultisample;
+        bool supportsResolveTarget;
+        Aspect aspects;
+        // Only used for renderable color formats, number of color channels.
+        uint8_t componentCount;
+
+        bool IsColor() const;
+        bool HasDepth() const;
+        bool HasStencil() const;
+        bool HasDepthOrStencil() const;
+
+        // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
+        // allowed by multi-planar formats (ex. NV12).
+        bool IsMultiPlanar() const;
+
+        const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
+        const AspectInfo& GetAspectInfo(Aspect aspect) const;
+
+        // The index of the format in the list of all known formats: a unique number for each format
+        // in [0, kKnownFormatCount)
+        FormatIndex GetIndex() const;
+
+        // baseFormat represents the memory layout of the format.
+        // If two formats has the same baseFormat, they could copy to and be viewed as the other
+        // format. Currently two formats have the same baseFormat if they differ only in sRGB-ness.
+        wgpu::TextureFormat baseFormat;
+
+        // Returns true if the formats are copy compatible.
+        // Currently means they differ only in sRGB-ness.
+        bool CopyCompatibleWith(const Format& format) const;
+
+        // Returns true if the formats are texture view format compatible.
+        // Currently means they differ only in sRGB-ness.
+        bool ViewCompatibleWith(const Format& format) const;
+
+      private:
+        // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
+        // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
+        // info is depth and the second aspect info is stencil. For multi-planar formats,
+        // aspectInfo[i] is the ith plane.
+        std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
+
+        friend FormatTable BuildFormatTable(const DeviceBase* device);
+    };
+
+    class FormatSet : public ityp::bitset<FormatIndex, kKnownFormatCount> {
+        using Base = ityp::bitset<FormatIndex, kKnownFormatCount>;
+
+      public:
+        using Base::Base;
+        using Base::operator[];
+
+        bool operator[](const Format& format) const;
+        typename Base::reference operator[](const Format& format);
+    };
+
+    // Implementation details of the format table in the device.
+
+    // Returns the index of a format in the FormatTable.
+    FormatIndex ComputeFormatIndex(wgpu::TextureFormat format);
+    // Builds the format table with the extensions enabled on the device.
+    FormatTable BuildFormatTable(const DeviceBase* device);
+
+}  // namespace dawn::native
+
+namespace dawn {
+
+    template <>
+    struct IsDawnBitmask<dawn::native::SampleTypeBit> {
+        static constexpr bool enable = true;
+    };
+
+}  // namespace dawn
+
+#endif  // DAWNNATIVE_FORMAT_H_
diff --git a/src/dawn/native/Forward.h b/src/dawn/native/Forward.h
new file mode 100644
index 0000000..36b092c
--- /dev/null
+++ b/src/dawn/native/Forward.h
@@ -0,0 +1,71 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FORWARD_H_
+#define DAWNNATIVE_FORWARD_H_
+
+#include <cstdint>
+
+template <typename T>
+class Ref;
+
+namespace dawn::native {
+
+    enum class ObjectType : uint32_t;
+
+    class AdapterBase;
+    class BindGroupBase;
+    class BindGroupLayoutBase;
+    class BufferBase;
+    class ComputePipelineBase;
+    class CommandBufferBase;
+    class CommandEncoder;
+    class ComputePassEncoder;
+    class ExternalTextureBase;
+    class InstanceBase;
+    class PipelineBase;
+    class PipelineLayoutBase;
+    class QuerySetBase;
+    class QueueBase;
+    class RenderBundleBase;
+    class RenderBundleEncoder;
+    class RenderPassEncoder;
+    class RenderPipelineBase;
+    class ResourceHeapBase;
+    class SamplerBase;
+    class Surface;
+    class ShaderModuleBase;
+    class StagingBufferBase;
+    class SwapChainBase;
+    class NewSwapChainBase;
+    class TextureBase;
+    class TextureViewBase;
+
+    class DeviceBase;
+
+    template <typename T>
+    class PerStage;
+
+    struct Format;
+
+    // Aliases for frontend-only types.
+    using CommandEncoderBase = CommandEncoder;
+    using ComputePassEncoderBase = ComputePassEncoder;
+    using RenderBundleEncoderBase = RenderBundleEncoder;
+    using RenderPassEncoderBase = RenderPassEncoder;
+    using SurfaceBase = Surface;
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_FORWARD_H_
diff --git a/src/dawn/native/IndirectDrawMetadata.cpp b/src/dawn/native/IndirectDrawMetadata.cpp
new file mode 100644
index 0000000..ebe0e7f
--- /dev/null
+++ b/src/dawn/native/IndirectDrawMetadata.cpp
@@ -0,0 +1,193 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/IndirectDrawMetadata.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/RenderBundle.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace dawn::native {
+
+    uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
+        return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
+               kDrawIndexedIndirectSize;
+    }
+
+    IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
+        BufferBase* indirectBuffer)
+        : mIndirectBuffer(indirectBuffer) {
+    }
+
+    void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
+        uint32_t maxDrawCallsPerIndirectValidationBatch,
+        uint32_t maxBatchOffsetRange,
+        IndexedIndirectDraw draw) {
+        const uint64_t newOffset = draw.clientBufferOffset;
+        auto it = mBatches.begin();
+        while (it != mBatches.end()) {
+            IndexedIndirectValidationBatch& batch = *it;
+            if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
+                // This batch is full. If its minOffset is to the right of the new offset, we can
+                // just insert a new batch here.
+                if (newOffset < batch.minOffset) {
+                    break;
+                }
+
+                // Otherwise keep looking.
+                ++it;
+                continue;
+            }
+
+            if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
+                batch.draws.push_back(std::move(draw));
+                return;
+            }
+
+            if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
+                // We can extend this batch to the left in order to fit the new offset.
+                batch.minOffset = newOffset;
+                batch.draws.push_back(std::move(draw));
+                return;
+            }
+
+            if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
+                // We can extend this batch to the right in order to fit the new offset.
+                batch.maxOffset = newOffset;
+                batch.draws.push_back(std::move(draw));
+                return;
+            }
+
+            if (newOffset < batch.minOffset) {
+                // We want to insert a new batch just before this one.
+                break;
+            }
+
+            ++it;
+        }
+
+        IndexedIndirectValidationBatch newBatch;
+        newBatch.minOffset = newOffset;
+        newBatch.maxOffset = newOffset;
+        newBatch.draws.push_back(std::move(draw));
+
+        mBatches.insert(it, std::move(newBatch));
+    }
+
+    void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+        uint32_t maxDrawCallsPerIndirectValidationBatch,
+        uint32_t maxBatchOffsetRange,
+        const IndexedIndirectValidationBatch& newBatch) {
+        auto it = mBatches.begin();
+        while (it != mBatches.end()) {
+            IndexedIndirectValidationBatch& batch = *it;
+            uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
+            uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
+            if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
+                                                        maxDrawCallsPerIndirectValidationBatch) {
+                // This batch fits within the limits of an existing batch. Merge it.
+                batch.minOffset = min;
+                batch.maxOffset = max;
+                batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
+                return;
+            }
+
+            if (newBatch.minOffset < batch.minOffset) {
+                break;
+            }
+
+            ++it;
+        }
+        mBatches.push_back(newBatch);
+    }
+
+    const std::vector<IndirectDrawMetadata::IndexedIndirectValidationBatch>&
+    IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
+        return mBatches;
+    }
+
+    IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
+        : mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)),
+          mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)) {
+    }
+
+    IndirectDrawMetadata::~IndirectDrawMetadata() = default;
+
+    IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
+
+    IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
+
+    IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
+    IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
+        return &mIndexedIndirectBufferValidationInfo;
+    }
+
+    void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
+        auto [_, inserted] = mAddedBundles.insert(bundle);
+        if (!inserted) {
+            return;
+        }
+
+        for (const auto& [config, validationInfo] :
+             bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
+            auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
+            if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
+                // We already have batches for the same config. Merge the new ones in.
+                for (const IndexedIndirectValidationBatch& batch : validationInfo.GetBatches()) {
+                    it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
+                }
+            } else {
+                mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
+            }
+        }
+    }
+
+    void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+                                                      uint64_t indexBufferSize,
+                                                      BufferBase* indirectBuffer,
+                                                      uint64_t indirectOffset,
+                                                      DrawIndexedIndirectCmd* cmd) {
+        uint64_t numIndexBufferElements;
+        switch (indexFormat) {
+            case wgpu::IndexFormat::Uint16:
+                numIndexBufferElements = indexBufferSize / 2;
+                break;
+            case wgpu::IndexFormat::Uint32:
+                numIndexBufferElements = indexBufferSize / 4;
+                break;
+            case wgpu::IndexFormat::Undefined:
+                UNREACHABLE();
+        }
+
+        const IndexedIndirectConfig config(indirectBuffer, numIndexBufferElements);
+        auto it = mIndexedIndirectBufferValidationInfo.find(config);
+        if (it == mIndexedIndirectBufferValidationInfo.end()) {
+            auto result = mIndexedIndirectBufferValidationInfo.emplace(
+                config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+            it = result.first;
+        }
+
+        IndexedIndirectDraw draw;
+        draw.clientBufferOffset = indirectOffset;
+        draw.cmd = cmd;
+        it->second.AddIndexedIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange,
+                                          std::move(draw));
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/IndirectDrawMetadata.h b/src/dawn/native/IndirectDrawMetadata.h
new file mode 100644
index 0000000..602be86
--- /dev/null
+++ b/src/dawn/native/IndirectDrawMetadata.h
@@ -0,0 +1,126 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+#define DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Commands.h"
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace dawn::native {
+
+    class RenderBundleBase;
+    struct CombinedLimits;
+
+    // In the unlikely scenario that indirect offsets used over a single buffer span more than
+    // this length of the buffer, we split the validation work into multiple batches.
+    uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
+
+    // Metadata corresponding to the validation requirements of a single render pass. This metadata
+    // is accumulated while its corresponding render pass is encoded, and is later used to encode
+    // validation commands to be inserted into the command buffer just before the render pass's own
+    // commands.
+    class IndirectDrawMetadata : public NonCopyable {
+      public:
+        struct IndexedIndirectDraw {
+            uint64_t clientBufferOffset;
+            // This is a pointer to the command that should be populated with the validated
+            // indirect scratch buffer. It is only valid up until the encoded command buffer
+            // is submitted.
+            DrawIndexedIndirectCmd* cmd;
+        };
+
+        struct IndexedIndirectValidationBatch {
+            uint64_t minOffset;
+            uint64_t maxOffset;
+            std::vector<IndexedIndirectDraw> draws;
+        };
+
+        // Tracks information about every draw call in this render pass which uses the same indirect
+        // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
+        // that validation work can be chunked efficiently if necessary.
+        class IndexedIndirectBufferValidationInfo {
+          public:
+            explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
+
+            // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
+            // assigned (and deferred) buffer ref and relative offset before returning.
+            void AddIndexedIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                                        uint32_t maxBatchOffsetRange,
+                                        IndexedIndirectDraw draw);
+
+            // Adds draw calls from an already-computed batch, e.g. from a previously encoded
+            // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
+            // it's added to mBatch.
+            void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                          uint32_t maxBatchOffsetRange,
+                          const IndexedIndirectValidationBatch& batch);
+
+            const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
+
+          private:
+            Ref<BufferBase> mIndirectBuffer;
+
+            // A list of information about validation batches that will need to be executed for the
+            // corresponding indirect buffer prior to a single render pass. These are kept sorted by
+            // minOffset and may overlap iff the number of offsets in one batch would otherwise
+            // exceed some large upper bound (roughly ~33M draw calls).
+            //
+            // Since the most common expected cases will overwhelmingly require only a single
+            // validation pass per render pass, this is optimized for efficient updates to a single
+            // batch rather than for efficient manipulation of a large number of batches.
+            std::vector<IndexedIndirectValidationBatch> mBatches;
+        };
+
+        // Combination of an indirect buffer reference, and the number of addressable index buffer
+        // elements at the time of a draw call.
+        using IndexedIndirectConfig = std::pair<BufferBase*, uint64_t>;
+        using IndexedIndirectBufferValidationInfoMap =
+            std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
+
+        explicit IndirectDrawMetadata(const CombinedLimits& limits);
+        ~IndirectDrawMetadata();
+
+        IndirectDrawMetadata(IndirectDrawMetadata&&);
+        IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
+
+        IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
+
+        void AddBundle(RenderBundleBase* bundle);
+        void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+                                    uint64_t indexBufferSize,
+                                    BufferBase* indirectBuffer,
+                                    uint64_t indirectOffset,
+                                    DrawIndexedIndirectCmd* cmd);
+
+      private:
+        IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
+        std::set<RenderBundleBase*> mAddedBundles;
+
+        uint32_t mMaxDrawCallsPerBatch;
+        uint32_t mMaxBatchOffsetRange;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_INDIRECTDRAWMETADATA_H_
diff --git a/src/dawn/native/IndirectDrawValidationEncoder.cpp b/src/dawn/native/IndirectDrawValidationEncoder.cpp
new file mode 100644
index 0000000..6567b3e
--- /dev/null
+++ b/src/dawn/native/IndirectDrawValidationEncoder.cpp
@@ -0,0 +1,382 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <cstdlib>
+#include <limits>
+
+namespace dawn::native {
+
+    namespace {
+        // NOTE: This must match the workgroup_size attribute on the compute entry point below.
+        constexpr uint64_t kWorkgroupSize = 64;
+
+        // Equivalent to the BatchInfo struct defined in the shader below.
+        struct BatchInfo {
+            uint64_t numIndexBufferElements;
+            uint32_t numDraws;
+            uint32_t padding;
+        };
+
+        // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
+        // various failure modes.
+        static const char sRenderValidationShaderSource[] = R"(
+            let kNumIndirectParamsPerDrawCall = 5u;
+
+            let kIndexCountEntry = 0u;
+            let kInstanceCountEntry = 1u;
+            let kFirstIndexEntry = 2u;
+            let kBaseVertexEntry = 3u;
+            let kFirstInstanceEntry = 4u;
+
+            struct BatchInfo {
+                numIndexBufferElementsLow: u32;
+                numIndexBufferElementsHigh: u32;
+                numDraws: u32;
+                padding: u32;
+                indirectOffsets: array<u32>;
+            };
+
+            struct IndirectParams {
+                data: array<u32>;
+            };
+
+            @group(0) @binding(0) var<storage, read> batch: BatchInfo;
+            @group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
+            @group(0) @binding(2) var<storage, write> validatedParams: IndirectParams;
+
+            fn fail(drawIndex: u32) {
+                let index = drawIndex * kNumIndirectParamsPerDrawCall;
+                validatedParams.data[index + kIndexCountEntry] = 0u;
+                validatedParams.data[index + kInstanceCountEntry] = 0u;
+                validatedParams.data[index + kFirstIndexEntry] = 0u;
+                validatedParams.data[index + kBaseVertexEntry] = 0u;
+                validatedParams.data[index + kFirstInstanceEntry] = 0u;
+            }
+
+            fn pass(drawIndex: u32) {
+                let vIndex = drawIndex * kNumIndirectParamsPerDrawCall;
+                let cIndex = batch.indirectOffsets[drawIndex];
+                validatedParams.data[vIndex + kIndexCountEntry] =
+                    clientParams.data[cIndex + kIndexCountEntry];
+                validatedParams.data[vIndex + kInstanceCountEntry] =
+                    clientParams.data[cIndex + kInstanceCountEntry];
+                validatedParams.data[vIndex + kFirstIndexEntry] =
+                    clientParams.data[cIndex + kFirstIndexEntry];
+                validatedParams.data[vIndex + kBaseVertexEntry] =
+                    clientParams.data[cIndex + kBaseVertexEntry];
+                validatedParams.data[vIndex + kFirstInstanceEntry] =
+                    clientParams.data[cIndex + kFirstInstanceEntry];
+            }
+
+            @stage(compute) @workgroup_size(64, 1, 1)
+            fn main(@builtin(global_invocation_id) id : vec3<u32>) {
+                if (id.x >= batch.numDraws) {
+                    return;
+                }
+
+                let clientIndex = batch.indirectOffsets[id.x];
+                let firstInstance = clientParams.data[clientIndex + kFirstInstanceEntry];
+                if (firstInstance != 0u) {
+                    fail(id.x);
+                    return;
+                }
+
+                if (batch.numIndexBufferElementsHigh >= 2u) {
+                    // firstIndex and indexCount are both u32. The maximum possible sum of these
+                    // values is 0x1fffffffe, which is less than 0x200000000. Nothing to validate.
+                    pass(id.x);
+                    return;
+                }
+
+                let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
+                if (batch.numIndexBufferElementsHigh == 0u &&
+                    batch.numIndexBufferElementsLow < firstIndex) {
+                    fail(id.x);
+                    return;
+                }
+
+                // Note that this subtraction may underflow, but only when
+                // numIndexBufferElementsHigh is 1u. The result is still correct in that case.
+                let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
+                let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
+                if (indexCount > maxIndexCount) {
+                    fail(id.x);
+                    return;
+                }
+                pass(id.x);
+            }
+        )";
+
+        ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
+            DeviceBase* device) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+            if (store->renderValidationPipeline == nullptr) {
+                // Create compute shader module if not cached before.
+                if (store->renderValidationShader == nullptr) {
+                    DAWN_TRY_ASSIGN(
+                        store->renderValidationShader,
+                        utils::CreateShaderModule(device, sRenderValidationShaderSource));
+                }
+
+                Ref<BindGroupLayoutBase> bindGroupLayout;
+                DAWN_TRY_ASSIGN(
+                    bindGroupLayout,
+                    utils::MakeBindGroupLayout(
+                        device,
+                        {
+                            {0, wgpu::ShaderStage::Compute,
+                             wgpu::BufferBindingType::ReadOnlyStorage},
+                            {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                            {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                        },
+                        /* allowInternalBinding */ true));
+
+                Ref<PipelineLayoutBase> pipelineLayout;
+                DAWN_TRY_ASSIGN(pipelineLayout,
+                                utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+                ComputePipelineDescriptor computePipelineDescriptor = {};
+                computePipelineDescriptor.layout = pipelineLayout.Get();
+                computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
+                computePipelineDescriptor.compute.entryPoint = "main";
+
+                DAWN_TRY_ASSIGN(store->renderValidationPipeline,
+                                device->CreateComputePipeline(&computePipelineDescriptor));
+            }
+
+            return store->renderValidationPipeline.Get();
+        }
+
+        size_t GetBatchDataSize(uint32_t numDraws) {
+            return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+        }
+
+    }  // namespace
+
+    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
+        const uint64_t batchDrawCallLimitByDispatchSize =
+            static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
+        const uint64_t batchDrawCallLimitByStorageBindingSize =
+            (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+        return static_cast<uint32_t>(
+            std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
+                      uint64_t(std::numeric_limits<uint32_t>::max())}));
+    }
+
+    MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+                                                    CommandEncoder* commandEncoder,
+                                                    RenderPassResourceUsageTracker* usageTracker,
+                                                    IndirectDrawMetadata* indirectDrawMetadata) {
+        struct Batch {
+            const IndirectDrawMetadata::IndexedIndirectValidationBatch* metadata;
+            uint64_t numIndexBufferElements;
+            uint64_t dataBufferOffset;
+            uint64_t dataSize;
+            uint64_t clientIndirectOffset;
+            uint64_t clientIndirectSize;
+            uint64_t validatedParamsOffset;
+            uint64_t validatedParamsSize;
+            BatchInfo* batchInfo;
+        };
+
+        struct Pass {
+            BufferBase* clientIndirectBuffer;
+            uint64_t validatedParamsSize = 0;
+            uint64_t batchDataSize = 0;
+            std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
+            std::vector<Batch> batches;
+        };
+
+        // First stage is grouping all batches into passes. We try to pack as many batches into a
+        // single pass as possible. Batches can be grouped together as long as they're validating
+        // data from the same indirect buffer, but they may still be split into multiple passes if
+        // the number of draw calls in a pass would exceed some (very high) upper bound.
+        size_t validatedParamsSize = 0;
+        std::vector<Pass> passes;
+        IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
+            *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
+        if (bufferInfoMap.empty()) {
+            return {};
+        }
+
+        const uint32_t maxStorageBufferBindingSize =
+            device->GetLimits().v1.maxStorageBufferBindingSize;
+        const uint32_t minStorageBufferOffsetAlignment =
+            device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+        for (auto& [config, validationInfo] : bufferInfoMap) {
+            BufferBase* clientIndirectBuffer = config.first;
+            for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
+                 validationInfo.GetBatches()) {
+                const uint64_t minOffsetFromAlignedBoundary =
+                    batch.minOffset % minStorageBufferOffsetAlignment;
+                const uint64_t minOffsetAlignedDown =
+                    batch.minOffset - minOffsetFromAlignedBoundary;
+
+                Batch newBatch;
+                newBatch.metadata = &batch;
+                newBatch.numIndexBufferElements = config.second;
+                newBatch.dataSize = GetBatchDataSize(batch.draws.size());
+                newBatch.clientIndirectOffset = minOffsetAlignedDown;
+                newBatch.clientIndirectSize =
+                    batch.maxOffset + kDrawIndexedIndirectSize - minOffsetAlignedDown;
+
+                newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
+                newBatch.validatedParamsOffset =
+                    Align(validatedParamsSize, minStorageBufferOffsetAlignment);
+                validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
+                if (validatedParamsSize > maxStorageBufferBindingSize) {
+                    return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
+                }
+
+                Pass* currentPass = passes.empty() ? nullptr : &passes.back();
+                if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
+                    uint64_t nextBatchDataOffset =
+                        Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
+                    uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
+                    if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
+                        // We can fit this batch in the current pass.
+                        newBatch.dataBufferOffset = nextBatchDataOffset;
+                        currentPass->batchDataSize = newPassBatchDataSize;
+                        currentPass->batches.push_back(newBatch);
+                        continue;
+                    }
+                }
+
+                // We need to start a new pass for this batch.
+                newBatch.dataBufferOffset = 0;
+
+                Pass newPass;
+                newPass.clientIndirectBuffer = clientIndirectBuffer;
+                newPass.batchDataSize = newBatch.dataSize;
+                newPass.batches.push_back(newBatch);
+                passes.push_back(std::move(newPass));
+            }
+        }
+
+        auto* const store = device->GetInternalPipelineStore();
+        ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
+        ScratchBuffer& batchDataBuffer = store->scratchStorage;
+
+        uint64_t requiredBatchDataBufferSize = 0;
+        for (const Pass& pass : passes) {
+            requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
+        }
+        DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
+        usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
+
+        DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
+        usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
+
+        // Now we allocate and populate host-side batch data to be copied to the GPU.
+        for (Pass& pass : passes) {
+            // We use std::malloc here because it guarantees maximal scalar alignment.
+            pass.batchData = {std::malloc(pass.batchDataSize), std::free};
+            memset(pass.batchData.get(), 0, pass.batchDataSize);
+            uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
+            for (Batch& batch : pass.batches) {
+                batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
+                batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
+                batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
+
+                uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
+                uint64_t validatedParamsOffset = batch.validatedParamsOffset;
+                for (auto& draw : batch.metadata->draws) {
+                    // The shader uses this to index an array of u32, hence the division by 4 bytes.
+                    *indirectOffsets++ = static_cast<uint32_t>(
+                        (draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
+
+                    draw.cmd->indirectBuffer = validatedParamsBuffer.GetBuffer();
+                    draw.cmd->indirectOffset = validatedParamsOffset;
+
+                    validatedParamsOffset += kDrawIndexedIndirectSize;
+                }
+            }
+        }
+
+        ComputePipelineBase* pipeline;
+        DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
+
+        Ref<BindGroupLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+        BindGroupEntry bindings[3];
+        BindGroupEntry& bufferDataBinding = bindings[0];
+        bufferDataBinding.binding = 0;
+        bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
+
+        BindGroupEntry& clientIndirectBinding = bindings[1];
+        clientIndirectBinding.binding = 1;
+
+        BindGroupEntry& validatedParamsBinding = bindings[2];
+        validatedParamsBinding.binding = 2;
+        validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
+
+        BindGroupDescriptor bindGroupDescriptor = {};
+        bindGroupDescriptor.layout = layout.Get();
+        bindGroupDescriptor.entryCount = 3;
+        bindGroupDescriptor.entries = bindings;
+
+        // Finally, we can now encode our validation passes. Each pass first does a single
+        // WriteBuffer to get batch data over to the GPU, followed by a single compute pass. The
+        // compute pass encodes a separate SetBindGroup and Dispatch command for each batch.
+        for (const Pass& pass : passes) {
+            commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
+                                           static_cast<const uint8_t*>(pass.batchData.get()),
+                                           pass.batchDataSize);
+
+            Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
+            passEncoder->APISetPipeline(pipeline);
+
+            clientIndirectBinding.buffer = pass.clientIndirectBuffer;
+
+            for (const Batch& batch : pass.batches) {
+                bufferDataBinding.offset = batch.dataBufferOffset;
+                bufferDataBinding.size = batch.dataSize;
+                clientIndirectBinding.offset = batch.clientIndirectOffset;
+                clientIndirectBinding.size = batch.clientIndirectSize;
+                validatedParamsBinding.offset = batch.validatedParamsOffset;
+                validatedParamsBinding.size = batch.validatedParamsSize;
+
+                Ref<BindGroupBase> bindGroup;
+                DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
+
+                const uint32_t numDrawsRoundedUp =
+                    (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
+                passEncoder->APISetBindGroup(0, bindGroup.Get());
+                passEncoder->APIDispatch(numDrawsRoundedUp);
+            }
+
+            passEncoder->APIEnd();
+        }
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/IndirectDrawValidationEncoder.h b/src/dawn/native/IndirectDrawValidationEncoder.h
new file mode 100644
index 0000000..6714137
--- /dev/null
+++ b/src/dawn/native/IndirectDrawValidationEncoder.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+#define DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+
+namespace dawn::native {
+
+    class CommandEncoder;
+    struct CombinedLimits;
+    class DeviceBase;
+    class RenderPassResourceUsageTracker;
+
+    // The maximum number of draws call we can fit into a single validation batch. This is
+    // essentially limited by the number of indirect parameter blocks that can fit into the maximum
+    // allowed storage binding size (with the base limits, it is about 6.7M).
+    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
+
+    MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+                                                    CommandEncoder* commandEncoder,
+                                                    RenderPassResourceUsageTracker* usageTracker,
+                                                    IndirectDrawMetadata* indirectDrawMetadata);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
diff --git a/src/dawn/native/Instance.cpp b/src/dawn/native/Instance.cpp
new file mode 100644
index 0000000..48bf740
--- /dev/null
+++ b/src/dawn/native/Instance.cpp
@@ -0,0 +1,435 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Instance.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+
+// For SwiftShader fallback
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+#    include "dawn/native/VulkanBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_USE_X11)
+#    include "dawn/native/XlibXcbFunctions.h"
+#endif  // defined(DAWN_USE_X11)
+
+#include <optional>
+
+namespace dawn::native {
+
+    // Forward definitions of each backend's "Connect" function that creates new BackendConnection.
+    // Conditionally compiled declarations are used to avoid using static constructors instead.
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+    namespace d3d12 {
+        BackendConnection* Connect(InstanceBase* instance);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+    namespace metal {
+        BackendConnection* Connect(InstanceBase* instance);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+    namespace null {
+        BackendConnection* Connect(InstanceBase* instance);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_NULL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+    namespace opengl {
+        BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+    namespace vulkan {
+        BackendConnection* Connect(InstanceBase* instance);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+    namespace {
+
+        BackendsBitset GetEnabledBackends() {
+            BackendsBitset enabledBackends;
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+            enabledBackends.set(wgpu::BackendType::Null);
+#endif  // defined(DAWN_ENABLE_BACKEND_NULL)
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+            enabledBackends.set(wgpu::BackendType::D3D12);
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+            enabledBackends.set(wgpu::BackendType::Metal);
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+            enabledBackends.set(wgpu::BackendType::Vulkan);
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+            enabledBackends.set(wgpu::BackendType::OpenGL);
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+            enabledBackends.set(wgpu::BackendType::OpenGLES);
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+            return enabledBackends;
+        }
+
+    }  // anonymous namespace
+
+    // InstanceBase
+
+    // static
+    InstanceBase* InstanceBase::Create(const InstanceDescriptor* descriptor) {
+        Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
+        static constexpr InstanceDescriptor kDefaultDesc = {};
+        if (descriptor == nullptr) {
+            descriptor = &kDefaultDesc;
+        }
+        if (instance->ConsumedError(instance->Initialize(descriptor))) {
+            return nullptr;
+        }
+        return instance.Detach();
+    }
+
+    // TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
+    MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
+        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
+        const DawnInstanceDescriptor* dawnDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &dawnDesc);
+        if (dawnDesc != nullptr) {
+            for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
+                mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
+            }
+        }
+        // Default paths to search are next to the shared library, next to the executable, and
+        // no path (just libvulkan.so).
+        if (auto p = GetModuleDirectory()) {
+            mRuntimeSearchPaths.push_back(std::move(*p));
+        }
+        if (auto p = GetExecutableDirectory()) {
+            mRuntimeSearchPaths.push_back(std::move(*p));
+        }
+        mRuntimeSearchPaths.push_back("");
+        return {};
+    }
+
+    void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
+                                         WGPURequestAdapterCallback callback,
+                                         void* userdata) {
+        static constexpr RequestAdapterOptions kDefaultOptions = {};
+        if (options == nullptr) {
+            options = &kDefaultOptions;
+        }
+        auto result = RequestAdapterInternal(options);
+        if (result.IsError()) {
+            auto err = result.AcquireError();
+            std::string msg = err->GetFormattedMessage();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
+        } else {
+            Ref<AdapterBase> adapter = result.AcquireSuccess();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
+        }
+    }
+
+    ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
+        const RequestAdapterOptions* options) {
+        ASSERT(options != nullptr);
+        if (options->forceFallbackAdapter) {
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+            if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
+                dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
+                vulkanOptions.forceSwiftShader = true;
+                DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
+            }
+#else
+            return Ref<AdapterBase>(nullptr);
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+        } else {
+            DiscoverDefaultAdapters();
+        }
+
+        wgpu::AdapterType preferredType;
+        switch (options->powerPreference) {
+            case wgpu::PowerPreference::LowPower:
+                preferredType = wgpu::AdapterType::IntegratedGPU;
+                break;
+            case wgpu::PowerPreference::Undefined:
+            case wgpu::PowerPreference::HighPerformance:
+                preferredType = wgpu::AdapterType::DiscreteGPU;
+                break;
+        }
+
+        std::optional<size_t> discreteGPUAdapterIndex;
+        std::optional<size_t> integratedGPUAdapterIndex;
+        std::optional<size_t> cpuAdapterIndex;
+        std::optional<size_t> unknownAdapterIndex;
+
+        for (size_t i = 0; i < mAdapters.size(); ++i) {
+            AdapterProperties properties;
+            mAdapters[i]->APIGetProperties(&properties);
+
+            if (options->forceFallbackAdapter) {
+                if (!gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID)) {
+                    continue;
+                }
+                return mAdapters[i];
+            }
+            if (properties.adapterType == preferredType) {
+                return mAdapters[i];
+            }
+            switch (properties.adapterType) {
+                case wgpu::AdapterType::DiscreteGPU:
+                    discreteGPUAdapterIndex = i;
+                    break;
+                case wgpu::AdapterType::IntegratedGPU:
+                    integratedGPUAdapterIndex = i;
+                    break;
+                case wgpu::AdapterType::CPU:
+                    cpuAdapterIndex = i;
+                    break;
+                case wgpu::AdapterType::Unknown:
+                    unknownAdapterIndex = i;
+                    break;
+            }
+        }
+
+        // For now, we always prefer the discrete GPU
+        if (discreteGPUAdapterIndex) {
+            return mAdapters[*discreteGPUAdapterIndex];
+        }
+        if (integratedGPUAdapterIndex) {
+            return mAdapters[*integratedGPUAdapterIndex];
+        }
+        if (cpuAdapterIndex) {
+            return mAdapters[*cpuAdapterIndex];
+        }
+        if (unknownAdapterIndex) {
+            return mAdapters[*unknownAdapterIndex];
+        }
+
+        return Ref<AdapterBase>(nullptr);
+    }
+
+    void InstanceBase::DiscoverDefaultAdapters() {
+        for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
+            EnsureBackendConnection(b);
+        }
+
+        if (mDiscoveredDefaultAdapters) {
+            return;
+        }
+
+        // Query and merge all default adapters for all backends
+        for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+            std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
+
+            for (Ref<AdapterBase>& adapter : backendAdapters) {
+                ASSERT(adapter->GetBackendType() == backend->GetType());
+                ASSERT(adapter->GetInstance() == this);
+                mAdapters.push_back(std::move(adapter));
+            }
+        }
+
+        mDiscoveredDefaultAdapters = true;
+    }
+
+    // This is just a wrapper around the real logic that uses Error.h error handling.
+    bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+        return !ConsumedError(DiscoverAdaptersInternal(options));
+    }
+
+    const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
+        return mTogglesInfo.GetToggleInfo(toggleName);
+    }
+
+    Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
+        return mTogglesInfo.ToggleNameToEnum(toggleName);
+    }
+
+    const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
+        return mFeaturesInfo.GetFeatureInfo(feature);
+    }
+
+    const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
+        return mAdapters;
+    }
+
+    void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
+        if (mBackendsConnected[backendType]) {
+            return;
+        }
+
+        auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
+            if (connection != nullptr) {
+                ASSERT(connection->GetType() == expectedType);
+                ASSERT(connection->GetInstance() == this);
+                mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
+            }
+        };
+
+        switch (backendType) {
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+            case wgpu::BackendType::Null:
+                Register(null::Connect(this), wgpu::BackendType::Null);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_NULL)
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+            case wgpu::BackendType::D3D12:
+                Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+            case wgpu::BackendType::Metal:
+                Register(metal::Connect(this), wgpu::BackendType::Metal);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+            case wgpu::BackendType::Vulkan:
+                Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+            case wgpu::BackendType::OpenGL:
+                Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
+                         wgpu::BackendType::OpenGL);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+            case wgpu::BackendType::OpenGLES:
+                Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
+                         wgpu::BackendType::OpenGLES);
+                break;
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+            default:
+                UNREACHABLE();
+        }
+
+        mBackendsConnected.set(backendType);
+    }
+
+    MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
+        wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
+        DAWN_TRY(ValidateBackendType(backendType));
+
+        if (!GetEnabledBackends()[backendType]) {
+            return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
+        }
+
+        EnsureBackendConnection(backendType);
+
+        bool foundBackend = false;
+        for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+            if (backend->GetType() != backendType) {
+                continue;
+            }
+            foundBackend = true;
+
+            std::vector<Ref<AdapterBase>> newAdapters;
+            DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
+
+            for (Ref<AdapterBase>& adapter : newAdapters) {
+                ASSERT(adapter->GetBackendType() == backend->GetType());
+                ASSERT(adapter->GetInstance() == this);
+                mAdapters.push_back(std::move(adapter));
+            }
+        }
+
+        DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
+        return {};
+    }
+
+    bool InstanceBase::ConsumedError(MaybeError maybeError) {
+        if (maybeError.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+
+            ASSERT(error != nullptr);
+            dawn::ErrorLog() << error->GetFormattedMessage();
+            return true;
+        }
+        return false;
+    }
+
+    bool InstanceBase::IsBackendValidationEnabled() const {
+        return mBackendValidationLevel != BackendValidationLevel::Disabled;
+    }
+
+    void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
+        mBackendValidationLevel = level;
+    }
+
+    BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
+        return mBackendValidationLevel;
+    }
+
+    void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+        mBeginCaptureOnStartup = beginCaptureOnStartup;
+    }
+
+    bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
+        return mBeginCaptureOnStartup;
+    }
+
+    void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
+        mPlatform = platform;
+    }
+
+    dawn::platform::Platform* InstanceBase::GetPlatform() {
+        if (mPlatform != nullptr) {
+            return mPlatform;
+        }
+
+        if (mDefaultPlatform == nullptr) {
+            mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
+        }
+        return mDefaultPlatform.get();
+    }
+
+    const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
+        return mRuntimeSearchPaths;
+    }
+
+    const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
+#if defined(DAWN_USE_X11)
+        if (mXlibXcbFunctions == nullptr) {
+            mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
+        }
+        return mXlibXcbFunctions.get();
+#else
+        UNREACHABLE();
+#endif  // defined(DAWN_USE_X11)
+    }
+
+    Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
+        if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
+            return nullptr;
+        }
+
+        return new Surface(this, descriptor);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Instance.h b/src/dawn/native/Instance.h
new file mode 100644
index 0000000..5898689
--- /dev/null
+++ b/src/dawn/native/Instance.h
@@ -0,0 +1,129 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INSTANCE_H_
+#define DAWNNATIVE_INSTANCE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Toggles.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+namespace dawn::platform {
+    class Platform;
+}  // namespace dawn::platform
+
+namespace dawn::native {
+
+    class Surface;
+    class XlibXcbFunctions;
+
+    using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
+
+    // This is called InstanceBase for consistency across the frontend, even if the backends don't
+    // specialize this class.
+    class InstanceBase final : public RefCounted {
+      public:
+        static InstanceBase* Create(const InstanceDescriptor* descriptor = nullptr);
+
+        void APIRequestAdapter(const RequestAdapterOptions* options,
+                               WGPURequestAdapterCallback callback,
+                               void* userdata);
+
+        void DiscoverDefaultAdapters();
+        bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+
+        const std::vector<Ref<AdapterBase>>& GetAdapters() const;
+
+        // Used to handle error that happen up to device creation.
+        bool ConsumedError(MaybeError maybeError);
+
+        // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+        // of a toggle supported in Dawn.
+        const ToggleInfo* GetToggleInfo(const char* toggleName);
+        Toggle ToggleNameToEnum(const char* toggleName);
+
+        // Used to query the details of an feature. Return nullptr if featureName is not a valid
+        // name of an feature supported in Dawn.
+        const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
+
+        bool IsBackendValidationEnabled() const;
+        void SetBackendValidationLevel(BackendValidationLevel level);
+        BackendValidationLevel GetBackendValidationLevel() const;
+
+        void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+        bool IsBeginCaptureOnStartupEnabled() const;
+
+        void SetPlatform(dawn::platform::Platform* platform);
+        dawn::platform::Platform* GetPlatform();
+
+        const std::vector<std::string>& GetRuntimeSearchPaths() const;
+
+        // Get backend-independent libraries that need to be loaded dynamically.
+        const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
+
+        // Dawn API
+        Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
+
+      private:
+        InstanceBase() = default;
+        ~InstanceBase() = default;
+
+        InstanceBase(const InstanceBase& other) = delete;
+        InstanceBase& operator=(const InstanceBase& other) = delete;
+
+        MaybeError Initialize(const InstanceDescriptor* descriptor);
+
+        // Lazily creates connections to all backends that have been compiled.
+        void EnsureBackendConnection(wgpu::BackendType backendType);
+
+        MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
+
+        ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
+            const RequestAdapterOptions* options);
+
+        std::vector<std::string> mRuntimeSearchPaths;
+
+        BackendsBitset mBackendsConnected;
+
+        bool mDiscoveredDefaultAdapters = false;
+
+        bool mBeginCaptureOnStartup = false;
+        BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
+
+        dawn::platform::Platform* mPlatform = nullptr;
+        std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
+
+        std::vector<std::unique_ptr<BackendConnection>> mBackends;
+        std::vector<Ref<AdapterBase>> mAdapters;
+
+        FeaturesInfo mFeaturesInfo;
+        TogglesInfo mTogglesInfo;
+
+#if defined(DAWN_USE_X11)
+        std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
+#endif  // defined(DAWN_USE_X11)
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_INSTANCE_H_
diff --git a/src/dawn/native/IntegerTypes.h b/src/dawn/native/IntegerTypes.h
new file mode 100644
index 0000000..fd4c2f1
--- /dev/null
+++ b/src/dawn/native/IntegerTypes.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INTEGERTYPES_H_
+#define DAWNNATIVE_INTEGERTYPES_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/TypedInteger.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+    // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
+    using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
+    constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
+
+    // Binding numbers get mapped to a packed range of indices
+    using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
+
+    using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
+
+    constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
+
+    using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
+
+    constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
+        ColorAttachmentIndex(kMaxColorAttachments);
+
+    using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
+    using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
+
+    constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
+    constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
+        VertexAttributeLocation(kMaxVertexAttributes);
+
+    // Serials are 64bit integers that are incremented by one each time to produce unique values.
+    // Some serials (like queue serials) are compared numerically to know which one is before
+    // another, while some serials are only checked for equality. We call serials only checked
+    // for equality IDs.
+
+    // Buffer mapping requests are stored outside of the buffer while they are being processed and
+    // cannot be invalidated. Instead they are associated with an ID, and when a map request is
+    // finished, the mapping callback is fired only if its ID matches the ID if the last request
+    // that was sent.
+    using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
+
+    // The type for the WebGPU API fence serial values.
+    using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
+
+    // A serial used to watch the progression of GPU execution on a queue, each time operations
+    // that need to be followed individually are scheduled for execution on a queue, the serial
+    // is incremented by one. This way to know if something is done executing, we just need to
+    // compare its serial with the currently completed serial.
+    using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
+    constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
+
+    // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
+    // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
+    // token, which prevents them (and any BindGroups created with them) from being used with any
+    // other pipelines.
+    using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_INTEGERTYPES_H_
diff --git a/src/dawn/native/InternalPipelineStore.cpp b/src/dawn/native/InternalPipelineStore.cpp
new file mode 100644
index 0000000..a2532aa
--- /dev/null
+++ b/src/dawn/native/InternalPipelineStore.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/InternalPipelineStore.h"
+
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ShaderModule.h"
+
+#include <unordered_map>
+
+namespace dawn::native {
+
+    class RenderPipelineBase;
+    class ShaderModuleBase;
+
+    InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
+        : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
+          scratchIndirectStorage(device,
+                                 wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
+                                     wgpu::BufferUsage::Storage) {
+    }
+
+    InternalPipelineStore::~InternalPipelineStore() = default;
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/InternalPipelineStore.h b/src/dawn/native/InternalPipelineStore.h
new file mode 100644
index 0000000..64e7728
--- /dev/null
+++ b/src/dawn/native/InternalPipelineStore.h
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INTERNALPIPELINESTORE_H_
+#define DAWNNATIVE_INTERNALPIPELINESTORE_H_
+
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ScratchBuffer.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <unordered_map>
+
+namespace dawn::native {
+
+    class DeviceBase;
+    class RenderPipelineBase;
+    class ShaderModuleBase;
+
+    // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
+    // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
+    struct InternalPipelineStore {
+        explicit InternalPipelineStore(DeviceBase* device);
+        ~InternalPipelineStore();
+
+        std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
+            copyTextureForBrowserPipelines;
+
+        Ref<ShaderModuleBase> copyTextureForBrowser;
+
+        Ref<ComputePipelineBase> timestampComputePipeline;
+        Ref<ShaderModuleBase> timestampCS;
+
+        Ref<ShaderModuleBase> dummyFragmentShader;
+
+        // A scratch buffer suitable for use as a copy destination and storage binding.
+        ScratchBuffer scratchStorage;
+
+        // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
+        // buffer for indirect dispatch or draw calls.
+        ScratchBuffer scratchIndirectStorage;
+
+        Ref<ComputePipelineBase> renderValidationPipeline;
+        Ref<ShaderModuleBase> renderValidationShader;
+        Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_INTERNALPIPELINESTORE_H_
diff --git a/src/dawn/native/Limits.cpp b/src/dawn/native/Limits.cpp
new file mode 100644
index 0000000..a7b8ec9
--- /dev/null
+++ b/src/dawn/native/Limits.cpp
@@ -0,0 +1,213 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Limits.h"
+
+#include "dawn/common/Assert.h"
+
+#include <array>
+
+// clang-format off
+// TODO(crbug.com/dawn/685):
+// For now, only expose these tiers until metrics can determine better ones.
+#define LIMITS_WORKGROUP_STORAGE_SIZE(X)                                  \
+    X(Higher, maxComputeWorkgroupStorageSize, 16352, 32768, 49152, 65536)
+
+#define LIMITS_STORAGE_BUFFER_BINDING_SIZE(X)                                             \
+    X(Higher, maxStorageBufferBindingSize, 134217728, 1073741824, 2147483647, 4294967295)
+
+// TODO(crbug.com/dawn/685):
+// These limits don't have tiers yet. Define two tiers with the same values since the macros
+// in this file expect more than one tier.
+#define LIMITS_OTHER(X)                                                \
+    X(Higher,                     maxTextureDimension1D,  8192,  8192) \
+    X(Higher,                     maxTextureDimension2D,  8192,  8192) \
+    X(Higher,                     maxTextureDimension3D,  2048,  2048) \
+    X(Higher,                     maxTextureArrayLayers,   256,   256) \
+    X(Higher,                             maxBindGroups,     4,     4) \
+    X(Higher, maxDynamicUniformBuffersPerPipelineLayout,     8,     8) \
+    X(Higher, maxDynamicStorageBuffersPerPipelineLayout,     4,     4) \
+    X(Higher,          maxSampledTexturesPerShaderStage,    16,    16) \
+    X(Higher,                 maxSamplersPerShaderStage,    16,    16) \
+    X(Higher,           maxStorageBuffersPerShaderStage,     8,     8) \
+    X(Higher,          maxStorageTexturesPerShaderStage,     4,     4) \
+    X(Higher,           maxUniformBuffersPerShaderStage,    12,    12) \
+    X(Higher,               maxUniformBufferBindingSize, 65536, 65536) \
+    X( Lower,           minUniformBufferOffsetAlignment,   256,   256) \
+    X( Lower,           minStorageBufferOffsetAlignment,   256,   256) \
+    X(Higher,                          maxVertexBuffers,     8,     8) \
+    X(Higher,                       maxVertexAttributes,    16,    16) \
+    X(Higher,                maxVertexBufferArrayStride,  2048,  2048) \
+    X(Higher,             maxInterStageShaderComponents,    60,    60) \
+    X(Higher,         maxComputeInvocationsPerWorkgroup,   256,   256) \
+    X(Higher,                  maxComputeWorkgroupSizeX,   256,   256) \
+    X(Higher,                  maxComputeWorkgroupSizeY,   256,   256) \
+    X(Higher,                  maxComputeWorkgroupSizeZ,    64,    64) \
+    X(Higher,          maxComputeWorkgroupsPerDimension, 65535, 65535)
+// clang-format on
+
+#define LIMITS_EACH_GROUP(X)              \
+    X(LIMITS_WORKGROUP_STORAGE_SIZE)      \
+    X(LIMITS_STORAGE_BUFFER_BINDING_SIZE) \
+    X(LIMITS_OTHER)
+
+#define LIMITS(X)                         \
+    LIMITS_WORKGROUP_STORAGE_SIZE(X)      \
+    LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
+    LIMITS_OTHER(X)
+
+namespace dawn::native {
+    namespace {
+        template <uint32_t A, uint32_t B>
+        constexpr void StaticAssertSame() {
+            static_assert(A == B, "Mismatching tier count in limit group.");
+        }
+
+        template <uint32_t I, uint32_t... Is>
+        constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
+            int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
+            DAWN_UNUSED(unused);
+            return I;
+        }
+
+        enum class LimitBetterDirection {
+            Lower,
+            Higher,
+        };
+
+        template <LimitBetterDirection Better>
+        struct CheckLimit;
+
+        template <>
+        struct CheckLimit<LimitBetterDirection::Lower> {
+            template <typename T>
+            static bool IsBetter(T lhs, T rhs) {
+                return lhs < rhs;
+            }
+
+            template <typename T>
+            static MaybeError Validate(T supported, T required) {
+                DAWN_INVALID_IF(IsBetter(required, supported),
+                                "Required limit (%u) is lower than the supported limit (%u).",
+                                required, supported);
+                return {};
+            }
+        };
+
+        template <>
+        struct CheckLimit<LimitBetterDirection::Higher> {
+            template <typename T>
+            static bool IsBetter(T lhs, T rhs) {
+                return lhs > rhs;
+            }
+
+            template <typename T>
+            static MaybeError Validate(T supported, T required) {
+                DAWN_INVALID_IF(IsBetter(required, supported),
+                                "Required limit (%u) is greater than the supported limit (%u).",
+                                required, supported);
+                return {};
+            }
+        };
+
+        template <typename T>
+        bool IsLimitUndefined(T value) {
+            static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
+            return false;
+        }
+
+        template <>
+        bool IsLimitUndefined<uint32_t>(uint32_t value) {
+            return value == wgpu::kLimitU32Undefined;
+        }
+
+        template <>
+        bool IsLimitUndefined<uint64_t>(uint64_t value) {
+            return value == wgpu::kLimitU64Undefined;
+        }
+
+    }  // namespace
+
+    void GetDefaultLimits(Limits* limits) {
+        ASSERT(limits != nullptr);
+#define X(Better, limitName, base, ...) limits->limitName = base;
+        LIMITS(X)
+#undef X
+    }
+
+    Limits ReifyDefaultLimits(const Limits& limits) {
+        Limits out;
+#define X(Better, limitName, base, ...)                                           \
+    if (IsLimitUndefined(limits.limitName) ||                                     \
+        CheckLimit<LimitBetterDirection::Better>::IsBetter(                       \
+            static_cast<decltype(limits.limitName)>(base), limits.limitName)) {   \
+        /* If the limit is undefined or the default is better, use the default */ \
+        out.limitName = base;                                                     \
+    } else {                                                                      \
+        out.limitName = limits.limitName;                                         \
+    }
+        LIMITS(X)
+#undef X
+        return out;
+    }
+
+    MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
+#define X(Better, limitName, ...)                                                  \
+    if (!IsLimitUndefined(requiredLimits.limitName)) {                             \
+        DAWN_TRY_CONTEXT(CheckLimit<LimitBetterDirection::Better>::Validate(       \
+                             supportedLimits.limitName, requiredLimits.limitName), \
+                         "validating " #limitName);                                \
+    }
+        LIMITS(X)
+#undef X
+        return {};
+    }
+
+    Limits ApplyLimitTiers(Limits limits) {
+#define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
+#define GET_TIER_COUNT(LIMIT_GROUP) \
+    ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
+
+#define X_EACH_GROUP(LIMIT_GROUP)                                    \
+    {                                                                \
+        constexpr uint32_t kTierCount = GET_TIER_COUNT(LIMIT_GROUP); \
+        for (uint32_t i = kTierCount; i != 0; --i) {                 \
+            LIMIT_GROUP(X_CHECK_BETTER_AND_CLAMP)                    \
+            /* Limits fit in tier and have been clamped. Break. */   \
+            break;                                                   \
+        }                                                            \
+    }
+
+#define X_CHECK_BETTER_AND_CLAMP(Better, limitName, ...)                                       \
+    {                                                                                          \
+        constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__};      \
+        decltype(Limits::limitName) tierValue = tiers[i - 1];                                  \
+        if (CheckLimit<LimitBetterDirection::Better>::IsBetter(tierValue, limits.limitName)) { \
+            /* The tier is better. Go to the next tier. */                                     \
+            continue;                                                                          \
+        } else if (tierValue != limits.limitName) {                                            \
+            /* Better than the tier. Degrade |limits| to the tier. */                          \
+            limits.limitName = tiers[i - 1];                                                   \
+        }                                                                                      \
+    }
+
+        LIMITS_EACH_GROUP(X_EACH_GROUP)
+#undef X_CHECK_BETTER
+#undef X_EACH_GROUP
+#undef GET_TIER_COUNT
+#undef X_TIER_COUNT
+        return limits;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Limits.h b/src/dawn/native/Limits.h
new file mode 100644
index 0000000..f41eaa8
--- /dev/null
+++ b/src/dawn/native/Limits.h
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_LIMITS_H_
+#define DAWNNATIVE_LIMITS_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    struct CombinedLimits {
+        Limits v1;
+    };
+
+    // Populate |limits| with the default limits.
+    void GetDefaultLimits(Limits* limits);
+
+    // Returns a copy of |limits| where all undefined values are replaced
+    // with their defaults. Also clamps to the defaults if the provided limits
+    // are worse.
+    Limits ReifyDefaultLimits(const Limits& limits);
+
+    // Validate that |requiredLimits| are no better than |supportedLimits|.
+    MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
+
+    // Returns a copy of |limits| where limit tiers are applied.
+    Limits ApplyLimitTiers(Limits limits);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_LIMITS_H_
diff --git a/src/dawn/native/ObjectBase.cpp b/src/dawn/native/ObjectBase.cpp
new file mode 100644
index 0000000..3cafdb7
--- /dev/null
+++ b/src/dawn/native/ObjectBase.cpp
@@ -0,0 +1,90 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Device.h"
+
+#include <mutex>
+
+namespace dawn::native {
+
+    static constexpr uint64_t kErrorPayload = 0;
+    static constexpr uint64_t kNotErrorPayload = 1;
+
+    ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
+    }
+
+    ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
+        : RefCounted(kErrorPayload), mDevice(device) {
+    }
+
+    DeviceBase* ObjectBase::GetDevice() const {
+        return mDevice;
+    }
+
+    bool ObjectBase::IsError() const {
+        return GetRefCountPayload() == kErrorPayload;
+    }
+
+    ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
+        if (label) {
+            mLabel = label;
+        }
+    }
+
+    ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
+    }
+
+    ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
+        : ObjectBase(device) {
+    }
+
+    ApiObjectBase::~ApiObjectBase() {
+        ASSERT(!IsAlive());
+    }
+
+    void ApiObjectBase::APISetLabel(const char* label) {
+        mLabel = label;
+        SetLabelImpl();
+    }
+
+    const std::string& ApiObjectBase::GetLabel() const {
+        return mLabel;
+    }
+
+    void ApiObjectBase::SetLabelImpl() {
+    }
+
+    bool ApiObjectBase::IsAlive() const {
+        return IsInList();
+    }
+
+    void ApiObjectBase::DeleteThis() {
+        Destroy();
+        RefCounted::DeleteThis();
+    }
+
+    void ApiObjectBase::TrackInDevice() {
+        ASSERT(GetDevice() != nullptr);
+        GetDevice()->TrackObject(this);
+    }
+
+    void ApiObjectBase::Destroy() {
+        const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
+        if (RemoveFromList()) {
+            DestroyImpl();
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ObjectBase.h b/src/dawn/native/ObjectBase.h
new file mode 100644
index 0000000..8f110a1
--- /dev/null
+++ b/src/dawn/native/ObjectBase.h
@@ -0,0 +1,97 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OBJECTBASE_H_
+#define DAWNNATIVE_OBJECTBASE_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Forward.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    class ObjectBase : public RefCounted {
+      public:
+        struct ErrorTag {};
+        static constexpr ErrorTag kError = {};
+
+        explicit ObjectBase(DeviceBase* device);
+        ObjectBase(DeviceBase* device, ErrorTag tag);
+
+        DeviceBase* GetDevice() const;
+        bool IsError() const;
+
+      private:
+        // Pointer to owning device.
+        DeviceBase* mDevice;
+    };
+
+    class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
+      public:
+        struct LabelNotImplementedTag {};
+        static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
+        struct UntrackedByDeviceTag {};
+        static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
+
+        ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
+        ApiObjectBase(DeviceBase* device, const char* label);
+        ApiObjectBase(DeviceBase* device, ErrorTag tag);
+        ~ApiObjectBase() override;
+
+        virtual ObjectType GetType() const = 0;
+        const std::string& GetLabel() const;
+
+        // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
+        // by the owning device.
+        bool IsAlive() const;
+
+        // This needs to be public because it can be called from the device owning the object.
+        void Destroy();
+
+        // Dawn API
+        void APISetLabel(const char* label);
+
+      protected:
+        // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
+        // always call their derived class implementation of Destroy prior to the derived
+        // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
+        // then the underlying backend's Destroy calls are executed. We cannot naively put the call
+        // to Destroy in the destructor of this class because it calls DestroyImpl
+        // which is a virtual function often implemented in the Derived class which would already
+        // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
+        // order. Note that some classes like BindGroup may override the DeleteThis function again,
+        // and they should ensure that their overriding versions call this underlying version
+        // somewhere.
+        void DeleteThis() override;
+        void TrackInDevice();
+
+        // Sub-classes may override this function multiple times. Whenever overriding this function,
+        // however, users should be sure to call their parent's version in the new override to make
+        // sure that all destroy functionality is kept. This function is guaranteed to only be
+        // called once through the exposed Destroy function.
+        virtual void DestroyImpl() = 0;
+
+      private:
+        virtual void SetLabelImpl();
+
+        std::string mLabel;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_OBJECTBASE_H_
diff --git a/src/dawn/native/ObjectContentHasher.cpp b/src/dawn/native/ObjectContentHasher.cpp
new file mode 100644
index 0000000..58c892e
--- /dev/null
+++ b/src/dawn/native/ObjectContentHasher.cpp
@@ -0,0 +1,22 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ObjectContentHasher.h"
+
+namespace dawn::native {
+
+    size_t ObjectContentHasher::GetContentHash() const {
+        return mContentHash;
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/ObjectContentHasher.h b/src/dawn/native/ObjectContentHasher.h
new file mode 100644
index 0000000..c1ca32a
--- /dev/null
+++ b/src/dawn/native/ObjectContentHasher.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
+#define DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
+
+#include "dawn/common/HashUtils.h"
+
+#include <string>
+#include <vector>
+
+namespace dawn::native {
+
+    // ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
+    // cache.
+    class ObjectContentHasher {
+      public:
+        // Record calls the appropriate record function based on the type.
+        template <typename T, typename... Args>
+        void Record(const T& value, const Args&... args) {
+            RecordImpl<T, Args...>::Call(this, value, args...);
+        }
+
+        size_t GetContentHash() const;
+
+      private:
+        template <typename T, typename... Args>
+        struct RecordImpl {
+            static constexpr void Call(ObjectContentHasher* recorder,
+                                       const T& value,
+                                       const Args&... args) {
+                HashCombine(&recorder->mContentHash, value, args...);
+            }
+        };
+
+        template <typename T>
+        struct RecordImpl<T*> {
+            static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
+                // Calling Record(objPtr) is not allowed. This check exists to only prevent such
+                // mistakes.
+                static_assert(obj == nullptr);
+            }
+        };
+
+        template <typename T>
+        struct RecordImpl<std::vector<T>> {
+            static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
+                recorder->RecordIterable<std::vector<T>>(vec);
+            }
+        };
+
+        template <typename IteratorT>
+        constexpr void RecordIterable(const IteratorT& iterable) {
+            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+                Record(*it);
+            }
+        }
+
+        size_t mContentHash = 0;
+    };
+
+    template <>
+    struct ObjectContentHasher::RecordImpl<std::string> {
+        static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
+            recorder->RecordIterable<std::string>(str);
+        }
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
diff --git a/src/dawn/native/PassResourceUsage.h b/src/dawn/native/PassResourceUsage.h
new file mode 100644
index 0000000..c6fe535
--- /dev/null
+++ b/src/dawn/native/PassResourceUsage.h
@@ -0,0 +1,100 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PASSRESOURCEUSAGE_H
+#define DAWNNATIVE_PASSRESOURCEUSAGE_H
+
+#include "dawn/native/SubresourceStorage.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <set>
+#include <vector>
+
+namespace dawn::native {
+
+    // This file declares various "ResourceUsage" structures. They are produced by the frontend
+    // while recording commands to be used for later validation and also some operations in the
+    // backends. The are produced by the "Encoder" objects that finalize them on "EndPass" or
+    // "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
+
+    class BufferBase;
+    class QuerySetBase;
+    class TextureBase;
+
+    // The texture usage inside passes must be tracked per-subresource.
+    using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
+
+    // Which resources are used by a synchronization scope and how they are used. The command
+    // buffer validation pre-computes this information so that backends with explicit barriers
+    // don't have to re-compute it.
+    struct SyncScopeResourceUsage {
+        std::vector<BufferBase*> buffers;
+        std::vector<wgpu::BufferUsage> bufferUsages;
+
+        std::vector<TextureBase*> textures;
+        std::vector<TextureSubresourceUsage> textureUsages;
+
+        std::vector<ExternalTextureBase*> externalTextures;
+    };
+
+    // Contains all the resource usage data for a compute pass.
+    //
+    // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
+    // specification. ComputePassResourceUsage also stores nline the set of all buffers and
+    // textures used, because some unused BindGroups may not be used at all in synchronization
+    // scope but their resources still need to be validated on Queue::Submit.
+    struct ComputePassResourceUsage {
+        // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
+        // use the copy constructor (that's deleted) when doing operations on a
+        // vector<ComputePassResourceUsage>
+        ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
+        ComputePassResourceUsage() = default;
+
+        std::vector<SyncScopeResourceUsage> dispatchUsages;
+
+        // All the resources referenced by this compute pass for validation in Queue::Submit.
+        std::set<BufferBase*> referencedBuffers;
+        std::set<TextureBase*> referencedTextures;
+        std::set<ExternalTextureBase*> referencedExternalTextures;
+    };
+
+    // Contains all the resource usage data for a render pass.
+    //
+    // In the WebGPU specification render passes are synchronization scopes but we also need to
+    // track additional data. It is stored for render passes used by a CommandBuffer, but also in
+    // RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
+    struct RenderPassResourceUsage : public SyncScopeResourceUsage {
+        // Storage to track the occlusion queries used during the pass.
+        std::vector<QuerySetBase*> querySets;
+        std::vector<std::vector<bool>> queryAvailabilities;
+    };
+
+    using RenderPassUsages = std::vector<RenderPassResourceUsage>;
+    using ComputePassUsages = std::vector<ComputePassResourceUsage>;
+
+    // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
+    // is used for validation and to produce barriers and lazy clears in the backends.
+    struct CommandBufferResourceUsage {
+        RenderPassUsages renderPasses;
+        ComputePassUsages computePasses;
+
+        // Resources used in commands that aren't in a pass.
+        std::set<BufferBase*> topLevelBuffers;
+        std::set<TextureBase*> topLevelTextures;
+        std::set<QuerySetBase*> usedQuerySets;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PASSRESOURCEUSAGE_H
diff --git a/src/dawn/native/PassResourceUsageTracker.cpp b/src/dawn/native/PassResourceUsageTracker.cpp
new file mode 100644
index 0000000..b4814cf
--- /dev/null
+++ b/src/dawn/native/PassResourceUsageTracker.cpp
@@ -0,0 +1,243 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PassResourceUsageTracker.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Texture.h"
+
+#include <utility>
+
+namespace dawn::native {
+
+    void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
+        // std::map's operator[] will create the key and return 0 if the key didn't exist
+        // before.
+        mBufferUsages[buffer] |= usage;
+    }
+
+    void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
+        TextureBase* texture = view->GetTexture();
+        const SubresourceRange& range = view->GetSubresourceRange();
+
+        // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+        // wgpu::TextureUsage::None)
+        auto it = mTextureUsages.emplace(
+            std::piecewise_construct, std::forward_as_tuple(texture),
+            std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+                                  texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+        TextureSubresourceUsage& textureUsage = it.first->second;
+
+        textureUsage.Update(range,
+                            [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+                                // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
+                                // branches.
+                                if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
+                                    (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
+                                    // Using the same subresource as an attachment for two different
+                                    // render attachments is a write-write hazard. Add this internal
+                                    // usage so we will fail the check that a subresource with
+                                    // writable usage is the single usage.
+                                    *storedUsage |= kAgainAsRenderAttachment;
+                                }
+                                *storedUsage |= usage;
+                            });
+    }
+
+    void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
+        TextureBase* texture,
+        const TextureSubresourceUsage& textureUsage) {
+        // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+        // wgpu::TextureUsage::None)
+        auto it = mTextureUsages.emplace(
+            std::piecewise_construct, std::forward_as_tuple(texture),
+            std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+                                  texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+        TextureSubresourceUsage* passTextureUsage = &it.first->second;
+
+        passTextureUsage->Merge(
+            textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+                             const wgpu::TextureUsage& addedUsage) {
+                ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
+                *storedUsage |= addedUsage;
+            });
+    }
+
+    void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
+        for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+             ++bindingIndex) {
+            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform:
+                            BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+                            break;
+                        case wgpu::BufferBindingType::Storage:
+                            BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+                            break;
+                        case kInternalStorageBufferBinding:
+                            BufferUsedAs(buffer, kInternalStorageBuffer);
+                            break;
+                        case wgpu::BufferBindingType::ReadOnlyStorage:
+                            BufferUsedAs(buffer, kReadOnlyStorageBuffer);
+                            break;
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+
+                case BindingInfoType::Texture: {
+                    TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+                    TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
+                    break;
+                }
+
+                case BindingInfoType::StorageTexture: {
+                    TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+                    switch (bindingInfo.storageTexture.access) {
+                        case wgpu::StorageTextureAccess::WriteOnly:
+                            TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
+                            break;
+                        case wgpu::StorageTextureAccess::Undefined:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
+                    break;
+
+                case BindingInfoType::Sampler:
+                    break;
+            }
+        }
+
+        for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+            mExternalTextureUsages.insert(externalTexture.Get());
+        }
+    }
+
+    SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
+        SyncScopeResourceUsage result;
+        result.buffers.reserve(mBufferUsages.size());
+        result.bufferUsages.reserve(mBufferUsages.size());
+        result.textures.reserve(mTextureUsages.size());
+        result.textureUsages.reserve(mTextureUsages.size());
+
+        for (auto& [buffer, usage] : mBufferUsages) {
+            result.buffers.push_back(buffer);
+            result.bufferUsages.push_back(usage);
+        }
+
+        for (auto& [texture, usage] : mTextureUsages) {
+            result.textures.push_back(texture);
+            result.textureUsages.push_back(std::move(usage));
+        }
+
+        for (auto& it : mExternalTextureUsages) {
+            result.externalTextures.push_back(it);
+        }
+
+        mBufferUsages.clear();
+        mTextureUsages.clear();
+        mExternalTextureUsages.clear();
+
+        return result;
+    }
+
+    void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
+        mUsage.dispatchUsages.push_back(std::move(scope));
+    }
+
+    void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
+        mUsage.referencedBuffers.insert(buffer);
+    }
+
+    void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
+        for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
+            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
+
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
+                    break;
+                }
+
+                case BindingInfoType::Texture: {
+                    mUsage.referencedTextures.insert(
+                        group->GetBindingAsTextureView(index)->GetTexture());
+                    break;
+                }
+
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
+                case BindingInfoType::StorageTexture:
+                case BindingInfoType::Sampler:
+                    break;
+            }
+        }
+
+        for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+            mUsage.referencedExternalTextures.insert(externalTexture.Get());
+        }
+    }
+
+    ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
+        return std::move(mUsage);
+    }
+
+    RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
+        RenderPassResourceUsage result;
+        *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+
+        result.querySets.reserve(mQueryAvailabilities.size());
+        result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+
+        for (auto& it : mQueryAvailabilities) {
+            result.querySets.push_back(it.first);
+            result.queryAvailabilities.push_back(std::move(it.second));
+        }
+
+        mQueryAvailabilities.clear();
+
+        return result;
+    }
+
+    void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+                                                                uint32_t queryIndex) {
+        // The query availability only needs to be tracked again on render passes for checking
+        // query overwrite on render pass and resetting query sets on the Vulkan backend.
+        DAWN_ASSERT(querySet != nullptr);
+
+        // Gets the iterator for that querySet or create a new vector of bool set to false
+        // if the querySet wasn't registered.
+        auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+        it->second[queryIndex] = true;
+    }
+
+    const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
+        return mQueryAvailabilities;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/PassResourceUsageTracker.h b/src/dawn/native/PassResourceUsageTracker.h
new file mode 100644
index 0000000..ad0ef92
--- /dev/null
+++ b/src/dawn/native/PassResourceUsageTracker.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
+#define DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
+
+#include "dawn/native/PassResourceUsage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <map>
+
+namespace dawn::native {
+
+    class BindGroupBase;
+    class BufferBase;
+    class ExternalTextureBase;
+    class QuerySetBase;
+    class TextureBase;
+
+    using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
+
+    // Helper class to build SyncScopeResourceUsages
+    class SyncScopeUsageTracker {
+      public:
+        void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
+        void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
+        void AddRenderBundleTextureUsage(TextureBase* texture,
+                                         const TextureSubresourceUsage& textureUsage);
+
+        // Walks the bind groups and tracks all its resources.
+        void AddBindGroup(BindGroupBase* group);
+
+        // Returns the per-pass usage for use by backends for APIs with explicit barriers.
+        SyncScopeResourceUsage AcquireSyncScopeUsage();
+
+      private:
+        std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
+        std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
+        std::set<ExternalTextureBase*> mExternalTextureUsages;
+    };
+
+    // Helper class to build ComputePassResourceUsages
+    class ComputePassResourceUsageTracker {
+      public:
+        void AddDispatch(SyncScopeResourceUsage scope);
+        void AddReferencedBuffer(BufferBase* buffer);
+        void AddResourcesReferencedByBindGroup(BindGroupBase* group);
+
+        ComputePassResourceUsage AcquireResourceUsage();
+
+      private:
+        ComputePassResourceUsage mUsage;
+    };
+
+    // Helper class to build RenderPassResourceUsages
+    class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
+      public:
+        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+        const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+
+        RenderPassResourceUsage AcquireResourceUsage();
+
+      private:
+        // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
+        // instead.
+        using SyncScopeUsageTracker::AcquireSyncScopeUsage;
+
+        // Tracks queries used in the render pass to validate that they aren't written twice.
+        QueryAvailabilityMap mQueryAvailabilities;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
diff --git a/src/dawn/native/PerStage.cpp b/src/dawn/native/PerStage.cpp
new file mode 100644
index 0000000..f3d5dc5
--- /dev/null
+++ b/src/dawn/native/PerStage.cpp
@@ -0,0 +1,29 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PerStage.h"
+
+namespace dawn::native {
+
+    BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
+        std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
+        return BitSetIterator<kNumStages, SingleShaderStage>(bits);
+    }
+
+    wgpu::ShaderStage StageBit(SingleShaderStage stage) {
+        ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+        return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/PerStage.h b/src/dawn/native/PerStage.h
new file mode 100644
index 0000000..83039b2
--- /dev/null
+++ b/src/dawn/native/PerStage.h
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PERSTAGE_H_
+#define DAWNNATIVE_PERSTAGE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Constants.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    enum class SingleShaderStage { Vertex, Fragment, Compute };
+
+    static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
+    static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
+    static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
+
+    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
+                  (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
+    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
+                  (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
+    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
+                  (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
+
+    BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
+    wgpu::ShaderStage StageBit(SingleShaderStage stage);
+
+    static constexpr wgpu::ShaderStage kAllStages =
+        static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
+
+    template <typename T>
+    class PerStage {
+      public:
+        PerStage() = default;
+        PerStage(const T& initialValue) {
+            mData.fill(initialValue);
+        }
+
+        T& operator[](SingleShaderStage stage) {
+            DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+            return mData[static_cast<uint32_t>(stage)];
+        }
+        const T& operator[](SingleShaderStage stage) const {
+            DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+            return mData[static_cast<uint32_t>(stage)];
+        }
+
+        T& operator[](wgpu::ShaderStage stageBit) {
+            uint32_t bit = static_cast<uint32_t>(stageBit);
+            DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+            return mData[Log2(bit)];
+        }
+        const T& operator[](wgpu::ShaderStage stageBit) const {
+            uint32_t bit = static_cast<uint32_t>(stageBit);
+            DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+            return mData[Log2(bit)];
+        }
+
+      private:
+        std::array<T, kNumStages> mData;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PERSTAGE_H_
diff --git a/src/dawn/native/PersistentCache.cpp b/src/dawn/native/PersistentCache.cpp
new file mode 100644
index 0000000..ce3ab49
--- /dev/null
+++ b/src/dawn/native/PersistentCache.cpp
@@ -0,0 +1,64 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PersistentCache.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Device.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native {
+
+    PersistentCache::PersistentCache(DeviceBase* device)
+        : mDevice(device), mCache(GetPlatformCache()) {
+    }
+
+    ScopedCachedBlob PersistentCache::LoadData(const PersistentCacheKey& key) {
+        ScopedCachedBlob blob = {};
+        if (mCache == nullptr) {
+            return blob;
+        }
+        std::lock_guard<std::mutex> lock(mMutex);
+        blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
+        if (blob.bufferSize > 0) {
+            blob.buffer.reset(new uint8_t[blob.bufferSize]);
+            const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
+                                                       blob.buffer.get(), blob.bufferSize);
+            ASSERT(bufferSize == blob.bufferSize);
+            return blob;
+        }
+        return blob;
+    }
+
+    void PersistentCache::StoreData(const PersistentCacheKey& key, const void* value, size_t size) {
+        if (mCache == nullptr) {
+            return;
+        }
+        ASSERT(value != nullptr);
+        ASSERT(size > 0);
+        std::lock_guard<std::mutex> lock(mMutex);
+        mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
+    }
+
+    dawn::platform::CachingInterface* PersistentCache::GetPlatformCache() {
+        // TODO(dawn:549): Create a fingerprint of concatenated version strings (ex. Tint commit
+        // hash, Dawn commit hash). This will be used by the client so it may know when to discard
+        // previously cached Dawn objects should this fingerprint change.
+        dawn::platform::Platform* platform = mDevice->GetPlatform();
+        if (platform != nullptr) {
+            return platform->GetCachingInterface(/*fingerprint*/ nullptr, /*fingerprintSize*/ 0);
+        }
+        return nullptr;
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/PersistentCache.h b/src/dawn/native/PersistentCache.h
new file mode 100644
index 0000000..7854d59
--- /dev/null
+++ b/src/dawn/native/PersistentCache.h
@@ -0,0 +1,92 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PERSISTENTCACHE_H_
+#define DAWNNATIVE_PERSISTENTCACHE_H_
+
+#include "dawn/native/Error.h"
+
+#include <mutex>
+#include <vector>
+
+namespace dawn::platform {
+    class CachingInterface;
+}
+
+namespace dawn::native {
+
+    using PersistentCacheKey = std::vector<uint8_t>;
+
+    struct ScopedCachedBlob {
+        std::unique_ptr<uint8_t[]> buffer;
+        size_t bufferSize = 0;
+    };
+
+    class DeviceBase;
+
+    enum class PersistentKeyType { Shader };
+
+    // This class should always be thread-safe as it is used in Create*PipelineAsync() where it is
+    // called asynchronously.
+    // The thread-safety of any access to mCache (the function LoadData() and StoreData()) is
+    // protected by mMutex.
+    class PersistentCache {
+      public:
+        PersistentCache(DeviceBase* device);
+
+        // Combines load/store operations into a single call.
+        // If the load was successful, a non-empty blob is returned to the caller.
+        // Else, the creation callback |createFn| gets invoked with a callback
+        // |doCache| to store the newly created blob back in the cache.
+        //
+        // Example usage:
+        //
+        // ScopedCachedBlob cachedBlob = {};
+        // DAWN_TRY_ASSIGN(cachedBlob, GetOrCreate(key, [&](auto doCache)) {
+        //      // Create a new blob to be stored
+        //      doCache(newBlobPtr, newBlobSize); // store
+        // }));
+        //
+        template <typename CreateFn>
+        ResultOrError<ScopedCachedBlob> GetOrCreate(const PersistentCacheKey& key,
+                                                    CreateFn&& createFn) {
+            // Attempt to load an existing blob from the cache.
+            ScopedCachedBlob blob = LoadData(key);
+            if (blob.bufferSize > 0) {
+                return std::move(blob);
+            }
+
+            // Allow the caller to create a new blob to be stored for the given key.
+            DAWN_TRY(createFn([this, key](const void* value, size_t size) {
+                this->StoreData(key, value, size);
+            }));
+
+            return std::move(blob);
+        }
+
+      private:
+        // PersistentCache impl
+        ScopedCachedBlob LoadData(const PersistentCacheKey& key);
+        void StoreData(const PersistentCacheKey& key, const void* value, size_t size);
+
+        dawn::platform::CachingInterface* GetPlatformCache();
+
+        DeviceBase* mDevice = nullptr;
+
+        std::mutex mMutex;
+        dawn::platform::CachingInterface* mCache = nullptr;
+    };
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PERSISTENTCACHE_H_
diff --git a/src/dawn/native/Pipeline.cpp b/src/dawn/native/Pipeline.cpp
new file mode 100644
index 0000000..344d948
--- /dev/null
+++ b/src/dawn/native/Pipeline.cpp
@@ -0,0 +1,259 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/ShaderModule.h"
+
+namespace dawn::native {
+    MaybeError ValidateProgrammableStage(DeviceBase* device,
+                                         const ShaderModuleBase* module,
+                                         const std::string& entryPoint,
+                                         uint32_t constantCount,
+                                         const ConstantEntry* constants,
+                                         const PipelineLayoutBase* layout,
+                                         SingleShaderStage stage) {
+        DAWN_TRY(device->ValidateObject(module));
+
+        DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
+                        "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+                        module);
+
+        const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
+
+        if (!metadata.infringedLimitErrors.empty()) {
+            std::ostringstream out;
+            out << "Entry point \"" << entryPoint << "\" infringes limits:\n";
+            for (const std::string& limit : metadata.infringedLimitErrors) {
+                out << " - " << limit << "\n";
+            }
+            return DAWN_VALIDATION_ERROR(out.str());
+        }
+
+        DAWN_INVALID_IF(metadata.stage != stage,
+                        "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
+                        metadata.stage, entryPoint, stage);
+
+        if (layout != nullptr) {
+            DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
+        }
+
+        if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+            return DAWN_VALIDATION_ERROR(
+                "Pipeline overridable constants are disallowed because they are partially "
+                "implemented.");
+        }
+
+        // Validate if overridable constants exist in shader module
+        // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
+        size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
+        // Keep an initialized constants sets to handle duplicate initialization cases
+        std::unordered_set<std::string> stageInitializedConstantIdentifiers;
+        for (uint32_t i = 0; i < constantCount; i++) {
+            DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
+                            "Pipeline overridable constant \"%s\" not found in %s.",
+                            constants[i].key, module);
+
+            if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
+                if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
+                    numUninitializedConstants--;
+                }
+                stageInitializedConstantIdentifiers.insert(constants[i].key);
+            } else {
+                // There are duplicate initializations
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "Pipeline overridable constants \"%s\" is set more than once in %s",
+                    constants[i].key, module);
+            }
+        }
+
+        // Validate if any overridable constant is left uninitialized
+        if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
+            std::string uninitializedConstantsArray;
+            bool isFirst = true;
+            for (std::string identifier : metadata.uninitializedOverridableConstants) {
+                if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
+                    continue;
+                }
+
+                if (isFirst) {
+                    isFirst = false;
+                } else {
+                    uninitializedConstantsArray.append(", ");
+                }
+                uninitializedConstantsArray.append(identifier);
+            }
+
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "There are uninitialized pipeline overridable constants in shader module %s, their "
+                "identifiers:[%s]",
+                module, uninitializedConstantsArray);
+        }
+
+        return {};
+    }
+
+    // PipelineBase
+
+    PipelineBase::PipelineBase(DeviceBase* device,
+                               PipelineLayoutBase* layout,
+                               const char* label,
+                               std::vector<StageAndDescriptor> stages)
+        : ApiObjectBase(device, label), mLayout(layout) {
+        ASSERT(!stages.empty());
+
+        for (const StageAndDescriptor& stage : stages) {
+            // Extract argument for this stage.
+            SingleShaderStage shaderStage = stage.shaderStage;
+            ShaderModuleBase* module = stage.module;
+            const char* entryPointName = stage.entryPoint.c_str();
+
+            const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
+            ASSERT(metadata.stage == shaderStage);
+
+            // Record them internally.
+            bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
+            mStageMask |= StageBit(shaderStage);
+            mStages[shaderStage] = {module, entryPointName, &metadata, {}};
+            auto& constants = mStages[shaderStage].constants;
+            for (uint32_t i = 0; i < stage.constantCount; i++) {
+                constants.emplace(stage.constants[i].key, stage.constants[i].value);
+            }
+
+            // Compute the max() of all minBufferSizes across all stages.
+            RequiredBufferSizes stageMinBufferSizes =
+                ComputeRequiredBufferSizesForLayout(metadata, layout);
+
+            if (isFirstStage) {
+                mMinBufferSizes = std::move(stageMinBufferSizes);
+            } else {
+                for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
+                    ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
+
+                    for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
+                        mMinBufferSizes[group][i] =
+                            std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
+                    }
+                }
+            }
+        }
+    }
+
+    PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    }
+
+    PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    PipelineBase::~PipelineBase() = default;
+
+    PipelineLayoutBase* PipelineBase::GetLayout() {
+        ASSERT(!IsError());
+        return mLayout.Get();
+    }
+
+    const PipelineLayoutBase* PipelineBase::GetLayout() const {
+        ASSERT(!IsError());
+        return mLayout.Get();
+    }
+
+    const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
+        ASSERT(!IsError());
+        return mMinBufferSizes;
+    }
+
+    const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
+        ASSERT(!IsError());
+        return mStages[stage];
+    }
+
+    const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
+        return mStages;
+    }
+
+    wgpu::ShaderStage PipelineBase::GetStageMask() const {
+        return mStageMask;
+    }
+
+    MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
+        DAWN_INVALID_IF(
+            groupIndex >= kMaxBindGroups,
+            "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+            groupIndex, kMaxBindGroups);
+        return {};
+    }
+
+    ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
+        uint32_t groupIndexIn) {
+        DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
+
+        BindGroupIndex groupIndex(groupIndexIn);
+        if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
+            return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
+        } else {
+            return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
+        }
+    }
+
+    BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
+        Ref<BindGroupLayoutBase> result;
+        if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
+                                       "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
+                                       this)) {
+            return BindGroupLayoutBase::MakeError(GetDevice());
+        }
+        return result.Detach();
+    }
+
+    size_t PipelineBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mLayout->GetContentHash());
+
+        recorder.Record(mStageMask);
+        for (SingleShaderStage stage : IterateStages(mStageMask)) {
+            recorder.Record(mStages[stage].module->GetContentHash());
+            recorder.Record(mStages[stage].entryPoint);
+        }
+
+        return recorder.GetContentHash();
+    }
+
+    // static
+    bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
+        // The layout is deduplicated so it can be compared by pointer.
+        if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+            return false;
+        }
+
+        for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
+            // The module is deduplicated so it can be compared by pointer.
+            if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
+                a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Pipeline.h b/src/dawn/native/Pipeline.h
new file mode 100644
index 0000000..ab078c3
--- /dev/null
+++ b/src/dawn/native/Pipeline.h
@@ -0,0 +1,98 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PIPELINE_H_
+#define DAWNNATIVE_PIPELINE_H_
+
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    MaybeError ValidateProgrammableStage(DeviceBase* device,
+                                         const ShaderModuleBase* module,
+                                         const std::string& entryPoint,
+                                         uint32_t constantCount,
+                                         const ConstantEntry* constants,
+                                         const PipelineLayoutBase* layout,
+                                         SingleShaderStage stage);
+
+    // Use map to make sure constant keys are sorted for creating shader cache keys
+    using PipelineConstantEntries = std::map<std::string, double>;
+
+    struct ProgrammableStage {
+        Ref<ShaderModuleBase> module;
+        std::string entryPoint;
+
+        // The metadata lives as long as module, that's ref-ed in the same structure.
+        const EntryPointMetadata* metadata = nullptr;
+
+        PipelineConstantEntries constants;
+    };
+
+    class PipelineBase : public ApiObjectBase, public CachedObject {
+      public:
+        ~PipelineBase() override;
+
+        PipelineLayoutBase* GetLayout();
+        const PipelineLayoutBase* GetLayout() const;
+        const RequiredBufferSizes& GetMinBufferSizes() const;
+        const ProgrammableStage& GetStage(SingleShaderStage stage) const;
+        const PerStage<ProgrammableStage>& GetAllStages() const;
+        wgpu::ShaderStage GetStageMask() const;
+
+        ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
+
+        // Helper functions for std::unordered_map-based pipeline caches.
+        size_t ComputeContentHash() override;
+        static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
+
+        // Implementation of the API entrypoint. Do not use in a reentrant manner.
+        BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+
+        // Initialize() should only be called once by the frontend.
+        virtual MaybeError Initialize() = 0;
+
+      protected:
+        PipelineBase(DeviceBase* device,
+                     PipelineLayoutBase* layout,
+                     const char* label,
+                     std::vector<StageAndDescriptor> stages);
+        PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // Constructor used only for mocking and testing.
+        PipelineBase(DeviceBase* device);
+
+      private:
+        MaybeError ValidateGetBindGroupLayout(uint32_t group);
+
+        wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
+        PerStage<ProgrammableStage> mStages;
+
+        Ref<PipelineLayoutBase> mLayout;
+        RequiredBufferSizes mMinBufferSizes;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PIPELINE_H_
diff --git a/src/dawn/native/PipelineLayout.cpp b/src/dawn/native/PipelineLayout.cpp
new file mode 100644
index 0000000..56ab100
--- /dev/null
+++ b/src/dawn/native/PipelineLayout.cpp
@@ -0,0 +1,409 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ShaderModule.h"
+
+namespace dawn::native {
+
+    MaybeError ValidatePipelineLayoutDescriptor(
+        DeviceBase* device,
+        const PipelineLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        if (descriptor->nextInChain != nullptr) {
+            return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+        }
+
+        if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
+            return DAWN_VALIDATION_ERROR("too many bind group layouts");
+        }
+
+        BindingCounts bindingCounts = {};
+        for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
+            DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
+            if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
+                pipelineCompatibilityToken) {
+                return DAWN_VALIDATION_ERROR(
+                    "cannot create a pipeline layout using a bind group layout that was created as "
+                    "part of a pipeline's default layout");
+            }
+            AccumulateBindingCounts(&bindingCounts,
+                                    descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
+        }
+
+        DAWN_TRY(ValidateBindingCounts(bindingCounts));
+        return {};
+    }
+
+    // PipelineLayoutBase
+
+    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+                                           const PipelineLayoutDescriptor* descriptor,
+                                           ApiObjectBase::UntrackedByDeviceTag tag)
+        : ApiObjectBase(device, descriptor->label) {
+        ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
+        for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
+             ++group) {
+            mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
+            mMask.set(group);
+        }
+    }
+
+    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+                                           const PipelineLayoutDescriptor* descriptor)
+        : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
+        TrackInDevice();
+    }
+
+    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    PipelineLayoutBase::~PipelineLayoutBase() = default;
+
+    void PipelineLayoutBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncachePipelineLayout(this);
+        }
+    }
+
+    // static
+    PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
+        return new PipelineLayoutBase(device, ObjectBase::kError);
+    }
+
+    // static
+    ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
+        DeviceBase* device,
+        std::vector<StageAndDescriptor> stages) {
+        using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
+
+        // Merges two entries at the same location, if they are allowed to be merged.
+        auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
+                               const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
+            // Visibility is excluded because we take the OR across stages.
+            bool compatible =
+                modifiedEntry->binding == mergedEntry.binding &&
+                modifiedEntry->buffer.type == mergedEntry.buffer.type &&
+                modifiedEntry->sampler.type == mergedEntry.sampler.type &&
+                // Compatibility between these sample types is checked below.
+                (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
+                    (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
+                modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
+
+            // Minimum buffer binding size excluded because we take the maximum seen across stages.
+            if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
+                compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
+                                               mergedEntry.buffer.hasDynamicOffset;
+            }
+
+            if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
+                // Sample types are compatible if they are exactly equal,
+                // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
+                // Note that the |mergedEntry| never has type Float. Texture bindings all start
+                // as UnfilterableFloat and are promoted to Float if they are statically used with
+                // a sampler.
+                ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
+                bool compatibleSampleTypes =
+                    modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
+                    (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
+                     mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
+                compatible =
+                    compatible && compatibleSampleTypes &&
+                    modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
+                    modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
+            }
+
+            if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+                compatible =
+                    compatible &&
+                    modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
+                    modifiedEntry->storageTexture.viewDimension ==
+                        mergedEntry.storageTexture.viewDimension;
+            }
+
+            // Check if any properties are incompatible with existing entry
+            // If compatible, we will merge some properties
+            if (!compatible) {
+                return DAWN_VALIDATION_ERROR(
+                    "Duplicate binding in default pipeline layout initialization "
+                    "not compatible with previous declaration");
+            }
+
+            // Use the max |minBufferBindingSize| we find.
+            modifiedEntry->buffer.minBindingSize =
+                std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
+
+            // Use the OR of all the stages at which we find this binding.
+            modifiedEntry->visibility |= mergedEntry.visibility;
+
+            return {};
+        };
+
+        // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
+        auto ConvertMetadataToEntry =
+            [](const ShaderBindingInfo& shaderBinding,
+               const ExternalTextureBindingLayout* externalTextureBindingEntry)
+            -> BindGroupLayoutEntry {
+            BindGroupLayoutEntry entry = {};
+            switch (shaderBinding.bindingType) {
+                case BindingInfoType::Buffer:
+                    entry.buffer.type = shaderBinding.buffer.type;
+                    entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
+                    entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
+                    break;
+                case BindingInfoType::Sampler:
+                    if (shaderBinding.sampler.isComparison) {
+                        entry.sampler.type = wgpu::SamplerBindingType::Comparison;
+                    } else {
+                        entry.sampler.type = wgpu::SamplerBindingType::Filtering;
+                    }
+                    break;
+                case BindingInfoType::Texture:
+                    switch (shaderBinding.texture.compatibleSampleTypes) {
+                        case SampleTypeBit::Depth:
+                            entry.texture.sampleType = wgpu::TextureSampleType::Depth;
+                            break;
+                        case SampleTypeBit::Sint:
+                            entry.texture.sampleType = wgpu::TextureSampleType::Sint;
+                            break;
+                        case SampleTypeBit::Uint:
+                            entry.texture.sampleType = wgpu::TextureSampleType::Uint;
+                            break;
+                        case SampleTypeBit::Float:
+                        case SampleTypeBit::UnfilterableFloat:
+                        case SampleTypeBit::None:
+                            UNREACHABLE();
+                            break;
+                        default:
+                            if (shaderBinding.texture.compatibleSampleTypes ==
+                                (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
+                                // Default to UnfilterableFloat. It will be promoted to Float if it
+                                // is used with a sampler.
+                                entry.texture.sampleType =
+                                    wgpu::TextureSampleType::UnfilterableFloat;
+                            } else {
+                                UNREACHABLE();
+                            }
+                    }
+                    entry.texture.viewDimension = shaderBinding.texture.viewDimension;
+                    entry.texture.multisampled = shaderBinding.texture.multisampled;
+                    break;
+                case BindingInfoType::StorageTexture:
+                    entry.storageTexture.access = shaderBinding.storageTexture.access;
+                    entry.storageTexture.format = shaderBinding.storageTexture.format;
+                    entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
+                    break;
+                case BindingInfoType::ExternalTexture:
+                    entry.nextInChain = externalTextureBindingEntry;
+                    break;
+            }
+            return entry;
+        };
+
+        PipelineCompatibilityToken pipelineCompatibilityToken =
+            device->GetNextPipelineCompatibilityToken();
+
+        // Creates the BGL from the entries for a stage, checking it is valid.
+        auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
+                            PipelineCompatibilityToken pipelineCompatibilityToken)
+            -> ResultOrError<Ref<BindGroupLayoutBase>> {
+            std::vector<BindGroupLayoutEntry> entryVec;
+            entryVec.reserve(entries.size());
+            for (auto& [_, entry] : entries) {
+                entryVec.push_back(entry);
+            }
+
+            BindGroupLayoutDescriptor desc = {};
+            desc.entries = entryVec.data();
+            desc.entryCount = entryVec.size();
+
+            if (device->IsValidationEnabled()) {
+                DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
+                                 &desc);
+            }
+            return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
+        };
+
+        ASSERT(!stages.empty());
+
+        // Data which BindGroupLayoutDescriptor will point to for creation
+        ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
+            entryData = {};
+
+        // External texture binding layouts are chained structs that are set as a pointer within
+        // the bind group layout entry. We declare an entry here so that it can be used when needed
+        // in each BindGroupLayoutEntry and so it can stay alive until the call to
+        // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
+        // there's no issue with using the same struct multiple times.
+        ExternalTextureBindingLayout externalTextureBindingLayout;
+
+        // Loops over all the reflected BindGroupLayoutEntries from shaders.
+        for (const StageAndDescriptor& stage : stages) {
+            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+
+            for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
+                for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
+                    // Create the BindGroupLayoutEntry
+                    BindGroupLayoutEntry entry =
+                        ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
+                    entry.binding = static_cast<uint32_t>(bindingNumber);
+                    entry.visibility = StageBit(stage.shaderStage);
+
+                    // Add it to our map of all entries, if there is an existing entry, then we
+                    // need to merge, if we can.
+                    const auto& [existingEntry, inserted] =
+                        entryData[group].insert({bindingNumber, entry});
+                    if (!inserted) {
+                        DAWN_TRY(MergeEntries(&existingEntry->second, entry));
+                    }
+                }
+            }
+
+            // Promote any Unfilterable textures used with a sampler to Filtering.
+            for (const EntryPointMetadata::SamplerTexturePair& pair :
+                 metadata.samplerTexturePairs) {
+                BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
+                if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
+                    entry->texture.sampleType = wgpu::TextureSampleType::Float;
+                }
+            }
+        }
+
+        // Create the bind group layouts. We need to keep track of the last non-empty BGL because
+        // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
+        // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
+        // same.
+        BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
+        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
+        for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
+            DAWN_TRY_ASSIGN(bindGroupLayouts[group],
+                            CreateBGL(device, entryData[group], pipelineCompatibilityToken));
+            if (entryData[group].size() != 0) {
+                pipelineBGLCount = group + BindGroupIndex(1);
+            }
+        }
+
+        // Create the deduced pipeline layout, validating if it is valid.
+        ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+        for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
+            bgls[group] = bindGroupLayouts[group].Get();
+        }
+
+        PipelineLayoutDescriptor desc = {};
+        desc.bindGroupLayouts = bgls.data();
+        desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
+
+        DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
+
+        Ref<PipelineLayoutBase> result;
+        DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
+        ASSERT(!result->IsError());
+
+        // Sanity check in debug that the pipeline layout is compatible with the current
+        // pipeline.
+        for (const StageAndDescriptor& stage : stages) {
+            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+            ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
+                       .IsSuccess());
+        }
+
+        return std::move(result);
+    }
+
+    ObjectType PipelineLayoutBase::GetType() const {
+        return ObjectType::PipelineLayout;
+    }
+
+    const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
+        ASSERT(!IsError());
+        ASSERT(group < kMaxBindGroupsTyped);
+        ASSERT(mMask[group]);
+        const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+        ASSERT(bgl != nullptr);
+        return bgl;
+    }
+
+    BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
+        ASSERT(!IsError());
+        ASSERT(group < kMaxBindGroupsTyped);
+        ASSERT(mMask[group]);
+        BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+        ASSERT(bgl != nullptr);
+        return bgl;
+    }
+
+    const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
+        ASSERT(!IsError());
+        return mMask;
+    }
+
+    BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
+        const PipelineLayoutBase* other) const {
+        ASSERT(!IsError());
+        return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
+    }
+
+    BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
+        ASSERT(!IsError());
+
+        for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+            if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
+                return i;
+            }
+        }
+        return kMaxBindGroupsTyped;
+    }
+
+    size_t PipelineLayoutBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mMask);
+
+        for (BindGroupIndex group : IterateBitSet(mMask)) {
+            recorder.Record(GetBindGroupLayout(group)->GetContentHash());
+        }
+
+        return recorder.GetContentHash();
+    }
+
+    bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
+                                                      const PipelineLayoutBase* b) const {
+        if (a->mMask != b->mMask) {
+            return false;
+        }
+
+        for (BindGroupIndex group : IterateBitSet(a->mMask)) {
+            if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/PipelineLayout.h b/src/dawn/native/PipelineLayout.h
new file mode 100644
index 0000000..4850536
--- /dev/null
+++ b/src/dawn/native/PipelineLayout.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PIPELINELAYOUT_H_
+#define DAWNNATIVE_PIPELINELAYOUT_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    MaybeError ValidatePipelineLayoutDescriptor(
+        DeviceBase*,
+        const PipelineLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+
+    using BindGroupLayoutArray =
+        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+    using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
+
+    struct StageAndDescriptor {
+        SingleShaderStage shaderStage;
+        ShaderModuleBase* module;
+        std::string entryPoint;
+        uint32_t constantCount = 0u;
+        ConstantEntry const* constants = nullptr;
+    };
+
+    class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
+      public:
+        PipelineLayoutBase(DeviceBase* device,
+                           const PipelineLayoutDescriptor* descriptor,
+                           ApiObjectBase::UntrackedByDeviceTag tag);
+        PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
+        ~PipelineLayoutBase() override;
+
+        static PipelineLayoutBase* MakeError(DeviceBase* device);
+        static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
+            DeviceBase* device,
+            std::vector<StageAndDescriptor> stages);
+
+        ObjectType GetType() const override;
+
+        const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
+        BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
+        const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
+
+        // Utility functions to compute inherited bind groups.
+        // Returns the inherited bind groups as a mask.
+        BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
+
+        // Returns the index of the first incompatible bind group in the range
+        // [0, kMaxBindGroups]
+        BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
+
+        // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
+        size_t ComputeContentHash() override;
+
+        struct EqualityFunc {
+            bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
+        };
+
+      protected:
+        // Constructor used only for mocking and testing.
+        PipelineLayoutBase(DeviceBase* device);
+        PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+        void DestroyImpl() override;
+
+        BindGroupLayoutArray mBindGroupLayouts;
+        BindGroupLayoutMask mMask;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PIPELINELAYOUT_H_
diff --git a/src/dawn/native/PooledResourceMemoryAllocator.cpp b/src/dawn/native/PooledResourceMemoryAllocator.cpp
new file mode 100644
index 0000000..0a01a99
--- /dev/null
+++ b/src/dawn/native/PooledResourceMemoryAllocator.cpp
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
+        ResourceHeapAllocator* heapAllocator)
+        : mHeapAllocator(heapAllocator) {
+    }
+
+    void PooledResourceMemoryAllocator::DestroyPool() {
+        for (auto& resourceHeap : mPool) {
+            ASSERT(resourceHeap != nullptr);
+            mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
+        }
+
+        mPool.clear();
+    }
+
+    ResultOrError<std::unique_ptr<ResourceHeapBase>>
+    PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
+        // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
+        // pooling is disabled in-frame when the memory is still pending. For high in-frame
+        // memory users, FIFO might be preferable when memory consumption is a higher priority.
+        std::unique_ptr<ResourceHeapBase> memory;
+        if (!mPool.empty()) {
+            memory = std::move(mPool.front());
+            mPool.pop_front();
+        }
+
+        if (memory == nullptr) {
+            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
+        }
+
+        return std::move(memory);
+    }
+
+    void PooledResourceMemoryAllocator::DeallocateResourceHeap(
+        std::unique_ptr<ResourceHeapBase> allocation) {
+        mPool.push_front(std::move(allocation));
+    }
+
+    uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
+        return mPool.size();
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/PooledResourceMemoryAllocator.h b/src/dawn/native/PooledResourceMemoryAllocator.h
new file mode 100644
index 0000000..898bafe
--- /dev/null
+++ b/src/dawn/native/PooledResourceMemoryAllocator.h
@@ -0,0 +1,53 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+#define DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+
+#include <deque>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    // |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
+    // pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
+    // The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
+    // the pool and made AVAILABLE.
+    class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
+      public:
+        PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
+        ~PooledResourceMemoryAllocator() override = default;
+
+        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+            uint64_t size) override;
+        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+        void DestroyPool();
+
+        // For testing purposes.
+        uint64_t GetPoolSizeForTesting() const;
+
+      private:
+        ResourceHeapAllocator* mHeapAllocator = nullptr;
+
+        std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
diff --git a/src/dawn/native/ProgrammableEncoder.cpp b/src/dawn/native/ProgrammableEncoder.cpp
new file mode 100644
index 0000000..8bdc08b
--- /dev/null
+++ b/src/dawn/native/ProgrammableEncoder.cpp
@@ -0,0 +1,203 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ProgrammableEncoder.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cstring>
+
+namespace dawn::native {
+
+    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                             const char* label,
+                                             EncodingContext* encodingContext)
+        : ApiObjectBase(device, label),
+          mEncodingContext(encodingContext),
+          mValidationEnabled(device->IsValidationEnabled()) {
+    }
+
+    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                             EncodingContext* encodingContext,
+                                             ErrorTag errorTag)
+        : ApiObjectBase(device, errorTag),
+          mEncodingContext(encodingContext),
+          mValidationEnabled(device->IsValidationEnabled()) {
+    }
+
+    bool ProgrammableEncoder::IsValidationEnabled() const {
+        return mValidationEnabled;
+    }
+
+    MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
+        DAWN_INVALID_IF(mDebugGroupStackSize != 0,
+                        "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
+                        mDebugGroupStackSize);
+        return {};
+    }
+
+    void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                InsertDebugMarkerCmd* cmd =
+                    allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                return {};
+            },
+            "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+    }
+
+    void ProgrammableEncoder::APIPopDebugGroup() {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(
+                        mDebugGroupStackSize == 0,
+                        "PopDebugGroup called when no debug groups are currently pushed.");
+                }
+                allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+                mDebugGroupStackSize--;
+                mEncodingContext->PopDebugGroupLabel();
+
+                return {};
+            },
+            "encoding %s.PopDebugGroup().", this);
+    }
+
+    void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                PushDebugGroupCmd* cmd =
+                    allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                mDebugGroupStackSize++;
+                mEncodingContext->PushDebugGroupLabel(groupLabel);
+
+                return {};
+            },
+            "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+    }
+
+    MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
+                                                         BindGroupBase* group,
+                                                         uint32_t dynamicOffsetCountIn,
+                                                         const uint32_t* dynamicOffsetsIn) const {
+        DAWN_TRY(GetDevice()->ValidateObject(group));
+
+        DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
+                        "Bind group index (%u) exceeds the maximum (%u).",
+                        static_cast<uint32_t>(index), kMaxBindGroups);
+
+        ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
+                                                                BindingIndex(dynamicOffsetCountIn));
+
+        // Dynamic offsets count must match the number required by the layout perfectly.
+        const BindGroupLayoutBase* layout = group->GetLayout();
+        DAWN_INVALID_IF(
+            layout->GetDynamicBufferCount() != dynamicOffsets.size(),
+            "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
+            "in %s.",
+            static_cast<uint32_t>(dynamicOffsets.size()),
+            static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
+
+        for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
+            const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
+
+            // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+            // ASSERT that this true.
+            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+            ASSERT(bindingInfo.buffer.hasDynamicOffset);
+
+            uint64_t requiredAlignment;
+            switch (bindingInfo.buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Storage:
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                case kInternalStorageBufferBinding:
+                    requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
+            }
+
+            DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
+                            "Dynamic Offset[%u] (%u) is not %u byte aligned.",
+                            static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
+
+            BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+
+            // During BindGroup creation, validation ensures binding offset + binding size
+            // <= buffer size.
+            ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+            ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+
+            if ((dynamicOffsets[i] >
+                 bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
+                DAWN_INVALID_IF(
+                    (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
+                    "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
+                    "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
+                    "even with a dynamic offset of 0. Did you forget to specify "
+                    "the binding's size?",
+                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "Dynamic Offset[%u] (%u) is out of bounds of "
+                    "%s with a size of %u and a bound range of (offset: %u, size: %u).",
+                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+            }
+        }
+
+        return {};
+    }
+
+    void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
+                                                 BindGroupIndex index,
+                                                 BindGroupBase* group,
+                                                 uint32_t dynamicOffsetCount,
+                                                 const uint32_t* dynamicOffsets) const {
+        SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
+        cmd->index = index;
+        cmd->group = group;
+        cmd->dynamicOffsetCount = dynamicOffsetCount;
+        if (dynamicOffsetCount > 0) {
+            uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+            memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ProgrammableEncoder.h b/src/dawn/native/ProgrammableEncoder.h
new file mode 100644
index 0000000..6bba7a2
--- /dev/null
+++ b/src/dawn/native/ProgrammableEncoder.h
@@ -0,0 +1,72 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PROGRAMMABLEENCODER_H_
+#define DAWNNATIVE_PROGRAMMABLEENCODER_H_
+
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    // Base class for shared functionality between programmable encoders.
+    class ProgrammableEncoder : public ApiObjectBase {
+      public:
+        ProgrammableEncoder(DeviceBase* device,
+                            const char* label,
+                            EncodingContext* encodingContext);
+
+        void APIInsertDebugMarker(const char* groupLabel);
+        void APIPopDebugGroup();
+        void APIPushDebugGroup(const char* groupLabel);
+
+      protected:
+        bool IsValidationEnabled() const;
+        MaybeError ValidateProgrammableEncoderEnd() const;
+
+        // Compute and render passes do different things on SetBindGroup. These are helper functions
+        // for the logic they have in common.
+        MaybeError ValidateSetBindGroup(BindGroupIndex index,
+                                        BindGroupBase* group,
+                                        uint32_t dynamicOffsetCountIn,
+                                        const uint32_t* dynamicOffsetsIn) const;
+        void RecordSetBindGroup(CommandAllocator* allocator,
+                                BindGroupIndex index,
+                                BindGroupBase* group,
+                                uint32_t dynamicOffsetCount,
+                                const uint32_t* dynamicOffsets) const;
+
+        // Construct an "error" programmable pass encoder.
+        ProgrammableEncoder(DeviceBase* device,
+                            EncodingContext* encodingContext,
+                            ErrorTag errorTag);
+
+        EncodingContext* mEncodingContext = nullptr;
+
+        uint64_t mDebugGroupStackSize = 0;
+
+      private:
+        const bool mValidationEnabled;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_PROGRAMMABLEENCODER_H_
diff --git a/src/dawn/native/QueryHelper.cpp b/src/dawn/native/QueryHelper.cpp
new file mode 100644
index 0000000..c6d7541
--- /dev/null
+++ b/src/dawn/native/QueryHelper.cpp
@@ -0,0 +1,217 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/QueryHelper.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+namespace dawn::native {
+
+    namespace {
+
+        // Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
+        static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
+        static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
+        static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
+        static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
+        static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
+
+        static const char sConvertTimestampsToNanoseconds[] = R"(
+            struct Timestamp {
+                low  : u32;
+                high : u32;
+            };
+
+            struct TimestampArr {
+                t : array<Timestamp>;
+            };
+
+            struct AvailabilityArr {
+                v : array<u32>;
+            };
+
+            struct TimestampParams {
+                first  : u32;
+                count  : u32;
+                offset : u32;
+                multiplier : u32;
+                right_shift  : u32;
+            };
+
+            @group(0) @binding(0) var<storage, read_write> timestamps : TimestampArr;
+            @group(0) @binding(1) var<storage, read> availability : AvailabilityArr;
+            @group(0) @binding(2) var<uniform> params : TimestampParams;
+
+            let sizeofTimestamp : u32 = 8u;
+
+            @stage(compute) @workgroup_size(8, 1, 1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+                if (GlobalInvocationID.x >= params.count) { return; }
+
+                var index = GlobalInvocationID.x + params.offset / sizeofTimestamp;
+
+                // Return 0 for the unavailable value.
+                if (availability.v[GlobalInvocationID.x + params.first] == 0u) {
+                    timestamps.t[index].low = 0u;
+                    timestamps.t[index].high = 0u;
+                    return;
+                }
+
+                var timestamp = timestamps.t[index];
+
+                // TODO(dawn:1250): Consider using the umulExtended and uaddCarry intrinsics once
+                // available.
+                var chunks : array<u32, 5>;
+                chunks[0] = timestamp.low & 0xFFFFu;
+                chunks[1] = timestamp.low >> 16u;
+                chunks[2] = timestamp.high & 0xFFFFu;
+                chunks[3] = timestamp.high >> 16u;
+                chunks[4] = 0u;
+
+                // Multiply all the chunks with the integer period.
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    chunks[i] = chunks[i] * params.multiplier;
+                }
+
+                // Propagate the carry
+                var carry = 0u;
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    var chunk_with_carry = chunks[i] + carry;
+                    carry = chunk_with_carry >> 16u;
+                    chunks[i] = chunk_with_carry & 0xFFFFu;
+                }
+                chunks[4] = carry;
+
+                // Apply the right shift.
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    var low = chunks[i] >> params.right_shift;
+                    var high = (chunks[i + 1u] << (16u - params.right_shift)) & 0xFFFFu;
+                    chunks[i] = low | high;
+                }
+
+                timestamps.t[index].low = chunks[0] | (chunks[1] << 16u);
+                timestamps.t[index].high = chunks[2] | (chunks[3] << 16u);
+            }
+        )";
+
+        ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
+            DeviceBase* device) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+            if (store->timestampComputePipeline == nullptr) {
+                // Create compute shader module if not cached before.
+                if (store->timestampCS == nullptr) {
+                    DAWN_TRY_ASSIGN(
+                        store->timestampCS,
+                        utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
+                }
+
+                // Create binding group layout
+                Ref<BindGroupLayoutBase> bgl;
+                DAWN_TRY_ASSIGN(
+                    bgl, utils::MakeBindGroupLayout(
+                             device,
+                             {
+                                 {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                                 {1, wgpu::ShaderStage::Compute,
+                                  wgpu::BufferBindingType::ReadOnlyStorage},
+                                 {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                             },
+                             /* allowInternalBinding */ true));
+
+                // Create pipeline layout
+                Ref<PipelineLayoutBase> layout;
+                DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
+
+                // Create ComputePipeline.
+                ComputePipelineDescriptor computePipelineDesc = {};
+                // Generate the layout based on shader module.
+                computePipelineDesc.layout = layout.Get();
+                computePipelineDesc.compute.module = store->timestampCS.Get();
+                computePipelineDesc.compute.entryPoint = "main";
+
+                DAWN_TRY_ASSIGN(store->timestampComputePipeline,
+                                device->CreateComputePipeline(&computePipelineDesc));
+            }
+
+            return store->timestampComputePipeline.Get();
+        }
+
+    }  // anonymous namespace
+
+    TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
+        : first(first), count(count), offset(offset) {
+        // The overall conversion happening, if p is the period, m the multiplier, s the shift, is::
+        //
+        //   m = round(p * 2^s)
+        //
+        // Then in the shader we compute:
+        //
+        //   m / 2^s = round(p * 2^s) / 2*s ~= p
+        //
+        // The goal is to find the best shift to keep the precision of computations. The
+        // conversion shader uses chunks of 16 bits to compute the multiplication with the perios,
+        // so we need to keep the multiplier under 2^16. At the same time, the larger the
+        // multiplier, the better the precision, so we maximize the value of the right shift while
+        // keeping the multiplier under 2 ^ 16
+        uint32_t upperLog2 = ceil(log2(period));
+
+        // Clamp the shift to 16 because we're doing computations in 16bit chunks. The
+        // multiplication by the period will overflow the chunks, but timestamps are mostly
+        // informational so that's ok.
+        rightShift = 16u - std::min(upperLog2, 16u);
+        multiplier = uint32_t(period * (1 << rightShift));
+    }
+
+    MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+                                                    BufferBase* timestamps,
+                                                    BufferBase* availability,
+                                                    BufferBase* params) {
+        DeviceBase* device = encoder->GetDevice();
+
+        ComputePipelineBase* pipeline;
+        DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
+
+        // Prepare bind group layout.
+        Ref<BindGroupLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+        // Create bind group after all binding entries are set.
+        Ref<BindGroupBase> bindGroup;
+        DAWN_TRY_ASSIGN(bindGroup,
+                        utils::MakeBindGroup(device, layout,
+                                             {{0, timestamps}, {1, availability}, {2, params}}));
+
+        // Create compute encoder and issue dispatch.
+        Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
+        pass->APISetPipeline(pipeline);
+        pass->APISetBindGroup(0, bindGroup.Get());
+        pass->APIDispatch(
+            static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
+        pass->APIEnd();
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/QueryHelper.h b/src/dawn/native/QueryHelper.h
new file mode 100644
index 0000000..111b195
--- /dev/null
+++ b/src/dawn/native/QueryHelper.h
@@ -0,0 +1,43 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUERYHELPER_H_
+#define DAWNNATIVE_QUERYHELPER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+
+namespace dawn::native {
+
+    class BufferBase;
+    class CommandEncoder;
+
+    struct TimestampParams {
+        TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
+
+        uint32_t first;
+        uint32_t count;
+        uint32_t offset;
+        uint32_t multiplier;
+        uint32_t rightShift;
+    };
+
+    MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+                                                    BufferBase* timestamps,
+                                                    BufferBase* availability,
+                                                    BufferBase* params);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_QUERYHELPER_H_
diff --git a/src/dawn/native/QuerySet.cpp b/src/dawn/native/QuerySet.cpp
new file mode 100644
index 0000000..3f20dab
--- /dev/null
+++ b/src/dawn/native/QuerySet.cpp
@@ -0,0 +1,180 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <set>
+
+namespace dawn::native {
+
+    namespace {
+
+        class ErrorQuerySet final : public QuerySetBase {
+          public:
+            ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
+            }
+
+          private:
+            void DestroyImpl() override {
+                UNREACHABLE();
+            }
+        };
+
+    }  // anonymous namespace
+
+    MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
+                                          const QuerySetDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+        DAWN_TRY(ValidateQueryType(descriptor->type));
+
+        DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
+                        "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
+                        kMaxQueryCount);
+
+        switch (descriptor->type) {
+            case wgpu::QueryType::Occlusion:
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                                "Pipeline statistics specified for a query of type %s.",
+                                descriptor->type);
+                break;
+
+            case wgpu::QueryType::PipelineStatistics: {
+                // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
+                // Disallow it as unsafe until the implementaion is completed.
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Pipeline statistics queries are disallowed because they are not "
+                                "fully implemented");
+
+                DAWN_INVALID_IF(
+                    !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
+                    "Pipeline statistics query set created without the feature being enabled.");
+
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
+                                "Pipeline statistics query set created with 0 statistics.");
+
+                std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
+                for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+                    DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
+
+                    auto [_, inserted] =
+                        pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
+                    DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
+                                    descriptor->pipelineStatistics[i]);
+                }
+            } break;
+
+            case wgpu::QueryType::Timestamp:
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Timestamp queries are disallowed because they may expose precise "
+                                "timing information.");
+
+                DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
+                                "Timestamp query set created without the feature being enabled.");
+
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                                "Pipeline statistics specified for a query of type %s.",
+                                descriptor->type);
+                break;
+
+            default:
+                break;
+        }
+
+        return {};
+    }
+
+    QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
+        : ApiObjectBase(device, descriptor->label),
+          mQueryType(descriptor->type),
+          mQueryCount(descriptor->count),
+          mState(QuerySetState::Available) {
+        for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+            mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
+        }
+
+        mQueryAvailability.resize(descriptor->count);
+        TrackInDevice();
+    }
+
+    QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    QuerySetBase::~QuerySetBase() {
+        // Uninitialized or already destroyed
+        ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+    }
+
+    void QuerySetBase::DestroyImpl() {
+        mState = QuerySetState::Destroyed;
+    }
+
+    // static
+    QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
+        return new ErrorQuerySet(device);
+    }
+
+    ObjectType QuerySetBase::GetType() const {
+        return ObjectType::QuerySet;
+    }
+
+    wgpu::QueryType QuerySetBase::GetQueryType() const {
+        return mQueryType;
+    }
+
+    uint32_t QuerySetBase::GetQueryCount() const {
+        return mQueryCount;
+    }
+
+    const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
+        return mPipelineStatistics;
+    }
+
+    const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
+        return mQueryAvailability;
+    }
+
+    void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
+        mQueryAvailability[index] = available;
+    }
+
+    MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
+        ASSERT(!IsError());
+        DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
+        return {};
+    }
+
+    void QuerySetBase::APIDestroy() {
+        if (GetDevice()->ConsumedError(ValidateDestroy())) {
+            return;
+        }
+        Destroy();
+    }
+
+    MaybeError QuerySetBase::ValidateDestroy() const {
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/QuerySet.h b/src/dawn/native/QuerySet.h
new file mode 100644
index 0000000..39a69df
--- /dev/null
+++ b/src/dawn/native/QuerySet.h
@@ -0,0 +1,72 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUERYSET_H_
+#define DAWNNATIVE_QUERYSET_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+    class QuerySetBase : public ApiObjectBase {
+      public:
+        QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+        static QuerySetBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        wgpu::QueryType GetQueryType() const;
+        uint32_t GetQueryCount() const;
+        const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
+
+        const std::vector<bool>& GetQueryAvailability() const;
+        void SetQueryAvailability(uint32_t index, bool available);
+
+        MaybeError ValidateCanUseInSubmitNow() const;
+
+        void APIDestroy();
+
+      protected:
+        QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // Constructor used only for mocking and testing.
+        QuerySetBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        ~QuerySetBase() override;
+
+      private:
+        MaybeError ValidateDestroy() const;
+
+        wgpu::QueryType mQueryType;
+        uint32_t mQueryCount;
+        std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
+
+        enum class QuerySetState { Unavailable, Available, Destroyed };
+        QuerySetState mState = QuerySetState::Unavailable;
+
+        // Indicates the available queries on the query set for resolving
+        std::vector<bool> mQueryAvailability;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_QUERYSET_H_
diff --git a/src/dawn/native/Queue.cpp b/src/dawn/native/Queue.cpp
new file mode 100644
index 0000000..6c061d3
--- /dev/null
+++ b/src/dawn/native/Queue.cpp
@@ -0,0 +1,512 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Queue.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/CopyTextureForBrowserHelper.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Texture.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <cstring>
+
+namespace dawn::native {
+
+    namespace {
+
+        void CopyTextureData(uint8_t* dstPointer,
+                             const uint8_t* srcPointer,
+                             uint32_t depth,
+                             uint32_t rowsPerImage,
+                             uint64_t imageAdditionalStride,
+                             uint32_t actualBytesPerRow,
+                             uint32_t dstBytesPerRow,
+                             uint32_t srcBytesPerRow) {
+            bool copyWholeLayer =
+                actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+            bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+
+            if (!copyWholeLayer) {  // copy row by row
+                for (uint32_t d = 0; d < depth; ++d) {
+                    for (uint32_t h = 0; h < rowsPerImage; ++h) {
+                        memcpy(dstPointer, srcPointer, actualBytesPerRow);
+                        dstPointer += dstBytesPerRow;
+                        srcPointer += srcBytesPerRow;
+                    }
+                    srcPointer += imageAdditionalStride;
+                }
+            } else {
+                uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
+                if (!copyWholeData) {  // copy layer by layer
+                    for (uint32_t d = 0; d < depth; ++d) {
+                        memcpy(dstPointer, srcPointer, layerSize);
+                        dstPointer += layerSize;
+                        srcPointer += layerSize + imageAdditionalStride;
+                    }
+                } else {  // do a single copy
+                    memcpy(dstPointer, srcPointer, layerSize * depth);
+                }
+            }
+        }
+
+        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
+            DeviceBase* device,
+            const void* data,
+            uint32_t alignedBytesPerRow,
+            uint32_t optimallyAlignedBytesPerRow,
+            uint32_t alignedRowsPerImage,
+            const TextureDataLayout& dataLayout,
+            bool hasDepthOrStencil,
+            const TexelBlockInfo& blockInfo,
+            const Extent3D& writeSizePixel) {
+            uint64_t newDataSizeBytes;
+            DAWN_TRY_ASSIGN(
+                newDataSizeBytes,
+                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
+                                           alignedRowsPerImage));
+
+            uint64_t optimalOffsetAlignment =
+                device->GetOptimalBufferToTextureCopyOffsetAlignment();
+            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
+            ASSERT(IsPowerOfTwo(blockInfo.byteSize));
+            // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
+            // since both of them are powers of two, we only need to align to the max value.
+            uint64_t offsetAlignment =
+                std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
+
+            // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
+            // by WebGPU and Vulkan SPEC.
+            if (hasDepthOrStencil) {
+                constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
+                offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
+            }
+
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                              newDataSizeBytes, device->GetPendingCommandSerial(),
+                                              offsetAlignment));
+            ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+            srcPointer += dataLayout.offset;
+
+            uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
+            if (dataRowsPerImage == 0) {
+                dataRowsPerImage = writeSizePixel.height / blockInfo.height;
+            }
+
+            ASSERT(dataRowsPerImage >= alignedRowsPerImage);
+            uint64_t imageAdditionalStride =
+                dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
+
+            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
+                            alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
+                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+
+            return uploadHandle;
+        }
+
+        struct SubmittedWorkDone : QueueBase::TaskInFlight {
+            SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
+                : mCallback(callback), mUserdata(userdata) {
+            }
+            void Finish() override {
+                ASSERT(mCallback != nullptr);
+                mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
+                mCallback = nullptr;
+            }
+            void HandleDeviceLoss() override {
+                ASSERT(mCallback != nullptr);
+                mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
+                mCallback = nullptr;
+            }
+            ~SubmittedWorkDone() override = default;
+
+          private:
+            WGPUQueueWorkDoneCallback mCallback = nullptr;
+            void* mUserdata;
+        };
+
+        class ErrorQueue : public QueueBase {
+          public:
+            ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
+            }
+
+          private:
+            MaybeError SubmitImpl(uint32_t commandCount,
+                                  CommandBufferBase* const* commands) override {
+                UNREACHABLE();
+            }
+        };
+    }  // namespace
+
+    // QueueBase
+
+    QueueBase::TaskInFlight::~TaskInFlight() {
+    }
+
+    QueueBase::QueueBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    }
+
+    QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    QueueBase::~QueueBase() {
+        ASSERT(mTasksInFlight.Empty());
+    }
+
+    void QueueBase::DestroyImpl() {
+    }
+
+    // static
+    QueueBase* QueueBase::MakeError(DeviceBase* device) {
+        return new ErrorQueue(device);
+    }
+
+    ObjectType QueueBase::GetType() const {
+        return ObjectType::Queue;
+    }
+
+    void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
+        SubmitInternal(commandCount, commands);
+
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            commands[i]->Destroy();
+        }
+    }
+
+    void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
+                                           WGPUQueueWorkDoneCallback callback,
+                                           void* userdata) {
+        // The error status depends on the type of error so we let the validation function choose it
+        WGPUQueueWorkDoneStatus status;
+        if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
+            callback(status, userdata);
+            return;
+        }
+
+        std::unique_ptr<SubmittedWorkDone> task =
+            std::make_unique<SubmittedWorkDone>(callback, userdata);
+
+        // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
+        // also used to make sure ALL queue work is finished in tests, so we also wait for pending
+        // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
+        // spec).
+        TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
+    }
+
+    void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
+        mTasksInFlight.Enqueue(std::move(task), serial);
+        GetDevice()->AddFutureSerial(serial);
+    }
+
+    void QueueBase::Tick(ExecutionSerial finishedSerial) {
+        // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
+        // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
+        // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
+        // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
+        // callbacks.
+        std::vector<std::unique_ptr<TaskInFlight>> tasks;
+        for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
+            tasks.push_back(std::move(task));
+        }
+        mTasksInFlight.ClearUpTo(finishedSerial);
+
+        for (auto& task : tasks) {
+            task->Finish();
+        }
+    }
+
+    void QueueBase::HandleDeviceLoss() {
+        for (auto& task : mTasksInFlight.IterateAll()) {
+            task->HandleDeviceLoss();
+        }
+        mTasksInFlight.Clear();
+    }
+
+    void QueueBase::APIWriteBuffer(BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   const void* data,
+                                   size_t size) {
+        GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
+    }
+
+    MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+                                      uint64_t bufferOffset,
+                                      const void* data,
+                                      size_t size) {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+        DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+        return WriteBufferImpl(buffer, bufferOffset, data, size);
+    }
+
+    MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
+                                          uint64_t bufferOffset,
+                                          const void* data,
+                                          size_t size) {
+        if (size == 0) {
+            return {};
+        }
+
+        DeviceBase* device = GetDevice();
+
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                          size, device->GetPendingCommandSerial(),
+                                          kCopyBufferToBufferOffsetAlignment));
+        ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+        memcpy(uploadHandle.mappedBuffer, data, size);
+
+        device->AddFutureSerial(device->GetPendingCommandSerial());
+
+        return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
+                                               buffer, bufferOffset, size);
+    }
+
+    void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
+                                    const void* data,
+                                    size_t dataSize,
+                                    const TextureDataLayout* dataLayout,
+                                    const Extent3D* writeSize) {
+        GetDevice()->ConsumedError(
+            WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
+    }
+
+    MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
+                                               const void* data,
+                                               size_t dataSize,
+                                               const TextureDataLayout& dataLayout,
+                                               const Extent3D* writeSize) {
+        DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
+
+        if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
+            return {};
+        }
+
+        const TexelBlockInfo& blockInfo =
+            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+        TextureDataLayout layout = dataLayout;
+        ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
+        return WriteTextureImpl(*destination, data, layout, *writeSize);
+    }
+
+    MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
+                                           const void* data,
+                                           const TextureDataLayout& dataLayout,
+                                           const Extent3D& writeSizePixel) {
+        const Format& format = destination.texture->GetFormat();
+        const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
+
+        // We are only copying the part of the data that will appear in the texture.
+        // Note that validating texture copy range ensures that writeSizePixel->width and
+        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
+        ASSERT(writeSizePixel.width % blockInfo.width == 0);
+        ASSERT(writeSizePixel.height % blockInfo.height == 0);
+        uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
+        uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
+
+        uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
+        uint32_t optimallyAlignedBytesPerRow =
+            Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        UploadTextureDataAligningBytesPerRowAndOffset(
+                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                            alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
+                            writeSizePixel));
+
+        TextureDataLayout passDataLayout = dataLayout;
+        passDataLayout.offset = uploadHandle.startOffset;
+        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+        passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+        TextureCopy textureCopy;
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect = ConvertAspect(format, destination.aspect);
+
+        DeviceBase* device = GetDevice();
+
+        device->AddFutureSerial(device->GetPendingCommandSerial());
+
+        return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
+                                                &textureCopy, writeSizePixel);
+    }
+
+    void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize,
+                                             const CopyTextureForBrowserOptions* options) {
+        GetDevice()->ConsumedError(
+            CopyTextureForBrowserInternal(source, destination, copySize, options));
+    }
+
+    MaybeError QueueBase::CopyTextureForBrowserInternal(
+        const ImageCopyTexture* source,
+        const ImageCopyTexture* destination,
+        const Extent3D* copySize,
+        const CopyTextureForBrowserOptions* options) {
+        if (GetDevice()->IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(
+                ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
+                "validating CopyTextureForBrowser from %s to %s", source->texture,
+                destination->texture);
+        }
+
+        return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
+    }
+
+    MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
+                                         CommandBufferBase* const* commands) const {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
+            DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
+
+            const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
+
+            for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
+                for (const BufferBase* buffer : scope.buffers) {
+                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+                }
+
+                for (const TextureBase* texture : scope.textures) {
+                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+                }
+
+                for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
+                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
+                }
+            }
+
+            for (const ComputePassResourceUsage& pass : usages.computePasses) {
+                for (const BufferBase* buffer : pass.referencedBuffers) {
+                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+                }
+                for (const TextureBase* texture : pass.referencedTextures) {
+                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+                }
+                for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
+                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
+                }
+            }
+
+            for (const BufferBase* buffer : usages.topLevelBuffers) {
+                DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+            }
+            for (const TextureBase* texture : usages.topLevelTextures) {
+                DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+            }
+            for (const QuerySetBase* querySet : usages.usedQuerySets) {
+                DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
+                                                      WGPUQueueWorkDoneStatus* status) const {
+        *status = WGPUQueueWorkDoneStatus_DeviceLost;
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+        *status = WGPUQueueWorkDoneStatus_Error;
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
+
+        return {};
+    }
+
+    MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
+                                               size_t dataSize,
+                                               const TextureDataLayout& dataLayout,
+                                               const Extent3D* writeSize) const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+        DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
+
+        DAWN_INVALID_IF(dataLayout.offset > dataSize,
+                        "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
+                        dataSize);
+
+        DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
+                        "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
+                        destination->texture, wgpu::TextureUsage::CopyDst);
+
+        DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
+                        "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
+                        destination->texture);
+
+        DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+        // We validate texture copy range before validating linear texture data,
+        // because in the latter we divide copyExtent.width by blockWidth and
+        // copyExtent.height by blockHeight while the divisibility conditions are
+        // checked in validating texture copy range.
+        DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
+
+        const TexelBlockInfo& blockInfo =
+            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+
+        DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
+
+        DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
+
+        return {};
+    }
+
+    void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
+        DeviceBase* device = GetDevice();
+        if (device->ConsumedError(device->ValidateIsAlive())) {
+            // If device is lost, don't let any commands be submitted
+            return;
+        }
+
+        TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
+        if (device->IsValidationEnabled() &&
+            device->ConsumedError(ValidateSubmit(commandCount, commands))) {
+            return;
+        }
+        ASSERT(!IsError());
+
+        if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
+            return;
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Queue.h b/src/dawn/native/Queue.h
new file mode 100644
index 0000000..ee1074d
--- /dev/null
+++ b/src/dawn/native/Queue.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUEUE_H_
+#define DAWNNATIVE_QUEUE_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    class QueueBase : public ApiObjectBase {
+      public:
+        struct TaskInFlight {
+            virtual ~TaskInFlight();
+            virtual void Finish() = 0;
+            virtual void HandleDeviceLoss() = 0;
+        };
+
+        ~QueueBase() override;
+
+        static QueueBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        // Dawn API
+        void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
+        void APIOnSubmittedWorkDone(uint64_t signalValue,
+                                    WGPUQueueWorkDoneCallback callback,
+                                    void* userdata);
+        void APIWriteBuffer(BufferBase* buffer,
+                            uint64_t bufferOffset,
+                            const void* data,
+                            size_t size);
+        void APIWriteTexture(const ImageCopyTexture* destination,
+                             const void* data,
+                             size_t dataSize,
+                             const TextureDataLayout* dataLayout,
+                             const Extent3D* writeSize);
+        void APICopyTextureForBrowser(const ImageCopyTexture* source,
+                                      const ImageCopyTexture* destination,
+                                      const Extent3D* copySize,
+                                      const CopyTextureForBrowserOptions* options);
+
+        MaybeError WriteBuffer(BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               const void* data,
+                               size_t size);
+        void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
+        void Tick(ExecutionSerial finishedSerial);
+        void HandleDeviceLoss();
+
+      protected:
+        QueueBase(DeviceBase* device);
+        QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+        void DestroyImpl() override;
+
+      private:
+        MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
+                                        const void* data,
+                                        size_t dataSize,
+                                        const TextureDataLayout& dataLayout,
+                                        const Extent3D* writeSize);
+        MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+                                                 const ImageCopyTexture* destination,
+                                                 const Extent3D* copySize,
+                                                 const CopyTextureForBrowserOptions* options);
+
+        virtual MaybeError SubmitImpl(uint32_t commandCount,
+                                      CommandBufferBase* const* commands) = 0;
+        virtual MaybeError WriteBufferImpl(BufferBase* buffer,
+                                           uint64_t bufferOffset,
+                                           const void* data,
+                                           size_t size);
+        virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+                                            const void* data,
+                                            const TextureDataLayout& dataLayout,
+                                            const Extent3D& writeSize);
+
+        MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
+        MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
+                                               WGPUQueueWorkDoneStatus* status) const;
+        MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
+                                        size_t dataSize,
+                                        const TextureDataLayout& dataLayout,
+                                        const Extent3D* writeSize) const;
+
+        void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
+
+        SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_QUEUE_H_
diff --git a/src/dawn/native/RenderBundle.cpp b/src/dawn/native/RenderBundle.cpp
new file mode 100644
index 0000000..da10188
--- /dev/null
+++ b/src/dawn/native/RenderBundle.cpp
@@ -0,0 +1,91 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderBundle.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/RenderBundleEncoder.h"
+
+namespace dawn::native {
+
+    RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
+                                       const RenderBundleDescriptor* descriptor,
+                                       Ref<AttachmentState> attachmentState,
+                                       bool depthReadOnly,
+                                       bool stencilReadOnly,
+                                       RenderPassResourceUsage resourceUsage,
+                                       IndirectDrawMetadata indirectDrawMetadata)
+        : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
+          mCommands(encoder->AcquireCommands()),
+          mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
+          mAttachmentState(std::move(attachmentState)),
+          mDepthReadOnly(depthReadOnly),
+          mStencilReadOnly(stencilReadOnly),
+          mResourceUsage(std::move(resourceUsage)) {
+        TrackInDevice();
+    }
+
+    void RenderBundleBase::DestroyImpl() {
+        FreeCommands(&mCommands);
+
+        // Remove reference to the attachment state so that we don't have lingering references to
+        // it preventing it from being uncached in the device.
+        mAttachmentState = nullptr;
+    }
+
+    // static
+    RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
+        return new RenderBundleBase(device, ObjectBase::kError);
+    }
+
+    RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
+        : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
+    }
+
+    ObjectType RenderBundleBase::GetType() const {
+        return ObjectType::RenderBundle;
+    }
+
+    CommandIterator* RenderBundleBase::GetCommands() {
+        return &mCommands;
+    }
+
+    const AttachmentState* RenderBundleBase::GetAttachmentState() const {
+        ASSERT(!IsError());
+        return mAttachmentState.Get();
+    }
+
+    bool RenderBundleBase::IsDepthReadOnly() const {
+        ASSERT(!IsError());
+        return mDepthReadOnly;
+    }
+
+    bool RenderBundleBase::IsStencilReadOnly() const {
+        ASSERT(!IsError());
+        return mStencilReadOnly;
+    }
+
+    const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
+        ASSERT(!IsError());
+        return mResourceUsage;
+    }
+
+    const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
+        return mIndirectDrawMetadata;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/RenderBundle.h b/src/dawn/native/RenderBundle.h
new file mode 100644
index 0000000..9112b77
--- /dev/null
+++ b/src/dawn/native/RenderBundle.h
@@ -0,0 +1,73 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERBUNDLE_H_
+#define DAWNNATIVE_RENDERBUNDLE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+
+namespace dawn::native {
+
+    struct RenderBundleDescriptor;
+    class RenderBundleEncoder;
+
+    class RenderBundleBase final : public ApiObjectBase {
+      public:
+        RenderBundleBase(RenderBundleEncoder* encoder,
+                         const RenderBundleDescriptor* descriptor,
+                         Ref<AttachmentState> attachmentState,
+                         bool depthReadOnly,
+                         bool stencilReadOnly,
+                         RenderPassResourceUsage resourceUsage,
+                         IndirectDrawMetadata indirectDrawMetadata);
+
+        static RenderBundleBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        CommandIterator* GetCommands();
+
+        const AttachmentState* GetAttachmentState() const;
+        bool IsDepthReadOnly() const;
+        bool IsStencilReadOnly() const;
+        const RenderPassResourceUsage& GetResourceUsage() const;
+        const IndirectDrawMetadata& GetIndirectDrawMetadata();
+
+      private:
+        RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
+
+        void DestroyImpl() override;
+
+        CommandIterator mCommands;
+        IndirectDrawMetadata mIndirectDrawMetadata;
+        Ref<AttachmentState> mAttachmentState;
+        bool mDepthReadOnly;
+        bool mStencilReadOnly;
+        RenderPassResourceUsage mResourceUsage;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RENDERBUNDLE_H_
diff --git a/src/dawn/native/RenderBundleEncoder.cpp b/src/dawn/native/RenderBundleEncoder.cpp
new file mode 100644
index 0000000..6d7a2db
--- /dev/null
+++ b/src/dawn/native/RenderBundleEncoder.cpp
@@ -0,0 +1,172 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderBundleEncoder.h"
+
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
+                                             wgpu::TextureFormat textureFormat) {
+        DAWN_TRY(ValidateTextureFormat(textureFormat));
+        const Format* format = nullptr;
+        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+        DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+                        "Texture format %s is not color renderable.", textureFormat);
+        return {};
+    }
+
+    MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
+                                                    wgpu::TextureFormat textureFormat,
+                                                    bool depthReadOnly,
+                                                    bool stencilReadOnly) {
+        DAWN_TRY(ValidateTextureFormat(textureFormat));
+        const Format* format = nullptr;
+        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+        DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+                        "Texture format %s is not depth/stencil renderable.", textureFormat);
+
+        DAWN_INVALID_IF(
+            format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
+            "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
+            "both depth and stencil aspects.",
+            depthReadOnly, stencilReadOnly, textureFormat);
+
+        return {};
+    }
+
+    MaybeError ValidateRenderBundleEncoderDescriptor(
+        const DeviceBase* device,
+        const RenderBundleEncoderDescriptor* descriptor) {
+        DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+                        "Sample count (%u) is not supported.", descriptor->sampleCount);
+
+        DAWN_INVALID_IF(
+            descriptor->colorFormatsCount > kMaxColorAttachments,
+            "Color formats count (%u) exceeds maximum number of color attachements (%u).",
+            descriptor->colorFormatsCount, kMaxColorAttachments);
+
+        bool allColorFormatsUndefined = true;
+        for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
+            wgpu::TextureFormat format = descriptor->colorFormats[i];
+            if (format != wgpu::TextureFormat::Undefined) {
+                DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
+                                 "validating colorFormats[%u]", i);
+                allColorFormatsUndefined = false;
+            }
+        }
+
+        if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
+            DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
+                                 device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
+                                 descriptor->stencilReadOnly),
+                             "validating depthStencilFormat");
+        } else {
+            DAWN_INVALID_IF(
+                allColorFormatsUndefined,
+                "No color or depthStencil attachments specified. At least one is required.");
+        }
+
+        return {};
+    }
+
+    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
+                                             const RenderBundleEncoderDescriptor* descriptor)
+        : RenderEncoderBase(device,
+                            descriptor->label,
+                            &mBundleEncodingContext,
+                            device->GetOrCreateAttachmentState(descriptor),
+                            descriptor->depthReadOnly,
+                            descriptor->stencilReadOnly),
+          mBundleEncodingContext(device, this) {
+        TrackInDevice();
+    }
+
+    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
+        : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
+          mBundleEncodingContext(device, this) {
+    }
+
+    void RenderBundleEncoder::DestroyImpl() {
+        RenderEncoderBase::DestroyImpl();
+        mBundleEncodingContext.Destroy();
+    }
+
+    // static
+    Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
+        DeviceBase* device,
+        const RenderBundleEncoderDescriptor* descriptor) {
+        return AcquireRef(new RenderBundleEncoder(device, descriptor));
+    }
+
+    // static
+    RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
+        return new RenderBundleEncoder(device, ObjectBase::kError);
+    }
+
+    ObjectType RenderBundleEncoder::GetType() const {
+        return ObjectType::RenderBundleEncoder;
+    }
+
+    CommandIterator RenderBundleEncoder::AcquireCommands() {
+        return mBundleEncodingContext.AcquireCommands();
+    }
+
+    RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
+        RenderBundleBase* result = nullptr;
+
+        if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).",
+                                       this, descriptor)) {
+            return RenderBundleBase::MakeError(GetDevice());
+        }
+
+        return result;
+    }
+
+    ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
+        const RenderBundleDescriptor* descriptor) {
+        // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
+        // internal state of the encoding context. Subsequent calls to encode commands will generate
+        // errors.
+        DAWN_TRY(mBundleEncodingContext.Finish());
+
+        RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
+        if (IsValidationEnabled()) {
+            DAWN_TRY(GetDevice()->ValidateObject(this));
+            DAWN_TRY(ValidateProgrammableEncoderEnd());
+            DAWN_TRY(ValidateFinish(usages));
+        }
+
+        return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
+                                    IsStencilReadOnly(), std::move(usages),
+                                    std::move(mIndirectDrawMetadata));
+    }
+
+    MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/RenderBundleEncoder.h b/src/dawn/native/RenderBundleEncoder.h
new file mode 100644
index 0000000..46c1470
--- /dev/null
+++ b/src/dawn/native/RenderBundleEncoder.h
@@ -0,0 +1,56 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERBUNDLEENCODER_H_
+#define DAWNNATIVE_RENDERBUNDLEENCODER_H_
+
+#include "dawn/native/EncodingContext.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderEncoderBase.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateRenderBundleEncoderDescriptor(
+        const DeviceBase* device,
+        const RenderBundleEncoderDescriptor* descriptor);
+
+    class RenderBundleEncoder final : public RenderEncoderBase {
+      public:
+        static Ref<RenderBundleEncoder> Create(DeviceBase* device,
+                                               const RenderBundleEncoderDescriptor* descriptor);
+        static RenderBundleEncoder* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
+
+        CommandIterator AcquireCommands();
+
+      private:
+        RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
+        RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
+
+        void DestroyImpl() override;
+
+        ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
+        MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
+
+        EncodingContext mBundleEncodingContext;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RENDERBUNDLEENCODER_H_
diff --git a/src/dawn/native/RenderEncoderBase.cpp b/src/dawn/native/RenderEncoderBase.cpp
new file mode 100644
index 0000000..f186c16
--- /dev/null
+++ b/src/dawn/native/RenderEncoderBase.cpp
@@ -0,0 +1,414 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderEncoderBase.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <math.h>
+#include <cstring>
+
+namespace dawn::native {
+
+    RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+                                         const char* label,
+                                         EncodingContext* encodingContext,
+                                         Ref<AttachmentState> attachmentState,
+                                         bool depthReadOnly,
+                                         bool stencilReadOnly)
+        : ProgrammableEncoder(device, label, encodingContext),
+          mIndirectDrawMetadata(device->GetLimits()),
+          mAttachmentState(std::move(attachmentState)),
+          mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+          mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+        mDepthReadOnly = depthReadOnly;
+        mStencilReadOnly = stencilReadOnly;
+    }
+
+    RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+                                         EncodingContext* encodingContext,
+                                         ErrorTag errorTag)
+        : ProgrammableEncoder(device, encodingContext, errorTag),
+          mIndirectDrawMetadata(device->GetLimits()),
+          mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+          mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+    }
+
+    void RenderEncoderBase::DestroyImpl() {
+        // Remove reference to the attachment state so that we don't have lingering references to
+        // it preventing it from being uncached in the device.
+        mAttachmentState = nullptr;
+    }
+
+    const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
+        ASSERT(!IsError());
+        ASSERT(mAttachmentState != nullptr);
+        return mAttachmentState.Get();
+    }
+
+    bool RenderEncoderBase::IsDepthReadOnly() const {
+        ASSERT(!IsError());
+        return mDepthReadOnly;
+    }
+
+    bool RenderEncoderBase::IsStencilReadOnly() const {
+        ASSERT(!IsError());
+        return mStencilReadOnly;
+    }
+
+    Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
+        return std::move(mAttachmentState);
+    }
+
+    void RenderEncoderBase::APIDraw(uint32_t vertexCount,
+                                    uint32_t instanceCount,
+                                    uint32_t firstVertex,
+                                    uint32_t firstInstance) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+                    DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+                                    "First instance (%u) must be zero.", firstInstance);
+
+                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
+                                                                                      firstVertex));
+                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+                        instanceCount, firstInstance));
+                }
+
+                DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
+                draw->vertexCount = vertexCount;
+                draw->instanceCount = instanceCount;
+                draw->firstVertex = firstVertex;
+                draw->firstInstance = firstInstance;
+
+                return {};
+            },
+            "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
+            firstInstance);
+    }
+
+    void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
+                                           uint32_t instanceCount,
+                                           uint32_t firstIndex,
+                                           int32_t baseVertex,
+                                           uint32_t firstInstance) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+                    DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+                                    "First instance (%u) must be zero.", firstInstance);
+
+                    DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
+                                    "Base vertex (%u) must be zero.", baseVertex);
+
+                    DAWN_TRY(
+                        mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
+
+                    // Although we don't know actual vertex access range in CPU, we still call the
+                    // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
+                    // mode vertex buffer with an array stride of zero.
+                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
+                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+                        instanceCount, firstInstance));
+                }
+
+                DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
+                draw->indexCount = indexCount;
+                draw->instanceCount = instanceCount;
+                draw->firstIndex = firstIndex;
+                draw->baseVertex = baseVertex;
+                draw->firstInstance = firstInstance;
+
+                return {};
+            },
+            "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount,
+            firstIndex, baseVertex, firstInstance);
+    }
+
+    void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                    DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+                    DAWN_INVALID_IF(
+                        indirectOffset >= indirectBuffer->GetSize() ||
+                            kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
+                        "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+                        indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+                }
+
+                DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+                cmd->indirectBuffer = indirectBuffer;
+                cmd->indirectOffset = indirectOffset;
+
+                mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+                return {};
+            },
+            "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+    }
+
+    void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
+                                                   uint64_t indirectOffset) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                    DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+                    DAWN_INVALID_IF(
+                        (indirectOffset >= indirectBuffer->GetSize() ||
+                         kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
+                        "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+                        indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+                }
+
+                DrawIndexedIndirectCmd* cmd =
+                    allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+                if (IsValidationEnabled()) {
+                    // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+                    // buffer which will store the validated indirect data. The buffer and offset
+                    // will be updated to point to it.
+                    // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+                    // render pass, while the |cmd| pointer is still valid.
+                    cmd->indirectBuffer = nullptr;
+
+                    mIndirectDrawMetadata.AddIndexedIndirectDraw(
+                        mCommandBufferState.GetIndexFormat(),
+                        mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
+                        cmd);
+                } else {
+                    cmd->indirectBuffer = indirectBuffer;
+                    cmd->indirectOffset = indirectOffset;
+                }
+
+                // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+                // validation, but it will unecessarily transition to indirectBuffer usage in the
+                // backend.
+                mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+                return {};
+            },
+            "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+    }
+
+    void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+
+                    DAWN_INVALID_IF(pipeline->GetAttachmentState() != mAttachmentState.Get(),
+                                    "Attachment state of %s is not compatible with %s.\n"
+                                    "%s expects an attachment state of %s.\n"
+                                    "%s has an attachment state of %s.",
+                                    pipeline, this, this, mAttachmentState.Get(), pipeline,
+                                    pipeline->GetAttachmentState());
+
+                    DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
+                                    "%s writes depth while %s's depthReadOnly is true", pipeline,
+                                    this);
+
+                    DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
+                                    "%s writes stencil while %s's stencilReadOnly is true",
+                                    pipeline, this);
+                }
+
+                mCommandBufferState.SetRenderPipeline(pipeline);
+
+                SetRenderPipelineCmd* cmd =
+                    allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
+                cmd->pipeline = pipeline;
+
+                return {};
+            },
+            "encoding %s.SetPipeline(%s).", this, pipeline);
+    }
+
+    void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
+                                              wgpu::IndexFormat format,
+                                              uint64_t offset,
+                                              uint64_t size) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
+                    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
+
+                    DAWN_TRY(ValidateIndexFormat(format));
+
+                    DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
+                                    "Index format must be specified");
+
+                    DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
+                                    "Index buffer offset (%u) is not a multiple of the size (%u) "
+                                    "of %s.",
+                                    offset, IndexFormatSize(format), format);
+
+                    uint64_t bufferSize = buffer->GetSize();
+                    DAWN_INVALID_IF(offset > bufferSize,
+                                    "Index buffer offset (%u) is larger than the size (%u) of %s.",
+                                    offset, bufferSize, buffer);
+
+                    uint64_t remainingSize = bufferSize - offset;
+
+                    if (size == wgpu::kWholeSize) {
+                        size = remainingSize;
+                    } else {
+                        DAWN_INVALID_IF(size > remainingSize,
+                                        "Index buffer range (offset: %u, size: %u) doesn't fit in "
+                                        "the size (%u) of "
+                                        "%s.",
+                                        offset, size, bufferSize, buffer);
+                    }
+                } else {
+                    if (size == wgpu::kWholeSize) {
+                        DAWN_ASSERT(buffer->GetSize() >= offset);
+                        size = buffer->GetSize() - offset;
+                    }
+                }
+
+                mCommandBufferState.SetIndexBuffer(format, size);
+
+                SetIndexBufferCmd* cmd =
+                    allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
+                cmd->buffer = buffer;
+                cmd->format = format;
+                cmd->offset = offset;
+                cmd->size = size;
+
+                mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+
+                return {};
+            },
+            "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
+    }
+
+    void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
+                                               BufferBase* buffer,
+                                               uint64_t offset,
+                                               uint64_t size) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
+                    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
+
+                    DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
+                                    "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
+                                    kMaxVertexBuffers - 1);
+
+                    DAWN_INVALID_IF(offset % 4 != 0,
+                                    "Vertex buffer offset (%u) is not a multiple of 4", offset);
+
+                    uint64_t bufferSize = buffer->GetSize();
+                    DAWN_INVALID_IF(offset > bufferSize,
+                                    "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
+                                    offset, bufferSize, buffer);
+
+                    uint64_t remainingSize = bufferSize - offset;
+
+                    if (size == wgpu::kWholeSize) {
+                        size = remainingSize;
+                    } else {
+                        DAWN_INVALID_IF(size > remainingSize,
+                                        "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
+                                        "the size (%u) "
+                                        "of %s.",
+                                        offset, size, bufferSize, buffer);
+                    }
+                } else {
+                    if (size == wgpu::kWholeSize) {
+                        DAWN_ASSERT(buffer->GetSize() >= offset);
+                        size = buffer->GetSize() - offset;
+                    }
+                }
+
+                mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
+
+                SetVertexBufferCmd* cmd =
+                    allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+                cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
+                cmd->buffer = buffer;
+                cmd->offset = offset;
+                cmd->size = size;
+
+                mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+
+                return {};
+            },
+            "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
+    }
+
+    void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
+                                            BindGroupBase* group,
+                                            uint32_t dynamicOffsetCount,
+                                            const uint32_t* dynamicOffsets) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                BindGroupIndex groupIndex(groupIndexIn);
+
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+                                                  dynamicOffsets));
+                }
+
+                RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+                                   dynamicOffsets);
+                mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
+                                                 dynamicOffsets);
+                mUsageTracker.AddBindGroup(group);
+
+                return {};
+            },
+            // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
+            // as a string value in the message. This despite the exact same code working as
+            // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
+            // until the reason for the failure can be determined.
+            "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
+            dynamicOffsetCount);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/RenderEncoderBase.h b/src/dawn/native/RenderEncoderBase.h
new file mode 100644
index 0000000..80128f3
--- /dev/null
+++ b/src/dawn/native/RenderEncoderBase.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERENCODERBASE_H_
+#define DAWNNATIVE_RENDERENCODERBASE_H_
+
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/ProgrammableEncoder.h"
+
+namespace dawn::native {
+
+    class RenderEncoderBase : public ProgrammableEncoder {
+      public:
+        RenderEncoderBase(DeviceBase* device,
+                          const char* label,
+                          EncodingContext* encodingContext,
+                          Ref<AttachmentState> attachmentState,
+                          bool depthReadOnly,
+                          bool stencilReadOnly);
+
+        void APIDraw(uint32_t vertexCount,
+                     uint32_t instanceCount = 1,
+                     uint32_t firstVertex = 0,
+                     uint32_t firstInstance = 0);
+        void APIDrawIndexed(uint32_t vertexCount,
+                            uint32_t instanceCount,
+                            uint32_t firstIndex,
+                            int32_t baseVertex,
+                            uint32_t firstInstance);
+
+        void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+        void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+
+        void APISetPipeline(RenderPipelineBase* pipeline);
+
+        void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
+        void APISetIndexBuffer(BufferBase* buffer,
+                               wgpu::IndexFormat format,
+                               uint64_t offset,
+                               uint64_t size);
+
+        void APISetBindGroup(uint32_t groupIndex,
+                             BindGroupBase* group,
+                             uint32_t dynamicOffsetCount = 0,
+                             const uint32_t* dynamicOffsets = nullptr);
+
+        const AttachmentState* GetAttachmentState() const;
+        bool IsDepthReadOnly() const;
+        bool IsStencilReadOnly() const;
+        Ref<AttachmentState> AcquireAttachmentState();
+
+      protected:
+        // Construct an "error" render encoder base.
+        RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+
+        void DestroyImpl() override;
+
+        CommandBufferStateTracker mCommandBufferState;
+        RenderPassResourceUsageTracker mUsageTracker;
+        IndirectDrawMetadata mIndirectDrawMetadata;
+
+      private:
+        Ref<AttachmentState> mAttachmentState;
+        const bool mDisableBaseVertex;
+        const bool mDisableBaseInstance;
+        bool mDepthReadOnly = false;
+        bool mStencilReadOnly = false;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RENDERENCODERBASE_H_
diff --git a/src/dawn/native/RenderPassEncoder.cpp b/src/dawn/native/RenderPassEncoder.cpp
new file mode 100644
index 0000000..b7f7563
--- /dev/null
+++ b/src/dawn/native/RenderPassEncoder.cpp
@@ -0,0 +1,425 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderPassEncoder.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+
+#include <math.h>
+#include <cstring>
+
+namespace dawn::native {
+    namespace {
+
+        // Check the query at queryIndex is unavailable, otherwise it cannot be written.
+        MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
+                                               uint32_t queryIndex,
+                                               const QueryAvailabilityMap& queryAvailabilityMap) {
+            auto it = queryAvailabilityMap.find(querySet);
+            DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
+                            "Query index %u of %s is written to twice in a render pass.",
+                            queryIndex, querySet);
+
+            return {};
+        }
+
+    }  // namespace
+
+    // The usage tracker is passed in here, because it is prepopulated with usages from the
+    // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
+    // command, then this wouldn't be necessary.
+    RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+                                         const RenderPassDescriptor* descriptor,
+                                         CommandEncoder* commandEncoder,
+                                         EncodingContext* encodingContext,
+                                         RenderPassResourceUsageTracker usageTracker,
+                                         Ref<AttachmentState> attachmentState,
+                                         std::vector<TimestampWrite> timestampWritesAtEnd,
+                                         uint32_t renderTargetWidth,
+                                         uint32_t renderTargetHeight,
+                                         bool depthReadOnly,
+                                         bool stencilReadOnly)
+        : RenderEncoderBase(device,
+                            descriptor->label,
+                            encodingContext,
+                            std::move(attachmentState),
+                            depthReadOnly,
+                            stencilReadOnly),
+          mCommandEncoder(commandEncoder),
+          mRenderTargetWidth(renderTargetWidth),
+          mRenderTargetHeight(renderTargetHeight),
+          mOcclusionQuerySet(descriptor->occlusionQuerySet),
+          mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+        mUsageTracker = std::move(usageTracker);
+        TrackInDevice();
+    }
+
+    // static
+    Ref<RenderPassEncoder> RenderPassEncoder::Create(
+        DeviceBase* device,
+        const RenderPassDescriptor* descriptor,
+        CommandEncoder* commandEncoder,
+        EncodingContext* encodingContext,
+        RenderPassResourceUsageTracker usageTracker,
+        Ref<AttachmentState> attachmentState,
+        std::vector<TimestampWrite> timestampWritesAtEnd,
+        uint32_t renderTargetWidth,
+        uint32_t renderTargetHeight,
+        bool depthReadOnly,
+        bool stencilReadOnly) {
+        return AcquireRef(new RenderPassEncoder(
+            device, descriptor, commandEncoder, encodingContext, std::move(usageTracker),
+            std::move(attachmentState), std::move(timestampWritesAtEnd), renderTargetWidth,
+            renderTargetHeight, depthReadOnly, stencilReadOnly));
+    }
+
+    RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+                                         CommandEncoder* commandEncoder,
+                                         EncodingContext* encodingContext,
+                                         ErrorTag errorTag)
+        : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
+    }
+
+    // static
+    Ref<RenderPassEncoder> RenderPassEncoder::MakeError(DeviceBase* device,
+                                                        CommandEncoder* commandEncoder,
+                                                        EncodingContext* encodingContext) {
+        return AcquireRef(
+            new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+    }
+
+    void RenderPassEncoder::DestroyImpl() {
+        RenderEncoderBase::DestroyImpl();
+        // Ensure that the pass has exited. This is done for passes only since validation requires
+        // they exit before destruction while bundles do not.
+        mEncodingContext->EnsurePassExited(this);
+    }
+
+    ObjectType RenderPassEncoder::GetType() const {
+        return ObjectType::RenderPassEncoder;
+    }
+
+    void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+        DAWN_ASSERT(querySet != nullptr);
+
+        // Track the query availability with true on render pass for rewrite validation and query
+        // reset on render pass on Vulkan
+        mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
+
+        // Track it again on command encoder for zero-initializing when resolving unused queries.
+        mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+    }
+
+    void RenderPassEncoder::APIEnd() {
+        if (mEncodingContext->TryEncode(
+                this,
+                [&](CommandAllocator* allocator) -> MaybeError {
+                    if (IsValidationEnabled()) {
+                        DAWN_TRY(ValidateProgrammableEncoderEnd());
+
+                        DAWN_INVALID_IF(
+                            mOcclusionQueryActive,
+                            "Render pass %s ended with incomplete occlusion query index %u of %s.",
+                            this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+                    }
+
+                    EndRenderPassCmd* cmd =
+                        allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
+                    // The query availability has already been updated at the beginning of render
+                    // pass, and no need to do update here.
+                    cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
+
+                    DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
+                                                              mCommandEncoder.Get(),
+                                                              std::move(mIndirectDrawMetadata)));
+                    return {};
+                },
+                "encoding %s.End().", this)) {
+        }
+    }
+
+    void RenderPassEncoder::APIEndPass() {
+        GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+        APIEnd();
+    }
+
+    void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                SetStencilReferenceCmd* cmd =
+                    allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
+                cmd->reference = reference;
+
+                return {};
+            },
+            "encoding %s.SetStencilReference(%u).", this, reference);
+    }
+
+    void RenderPassEncoder::APISetBlendConstant(const Color* color) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                SetBlendConstantCmd* cmd =
+                    allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
+                cmd->color = *color;
+
+                return {};
+            },
+            "encoding %s.SetBlendConstant(%s).", this, color);
+    }
+
+    void RenderPassEncoder::APISetViewport(float x,
+                                           float y,
+                                           float width,
+                                           float height,
+                                           float minDepth,
+                                           float maxDepth) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(
+                        (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
+                         isnan(maxDepth)),
+                        "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
+                        "minDepth: %f, maxDepth: %f) is NaN.",
+                        x, y, width, height, minDepth, maxDepth);
+
+                    DAWN_INVALID_IF(
+                        x < 0 || y < 0 || width < 0 || height < 0,
+                        "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
+                        "value.",
+                        x, y, width, height);
+
+                    DAWN_INVALID_IF(
+                        x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
+                        "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
+                        "in "
+                        "the render target dimensions (%u x %u).",
+                        x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+
+                    // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
+                    DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
+                                    "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
+                                    "minDepth was "
+                                    "greater than maxDepth.",
+                                    minDepth, maxDepth);
+                }
+
+                SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
+                cmd->x = x;
+                cmd->y = y;
+                cmd->width = width;
+                cmd->height = height;
+                cmd->minDepth = minDepth;
+                cmd->maxDepth = maxDepth;
+
+                return {};
+            },
+            "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
+            maxDepth);
+    }
+
+    void RenderPassEncoder::APISetScissorRect(uint32_t x,
+                                              uint32_t y,
+                                              uint32_t width,
+                                              uint32_t height) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(
+                        width > mRenderTargetWidth || height > mRenderTargetHeight ||
+                            x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
+                        "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
+                        "the render target dimensions (%u x %u).",
+                        x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+                }
+
+                SetScissorRectCmd* cmd =
+                    allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
+                cmd->x = x;
+                cmd->y = y;
+                cmd->width = width;
+                cmd->height = height;
+
+                return {};
+            },
+            "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
+    }
+
+    void RenderPassEncoder::APIExecuteBundles(uint32_t count,
+                                              RenderBundleBase* const* renderBundles) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    const AttachmentState* attachmentState = GetAttachmentState();
+                    bool depthReadOnlyInPass = IsDepthReadOnly();
+                    bool stencilReadOnlyInPass = IsStencilReadOnly();
+                    for (uint32_t i = 0; i < count; ++i) {
+                        DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
+
+                        DAWN_INVALID_IF(attachmentState != renderBundles[i]->GetAttachmentState(),
+                                        "Attachment state of renderBundles[%i] (%s) is not "
+                                        "compatible with %s.\n"
+                                        "%s expects an attachment state of %s.\n"
+                                        "renderBundles[%i] (%s) has an attachment state of %s.",
+                                        i, renderBundles[i], this, this, attachmentState, i,
+                                        renderBundles[i], renderBundles[i]->GetAttachmentState());
+
+                        bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
+                        DAWN_INVALID_IF(
+                            depthReadOnlyInPass && !depthReadOnlyInBundle,
+                            "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
+                            "with DepthReadOnly (%u) of %s.",
+                            depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass, this);
+
+                        bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
+                        DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
+                                        "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
+                                        "compatible with StencilReadOnly (%u) of %s.",
+                                        stencilReadOnlyInBundle, i, renderBundles[i],
+                                        stencilReadOnlyInPass, this);
+                    }
+                }
+
+                mCommandBufferState = CommandBufferStateTracker{};
+
+                ExecuteBundlesCmd* cmd =
+                    allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
+                cmd->count = count;
+
+                Ref<RenderBundleBase>* bundles =
+                    allocator->AllocateData<Ref<RenderBundleBase>>(count);
+                for (uint32_t i = 0; i < count; ++i) {
+                    bundles[i] = renderBundles[i];
+
+                    const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
+                    for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+                        mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
+                    }
+
+                    for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+                        mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
+                                                                  usages.textureUsages[i]);
+                    }
+
+                    if (IsValidationEnabled()) {
+                        mIndirectDrawMetadata.AddBundle(renderBundles[i]);
+                    }
+                }
+
+                return {};
+            },
+            "encoding %s.ExecuteBundles(%u, ...).", this, count);
+    }
+
+    void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
+                                    "The occlusionQuerySet in RenderPassDescriptor is not set.");
+
+                    // The type of querySet has been validated by ValidateRenderPassDescriptor
+
+                    DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
+                                    "Query index (%u) exceeds the number of queries (%u) in %s.",
+                                    queryIndex, mOcclusionQuerySet->GetQueryCount(),
+                                    mOcclusionQuerySet.Get());
+
+                    DAWN_INVALID_IF(mOcclusionQueryActive,
+                                    "An occlusion query (%u) in %s is already active.",
+                                    mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+
+                    DAWN_TRY_CONTEXT(
+                        ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
+                                                    mUsageTracker.GetQueryAvailabilityMap()),
+                        "validating the occlusion query index (%u) in %s", queryIndex,
+                        mOcclusionQuerySet.Get());
+                }
+
+                // Record the current query index for endOcclusionQuery.
+                mCurrentOcclusionQueryIndex = queryIndex;
+                mOcclusionQueryActive = true;
+
+                BeginOcclusionQueryCmd* cmd =
+                    allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
+                cmd->querySet = mOcclusionQuerySet.Get();
+                cmd->queryIndex = queryIndex;
+
+                return {};
+            },
+            "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
+    }
+
+    void RenderPassEncoder::APIEndOcclusionQuery() {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
+                }
+
+                TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+
+                mOcclusionQueryActive = false;
+
+                EndOcclusionQueryCmd* cmd =
+                    allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
+                cmd->querySet = mOcclusionQuerySet.Get();
+                cmd->queryIndex = mCurrentOcclusionQueryIndex;
+
+                return {};
+            },
+            "encoding %s.EndOcclusionQuery().", this);
+    }
+
+    void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+                    DAWN_TRY_CONTEXT(
+                        ValidateQueryIndexOverwrite(querySet, queryIndex,
+                                                    mUsageTracker.GetQueryAvailabilityMap()),
+                        "validating the timestamp query index (%u) of %s", queryIndex, querySet);
+                }
+
+                TrackQueryAvailability(querySet, queryIndex);
+
+                WriteTimestampCmd* cmd =
+                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+                cmd->querySet = querySet;
+                cmd->queryIndex = queryIndex;
+
+                return {};
+            },
+            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/RenderPassEncoder.h b/src/dawn/native/RenderPassEncoder.h
new file mode 100644
index 0000000..970af73
--- /dev/null
+++ b/src/dawn/native/RenderPassEncoder.h
@@ -0,0 +1,103 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERPASSENCODER_H_
+#define DAWNNATIVE_RENDERPASSENCODER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/RenderEncoderBase.h"
+
+namespace dawn::native {
+
+    class RenderBundleBase;
+
+    class RenderPassEncoder final : public RenderEncoderBase {
+      public:
+        static Ref<RenderPassEncoder> Create(DeviceBase* device,
+                                             const RenderPassDescriptor* descriptor,
+                                             CommandEncoder* commandEncoder,
+                                             EncodingContext* encodingContext,
+                                             RenderPassResourceUsageTracker usageTracker,
+                                             Ref<AttachmentState> attachmentState,
+                                             std::vector<TimestampWrite> timestampWritesAtEnd,
+                                             uint32_t renderTargetWidth,
+                                             uint32_t renderTargetHeight,
+                                             bool depthReadOnly,
+                                             bool stencilReadOnly);
+        static Ref<RenderPassEncoder> MakeError(DeviceBase* device,
+                                                CommandEncoder* commandEncoder,
+                                                EncodingContext* encodingContext);
+
+        ObjectType GetType() const override;
+
+        void APIEnd();
+        void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
+
+        void APISetStencilReference(uint32_t reference);
+        void APISetBlendConstant(const Color* color);
+        void APISetViewport(float x,
+                            float y,
+                            float width,
+                            float height,
+                            float minDepth,
+                            float maxDepth);
+        void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+        void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+
+        void APIBeginOcclusionQuery(uint32_t queryIndex);
+        void APIEndOcclusionQuery();
+
+        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+      protected:
+        RenderPassEncoder(DeviceBase* device,
+                          const RenderPassDescriptor* descriptor,
+                          CommandEncoder* commandEncoder,
+                          EncodingContext* encodingContext,
+                          RenderPassResourceUsageTracker usageTracker,
+                          Ref<AttachmentState> attachmentState,
+                          std::vector<TimestampWrite> timestampWritesAtEnd,
+                          uint32_t renderTargetWidth,
+                          uint32_t renderTargetHeight,
+                          bool depthReadOnly,
+                          bool stencilReadOnly);
+        RenderPassEncoder(DeviceBase* device,
+                          CommandEncoder* commandEncoder,
+                          EncodingContext* encodingContext,
+                          ErrorTag errorTag);
+
+      private:
+        void DestroyImpl() override;
+
+        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+        // For render and compute passes, the encoding context is borrowed from the command encoder.
+        // Keep a reference to the encoder to make sure the context isn't freed.
+        Ref<CommandEncoder> mCommandEncoder;
+
+        uint32_t mRenderTargetWidth;
+        uint32_t mRenderTargetHeight;
+
+        // The resources for occlusion query
+        Ref<QuerySetBase> mOcclusionQuerySet;
+        uint32_t mCurrentOcclusionQueryIndex = 0;
+        bool mOcclusionQueryActive = false;
+
+        std::vector<TimestampWrite> mTimestampWritesAtEnd;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RENDERPASSENCODER_H_
diff --git a/src/dawn/native/RenderPipeline.cpp b/src/dawn/native/RenderPipeline.cpp
new file mode 100644
index 0000000..8af3554
--- /dev/null
+++ b/src/dawn/native/RenderPipeline.cpp
@@ -0,0 +1,1014 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/VertexFormat.h"
+
+#include <cmath>
+#include <sstream>
+
+namespace dawn::native {
+
+    // Helper functions
+    namespace {
+        MaybeError ValidateVertexAttribute(
+            DeviceBase* device,
+            const VertexAttribute* attribute,
+            const EntryPointMetadata& metadata,
+            uint64_t vertexBufferStride,
+            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+            DAWN_TRY(ValidateVertexFormat(attribute->format));
+            const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
+
+            DAWN_INVALID_IF(
+                attribute->shaderLocation >= kMaxVertexAttributes,
+                "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
+                "(%u).",
+                attribute->shaderLocation, kMaxVertexAttributes);
+
+            VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
+
+            // No underflow is possible because the max vertex format size is smaller than
+            // kMaxVertexBufferArrayStride.
+            ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
+            DAWN_INVALID_IF(
+                attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
+                "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
+                "buffer stride (%u).",
+                attribute->offset, attribute->format, formatInfo.byteSize,
+                kMaxVertexBufferArrayStride);
+
+            // No overflow is possible because the offset is already validated to be less
+            // than kMaxVertexBufferArrayStride.
+            ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
+            DAWN_INVALID_IF(
+                vertexBufferStride > 0 &&
+                    attribute->offset + formatInfo.byteSize > vertexBufferStride,
+                "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
+                "stride (%u).",
+                attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
+
+            DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
+                            "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
+                            std::min(4u, formatInfo.byteSize));
+
+            DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
+                                formatInfo.baseType != metadata.vertexInputBaseTypes[location],
+                            "Attribute base type (%s) does not match the "
+                            "shader's base type (%s) in location (%u).",
+                            formatInfo.baseType, metadata.vertexInputBaseTypes[location],
+                            attribute->shaderLocation);
+
+            DAWN_INVALID_IF((*attributesSetMask)[location],
+                            "Attribute shader location (%u) is used more than once.",
+                            attribute->shaderLocation);
+
+            attributesSetMask->set(location);
+            return {};
+        }
+
+        MaybeError ValidateVertexBufferLayout(
+            DeviceBase* device,
+            const VertexBufferLayout* buffer,
+            const EntryPointMetadata& metadata,
+            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+            DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
+            DAWN_INVALID_IF(
+                buffer->arrayStride > kMaxVertexBufferArrayStride,
+                "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
+                buffer->arrayStride, kMaxVertexBufferArrayStride);
+
+            DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
+                            "Vertex buffer arrayStride (%u) is not a multiple of 4.",
+                            buffer->arrayStride);
+
+            for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
+                DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
+                                                         buffer->arrayStride, attributesSetMask),
+                                 "validating attributes[%u].", i);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateVertexState(DeviceBase* device,
+                                       const VertexState* descriptor,
+                                       const PipelineLayoutBase* layout) {
+            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_INVALID_IF(
+                descriptor->bufferCount > kMaxVertexBuffers,
+                "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
+                descriptor->bufferCount, kMaxVertexBuffers);
+
+            DAWN_TRY_CONTEXT(
+                ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+                                          descriptor->constantCount, descriptor->constants, layout,
+                                          SingleShaderStage::Vertex),
+                "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
+                descriptor->entryPoint);
+            const EntryPointMetadata& vertexMetadata =
+                descriptor->module->GetEntryPoint(descriptor->entryPoint);
+
+            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
+            uint32_t totalAttributesNum = 0;
+            for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
+                DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
+                                                            vertexMetadata, &attributesSetMask),
+                                 "validating buffers[%u].", i);
+                totalAttributesNum += descriptor->buffers[i].attributeCount;
+            }
+
+            // Every vertex attribute has a member called shaderLocation, and there are some
+            // requirements for shaderLocation: 1) >=0, 2) values are different across different
+            // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
+            // attribute number never exceed kMaxVertexAttributes.
+            ASSERT(totalAttributesNum <= kMaxVertexAttributes);
+
+            // TODO(dawn:563): Specify which inputs were not used in error message.
+            DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
+                            "Pipeline vertex stage uses vertex buffers not in the vertex state");
+
+            return {};
+        }
+
+        MaybeError ValidatePrimitiveState(const DeviceBase* device,
+                                          const PrimitiveState* descriptor) {
+            DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                         wgpu::SType::PrimitiveDepthClampingState));
+            const PrimitiveDepthClampingState* clampInfo = nullptr;
+            FindInChain(descriptor->nextInChain, &clampInfo);
+            if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
+                return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
+            }
+            DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
+            DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
+            DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
+            DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+
+            // Pipeline descriptors must have stripIndexFormat == undefined if they are using
+            // non-strip topologies.
+            if (!IsStripPrimitiveTopology(descriptor->topology)) {
+                DAWN_INVALID_IF(
+                    descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
+                    "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
+                    "topology (%s).",
+                    descriptor->stripIndexFormat, descriptor->topology);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateDepthStencilState(const DeviceBase* device,
+                                             const DepthStencilState* descriptor) {
+            if (descriptor->nextInChain != nullptr) {
+                return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+            }
+
+            DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
+            DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
+            DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
+            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
+
+            const Format* format;
+            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+            DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+                            "Depth stencil format (%s) is not depth-stencil renderable.",
+                            descriptor->format);
+
+            DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
+                                std::isnan(descriptor->depthBiasClamp),
+                            "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
+                            descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
+
+            DAWN_INVALID_IF(
+                !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
+                                        descriptor->depthWriteEnabled),
+                "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
+                "not %s or depthWriteEnabled (%u) is true.",
+                descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
+                descriptor->depthWriteEnabled);
+
+            DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
+                            "Depth stencil format (%s) doesn't have stencil aspect while stencil "
+                            "test or stencil write is enabled.",
+                            descriptor->format);
+
+            return {};
+        }
+
+        MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
+            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
+                            "Multisample count (%u) is not supported.", descriptor->count);
+
+            DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
+                            "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
+                            descriptor->count);
+
+            return {};
+        }
+
+        MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
+            if (blendComponent.operation == wgpu::BlendOperation::Min ||
+                blendComponent.operation == wgpu::BlendOperation::Max) {
+                DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
+                                    blendComponent.dstFactor != wgpu::BlendFactor::One,
+                                "Blend factor is not %s when blend operation is %s.",
+                                wgpu::BlendFactor::One, blendComponent.operation);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
+            DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
+            DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
+            DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
+            DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
+            DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
+            DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+            DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
+            DAWN_TRY(ValidateBlendComponent(descriptor->color));
+
+            return {};
+        }
+
+        bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+            return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+                   blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+                   blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+        }
+
+        MaybeError ValidateColorTargetState(
+            DeviceBase* device,
+            const ColorTargetState* descriptor,
+            bool fragmentWritten,
+            const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
+            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            if (descriptor->blend) {
+                DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
+                                 "validating blend state.");
+            }
+
+            DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
+
+            const Format* format;
+            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+            DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+                            "Color format (%s) is not color renderable.", descriptor->format);
+
+            DAWN_INVALID_IF(
+                descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
+                                       SampleTypeBit::Float),
+                "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
+
+            if (fragmentWritten) {
+                DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
+                                    format->GetAspectInfo(Aspect::Color).baseType,
+                                "Color format (%s) base type (%s) doesn't match the fragment "
+                                "module output type (%s).",
+                                descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
+                                fragmentOutputVariable.baseType);
+
+                DAWN_INVALID_IF(
+                    fragmentOutputVariable.componentCount < format->componentCount,
+                    "The fragment stage has fewer output components (%u) than the color format "
+                    "(%s) component count (%u).",
+                    fragmentOutputVariable.componentCount, descriptor->format,
+                    format->componentCount);
+
+                if (descriptor->blend) {
+                    if (fragmentOutputVariable.componentCount < 4u) {
+                        // No alpha channel output
+                        // Make sure there's no alpha involved in the blending operation
+                        DAWN_INVALID_IF(
+                            BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
+                                BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
+                            "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
+                            "but it is missing from fragment output.",
+                            descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
+                    }
+                }
+            } else {
+                DAWN_INVALID_IF(
+                    descriptor->writeMask != wgpu::ColorWriteMask::None,
+                    "Color target has no corresponding fragment stage output but writeMask (%s) is "
+                    "not zero.",
+                    descriptor->writeMask);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateFragmentState(DeviceBase* device,
+                                         const FragmentState* descriptor,
+                                         const PipelineLayoutBase* layout) {
+            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+            DAWN_TRY_CONTEXT(
+                ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+                                          descriptor->constantCount, descriptor->constants, layout,
+                                          SingleShaderStage::Fragment),
+                "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
+                descriptor->entryPoint);
+
+            DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
+                            "Number of targets (%u) exceeds the maximum (%u).",
+                            descriptor->targetCount, kMaxColorAttachments);
+
+            const EntryPointMetadata& fragmentMetadata =
+                descriptor->module->GetEntryPoint(descriptor->entryPoint);
+            for (ColorAttachmentIndex i(uint8_t(0));
+                 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
+                const ColorTargetState* target = &descriptor->targets[static_cast<uint8_t>(i)];
+                if (target->format != wgpu::TextureFormat::Undefined) {
+                    DAWN_TRY_CONTEXT(ValidateColorTargetState(
+                                         device, target, fragmentMetadata.fragmentOutputsWritten[i],
+                                         fragmentMetadata.fragmentOutputVariables[i]),
+                                     "validating targets[%u].", static_cast<uint8_t>(i));
+                } else {
+                    DAWN_INVALID_IF(
+                        target->blend,
+                        "Color target[%u] blend state is set when the format is undefined.",
+                        static_cast<uint8_t>(i));
+                    DAWN_INVALID_IF(
+                        target->writeMask != wgpu::ColorWriteMask::None,
+                        "Color target[%u] write mask is set to (%s) when the format is undefined.",
+                        static_cast<uint8_t>(i), target->writeMask);
+                }
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateInterStageMatching(DeviceBase* device,
+                                              const VertexState& vertexState,
+                                              const FragmentState& fragmentState) {
+            const EntryPointMetadata& vertexMetadata =
+                vertexState.module->GetEntryPoint(vertexState.entryPoint);
+            const EntryPointMetadata& fragmentMetadata =
+                fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
+
+            // TODO(dawn:563): Can this message give more details?
+            DAWN_INVALID_IF(
+                vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
+                "One or more fragment inputs and vertex outputs are not one-to-one matching");
+
+            // TODO(dawn:802): Validate interpolation types and interpolition sampling types
+            for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
+                const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
+                const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
+                DAWN_INVALID_IF(
+                    vertexOutputInfo.baseType != fragmentInputInfo.baseType,
+                    "The base type (%s) of the vertex output at location %u is different from the "
+                    "base type (%s) of the fragment input at location %u.",
+                    vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
+
+                DAWN_INVALID_IF(
+                    vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
+                    "The component count (%u) of the vertex output at location %u is different "
+                    "from the component count (%u) of the fragment input at location %u.",
+                    vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
+
+                DAWN_INVALID_IF(
+                    vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
+                    "The interpolation type (%s) of the vertex output at location %u is different "
+                    "from the interpolation type (%s) of the fragment input at location %u.",
+                    vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
+
+                DAWN_INVALID_IF(
+                    vertexOutputInfo.interpolationSampling !=
+                        fragmentInputInfo.interpolationSampling,
+                    "The interpolation sampling (%s) of the vertex output at location %u is "
+                    "different from the interpolation sampling (%s) of the fragment input at "
+                    "location %u.",
+                    vertexOutputInfo.interpolationSampling, i,
+                    fragmentInputInfo.interpolationSampling, i);
+            }
+
+            return {};
+        }
+    }  // anonymous namespace
+
+    // Helper functions
+    size_t IndexFormatSize(wgpu::IndexFormat format) {
+        switch (format) {
+            case wgpu::IndexFormat::Uint16:
+                return sizeof(uint16_t);
+            case wgpu::IndexFormat::Uint32:
+                return sizeof(uint32_t);
+            case wgpu::IndexFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+        return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
+               primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
+    }
+
+    MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+                                                const RenderPipelineDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+        if (descriptor->layout != nullptr) {
+            DAWN_TRY(device->ValidateObject(descriptor->layout));
+        }
+
+        DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
+                         "validating vertex state.");
+
+        DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
+                         "validating primitive state.");
+
+        if (descriptor->depthStencil) {
+            DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
+                             "validating depthStencil state.");
+        }
+
+        DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
+                         "validating multisample state.");
+
+        if (descriptor->fragment != nullptr) {
+            DAWN_TRY_CONTEXT(
+                ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
+                "validating fragment state.");
+
+            DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
+                            "Must have at least one color or depthStencil target.");
+
+            DAWN_TRY(
+                ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
+        }
+
+        return {};
+    }
+
+    std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+        DeviceBase* device,
+        const RenderPipelineDescriptor* descriptor) {
+        std::vector<StageAndDescriptor> stages;
+        stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
+                          descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
+                          descriptor->vertex.constants});
+        if (descriptor->fragment != nullptr) {
+            stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
+                              descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
+                              descriptor->fragment->constants});
+        } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+            // The dummy fragment shader module should already be initialized
+            DAWN_ASSERT(store->dummyFragmentShader != nullptr);
+            ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
+            stages.push_back(
+                {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
+        }
+        return stages;
+    }
+
+    bool StencilTestEnabled(const DepthStencilState* depthStencil) {
+        return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
+               depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+               depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+               depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+               depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
+               depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+               depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+               depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
+    }
+
+    // RenderPipelineBase
+
+    RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
+                                           const RenderPipelineDescriptor* descriptor)
+        : PipelineBase(device,
+                       descriptor->layout,
+                       descriptor->label,
+                       GetRenderStagesAndSetDummyShader(device, descriptor)),
+          mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
+        mVertexBufferCount = descriptor->vertex.bufferCount;
+        const VertexBufferLayout* buffers = descriptor->vertex.buffers;
+        for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
+            if (buffers[slot].attributeCount == 0) {
+                continue;
+            }
+
+            VertexBufferSlot typedSlot(slot);
+
+            mVertexBufferSlotsUsed.set(typedSlot);
+            mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
+            mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
+            mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
+            mVertexBufferInfos[typedSlot].lastStride = 0;
+            switch (buffers[slot].stepMode) {
+                case wgpu::VertexStepMode::Vertex:
+                    mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
+                    break;
+                case wgpu::VertexStepMode::Instance:
+                    mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
+                    break;
+                default:
+                    DAWN_UNREACHABLE();
+            }
+
+            for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
+                VertexAttributeLocation location = VertexAttributeLocation(
+                    static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
+                mAttributeLocationsUsed.set(location);
+                mAttributeInfos[location].shaderLocation = location;
+                mAttributeInfos[location].vertexBufferSlot = typedSlot;
+                mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
+                mAttributeInfos[location].format = buffers[slot].attributes[i].format;
+                // Compute the access boundary of this attribute by adding attribute format size to
+                // attribute offset. Although offset is in uint64_t, such sum must be no larger than
+                // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
+                // validation of creating render pipeline. Therefore, calculating in uint16_t will
+                // cause no overflow.
+                uint32_t formatByteSize =
+                    GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize;
+                DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
+                uint16_t accessBoundary =
+                    uint16_t(buffers[slot].attributes[i].offset) + uint16_t(formatByteSize);
+                mVertexBufferInfos[typedSlot].usedBytesInStride =
+                    std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
+                mVertexBufferInfos[typedSlot].lastStride =
+                    std::max(mVertexBufferInfos[typedSlot].lastStride,
+                             mAttributeInfos[location].offset + formatByteSize);
+            }
+        }
+
+        mPrimitive = descriptor->primitive;
+        const PrimitiveDepthClampingState* clampInfo = nullptr;
+        FindInChain(mPrimitive.nextInChain, &clampInfo);
+        if (clampInfo) {
+            mClampDepth = clampInfo->clampDepth;
+        }
+        mMultisample = descriptor->multisample;
+
+        if (mAttachmentState->HasDepthStencilAttachment()) {
+            mDepthStencil = *descriptor->depthStencil;
+            mWritesDepth = mDepthStencil.depthWriteEnabled;
+            if (mDepthStencil.stencilWriteMask) {
+                if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
+                     (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
+                      mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+                      mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
+                    (mPrimitive.cullMode != wgpu::CullMode::Back &&
+                     (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
+                      mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+                      mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
+                    mWritesStencil = true;
+                }
+            }
+        } else {
+            // These default values below are useful for backends to fill information.
+            // The values indicate that depth and stencil test are disabled when backends
+            // set their own depth stencil states/descriptors according to the values in
+            // mDepthStencil.
+            mDepthStencil.format = wgpu::TextureFormat::Undefined;
+            mDepthStencil.depthWriteEnabled = false;
+            mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+            mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
+            mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
+            mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
+            mDepthStencil.stencilReadMask = 0xff;
+            mDepthStencil.stencilWriteMask = 0xff;
+            mDepthStencil.depthBias = 0;
+            mDepthStencil.depthBiasSlopeScale = 0.0f;
+            mDepthStencil.depthBiasClamp = 0.0f;
+        }
+
+        for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+            // Vertex-only render pipeline have no color attachment. For a render pipeline with
+            // color attachments, there must be a valid FragmentState.
+            ASSERT(descriptor->fragment != nullptr);
+            const ColorTargetState* target =
+                &descriptor->fragment->targets[static_cast<uint8_t>(i)];
+            mTargets[i] = *target;
+
+            if (target->blend != nullptr) {
+                mTargetBlend[i] = *target->blend;
+                mTargets[i].blend = &mTargetBlend[i];
+            }
+        }
+
+        SetContentHash(ComputeContentHash());
+        TrackInDevice();
+    }
+
+    RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
+        TrackInDevice();
+    }
+
+    RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : PipelineBase(device, tag) {
+    }
+
+    RenderPipelineBase::~RenderPipelineBase() = default;
+
+    void RenderPipelineBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheRenderPipeline(this);
+        }
+
+        // Remove reference to the attachment state so that we don't have lingering references to
+        // it preventing it from being uncached in the device.
+        mAttachmentState = nullptr;
+    }
+
+    // static
+    RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
+        class ErrorRenderPipeline final : public RenderPipelineBase {
+          public:
+            ErrorRenderPipeline(DeviceBase* device)
+                : RenderPipelineBase(device, ObjectBase::kError) {
+            }
+
+            MaybeError Initialize() override {
+                UNREACHABLE();
+                return {};
+            }
+        };
+
+        return new ErrorRenderPipeline(device);
+    }
+
+    ObjectType RenderPipelineBase::GetType() const {
+        return ObjectType::RenderPipeline;
+    }
+
+    const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+    RenderPipelineBase::GetAttributeLocationsUsed() const {
+        ASSERT(!IsError());
+        return mAttributeLocationsUsed;
+    }
+
+    const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
+        VertexAttributeLocation location) const {
+        ASSERT(!IsError());
+        ASSERT(mAttributeLocationsUsed[location]);
+        return mAttributeInfos[location];
+    }
+
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+    RenderPipelineBase::GetVertexBufferSlotsUsed() const {
+        ASSERT(!IsError());
+        return mVertexBufferSlotsUsed;
+    }
+
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+    RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
+        ASSERT(!IsError());
+        return mVertexBufferSlotsUsedAsVertexBuffer;
+    }
+
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+    RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
+        ASSERT(!IsError());
+        return mVertexBufferSlotsUsedAsInstanceBuffer;
+    }
+
+    const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
+        ASSERT(!IsError());
+        ASSERT(mVertexBufferSlotsUsed[slot]);
+        return mVertexBufferInfos[slot];
+    }
+
+    uint32_t RenderPipelineBase::GetVertexBufferCount() const {
+        ASSERT(!IsError());
+        return mVertexBufferCount;
+    }
+
+    const ColorTargetState* RenderPipelineBase::GetColorTargetState(
+        ColorAttachmentIndex attachmentSlot) const {
+        ASSERT(!IsError());
+        ASSERT(attachmentSlot < mTargets.size());
+        return &mTargets[attachmentSlot];
+    }
+
+    const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
+        ASSERT(!IsError());
+        return &mDepthStencil;
+    }
+
+    wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
+        ASSERT(!IsError());
+        return mPrimitive.topology;
+    }
+
+    wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
+        ASSERT(!IsError());
+        return mPrimitive.stripIndexFormat;
+    }
+
+    wgpu::CullMode RenderPipelineBase::GetCullMode() const {
+        ASSERT(!IsError());
+        return mPrimitive.cullMode;
+    }
+
+    wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
+        ASSERT(!IsError());
+        return mPrimitive.frontFace;
+    }
+
+    bool RenderPipelineBase::IsDepthBiasEnabled() const {
+        ASSERT(!IsError());
+        return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
+    }
+
+    int32_t RenderPipelineBase::GetDepthBias() const {
+        ASSERT(!IsError());
+        return mDepthStencil.depthBias;
+    }
+
+    float RenderPipelineBase::GetDepthBiasSlopeScale() const {
+        ASSERT(!IsError());
+        return mDepthStencil.depthBiasSlopeScale;
+    }
+
+    float RenderPipelineBase::GetDepthBiasClamp() const {
+        ASSERT(!IsError());
+        return mDepthStencil.depthBiasClamp;
+    }
+
+    bool RenderPipelineBase::ShouldClampDepth() const {
+        ASSERT(!IsError());
+        return mClampDepth;
+    }
+
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+    RenderPipelineBase::GetColorAttachmentsMask() const {
+        ASSERT(!IsError());
+        return mAttachmentState->GetColorAttachmentsMask();
+    }
+
+    bool RenderPipelineBase::HasDepthStencilAttachment() const {
+        ASSERT(!IsError());
+        return mAttachmentState->HasDepthStencilAttachment();
+    }
+
+    wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
+        ColorAttachmentIndex attachment) const {
+        ASSERT(!IsError());
+        return mTargets[attachment].format;
+    }
+
+    wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
+        ASSERT(!IsError());
+        ASSERT(mAttachmentState->HasDepthStencilAttachment());
+        return mDepthStencil.format;
+    }
+
+    uint32_t RenderPipelineBase::GetSampleCount() const {
+        ASSERT(!IsError());
+        return mAttachmentState->GetSampleCount();
+    }
+
+    uint32_t RenderPipelineBase::GetSampleMask() const {
+        ASSERT(!IsError());
+        return mMultisample.mask;
+    }
+
+    bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
+        ASSERT(!IsError());
+        return mMultisample.alphaToCoverageEnabled;
+    }
+
+    const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
+        ASSERT(!IsError());
+
+        return mAttachmentState.Get();
+    }
+
+    bool RenderPipelineBase::WritesDepth() const {
+        ASSERT(!IsError());
+
+        return mWritesDepth;
+    }
+
+    bool RenderPipelineBase::WritesStencil() const {
+        ASSERT(!IsError());
+
+        return mWritesStencil;
+    }
+
+    size_t RenderPipelineBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+
+        // Record modules and layout
+        recorder.Record(PipelineBase::ComputeContentHash());
+
+        // Hierarchically record the attachment state.
+        // It contains the attachments set, texture formats, and sample count.
+        recorder.Record(mAttachmentState->GetContentHash());
+
+        // Record attachments
+        for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+            const ColorTargetState& desc = *GetColorTargetState(i);
+            recorder.Record(desc.writeMask);
+            if (desc.blend != nullptr) {
+                recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
+                                desc.blend->color.dstFactor);
+                recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
+                                desc.blend->alpha.dstFactor);
+            }
+        }
+
+        if (mAttachmentState->HasDepthStencilAttachment()) {
+            const DepthStencilState& desc = mDepthStencil;
+            recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
+            recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
+            recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
+                            desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
+            recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
+                            desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
+            recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
+        }
+
+        // Record vertex state
+        recorder.Record(mAttributeLocationsUsed);
+        for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
+            const VertexAttributeInfo& desc = GetAttribute(location);
+            recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
+        }
+
+        recorder.Record(mVertexBufferSlotsUsed);
+        for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
+            const VertexBufferInfo& desc = GetVertexBuffer(slot);
+            recorder.Record(desc.arrayStride, desc.stepMode);
+        }
+
+        // Record primitive state
+        recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
+                        mPrimitive.cullMode, mClampDepth);
+
+        // Record multisample state
+        // Sample count hashed as part of the attachment state
+        recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
+
+        return recorder.GetContentHash();
+    }
+
+    bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
+                                                      const RenderPipelineBase* b) const {
+        // Check the layout and shader stages.
+        if (!PipelineBase::EqualForCache(a, b)) {
+            return false;
+        }
+
+        // Check the attachment state.
+        // It contains the attachments set, texture formats, and sample count.
+        if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
+            return false;
+        }
+
+        if (a->mAttachmentState.Get() != nullptr) {
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
+                const ColorTargetState& descA = *a->GetColorTargetState(i);
+                const ColorTargetState& descB = *b->GetColorTargetState(i);
+                if (descA.writeMask != descB.writeMask) {
+                    return false;
+                }
+                if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
+                    return false;
+                }
+                if (descA.blend != nullptr) {
+                    if (descA.blend->color.operation != descB.blend->color.operation ||
+                        descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
+                        descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
+                        return false;
+                    }
+                    if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
+                        descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
+                        descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
+                        return false;
+                    }
+                }
+            }
+
+            // Check depth/stencil state
+            if (a->mAttachmentState->HasDepthStencilAttachment()) {
+                const DepthStencilState& stateA = a->mDepthStencil;
+                const DepthStencilState& stateB = b->mDepthStencil;
+
+                ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
+                ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
+                ASSERT(!std::isnan(stateA.depthBiasClamp));
+                ASSERT(!std::isnan(stateB.depthBiasClamp));
+
+                if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
+                    stateA.depthCompare != stateB.depthCompare ||
+                    stateA.depthBias != stateB.depthBias ||
+                    stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
+                    stateA.depthBiasClamp != stateB.depthBiasClamp) {
+                    return false;
+                }
+                if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
+                    stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
+                    stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
+                    stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
+                    return false;
+                }
+                if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
+                    stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
+                    stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
+                    stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
+                    return false;
+                }
+                if (stateA.stencilReadMask != stateB.stencilReadMask ||
+                    stateA.stencilWriteMask != stateB.stencilWriteMask) {
+                    return false;
+                }
+            }
+        }
+
+        // Check vertex state
+        if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
+            return false;
+        }
+
+        for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
+            const VertexAttributeInfo& descA = a->GetAttribute(loc);
+            const VertexAttributeInfo& descB = b->GetAttribute(loc);
+            if (descA.shaderLocation != descB.shaderLocation ||
+                descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
+                descA.format != descB.format) {
+                return false;
+            }
+        }
+
+        if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+            return false;
+        }
+
+        for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
+            const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
+            const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
+            if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
+                return false;
+            }
+        }
+
+        // Check primitive state
+        {
+            const PrimitiveState& stateA = a->mPrimitive;
+            const PrimitiveState& stateB = b->mPrimitive;
+            if (stateA.topology != stateB.topology ||
+                stateA.stripIndexFormat != stateB.stripIndexFormat ||
+                stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
+                a->mClampDepth != b->mClampDepth) {
+                return false;
+            }
+        }
+
+        // Check multisample state
+        {
+            const MultisampleState& stateA = a->mMultisample;
+            const MultisampleState& stateB = b->mMultisample;
+            // Sample count already checked as part of the attachment state.
+            if (stateA.mask != stateB.mask ||
+                stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/RenderPipeline.h b/src/dawn/native/RenderPipeline.h
new file mode 100644
index 0000000..429f2a9
--- /dev/null
+++ b/src/dawn/native/RenderPipeline.h
@@ -0,0 +1,147 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERPIPELINE_H_
+#define DAWNNATIVE_RENDERPIPELINE_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+                                                const RenderPipelineDescriptor* descriptor);
+
+    std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+        DeviceBase* device,
+        const RenderPipelineDescriptor* descriptor);
+
+    size_t IndexFormatSize(wgpu::IndexFormat format);
+
+    bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
+
+    bool StencilTestEnabled(const DepthStencilState* depthStencil);
+
+    struct VertexAttributeInfo {
+        wgpu::VertexFormat format;
+        uint64_t offset;
+        VertexAttributeLocation shaderLocation;
+        VertexBufferSlot vertexBufferSlot;
+    };
+
+    struct VertexBufferInfo {
+        uint64_t arrayStride;
+        wgpu::VertexStepMode stepMode;
+        uint16_t usedBytesInStride;
+        // As indicated in the spec, the lastStride is max(attribute.offset +
+        // sizeof(attribute.format)) for each attribute in the buffer[slot]
+        uint64_t lastStride;
+    };
+
+    class RenderPipelineBase : public PipelineBase {
+      public:
+        RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+        ~RenderPipelineBase() override;
+
+        static RenderPipelineBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+        GetAttributeLocationsUsed() const;
+        const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+        GetVertexBufferSlotsUsedAsVertexBuffer() const;
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+        GetVertexBufferSlotsUsedAsInstanceBuffer() const;
+        const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
+        uint32_t GetVertexBufferCount() const;
+
+        const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
+        const DepthStencilState* GetDepthStencilState() const;
+        wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+        wgpu::IndexFormat GetStripIndexFormat() const;
+        wgpu::CullMode GetCullMode() const;
+        wgpu::FrontFace GetFrontFace() const;
+        bool IsDepthBiasEnabled() const;
+        int32_t GetDepthBias() const;
+        float GetDepthBiasSlopeScale() const;
+        float GetDepthBiasClamp() const;
+        bool ShouldClampDepth() const;
+
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+        bool HasDepthStencilAttachment() const;
+        wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
+        wgpu::TextureFormat GetDepthStencilFormat() const;
+        uint32_t GetSampleCount() const;
+        uint32_t GetSampleMask() const;
+        bool IsAlphaToCoverageEnabled() const;
+        bool WritesDepth() const;
+        bool WritesStencil() const;
+
+        const AttachmentState* GetAttachmentState() const;
+
+        // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
+        size_t ComputeContentHash() override;
+
+        struct EqualityFunc {
+            bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
+        };
+
+      protected:
+        // Constructor used only for mocking and testing.
+        RenderPipelineBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+      private:
+        RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // Vertex state
+        uint32_t mVertexBufferCount;
+        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
+        ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
+            mAttributeInfos;
+        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
+        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
+        ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
+
+        // Attachments
+        Ref<AttachmentState> mAttachmentState;
+        ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
+        ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
+
+        // Other state
+        PrimitiveState mPrimitive;
+        DepthStencilState mDepthStencil;
+        MultisampleState mMultisample;
+        bool mClampDepth = false;
+        bool mWritesDepth = false;
+        bool mWritesStencil = false;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RENDERPIPELINE_H_
diff --git a/src/dawn/native/ResourceHeap.h b/src/dawn/native/ResourceHeap.h
new file mode 100644
index 0000000..cb45c88
--- /dev/null
+++ b/src/dawn/native/ResourceHeap.h
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEHEAP_H_
+#define DAWNNATIVE_RESOURCEHEAP_H_
+
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+
+    // Wrapper for a resource backed by a heap.
+    class ResourceHeapBase {
+      public:
+        ResourceHeapBase() = default;
+        virtual ~ResourceHeapBase() = default;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RESOURCEHEAP_H_
diff --git a/src/dawn/native/ResourceHeapAllocator.h b/src/dawn/native/ResourceHeapAllocator.h
new file mode 100644
index 0000000..3c86154
--- /dev/null
+++ b/src/dawn/native/ResourceHeapAllocator.h
@@ -0,0 +1,37 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
+#define DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceHeap.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+    // Interface for backend allocators that create memory heaps resoruces can be suballocated in.
+    class ResourceHeapAllocator {
+      public:
+        virtual ~ResourceHeapAllocator() = default;
+
+        virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+            uint64_t size) = 0;
+        virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
diff --git a/src/dawn/native/ResourceMemoryAllocation.cpp b/src/dawn/native/ResourceMemoryAllocation.cpp
new file mode 100644
index 0000000..8848c18
--- /dev/null
+++ b/src/dawn/native/ResourceMemoryAllocation.cpp
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+    ResourceMemoryAllocation::ResourceMemoryAllocation()
+        : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
+    }
+
+    ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
+                                                       uint64_t offset,
+                                                       ResourceHeapBase* resourceHeap,
+                                                       uint8_t* mappedPointer)
+        : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
+    }
+
+    ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
+        ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+        return mResourceHeap;
+    }
+
+    uint64_t ResourceMemoryAllocation::GetOffset() const {
+        ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+        return mOffset;
+    }
+
+    AllocationInfo ResourceMemoryAllocation::GetInfo() const {
+        return mInfo;
+    }
+
+    uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
+        return mMappedPointer;
+    }
+
+    void ResourceMemoryAllocation::Invalidate() {
+        mResourceHeap = nullptr;
+        mInfo = {};
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/ResourceMemoryAllocation.h b/src/dawn/native/ResourceMemoryAllocation.h
new file mode 100644
index 0000000..307d90a
--- /dev/null
+++ b/src/dawn/native/ResourceMemoryAllocation.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
+#define DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
+
+#include <cstdint>
+
+namespace dawn::native {
+
+    class ResourceHeapBase;
+
+    // Allocation method determines how memory was sub-divided.
+    // Used by the device to get the allocator that was responsible for the allocation.
+    enum class AllocationMethod {
+
+        // Memory not sub-divided.
+        kDirect,
+
+        // Memory sub-divided using one or more blocks of various sizes.
+        kSubAllocated,
+
+        // Memory was allocated outside of Dawn.
+        kExternal,
+
+        // Memory not allocated or freed.
+        kInvalid
+    };
+
+    // Metadata that describes how the allocation was allocated.
+    struct AllocationInfo {
+        // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
+        // The block offset is within the entire allocator memory range and only required by the
+        // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
+        // allocation offset is always local to the memory.
+        uint64_t mBlockOffset = 0;
+
+        AllocationMethod mMethod = AllocationMethod::kInvalid;
+    };
+
+    // Handle into a resource heap pool.
+    class ResourceMemoryAllocation {
+      public:
+        ResourceMemoryAllocation();
+        ResourceMemoryAllocation(const AllocationInfo& info,
+                                 uint64_t offset,
+                                 ResourceHeapBase* resourceHeap,
+                                 uint8_t* mappedPointer = nullptr);
+        virtual ~ResourceMemoryAllocation() = default;
+
+        ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
+        ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
+
+        ResourceHeapBase* GetResourceHeap() const;
+        uint64_t GetOffset() const;
+        uint8_t* GetMappedPointer() const;
+        AllocationInfo GetInfo() const;
+
+        virtual void Invalidate();
+
+      private:
+        AllocationInfo mInfo;
+        uint64_t mOffset;
+        ResourceHeapBase* mResourceHeap;
+        uint8_t* mMappedPointer;
+    };
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
diff --git a/src/dawn/native/RingBufferAllocator.cpp b/src/dawn/native/RingBufferAllocator.cpp
new file mode 100644
index 0000000..e1dc7ae
--- /dev/null
+++ b/src/dawn/native/RingBufferAllocator.cpp
@@ -0,0 +1,121 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RingBufferAllocator.h"
+
+// Note: Current RingBufferAllocator implementation uses two indices (start and end) to implement a
+// circular queue. However, this approach defines a full queue when one element is still unused.
+//
+// For example, [E,E,E,E] would be equivelent to [U,U,U,U].
+//                 ^                                ^
+//                S=E=1                            S=E=1
+//
+// The latter case is eliminated by counting used bytes >= capacity. This definition prevents
+// (the last) byte and requires an extra variable to count used bytes. Alternatively, we could use
+// only two indices that keep increasing (unbounded) but can be still indexed using bit masks.
+// However, this 1) requires the size to always be a power-of-two and 2) remove tests that check
+// used bytes.
+namespace dawn::native {
+
+    RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+    }
+
+    void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
+        // Reclaim memory from previously recorded blocks.
+        for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
+            mUsedStartOffset = request.endOffset;
+            mUsedSize -= request.size;
+        }
+
+        // Dequeue previously recorded requests.
+        mInflightRequests.ClearUpTo(lastCompletedSerial);
+    }
+
+    uint64_t RingBufferAllocator::GetSize() const {
+        return mMaxBlockSize;
+    }
+
+    uint64_t RingBufferAllocator::GetUsedSize() const {
+        return mUsedSize;
+    }
+
+    bool RingBufferAllocator::Empty() const {
+        return mInflightRequests.Empty();
+    }
+
+    // Sub-allocate the ring-buffer by requesting a chunk of the specified size.
+    // This is a serial-based resource scheme, the life-span of resources (and the allocations) get
+    // tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
+    // completed up to a given serial. Each sub-allocation request is tracked in the serial offset
+    // queue, which identifies an existing (or new) frames-worth of resources. Internally, the
+    // ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
+    // in FIFO order as older frames would free resources before newer ones.
+    uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
+        // Check if the buffer is full by comparing the used size.
+        // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
+        // subsequent sub-alloc could fail where the used size was previously adjusted to include
+        // the wasted.
+        if (mUsedSize >= mMaxBlockSize) {
+            return kInvalidOffset;
+        }
+
+        // Ensure adding allocationSize does not overflow.
+        const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
+        if (allocationSize > remainingSize) {
+            return kInvalidOffset;
+        }
+
+        uint64_t startOffset = kInvalidOffset;
+
+        // Check if the buffer is NOT split (i.e sub-alloc on ends)
+        if (mUsedStartOffset <= mUsedEndOffset) {
+            // Order is important (try to sub-alloc at end first).
+            // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
+            // wrapped).
+            if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
+                startOffset = mUsedEndOffset;
+                mUsedEndOffset += allocationSize;
+                mUsedSize += allocationSize;
+                mCurrentRequestSize += allocationSize;
+            } else if (allocationSize <= mUsedStartOffset) {  // Try to sub-alloc at front.
+                // Count the space at the end so that a subsequent
+                // sub-alloc cannot not succeed when the buffer is full.
+                const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
+
+                startOffset = 0;
+                mUsedEndOffset = allocationSize;
+                mUsedSize += requestSize;
+                mCurrentRequestSize += requestSize;
+            }
+        } else if (mUsedEndOffset + allocationSize <=
+                   mUsedStartOffset) {  // Otherwise, buffer is split where sub-alloc must be
+                                        // in-between.
+            startOffset = mUsedEndOffset;
+            mUsedEndOffset += allocationSize;
+            mUsedSize += allocationSize;
+            mCurrentRequestSize += allocationSize;
+        }
+
+        if (startOffset != kInvalidOffset) {
+            Request request;
+            request.endOffset = mUsedEndOffset;
+            request.size = mCurrentRequestSize;
+
+            mInflightRequests.Enqueue(std::move(request), serial);
+            mCurrentRequestSize = 0;  // reset
+        }
+
+        return startOffset;
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/RingBufferAllocator.h b/src/dawn/native/RingBufferAllocator.h
new file mode 100644
index 0000000..8049470
--- /dev/null
+++ b/src/dawn/native/RingBufferAllocator.h
@@ -0,0 +1,63 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RINGBUFFERALLOCATOR_H_
+#define DAWNNATIVE_RINGBUFFERALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/IntegerTypes.h"
+
+#include <limits>
+#include <memory>
+
+// RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
+namespace dawn::native {
+
+    class RingBufferAllocator {
+      public:
+        RingBufferAllocator() = default;
+        RingBufferAllocator(uint64_t maxSize);
+        ~RingBufferAllocator() = default;
+        RingBufferAllocator(const RingBufferAllocator&) = default;
+        RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
+
+        uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
+        void Deallocate(ExecutionSerial lastCompletedSerial);
+
+        uint64_t GetSize() const;
+        bool Empty() const;
+        uint64_t GetUsedSize() const;
+
+        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+      private:
+        struct Request {
+            uint64_t endOffset;
+            uint64_t size;
+        };
+
+        SerialQueue<ExecutionSerial, Request>
+            mInflightRequests;  // Queue of the recorded sub-alloc requests
+                                // (e.g. frame of resources).
+
+        uint64_t mUsedEndOffset = 0;    // Tail of used sub-alloc requests (in bytes).
+        uint64_t mUsedStartOffset = 0;  // Head of used sub-alloc requests (in bytes).
+        uint64_t mMaxBlockSize = 0;     // Max size of the ring buffer (in bytes).
+        uint64_t mUsedSize = 0;  // Size of the sub-alloc requests (in bytes) of the ring buffer.
+        uint64_t mCurrentRequestSize =
+            0;  // Size of the sub-alloc requests (in bytes) of the current serial.
+    };
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/src/dawn/native/Sampler.cpp b/src/dawn/native/Sampler.cpp
new file mode 100644
index 0000000..ffd8a72
--- /dev/null
+++ b/src/dawn/native/Sampler.cpp
@@ -0,0 +1,153 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cmath>
+
+namespace dawn::native {
+
+    MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+        DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
+                        "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
+                        descriptor->lodMaxClamp);
+
+        DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
+                        "LOD clamp bounds [%f, %f] contain contain a negative number.",
+                        descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+        DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
+                        "LOD min clamp (%f) is larger than the max clamp (%f).",
+                        descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+        if (descriptor->maxAnisotropy > 1) {
+            DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
+                                descriptor->magFilter != wgpu::FilterMode::Linear ||
+                                descriptor->mipmapFilter != wgpu::FilterMode::Linear,
+                            "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
+                            "while using anisotropic filter (maxAnisotropy is %f)",
+                            descriptor->magFilter, descriptor->minFilter, descriptor->mipmapFilter,
+                            wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
+        } else if (descriptor->maxAnisotropy == 0u) {
+            return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
+                                                descriptor->maxAnisotropy);
+        }
+
+        DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
+        DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
+        DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
+        DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
+        DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
+        DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
+
+        // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
+        // SamplerDescriptor where it is a special value that means the sampler is not a
+        // comparison-sampler.
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            DAWN_TRY(ValidateCompareFunction(descriptor->compare));
+        }
+
+        return {};
+    }
+
+    // SamplerBase
+
+    SamplerBase::SamplerBase(DeviceBase* device,
+                             const SamplerDescriptor* descriptor,
+                             ApiObjectBase::UntrackedByDeviceTag tag)
+        : ApiObjectBase(device, descriptor->label),
+          mAddressModeU(descriptor->addressModeU),
+          mAddressModeV(descriptor->addressModeV),
+          mAddressModeW(descriptor->addressModeW),
+          mMagFilter(descriptor->magFilter),
+          mMinFilter(descriptor->minFilter),
+          mMipmapFilter(descriptor->mipmapFilter),
+          mLodMinClamp(descriptor->lodMinClamp),
+          mLodMaxClamp(descriptor->lodMaxClamp),
+          mCompareFunction(descriptor->compare),
+          mMaxAnisotropy(descriptor->maxAnisotropy) {
+    }
+
+    SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
+        : SamplerBase(device, descriptor, kUntrackedByDevice) {
+        TrackInDevice();
+    }
+
+    SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    SamplerBase::~SamplerBase() = default;
+
+    void SamplerBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheSampler(this);
+        }
+    }
+
+    // static
+    SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
+        return new SamplerBase(device, ObjectBase::kError);
+    }
+
+    ObjectType SamplerBase::GetType() const {
+        return ObjectType::Sampler;
+    }
+
+    bool SamplerBase::IsComparison() const {
+        return mCompareFunction != wgpu::CompareFunction::Undefined;
+    }
+
+    bool SamplerBase::IsFiltering() const {
+        return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
+               mMipmapFilter == wgpu::FilterMode::Linear;
+    }
+
+    size_t SamplerBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
+                        mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction,
+                        mMaxAnisotropy);
+        return recorder.GetContentHash();
+    }
+
+    bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
+        if (a == b) {
+            return true;
+        }
+
+        ASSERT(!std::isnan(a->mLodMinClamp));
+        ASSERT(!std::isnan(b->mLodMinClamp));
+        ASSERT(!std::isnan(a->mLodMaxClamp));
+        ASSERT(!std::isnan(b->mLodMaxClamp));
+
+        return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
+               a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
+               a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
+               a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
+               a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Sampler.h b/src/dawn/native/Sampler.h
new file mode 100644
index 0000000..e21b52c
--- /dev/null
+++ b/src/dawn/native/Sampler.h
@@ -0,0 +1,80 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SAMPLER_H_
+#define DAWNNATIVE_SAMPLER_H_
+
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
+
+    class SamplerBase : public ApiObjectBase, public CachedObject {
+      public:
+        SamplerBase(DeviceBase* device,
+                    const SamplerDescriptor* descriptor,
+                    ApiObjectBase::UntrackedByDeviceTag tag);
+        SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
+        ~SamplerBase() override;
+
+        static SamplerBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        bool IsComparison() const;
+        bool IsFiltering() const;
+
+        // Functions necessary for the unordered_set<SamplerBase*>-based cache.
+        size_t ComputeContentHash() override;
+
+        struct EqualityFunc {
+            bool operator()(const SamplerBase* a, const SamplerBase* b) const;
+        };
+
+        uint16_t GetMaxAnisotropy() const {
+            return mMaxAnisotropy;
+        }
+
+      protected:
+        // Constructor used only for mocking and testing.
+        SamplerBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+      private:
+        SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
+        wgpu::AddressMode mAddressModeU;
+        wgpu::AddressMode mAddressModeV;
+        wgpu::AddressMode mAddressModeW;
+        wgpu::FilterMode mMagFilter;
+        wgpu::FilterMode mMinFilter;
+        wgpu::FilterMode mMipmapFilter;
+        float mLodMinClamp;
+        float mLodMaxClamp;
+        wgpu::CompareFunction mCompareFunction;
+        uint16_t mMaxAnisotropy;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SAMPLER_H_
diff --git a/src/dawn/native/ScratchBuffer.cpp b/src/dawn/native/ScratchBuffer.cpp
new file mode 100644
index 0000000..be53683
--- /dev/null
+++ b/src/dawn/native/ScratchBuffer.cpp
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ScratchBuffer.h"
+
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
+        : mDevice(device), mUsage(usage) {
+    }
+
+    ScratchBuffer::~ScratchBuffer() = default;
+
+    void ScratchBuffer::Reset() {
+        mBuffer = nullptr;
+    }
+
+    MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
+        if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
+            BufferDescriptor descriptor;
+            descriptor.size = capacity;
+            descriptor.usage = mUsage;
+            DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
+            mBuffer->SetIsDataInitialized();
+        }
+        return {};
+    }
+
+    BufferBase* ScratchBuffer::GetBuffer() const {
+        ASSERT(mBuffer.Get() != nullptr);
+        return mBuffer.Get();
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ScratchBuffer.h b/src/dawn/native/ScratchBuffer.h
new file mode 100644
index 0000000..7845022
--- /dev/null
+++ b/src/dawn/native/ScratchBuffer.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SCRATCHBUFFER_H_
+#define DAWNNATIVE_SCRATCHBUFFER_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Buffer.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
+    // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
+    // be careful not to exposed uninitialized bytes to client shaders.
+    class ScratchBuffer {
+      public:
+        // Note that this object does not retain a reference to `device`, so `device` MUST outlive
+        // this object.
+        ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
+        ~ScratchBuffer();
+
+        // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
+        // fresh buffer.
+        void Reset();
+
+        // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
+        // `capacity` bytes of storage.
+        MaybeError EnsureCapacity(uint64_t capacity);
+
+        BufferBase* GetBuffer() const;
+
+      private:
+        DeviceBase* const mDevice;
+        const wgpu::BufferUsage mUsage;
+        Ref<BufferBase> mBuffer;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SCRATCHBUFFER_H_
diff --git a/src/dawn/native/ShaderModule.cpp b/src/dawn/native/ShaderModule.cpp
new file mode 100644
index 0000000..779e6ae
--- /dev/null
+++ b/src/dawn/native/ShaderModule.cpp
@@ -0,0 +1,1329 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ShaderModule.h"
+
+#include "absl/strings/str_format.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/TintUtils.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native {
+
+    namespace {
+
+        tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return tint::transform::VertexFormat::kUint8x2;
+                case wgpu::VertexFormat::Uint8x4:
+                    return tint::transform::VertexFormat::kUint8x4;
+                case wgpu::VertexFormat::Sint8x2:
+                    return tint::transform::VertexFormat::kSint8x2;
+                case wgpu::VertexFormat::Sint8x4:
+                    return tint::transform::VertexFormat::kSint8x4;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return tint::transform::VertexFormat::kUnorm8x2;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return tint::transform::VertexFormat::kUnorm8x4;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return tint::transform::VertexFormat::kSnorm8x2;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return tint::transform::VertexFormat::kSnorm8x4;
+                case wgpu::VertexFormat::Uint16x2:
+                    return tint::transform::VertexFormat::kUint16x2;
+                case wgpu::VertexFormat::Uint16x4:
+                    return tint::transform::VertexFormat::kUint16x4;
+                case wgpu::VertexFormat::Sint16x2:
+                    return tint::transform::VertexFormat::kSint16x2;
+                case wgpu::VertexFormat::Sint16x4:
+                    return tint::transform::VertexFormat::kSint16x4;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return tint::transform::VertexFormat::kUnorm16x2;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return tint::transform::VertexFormat::kUnorm16x4;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return tint::transform::VertexFormat::kSnorm16x2;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return tint::transform::VertexFormat::kSnorm16x4;
+                case wgpu::VertexFormat::Float16x2:
+                    return tint::transform::VertexFormat::kFloat16x2;
+                case wgpu::VertexFormat::Float16x4:
+                    return tint::transform::VertexFormat::kFloat16x4;
+                case wgpu::VertexFormat::Float32:
+                    return tint::transform::VertexFormat::kFloat32;
+                case wgpu::VertexFormat::Float32x2:
+                    return tint::transform::VertexFormat::kFloat32x2;
+                case wgpu::VertexFormat::Float32x3:
+                    return tint::transform::VertexFormat::kFloat32x3;
+                case wgpu::VertexFormat::Float32x4:
+                    return tint::transform::VertexFormat::kFloat32x4;
+                case wgpu::VertexFormat::Uint32:
+                    return tint::transform::VertexFormat::kUint32;
+                case wgpu::VertexFormat::Uint32x2:
+                    return tint::transform::VertexFormat::kUint32x2;
+                case wgpu::VertexFormat::Uint32x3:
+                    return tint::transform::VertexFormat::kUint32x3;
+                case wgpu::VertexFormat::Uint32x4:
+                    return tint::transform::VertexFormat::kUint32x4;
+                case wgpu::VertexFormat::Sint32:
+                    return tint::transform::VertexFormat::kSint32;
+                case wgpu::VertexFormat::Sint32x2:
+                    return tint::transform::VertexFormat::kSint32x2;
+                case wgpu::VertexFormat::Sint32x3:
+                    return tint::transform::VertexFormat::kSint32x3;
+                case wgpu::VertexFormat::Sint32x4:
+                    return tint::transform::VertexFormat::kSint32x4;
+
+                case wgpu::VertexFormat::Undefined:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
+            switch (mode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return tint::transform::VertexStepMode::kVertex;
+                case wgpu::VertexStepMode::Instance:
+                    return tint::transform::VertexStepMode::kInstance;
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
+            tint::ast::PipelineStage stage) {
+            switch (stage) {
+                case tint::ast::PipelineStage::kVertex:
+                    return SingleShaderStage::Vertex;
+                case tint::ast::PipelineStage::kFragment:
+                    return SingleShaderStage::Fragment;
+                case tint::ast::PipelineStage::kCompute:
+                    return SingleShaderStage::Compute;
+                case tint::ast::PipelineStage::kNone:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        BindingInfoType TintResourceTypeToBindingInfoType(
+            tint::inspector::ResourceBinding::ResourceType type) {
+            switch (type) {
+                case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+                case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+                case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+                    return BindingInfoType::Buffer;
+                case tint::inspector::ResourceBinding::ResourceType::kSampler:
+                case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+                    return BindingInfoType::Sampler;
+                case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+                case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+                case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+                case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+                    return BindingInfoType::Texture;
+                case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+                    return BindingInfoType::StorageTexture;
+                case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+                    return BindingInfoType::ExternalTexture;
+
+                default:
+                    UNREACHABLE();
+                    return BindingInfoType::Buffer;
+            }
+        }
+
+        wgpu::TextureFormat TintImageFormatToTextureFormat(
+            tint::inspector::ResourceBinding::TexelFormat format) {
+            switch (format) {
+                case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
+                    return wgpu::TextureFormat::R32Uint;
+                case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
+                    return wgpu::TextureFormat::R32Sint;
+                case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
+                    return wgpu::TextureFormat::R32Float;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
+                    return wgpu::TextureFormat::RGBA8Unorm;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
+                    return wgpu::TextureFormat::RGBA8Snorm;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
+                    return wgpu::TextureFormat::RGBA8Uint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
+                    return wgpu::TextureFormat::RGBA8Sint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
+                    return wgpu::TextureFormat::RG32Uint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
+                    return wgpu::TextureFormat::RG32Sint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
+                    return wgpu::TextureFormat::RG32Float;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
+                    return wgpu::TextureFormat::RGBA16Uint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
+                    return wgpu::TextureFormat::RGBA16Sint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
+                    return wgpu::TextureFormat::RGBA16Float;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
+                    return wgpu::TextureFormat::RGBA32Uint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
+                    return wgpu::TextureFormat::RGBA32Sint;
+                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
+                    return wgpu::TextureFormat::RGBA32Float;
+                case tint::inspector::ResourceBinding::TexelFormat::kNone:
+                    return wgpu::TextureFormat::Undefined;
+
+                default:
+                    UNREACHABLE();
+                    return wgpu::TextureFormat::Undefined;
+            }
+        }
+
+        wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
+            tint::inspector::ResourceBinding::TextureDimension dim) {
+            switch (dim) {
+                case tint::inspector::ResourceBinding::TextureDimension::k1d:
+                    return wgpu::TextureViewDimension::e1D;
+                case tint::inspector::ResourceBinding::TextureDimension::k2d:
+                    return wgpu::TextureViewDimension::e2D;
+                case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+                    return wgpu::TextureViewDimension::e2DArray;
+                case tint::inspector::ResourceBinding::TextureDimension::k3d:
+                    return wgpu::TextureViewDimension::e3D;
+                case tint::inspector::ResourceBinding::TextureDimension::kCube:
+                    return wgpu::TextureViewDimension::Cube;
+                case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+                    return wgpu::TextureViewDimension::CubeArray;
+                case tint::inspector::ResourceBinding::TextureDimension::kNone:
+                    return wgpu::TextureViewDimension::Undefined;
+            }
+            UNREACHABLE();
+        }
+
+        SampleTypeBit TintSampledKindToSampleTypeBit(
+            tint::inspector::ResourceBinding::SampledKind s) {
+            switch (s) {
+                case tint::inspector::ResourceBinding::SampledKind::kSInt:
+                    return SampleTypeBit::Sint;
+                case tint::inspector::ResourceBinding::SampledKind::kUInt:
+                    return SampleTypeBit::Uint;
+                case tint::inspector::ResourceBinding::SampledKind::kFloat:
+                    return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+                case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+                    return SampleTypeBit::None;
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
+            tint::inspector::ComponentType type) {
+            switch (type) {
+                case tint::inspector::ComponentType::kFloat:
+                    return wgpu::TextureComponentType::Float;
+                case tint::inspector::ComponentType::kSInt:
+                    return wgpu::TextureComponentType::Sint;
+                case tint::inspector::ComponentType::kUInt:
+                    return wgpu::TextureComponentType::Uint;
+                case tint::inspector::ComponentType::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' component type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
+            tint::inspector::ComponentType type) {
+            switch (type) {
+                case tint::inspector::ComponentType::kFloat:
+                    return VertexFormatBaseType::Float;
+                case tint::inspector::ComponentType::kSInt:
+                    return VertexFormatBaseType::Sint;
+                case tint::inspector::ComponentType::kUInt:
+                    return VertexFormatBaseType::Uint;
+                case tint::inspector::ComponentType::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' component type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
+            tint::inspector::ResourceBinding::ResourceType resource_type) {
+            switch (resource_type) {
+                case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+                    return wgpu::BufferBindingType::Uniform;
+                case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+                    return wgpu::BufferBindingType::Storage;
+                case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+                    return wgpu::BufferBindingType::ReadOnlyStorage;
+                default:
+                    return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
+            tint::inspector::ResourceBinding::ResourceType resource_type) {
+            switch (resource_type) {
+                case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+                    return wgpu::StorageTextureAccess::WriteOnly;
+                default:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert non-storage texture resource type");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
+            tint::inspector::ComponentType type) {
+            switch (type) {
+                case tint::inspector::ComponentType::kFloat:
+                    return InterStageComponentType::Float;
+                case tint::inspector::ComponentType::kSInt:
+                    return InterStageComponentType::Sint;
+                case tint::inspector::ComponentType::kUInt:
+                    return InterStageComponentType::Uint;
+                case tint::inspector::ComponentType::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' component type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
+            tint::inspector::CompositionType type) {
+            switch (type) {
+                case tint::inspector::CompositionType::kScalar:
+                    return 1u;
+                case tint::inspector::CompositionType::kVec2:
+                    return 2u;
+                case tint::inspector::CompositionType::kVec3:
+                    return 3u;
+                case tint::inspector::CompositionType::kVec4:
+                    return 4u;
+                case tint::inspector::CompositionType::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempt to convert 'Unknown' composition type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
+            tint::inspector::InterpolationType type) {
+            switch (type) {
+                case tint::inspector::InterpolationType::kPerspective:
+                    return InterpolationType::Perspective;
+                case tint::inspector::InterpolationType::kLinear:
+                    return InterpolationType::Linear;
+                case tint::inspector::InterpolationType::kFlat:
+                    return InterpolationType::Flat;
+                case tint::inspector::InterpolationType::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' interpolation type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
+            tint::inspector::InterpolationSampling type) {
+            switch (type) {
+                case tint::inspector::InterpolationSampling::kNone:
+                    return InterpolationSampling::None;
+                case tint::inspector::InterpolationSampling::kCenter:
+                    return InterpolationSampling::Center;
+                case tint::inspector::InterpolationSampling::kCentroid:
+                    return InterpolationSampling::Centroid;
+                case tint::inspector::InterpolationSampling::kSample:
+                    return InterpolationSampling::Sample;
+                case tint::inspector::InterpolationSampling::kUnknown:
+                    return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' interpolation sampling type from Tint");
+            }
+            UNREACHABLE();
+        }
+
+        EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
+            tint::inspector::OverridableConstant::Type type) {
+            switch (type) {
+                case tint::inspector::OverridableConstant::Type::kBool:
+                    return EntryPointMetadata::OverridableConstant::Type::Boolean;
+                case tint::inspector::OverridableConstant::Type::kFloat32:
+                    return EntryPointMetadata::OverridableConstant::Type::Float32;
+                case tint::inspector::OverridableConstant::Type::kInt32:
+                    return EntryPointMetadata::OverridableConstant::Type::Int32;
+                case tint::inspector::OverridableConstant::Type::kUint32:
+                    return EntryPointMetadata::OverridableConstant::Type::Uint32;
+            }
+            UNREACHABLE();
+        }
+
+        ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
+                                               OwnedCompilationMessages* outMessages) {
+            tint::Program program = tint::reader::wgsl::Parse(file);
+            if (outMessages != nullptr) {
+                outMessages->AddMessages(program.Diagnostics());
+            }
+            if (!program.IsValid()) {
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
+                    program.Diagnostics().str(), file->content.data);
+            }
+
+            return std::move(program);
+        }
+
+        ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
+                                                OwnedCompilationMessages* outMessages) {
+            tint::Program program = tint::reader::spirv::Parse(spirv);
+            if (outMessages != nullptr) {
+                outMessages->AddMessages(program.Diagnostics());
+            }
+            if (!program.IsValid()) {
+                return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
+                                                    program.Diagnostics().str());
+            }
+
+            return std::move(program);
+        }
+
+        std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
+                                                         const BindGroupLayoutBase* layout) {
+            std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
+            uint32_t packedIdx = 0;
+
+            for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
+                 ++bindingIndex) {
+                const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+                if (bindingInfo.buffer.minBindingSize != 0) {
+                    // Skip bindings that have minimum buffer size set in the layout
+                    continue;
+                }
+
+                ASSERT(packedIdx < requiredBufferSizes.size());
+                const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
+                if (shaderInfo != shaderBindings.end()) {
+                    requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
+                } else {
+                    // We have to include buffers if they are included in the bind group's
+                    // packed vector. We don't actually need to check these at draw time, so
+                    // if this is a problem in the future we can optimize it further.
+                    requiredBufferSizes[packedIdx] = 0;
+                }
+                ++packedIdx;
+            }
+
+            return requiredBufferSizes;
+        }
+
+        MaybeError ValidateCompatibilityOfSingleBindingWithLayout(
+            const DeviceBase* device,
+            const BindGroupLayoutBase* layout,
+            SingleShaderStage entryPointStage,
+            BindingNumber bindingNumber,
+            const ShaderBindingInfo& shaderInfo) {
+            const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
+
+            // An external texture binding found in the shader will later be expanded into multiple
+            // bindings at compile time. This expansion will have already happened in the bgl - so
+            // the shader and bgl will always mismatch at this point. Expansion info is contained in
+            // the bgl object, so we can still verify the bgl used to have an external texture in
+            // the slot corresponding to the shader reflection.
+            if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
+                // If an external texture binding used to exist in the bgl, it will be found as a
+                // key in the ExternalTextureBindingExpansions map.
+                ExternalTextureBindingExpansionMap expansions =
+                    layout->GetExternalTextureBindingExpansionMap();
+                std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+                    expansions.find(bindingNumber);
+                // TODO(dawn:563): Provide info about the binding types.
+                DAWN_INVALID_IF(it == expansions.end(),
+                                "Binding type in the shader (texture_external) doesn't match the "
+                                "type in the layout.");
+
+                return {};
+            }
+
+            const auto& bindingIt = layoutBindings.find(bindingNumber);
+            DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.",
+                            layout);
+
+            BindingIndex bindingIndex(bindingIt->second);
+            const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+
+            // TODO(dawn:563): Provide info about the binding types.
+            DAWN_INVALID_IF(
+                layoutInfo.bindingType != shaderInfo.bindingType,
+                "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
+                "in the layout.");
+
+            ExternalTextureBindingExpansionMap expansions =
+                layout->GetExternalTextureBindingExpansionMap();
+            DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
+                            "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
+                            "match the type in the layout.");
+
+            // TODO(dawn:563): Provide info about the visibility.
+            DAWN_INVALID_IF(
+                (layoutInfo.visibility & StageBit(entryPointStage)) == 0,
+                "Entry point's stage is not in the binding visibility in the layout (%s)",
+                layoutInfo.visibility);
+
+            switch (layoutInfo.bindingType) {
+                case BindingInfoType::Texture: {
+                    DAWN_INVALID_IF(
+                        layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
+                        "Binding multisampled flag (%u) doesn't match the layout's multisampled "
+                        "flag (%u)",
+                        layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
+
+                    // TODO(dawn:563): Provide info about the sample types.
+                    DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
+                                     shaderInfo.texture.compatibleSampleTypes) == 0,
+                                    "The sample type in the shader is not compatible with the "
+                                    "sample type of the layout.");
+
+                    DAWN_INVALID_IF(
+                        layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
+                        "The shader's binding dimension (%s) doesn't match the shader's binding "
+                        "dimension (%s).",
+                        layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
+                    break;
+                }
+
+                case BindingInfoType::StorageTexture: {
+                    ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+                    ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+
+                    DAWN_INVALID_IF(
+                        layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
+                        "The layout's binding access (%s) isn't compatible with the shader's "
+                        "binding access (%s).",
+                        layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
+
+                    DAWN_INVALID_IF(
+                        layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
+                        "The layout's binding format (%s) doesn't match the shader's binding "
+                        "format (%s).",
+                        layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
+
+                    DAWN_INVALID_IF(layoutInfo.storageTexture.viewDimension !=
+                                        shaderInfo.storageTexture.viewDimension,
+                                    "The layout's binding dimension (%s) doesn't match the "
+                                    "shader's binding dimension (%s).",
+                                    layoutInfo.storageTexture.viewDimension,
+                                    shaderInfo.storageTexture.viewDimension);
+                    break;
+                }
+
+                case BindingInfoType::Buffer: {
+                    // Binding mismatch between shader and bind group is invalid. For example, a
+                    // writable binding in the shader with a readonly storage buffer in the bind
+                    // group layout is invalid. For internal usage with internal shaders, a storage
+                    // binding in the shader with an internal storage buffer in the bind group
+                    // layout is also valid.
+                    bool validBindingConversion =
+                        (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
+                         shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
+
+                    DAWN_INVALID_IF(
+                        layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
+                        "The buffer type in the shader (%s) is not compatible with the type in the "
+                        "layout (%s).",
+                        shaderInfo.buffer.type, layoutInfo.buffer.type);
+
+                    DAWN_INVALID_IF(
+                        layoutInfo.buffer.minBindingSize != 0 &&
+                            shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
+                        "The shader uses more bytes of the buffer (%u) than the layout's "
+                        "minBindingSize (%u).",
+                        shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
+                    break;
+                }
+
+                case BindingInfoType::Sampler:
+                    DAWN_INVALID_IF(
+                        (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
+                            shaderInfo.sampler.isComparison,
+                        "The sampler type in the shader (comparison: %u) doesn't match the type in "
+                        "the layout (comparison: %u).",
+                        shaderInfo.sampler.isComparison,
+                        layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
+                    break;
+
+                case BindingInfoType::ExternalTexture: {
+                    UNREACHABLE();
+                    break;
+                }
+            }
+
+            return {};
+        }
+        MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
+                                                            BindGroupIndex group,
+                                                            const EntryPointMetadata& entryPoint,
+                                                            const BindGroupLayoutBase* layout) {
+            // Iterate over all bindings used by this group in the shader, and find the
+            // corresponding binding in the BindGroupLayout, if it exists.
+            for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
+                DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
+                                     device, layout, entryPoint.stage, bindingId, bindingInfo),
+                                 "validating that the entry-point's declaration for @group(%u) "
+                                 "@binding(%u) matches %s",
+                                 static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId),
+                                 layout);
+            }
+
+            return {};
+        }
+
+        ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
+            const DeviceBase* device,
+            tint::inspector::Inspector* inspector,
+            const tint::inspector::EntryPoint& entryPoint) {
+            const CombinedLimits& limits = device->GetLimits();
+            constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
+
+            std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
+
+            // Returns the invalid argument, and if it is true additionally store the formatted
+            // error in metadata.infringedLimits. This is to delay the emission of these validation
+            // errors until the entry point is used.
+#define DelayedInvalidIf(invalid, ...)                                              \
+    ([&]() {                                                                        \
+        if (invalid) {                                                              \
+            metadata->infringedLimitErrors.push_back(absl::StrFormat(__VA_ARGS__)); \
+        }                                                                           \
+        return invalid;                                                             \
+    })()
+
+            if (!entryPoint.overridable_constants.empty()) {
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Pipeline overridable constants are disallowed because they "
+                                "are partially implemented.");
+
+                const auto& name2Id = inspector->GetConstantNameToIdMap();
+                const auto& id2Scalar = inspector->GetConstantIDs();
+
+                for (auto& c : entryPoint.overridable_constants) {
+                    uint32_t id = name2Id.at(c.name);
+                    OverridableConstantScalar defaultValue;
+                    if (c.is_initialized) {
+                        // if it is initialized, the scalar must exist
+                        const auto& scalar = id2Scalar.at(id);
+                        if (scalar.IsBool()) {
+                            defaultValue.b = scalar.AsBool();
+                        } else if (scalar.IsU32()) {
+                            defaultValue.u32 = scalar.AsU32();
+                        } else if (scalar.IsI32()) {
+                            defaultValue.i32 = scalar.AsI32();
+                        } else if (scalar.IsFloat()) {
+                            defaultValue.f32 = scalar.AsFloat();
+                        } else {
+                            UNREACHABLE();
+                        }
+                    }
+                    EntryPointMetadata::OverridableConstant constant = {
+                        id, FromTintOverridableConstantType(c.type), c.is_initialized,
+                        defaultValue};
+
+                    std::string identifier =
+                        c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
+                    metadata->overridableConstants[identifier] = constant;
+
+                    if (!c.is_initialized) {
+                        auto [_, inserted] = metadata->uninitializedOverridableConstants.emplace(
+                            std::move(identifier));
+                        // The insertion should have taken place
+                        ASSERT(inserted);
+                    } else {
+                        auto [_, inserted] = metadata->initializedOverridableConstants.emplace(
+                            std::move(identifier));
+                        // The insertion should have taken place
+                        ASSERT(inserted);
+                    }
+                }
+            }
+
+            DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
+
+            if (metadata->stage == SingleShaderStage::Compute) {
+                DelayedInvalidIf(
+                    entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
+                        entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
+                        entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
+                    "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
+                    "maximum allowed (%u, %u, %u).",
+                    entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
+                    entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
+                    limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
+
+                // Dimensions have already been validated against their individual limits above.
+                // Cast to uint64_t to avoid overflow in this multiplication.
+                uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
+                                          entryPoint.workgroup_size_y * entryPoint.workgroup_size_z;
+                DelayedInvalidIf(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
+                                 "The total number of workgroup invocations (%u) exceeds the "
+                                 "maximum allowed (%u).",
+                                 numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
+
+                const size_t workgroupStorageSize =
+                    inspector->GetWorkgroupStorageSize(entryPoint.name);
+                DelayedInvalidIf(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
+                                 "The total use of workgroup storage (%u bytes) is larger than "
+                                 "the maximum allowed (%u bytes).",
+                                 workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
+
+                metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
+                metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
+                metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
+
+                metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+            }
+
+            if (metadata->stage == SingleShaderStage::Vertex) {
+                for (const auto& inputVar : entryPoint.input_variables) {
+                    uint32_t unsanitizedLocation = inputVar.location_decoration;
+                    if (DelayedInvalidIf(unsanitizedLocation >= kMaxVertexAttributes,
+                                         "Vertex input variable \"%s\" has a location (%u) that "
+                                         "exceeds the maximum (%u)",
+                                         inputVar.name, unsanitizedLocation,
+                                         kMaxVertexAttributes)) {
+                        continue;
+                    }
+
+                    VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
+                    DAWN_TRY_ASSIGN(
+                        metadata->vertexInputBaseTypes[location],
+                        TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
+                    metadata->usedVertexInputs.set(location);
+                }
+
+                // [[position]] must be declared in a vertex shader but is not exposed as an
+                // output variable by Tint so we directly add its components to the total.
+                uint32_t totalInterStageShaderComponents = 4;
+                for (const auto& outputVar : entryPoint.output_variables) {
+                    EntryPointMetadata::InterStageVariableInfo variable;
+                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
+                                                           outputVar.component_type));
+                    DAWN_TRY_ASSIGN(
+                        variable.componentCount,
+                        TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
+                    DAWN_TRY_ASSIGN(
+                        variable.interpolationType,
+                        TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
+                    DAWN_TRY_ASSIGN(variable.interpolationSampling,
+                                    TintInterpolationSamplingToInterpolationSamplingType(
+                                        outputVar.interpolation_sampling));
+                    totalInterStageShaderComponents += variable.componentCount;
+
+                    uint32_t location = outputVar.location_decoration;
+                    if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+                                         "Vertex output variable \"%s\" has a location (%u) that "
+                                         "exceeds the maximum (%u).",
+                                         outputVar.name, location, kMaxInterStageShaderLocation)) {
+                        continue;
+                    }
+
+                    metadata->usedInterStageVariables.set(location);
+                    metadata->interStageVariables[location] = variable;
+                }
+
+                DelayedInvalidIf(
+                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                    "Total vertex output components count (%u) exceeds the maximum (%u).",
+                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+            }
+
+            if (metadata->stage == SingleShaderStage::Fragment) {
+                uint32_t totalInterStageShaderComponents = 0;
+                for (const auto& inputVar : entryPoint.input_variables) {
+                    EntryPointMetadata::InterStageVariableInfo variable;
+                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
+                                                           inputVar.component_type));
+                    DAWN_TRY_ASSIGN(
+                        variable.componentCount,
+                        TintCompositionTypeToInterStageComponentCount(inputVar.composition_type));
+                    DAWN_TRY_ASSIGN(
+                        variable.interpolationType,
+                        TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
+                    DAWN_TRY_ASSIGN(variable.interpolationSampling,
+                                    TintInterpolationSamplingToInterpolationSamplingType(
+                                        inputVar.interpolation_sampling));
+                    totalInterStageShaderComponents += variable.componentCount;
+
+                    uint32_t location = inputVar.location_decoration;
+                    if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+                                         "Fragment input variable \"%s\" has a location (%u) that "
+                                         "exceeds the maximum (%u).",
+                                         inputVar.name, location, kMaxInterStageShaderLocation)) {
+                        continue;
+                    }
+
+                    metadata->usedInterStageVariables.set(location);
+                    metadata->interStageVariables[location] = variable;
+                }
+
+                if (entryPoint.front_facing_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.input_sample_mask_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.sample_index_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.input_position_used) {
+                    totalInterStageShaderComponents += 4;
+                }
+
+                DelayedInvalidIf(
+                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                    "Total fragment input components count (%u) exceeds the maximum (%u).",
+                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+
+                for (const auto& outputVar : entryPoint.output_variables) {
+                    EntryPointMetadata::FragmentOutputVariableInfo variable;
+                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToTextureComponentType(
+                                                           outputVar.component_type));
+                    DAWN_TRY_ASSIGN(
+                        variable.componentCount,
+                        TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
+                    ASSERT(variable.componentCount <= 4);
+
+                    uint32_t unsanitizedAttachment = outputVar.location_decoration;
+                    if (DelayedInvalidIf(unsanitizedAttachment >= kMaxColorAttachments,
+                                         "Fragment output variable \"%s\" has a location (%u) that "
+                                         "exceeds the maximum (%u).",
+                                         outputVar.name, unsanitizedAttachment,
+                                         kMaxColorAttachments)) {
+                        continue;
+                    }
+
+                    ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
+                    metadata->fragmentOutputVariables[attachment] = variable;
+                    metadata->fragmentOutputsWritten.set(attachment);
+                }
+            }
+
+            for (const tint::inspector::ResourceBinding& resource :
+                 inspector->GetResourceBindings(entryPoint.name)) {
+                ShaderBindingInfo info;
+
+                info.bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+
+                switch (info.bindingType) {
+                    case BindingInfoType::Buffer:
+                        info.buffer.minBindingSize = resource.size_no_padding;
+                        DAWN_TRY_ASSIGN(info.buffer.type, TintResourceTypeToBufferBindingType(
+                                                              resource.resource_type));
+                        break;
+                    case BindingInfoType::Sampler:
+                        switch (resource.resource_type) {
+                            case tint::inspector::ResourceBinding::ResourceType::kSampler:
+                                info.sampler.isComparison = false;
+                                break;
+                            case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+                                info.sampler.isComparison = true;
+                                break;
+                            default:
+                                UNREACHABLE();
+                        }
+                        break;
+                    case BindingInfoType::Texture:
+                        info.texture.viewDimension =
+                            TintTextureDimensionToTextureViewDimension(resource.dim);
+                        if (resource.resource_type ==
+                                tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
+                            resource.resource_type == tint::inspector::ResourceBinding::
+                                                          ResourceType::kDepthMultisampledTexture) {
+                            info.texture.compatibleSampleTypes = SampleTypeBit::Depth;
+                        } else {
+                            info.texture.compatibleSampleTypes =
+                                TintSampledKindToSampleTypeBit(resource.sampled_kind);
+                        }
+                        info.texture.multisampled =
+                            resource.resource_type == tint::inspector::ResourceBinding::
+                                                          ResourceType::kMultisampledTexture ||
+                            resource.resource_type == tint::inspector::ResourceBinding::
+                                                          ResourceType::kDepthMultisampledTexture;
+
+                        break;
+                    case BindingInfoType::StorageTexture:
+                        DAWN_TRY_ASSIGN(
+                            info.storageTexture.access,
+                            TintResourceTypeToStorageTextureAccess(resource.resource_type));
+                        info.storageTexture.format =
+                            TintImageFormatToTextureFormat(resource.image_format);
+                        info.storageTexture.viewDimension =
+                            TintTextureDimensionToTextureViewDimension(resource.dim);
+
+                        break;
+                    case BindingInfoType::ExternalTexture:
+                        break;
+                    default:
+                        return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
+                }
+
+                BindingNumber bindingNumber(resource.binding);
+                BindGroupIndex bindGroupIndex(resource.bind_group);
+
+                if (DelayedInvalidIf(bindGroupIndex >= kMaxBindGroupsTyped,
+                                     "The entry-point uses a binding with a group decoration (%u) "
+                                     "that exceeds the maximum (%u).",
+                                     resource.bind_group, kMaxBindGroups) ||
+                    DelayedInvalidIf(bindingNumber > kMaxBindingNumberTyped,
+                                     "Binding number (%u) exceeds the maximum binding number (%u).",
+                                     uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped))) {
+                    continue;
+                }
+
+                const auto& [binding, inserted] =
+                    metadata->bindings[bindGroupIndex].emplace(bindingNumber, info);
+                DAWN_INVALID_IF(!inserted,
+                                "Entry-point has a duplicate binding for (group:%u, binding:%u).",
+                                resource.binding, resource.bind_group);
+            }
+
+            std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
+                inspector->GetSamplerTextureUses(entryPoint.name);
+            metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
+            std::transform(samplerTextureUses.begin(), samplerTextureUses.end(),
+                           std::back_inserter(metadata->samplerTexturePairs),
+                           [](const tint::inspector::SamplerTexturePair& pair) {
+                               EntryPointMetadata::SamplerTexturePair result;
+                               result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
+                                                 BindingNumber(pair.sampler_binding_point.binding)};
+                               result.texture = {BindGroupIndex(pair.texture_binding_point.group),
+                                                 BindingNumber(pair.texture_binding_point.binding)};
+                               return result;
+                           });
+
+#undef DelayedInvalidIf
+            return std::move(metadata);
+        }
+
+        ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
+            const DeviceBase* device,
+            const tint::Program* program) {
+            ASSERT(program->IsValid());
+
+            tint::inspector::Inspector inspector(program);
+            std::vector<tint::inspector::EntryPoint> entryPoints = inspector.GetEntryPoints();
+            DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
+                            inspector.error());
+
+            EntryPointMetadataTable result;
+
+            for (const tint::inspector::EntryPoint& entryPoint : entryPoints) {
+                std::unique_ptr<EntryPointMetadata> metadata;
+                DAWN_TRY_ASSIGN_CONTEXT(metadata,
+                                        ReflectEntryPointUsingTint(device, &inspector, entryPoint),
+                                        "processing entry point \"%s\".", entryPoint.name);
+
+                ASSERT(result.count(entryPoint.name) == 0);
+                result[entryPoint.name] = std::move(metadata);
+            }
+            return std::move(result);
+        }
+    }  // anonymous namespace
+
+    ShaderModuleParseResult::ShaderModuleParseResult() = default;
+    ShaderModuleParseResult::~ShaderModuleParseResult() = default;
+
+    ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
+
+    ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
+        default;
+
+    bool ShaderModuleParseResult::HasParsedShader() const {
+        return tintProgram != nullptr;
+    }
+
+    // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
+    // long as tint diagnostics are inspected / printed.
+    class TintSource {
+      public:
+        template <typename... ARGS>
+        TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
+        }
+
+        tint::Source::File file;
+    };
+
+    MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+                                              const ShaderModuleDescriptor* descriptor,
+                                              ShaderModuleParseResult* parseResult,
+                                              OwnedCompilationMessages* outMessages) {
+        ASSERT(parseResult != nullptr);
+
+        const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+        DAWN_INVALID_IF(chainedDescriptor == nullptr,
+                        "Shader module descriptor missing chained descriptor");
+
+        // For now only a single SPIRV or WGSL subdescriptor is allowed.
+        DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                     wgpu::SType::ShaderModuleWGSLDescriptor));
+
+        ScopedTintICEHandler scopedICEHandler(device);
+
+        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+        FindInChain(chainedDescriptor, &spirvDesc);
+        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+        FindInChain(chainedDescriptor, &wgslDesc);
+
+        // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
+        // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
+        ShaderModuleWGSLDescriptor newWgslDesc;
+        std::string newWgslCode;
+        if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
+            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+
+            tint::writer::wgsl::Options options;
+            auto result = tint::writer::wgsl::Generate(&program, options);
+            DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
+
+            newWgslCode = std::move(result.wgsl);
+            newWgslDesc.source = newWgslCode.c_str();
+
+            spirvDesc = nullptr;
+            wgslDesc = &newWgslDesc;
+        }
+
+        if (spirvDesc) {
+            DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv),
+                            "SPIR-V is disallowed.");
+
+            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+        } else if (wgslDesc) {
+            auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
+
+            if (device->IsToggleEnabled(Toggle::DumpShaders)) {
+                std::ostringstream dumpedMsg;
+                dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
+                device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+            }
+
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
+            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+            parseResult->tintSource = std::move(tintSource);
+        }
+
+        return {};
+    }
+
+    RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+                                                            const PipelineLayoutBase* layout) {
+        RequiredBufferSizes bufferSizes;
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
+                                                            layout->GetBindGroupLayout(group));
+        }
+
+        return bufferSizes;
+    }
+
+    ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+                                               const tint::Program* program,
+                                               const tint::transform::DataMap& inputs,
+                                               tint::transform::DataMap* outputs,
+                                               OwnedCompilationMessages* outMessages) {
+        tint::transform::Output output = transform->Run(program, inputs);
+        if (outMessages != nullptr) {
+            outMessages->AddMessages(output.program.Diagnostics());
+        }
+        DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
+                        output.program.Diagnostics().str());
+        if (outputs != nullptr) {
+            *outputs = std::move(output.data);
+        }
+        return std::move(output.program);
+    }
+
+    void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+                                         const std::string& entryPoint,
+                                         BindGroupIndex pullingBufferBindingSet,
+                                         tint::transform::DataMap* transformInputs) {
+        tint::transform::VertexPulling::Config cfg;
+        cfg.entry_point_name = entryPoint;
+        cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
+
+        cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
+        for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
+            tint::transform::VertexBufferLayoutDescriptor* tintInfo =
+                &cfg.vertex_state[static_cast<uint8_t>(slot)];
+
+            tintInfo->array_stride = dawnInfo.arrayStride;
+            tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
+        }
+
+        for (VertexAttributeLocation location :
+             IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
+            tint::transform::VertexAttributeDescriptor tintInfo;
+            tintInfo.format = ToTintVertexFormat(dawnInfo.format);
+            tintInfo.offset = dawnInfo.offset;
+            tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
+
+            uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
+            cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
+        }
+
+        transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+    }
+
+    MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+                                                       const EntryPointMetadata& entryPoint,
+                                                       const PipelineLayoutBase* layout) {
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
+                                 device, group, entryPoint, layout->GetBindGroupLayout(group)),
+                             "validating the entry-point's compatibility for group %u with %s",
+                             static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
+        }
+
+        for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
+            DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
+                            "The entry-point uses bindings in group %u but %s doesn't have a "
+                            "BindGroupLayout for this index",
+                            static_cast<uint32_t>(group), layout);
+        }
+
+        // Validate that filtering samplers are not used with unfilterable textures.
+        for (const auto& pair : entryPoint.samplerTexturePairs) {
+            const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
+            const BindingInfo& samplerInfo =
+                samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
+            if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
+                continue;
+            }
+            const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
+            const BindingInfo& textureInfo =
+                textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
+
+            ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
+                   textureInfo.bindingType != BindingInfoType::Sampler &&
+                   textureInfo.bindingType != BindingInfoType::StorageTexture);
+
+            if (textureInfo.bindingType != BindingInfoType::Texture) {
+                continue;
+            }
+
+            // Uint/sint can't be statically used with a sampler, so they any
+            // texture bindings reflected must be float or depth textures. If
+            // the shader uses a float/depth texture but the bind group layout
+            // specifies a uint/sint texture binding,
+            // |ValidateCompatibilityWithBindGroupLayout| will fail since the
+            // sampleType does not match.
+            ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
+                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
+                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
+
+            DAWN_INVALID_IF(
+                textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
+                "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
+                "(group:%u, binding:%u) that's %s",
+                static_cast<uint32_t>(pair.texture.group),
+                static_cast<uint32_t>(pair.texture.binding),
+                wgpu::TextureSampleType::UnfilterableFloat,
+                static_cast<uint32_t>(pair.sampler.group),
+                static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
+        }
+
+        return {};
+    }
+
+    // ShaderModuleBase
+
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
+                                       const ShaderModuleDescriptor* descriptor,
+                                       ApiObjectBase::UntrackedByDeviceTag tag)
+        : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
+        ASSERT(descriptor->nextInChain != nullptr);
+        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &spirvDesc);
+        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &wgslDesc);
+        ASSERT(spirvDesc || wgslDesc);
+
+        if (spirvDesc) {
+            mType = Type::Spirv;
+            mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+        } else if (wgslDesc) {
+            mType = Type::Wgsl;
+            mWgsl = std::string(wgslDesc->source);
+        }
+    }
+
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
+        TrackInDevice();
+    }
+
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mType(Type::Undefined) {
+    }
+
+    ShaderModuleBase::~ShaderModuleBase() = default;
+
+    void ShaderModuleBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheShaderModule(this);
+        }
+    }
+
+    // static
+    Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
+        return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
+    }
+
+    ObjectType ShaderModuleBase::GetType() const {
+        return ObjectType::ShaderModule;
+    }
+
+    bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
+        return mEntryPoints.count(entryPoint) > 0;
+    }
+
+    const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
+        ASSERT(HasEntryPoint(entryPoint));
+        return *mEntryPoints.at(entryPoint);
+    }
+
+    size_t ShaderModuleBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mType);
+        recorder.Record(mOriginalSpirv);
+        recorder.Record(mWgsl);
+        return recorder.GetContentHash();
+    }
+
+    bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
+                                                    const ShaderModuleBase* b) const {
+        return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv &&
+               a->mWgsl == b->mWgsl;
+    }
+
+    const tint::Program* ShaderModuleBase::GetTintProgram() const {
+        ASSERT(mTintProgram);
+        return mTintProgram.get();
+    }
+
+    void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
+                                                 void* userdata) {
+        if (callback == nullptr) {
+            return;
+        }
+
+        callback(WGPUCompilationInfoRequestStatus_Success,
+                 mCompilationMessages->GetCompilationInfo(), userdata);
+    }
+
+    void ShaderModuleBase::InjectCompilationMessages(
+        std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
+        // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
+        // module returned from cache.
+        // InjectCompilationMessages should be called only once for a shader module, after it is
+        // created. However currently InjectCompilationMessages may be called on a shader module
+        // returned from cache rather than newly created, and violate the rule. We just skip the
+        // injection in this case for now, but a proper solution including ensure the cache goes
+        // before the validation is required.
+        if (mCompilationMessages != nullptr) {
+            return;
+        }
+        // Move the compilationMessages into the shader module and emit the tint errors and warnings
+        mCompilationMessages = std::move(compilationMessages);
+
+        // Emit the formatted Tint errors and warnings within the moved compilationMessages
+        const std::vector<std::string>& formattedTintMessages =
+            mCompilationMessages->GetFormattedTintMessages();
+        if (formattedTintMessages.empty()) {
+            return;
+        }
+        std::ostringstream t;
+        for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
+             pMessage++) {
+            if (pMessage != formattedTintMessages.begin()) {
+                t << std::endl;
+            }
+            t << *pMessage;
+        }
+        this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
+    }
+
+    OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
+        return mCompilationMessages.get();
+    }
+
+    // static
+    void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
+                                                       tint::transform::Manager* transformManager,
+                                                       tint::transform::DataMap* transformInputs) {
+        tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+            for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
+                newBindingsMap[{static_cast<uint32_t>(i),
+                                static_cast<uint32_t>(expansion.second.plane0)}] = {
+                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
+                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
+            }
+        }
+
+        if (!newBindingsMap.empty()) {
+            transformManager->Add<tint::transform::MultiplanarExternalTexture>();
+            transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+                newBindingsMap);
+        }
+    }
+
+    MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
+        mTintProgram = std::move(parseResult->tintProgram);
+        mTintSource = std::move(parseResult->tintSource);
+
+        DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
+        return {};
+    }
+
+    size_t PipelineLayoutEntryPointPairHashFunc::operator()(
+        const PipelineLayoutEntryPointPair& pair) const {
+        size_t hash = 0;
+        HashCombine(&hash, pair.first, pair.second);
+        return hash;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/ShaderModule.h b/src/dawn/native/ShaderModule.h
new file mode 100644
index 0000000..ff643eb
--- /dev/null
+++ b/src/dawn/native/ShaderModule.h
@@ -0,0 +1,314 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SHADERMODULE_H_
+#define DAWNNATIVE_SHADERMODULE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace tint {
+
+    class Program;
+
+    namespace transform {
+        class DataMap;
+        class Manager;
+        class Transform;
+        class VertexPulling;
+    }  // namespace transform
+
+}  // namespace tint
+
+namespace dawn::native {
+
+    struct EntryPointMetadata;
+
+    // Base component type of an inter-stage variable
+    enum class InterStageComponentType {
+        Sint,
+        Uint,
+        Float,
+    };
+
+    enum class InterpolationType {
+        Perspective,
+        Linear,
+        Flat,
+    };
+
+    enum class InterpolationSampling {
+        None,
+        Center,
+        Centroid,
+        Sample,
+    };
+
+    using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
+    struct PipelineLayoutEntryPointPairHashFunc {
+        size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+    };
+
+    // A map from name to EntryPointMetadata.
+    using EntryPointMetadataTable =
+        std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+
+    // Source for a tint program
+    class TintSource;
+
+    struct ShaderModuleParseResult {
+        ShaderModuleParseResult();
+        ~ShaderModuleParseResult();
+        ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
+        ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
+
+        bool HasParsedShader() const;
+
+        std::unique_ptr<tint::Program> tintProgram;
+        std::unique_ptr<TintSource> tintSource;
+    };
+
+    MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+                                              const ShaderModuleDescriptor* descriptor,
+                                              ShaderModuleParseResult* parseResult,
+                                              OwnedCompilationMessages* outMessages);
+    MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+                                                       const EntryPointMetadata& entryPoint,
+                                                       const PipelineLayoutBase* layout);
+
+    RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+                                                            const PipelineLayoutBase* layout);
+    ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+                                               const tint::Program* program,
+                                               const tint::transform::DataMap& inputs,
+                                               tint::transform::DataMap* outputs,
+                                               OwnedCompilationMessages* messages);
+
+    /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
+    void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+                                         const std::string& entryPoint,
+                                         BindGroupIndex pullingBufferBindingSet,
+                                         tint::transform::DataMap* transformInputs);
+
+    // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
+    // for isComparison instead of a wgpu::SamplerBindingType enum.
+    struct ShaderSamplerBindingInfo {
+        bool isComparison;
+    };
+
+    // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
+    // instead of a single enum.
+    struct ShaderTextureBindingInfo {
+        SampleTypeBit compatibleSampleTypes;
+        wgpu::TextureViewDimension viewDimension;
+        bool multisampled;
+    };
+
+    // Per-binding shader metadata contains some SPIRV specific information in addition to
+    // most of the frontend per-binding information.
+    struct ShaderBindingInfo {
+        // The SPIRV ID of the resource.
+        uint32_t id;
+        uint32_t base_type_id;
+
+        BindingNumber binding;
+        BindingInfoType bindingType;
+
+        BufferBindingLayout buffer;
+        ShaderSamplerBindingInfo sampler;
+        ShaderTextureBindingInfo texture;
+        StorageTextureBindingLayout storageTexture;
+    };
+
+    using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+    using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+
+    // The WebGPU overridable constants only support these scalar types
+    union OverridableConstantScalar {
+        // Use int32_t for boolean to initialize the full 32bit
+        int32_t b;
+        float f32;
+        int32_t i32;
+        uint32_t u32;
+    };
+
+    // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
+    // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
+    // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
+    // ShaderModuleBase.
+    struct EntryPointMetadata {
+        // It is valid for a shader to contain entry points that go over limits. To keep this
+        // structure with packed arrays and bitsets, we still validate against limits when
+        // doing reflection, but store the errors in this vector, for later use if the application
+        // tries to use the entry point.
+        std::vector<std::string> infringedLimitErrors;
+
+        // bindings[G][B] is the reflection data for the binding defined with
+        // @group(G) @binding(B) in WGSL / SPIRV.
+        BindingInfoArray bindings;
+
+        struct SamplerTexturePair {
+            BindingSlot sampler;
+            BindingSlot texture;
+        };
+        std::vector<SamplerTexturePair> samplerTexturePairs;
+
+        // The set of vertex attributes this entryPoint uses.
+        ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
+            vertexInputBaseTypes;
+        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
+
+        // An array to record the basic types (float, int and uint) of the fragment shader outputs.
+        struct FragmentOutputVariableInfo {
+            wgpu::TextureComponentType baseType;
+            uint8_t componentCount;
+        };
+        ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
+            fragmentOutputVariables;
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
+
+        struct InterStageVariableInfo {
+            InterStageComponentType baseType;
+            uint32_t componentCount;
+            InterpolationType interpolationType;
+            InterpolationSampling interpolationSampling;
+        };
+        // Now that we only support vertex and fragment stages, there can't be both inter-stage
+        // inputs and outputs in one shader stage.
+        std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
+        std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
+
+        // The local workgroup size declared for a compute entry point (or 0s otehrwise).
+        Origin3D localWorkgroupSize;
+
+        // The shader stage for this binding.
+        SingleShaderStage stage;
+
+        struct OverridableConstant {
+            uint32_t id;
+            // Match tint::inspector::OverridableConstant::Type
+            // Bool is defined as a macro on linux X11 and cannot compile
+            enum class Type { Boolean, Float32, Uint32, Int32 } type;
+
+            // If the constant doesn't not have an initializer in the shader
+            // Then it is required for the pipeline stage to have a constant record to initialize a
+            // value
+            bool isInitialized;
+
+            // Store the default initialized value in shader
+            // This is used by metal backend as the function_constant does not have dafault values
+            // Initialized when isInitialized == true
+            OverridableConstantScalar defaultValue;
+        };
+
+        using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
+
+        // Map identifier to overridable constant
+        // Identifier is unique: either the variable name or the numeric ID if specified
+        OverridableConstantsMap overridableConstants;
+
+        // Overridable constants that are not initialized in shaders
+        // They need value initialization from pipeline stage or it is a validation error
+        std::unordered_set<std::string> uninitializedOverridableConstants;
+
+        // Store constants with shader initialized values as well
+        // This is used by metal backend to set values with default initializers that are not
+        // overridden
+        std::unordered_set<std::string> initializedOverridableConstants;
+
+        bool usesNumWorkgroups = false;
+    };
+
+    class ShaderModuleBase : public ApiObjectBase, public CachedObject {
+      public:
+        ShaderModuleBase(DeviceBase* device,
+                         const ShaderModuleDescriptor* descriptor,
+                         ApiObjectBase::UntrackedByDeviceTag tag);
+        ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModuleBase() override;
+
+        static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        // Return true iff the program has an entrypoint called `entryPoint`.
+        bool HasEntryPoint(const std::string& entryPoint) const;
+
+        // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
+        // must be true.
+        const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
+
+        // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
+        size_t ComputeContentHash() override;
+
+        struct EqualityFunc {
+            bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
+        };
+
+        const tint::Program* GetTintProgram() const;
+
+        void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
+
+        void InjectCompilationMessages(
+            std::unique_ptr<OwnedCompilationMessages> compilationMessages);
+
+        OwnedCompilationMessages* GetCompilationMessages() const;
+
+      protected:
+        // Constructor used only for mocking and testing.
+        ShaderModuleBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
+
+        static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
+                                                tint::transform::Manager* transformManager,
+                                                tint::transform::DataMap* transformInputs);
+
+      private:
+        ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // The original data in the descriptor for caching.
+        enum class Type { Undefined, Spirv, Wgsl };
+        Type mType;
+        std::vector<uint32_t> mOriginalSpirv;
+        std::string mWgsl;
+
+        EntryPointMetadataTable mEntryPoints;
+        std::unique_ptr<tint::Program> mTintProgram;
+        std::unique_ptr<TintSource> mTintSource;  // Keep the tint::Source::File alive
+
+        std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SHADERMODULE_H_
diff --git a/src/dawn/native/SpirvValidation.cpp b/src/dawn/native/SpirvValidation.cpp
new file mode 100644
index 0000000..72eb8c1
--- /dev/null
+++ b/src/dawn/native/SpirvValidation.cpp
@@ -0,0 +1,74 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/SpirvValidation.h"
+
+#include "dawn/native/Device.h"
+
+#include <spirv-tools/libspirv.hpp>
+#include <sstream>
+
+namespace dawn::native {
+
+    MaybeError ValidateSpirv(DeviceBase* device,
+                             const std::vector<uint32_t>& spirv,
+                             bool dumpSpirv) {
+        spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
+        spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
+                                               const spv_position_t& position,
+                                               const char* message) {
+            WGPULoggingType wgpuLogLevel;
+            switch (level) {
+                case SPV_MSG_FATAL:
+                case SPV_MSG_INTERNAL_ERROR:
+                case SPV_MSG_ERROR:
+                    wgpuLogLevel = WGPULoggingType_Error;
+                    break;
+                case SPV_MSG_WARNING:
+                    wgpuLogLevel = WGPULoggingType_Warning;
+                    break;
+                case SPV_MSG_INFO:
+                    wgpuLogLevel = WGPULoggingType_Info;
+                    break;
+                default:
+                    wgpuLogLevel = WGPULoggingType_Error;
+                    break;
+            }
+
+            std::ostringstream ss;
+            ss << "SPIRV line " << position.index << ": " << message << std::endl;
+            device->EmitLog(wgpuLogLevel, ss.str().c_str());
+        });
+
+        const bool valid = spirvTools.Validate(spirv);
+        if (dumpSpirv || !valid) {
+            std::ostringstream dumpedMsg;
+            std::string disassembly;
+            if (spirvTools.Disassemble(
+                    spirv, &disassembly,
+                    SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
+                dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
+            } else {
+                dumpedMsg << "/* Failed to disassemble generated SPIRV */";
+            }
+            device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+
+        DAWN_INVALID_IF(!valid,
+                        "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/SpirvValidation.h b/src/dawn/native/SpirvValidation.h
new file mode 100644
index 0000000..984ebcd
--- /dev/null
+++ b/src/dawn/native/SpirvValidation.h
@@ -0,0 +1,27 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Error.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    MaybeError ValidateSpirv(DeviceBase* device,
+                             const std::vector<uint32_t>& spirv,
+                             bool dumpSpirv);
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/StagingBuffer.cpp b/src/dawn/native/StagingBuffer.cpp
new file mode 100644
index 0000000..a6c258c
--- /dev/null
+++ b/src/dawn/native/StagingBuffer.cpp
@@ -0,0 +1,29 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/StagingBuffer.h"
+
+namespace dawn::native {
+
+    StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {
+    }
+
+    size_t StagingBufferBase::GetSize() const {
+        return mBufferSize;
+    }
+
+    void* StagingBufferBase::GetMappedPointer() const {
+        return mMappedPointer;
+    }
+}  // namespace dawn::native
diff --git a/src/dawn/native/StagingBuffer.h b/src/dawn/native/StagingBuffer.h
new file mode 100644
index 0000000..0ebb1c4
--- /dev/null
+++ b/src/dawn/native/StagingBuffer.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFER_H_
+#define DAWNNATIVE_STAGINGBUFFER_H_
+
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+
+    class StagingBufferBase {
+      public:
+        StagingBufferBase(size_t size);
+        virtual ~StagingBufferBase() = default;
+
+        virtual MaybeError Initialize() = 0;
+
+        void* GetMappedPointer() const;
+        size_t GetSize() const;
+
+      protected:
+        void* mMappedPointer = nullptr;
+
+      private:
+        const size_t mBufferSize;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_STAGINGBUFFER_H_
diff --git a/src/dawn/native/Subresource.cpp b/src/dawn/native/Subresource.cpp
new file mode 100644
index 0000000..6ebba9f
--- /dev/null
+++ b/src/dawn/native/Subresource.cpp
@@ -0,0 +1,136 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Subresource.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+
+namespace dawn::native {
+
+    Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
+        Aspect aspectMask = ConvertAspect(format, aspect);
+        ASSERT(HasOneBit(aspectMask));
+        return aspectMask;
+    }
+
+    Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
+        Aspect aspectMask = SelectFormatAspects(format, aspect);
+        ASSERT(aspectMask != Aspect::None);
+        return aspectMask;
+    }
+
+    Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
+        // Color view |format| must be treated as the same plane |aspect|.
+        if (format.aspects == Aspect::Color) {
+            switch (aspect) {
+                case wgpu::TextureAspect::Plane0Only:
+                    return Aspect::Plane0;
+                case wgpu::TextureAspect::Plane1Only:
+                    return Aspect::Plane1;
+                default:
+                    break;
+            }
+        }
+        return ConvertAspect(format, aspect);
+    }
+
+    Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+        switch (aspect) {
+            case wgpu::TextureAspect::All:
+                return format.aspects;
+            case wgpu::TextureAspect::DepthOnly:
+                return format.aspects & Aspect::Depth;
+            case wgpu::TextureAspect::StencilOnly:
+                return format.aspects & Aspect::Stencil;
+            case wgpu::TextureAspect::Plane0Only:
+                return format.aspects & Aspect::Plane0;
+            case wgpu::TextureAspect::Plane1Only:
+                return format.aspects & Aspect::Plane1;
+        }
+        UNREACHABLE();
+    }
+
+    uint8_t GetAspectIndex(Aspect aspect) {
+        ASSERT(HasOneBit(aspect));
+        switch (aspect) {
+            case Aspect::Color:
+            case Aspect::Depth:
+            case Aspect::Plane0:
+            case Aspect::CombinedDepthStencil:
+                return 0;
+            case Aspect::Plane1:
+            case Aspect::Stencil:
+                return 1;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    uint8_t GetAspectCount(Aspect aspects) {
+        // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
+        // Note that we can't do a switch because compilers complain that Depth | Stencil is not
+        // a valid enum value.
+        if (aspects == Aspect::Color || aspects == Aspect::Depth ||
+            aspects == Aspect::CombinedDepthStencil) {
+            return 1;
+        } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
+            return 2;
+        } else if (aspects == Aspect::Stencil) {
+            // Fake a the existence of a depth aspect so that the stencil data stays at index 1.
+            ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+            return 2;
+        } else {
+            ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
+            return 2;
+        }
+    }
+
+    SubresourceRange::SubresourceRange(Aspect aspects,
+                                       FirstAndCountRange<uint32_t> arrayLayerParam,
+                                       FirstAndCountRange<uint32_t> mipLevelParams)
+        : aspects(aspects),
+          baseArrayLayer(arrayLayerParam.first),
+          layerCount(arrayLayerParam.count),
+          baseMipLevel(mipLevelParams.first),
+          levelCount(mipLevelParams.count) {
+    }
+
+    SubresourceRange::SubresourceRange()
+        : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {
+    }
+
+    // static
+    SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
+                                                         uint32_t baseArrayLayer,
+                                                         Aspect aspects) {
+        return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+    }
+
+    // static
+    SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
+                                                  uint32_t baseArrayLayer,
+                                                  uint32_t baseMipLevel) {
+        ASSERT(HasOneBit(aspect));
+        return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+    }
+
+    // static
+    SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
+                                                uint32_t layerCount,
+                                                uint32_t levelCount) {
+        return {aspects, {0, layerCount}, {0, levelCount}};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Subresource.h b/src/dawn/native/Subresource.h
new file mode 100644
index 0000000..63795e5
--- /dev/null
+++ b/src/dawn/native/Subresource.h
@@ -0,0 +1,112 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SUBRESOURCE_H_
+#define DAWNNATIVE_SUBRESOURCE_H_
+
+#include "dawn/native/EnumClassBitmasks.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    // Note: Subresource indices are computed by iterating the aspects in increasing order.
+    // D3D12 uses these directly, so the order much match D3D12's indices.
+    //  - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
+    enum class Aspect : uint8_t {
+        None = 0x0,
+        Color = 0x1,
+        Depth = 0x2,
+        Stencil = 0x4,
+
+        // Aspects used to select individual planes in a multi-planar format.
+        Plane0 = 0x8,
+        Plane1 = 0x10,
+
+        // An aspect for that represents the combination of both the depth and stencil aspects. It
+        // can be ignored outside of the Vulkan backend.
+        CombinedDepthStencil = 0x20,
+    };
+
+    template <>
+    struct EnumBitmaskSize<Aspect> {
+        static constexpr unsigned value = 6;
+    };
+
+    // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+    // does not exist in the format.
+    // Also ASSERTs if "All" is selected and results in more than one aspect.
+    Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
+
+    // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+    // does not exist in the format.
+    Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
+
+    // Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
+    // Note that this can return Aspect::None if the Format doesn't have any of the
+    // selected aspects.
+    Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
+
+    // Convert TextureAspect to the aspect which corresponds to the view format. This
+    // special cases per plane view formats before calling ConvertAspect.
+    Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
+
+    // Helper struct to make it clear that what the parameters of a range mean.
+    template <typename T>
+    struct FirstAndCountRange {
+        T first;
+        T count;
+    };
+
+    struct SubresourceRange {
+        SubresourceRange(Aspect aspects,
+                         FirstAndCountRange<uint32_t> arrayLayerParam,
+                         FirstAndCountRange<uint32_t> mipLevelParams);
+        SubresourceRange();
+
+        Aspect aspects;
+        uint32_t baseArrayLayer;
+        uint32_t layerCount;
+        uint32_t baseMipLevel;
+        uint32_t levelCount;
+
+        static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
+                                                  uint32_t baseArrayLayer,
+                                                  Aspect aspects);
+        static SubresourceRange MakeSingle(Aspect aspect,
+                                           uint32_t baseArrayLayer,
+                                           uint32_t baseMipLevel);
+
+        static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
+    };
+
+    // Helper function to use aspects as linear indices in arrays.
+    uint8_t GetAspectIndex(Aspect aspect);
+    uint8_t GetAspectCount(Aspect aspects);
+
+    // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
+    // the per plane index does not exceed the known maximum plane count.
+    static constexpr uint32_t kMaxPlanesPerFormat = 3;
+
+}  // namespace dawn::native
+
+namespace dawn {
+
+    template <>
+    struct IsDawnBitmask<dawn::native::Aspect> {
+        static constexpr bool enable = true;
+    };
+
+}  // namespace dawn
+
+#endif  // DAWNNATIVE_SUBRESOURCE_H_
diff --git a/src/dawn/native/SubresourceStorage.h b/src/dawn/native/SubresourceStorage.h
new file mode 100644
index 0000000..345f994
--- /dev/null
+++ b/src/dawn/native/SubresourceStorage.h
@@ -0,0 +1,555 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SUBRESOURCESTORAGE_H_
+#define DAWNNATIVE_SUBRESOURCESTORAGE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/TypeTraits.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+#include <limits>
+#include <memory>
+#include <vector>
+
+namespace dawn::native {
+
+    // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
+    // value of type T except that it tries to compress similar subresources so that algorithms
+    // can act on a whole range of subresources at once if they have the same state.
+    //
+    // For example a very common case to optimize for is the tracking of the usage of texture
+    // subresources inside a render pass: the vast majority of texture views will select the whole
+    // texture while a small minority will select a sub-range. We want to optimize the common case
+    // by setting and checking a single "usage" value when a full subresource is used but at the
+    // same time allow per-subresource data when needed.
+    //
+    // Another example is barrier tracking per-subresource in the backends: it will often happen
+    // that during texture upload each mip level will have a different "barrier state". However
+    // when the texture is fully uploaded and after it is used for sampling (with a full view) for
+    // the first time, the barrier state will likely be the same across all the subresources.
+    // That's why some form of "recompression" of subresource state must be possibe.
+    //
+    // In order to keep the implementation details private and to avoid iterator-hell, this
+    // container uses a more functional approach of calling a closure on the interesting ranges.
+    // This is for example how to look at the state of all subresources.
+    //
+    //   subresources.Iterate([](const SubresourceRange& range, const T& data) {
+    //      // Do something with the knowledge that all the subresources in `range` have value
+    //      // `data`.
+    //   });
+    //
+    // SubresourceStorage internally tracks compression state per aspect and then per layer of each
+    // aspect. This means that a 2-aspect texture can have the following compression state:
+    //
+    //  - Aspect 0 is fully compressed.
+    //  - Aspect 1 is partially compressed:
+    //    - Aspect 1 layer 3 is decompressed.
+    //    - Aspect 1 layer 0-2 and 4-42 are compressed.
+    //
+    // A useful model to reason about SubresourceStorage is to represent is as a tree:
+    //
+    //  - SubresourceStorage is the root.
+    //    |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
+    //       any children because the data is constant across all of the subtree.
+    //      |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
+    //         its node doesn't have any children because the data is constant across all of the
+    //         subtree.
+    //        |-> Nodes 3 deep represent individial mip levels (for uncompressed layers).
+    //
+    // The concept of recompression is the removal of all child nodes of a non-leaf node when the
+    // data is constant across them. Decompression is the addition of child nodes to a leaf node
+    // and copying of its data to all its children.
+    //
+    // The choice of having secondary compression for array layers is to optimize for the cases
+    // where transfer operations are used to update specific layers of texture with render or
+    // transfer operations, while the rest is untouched. It seems much less likely that there
+    // would be operations that touch all Nth mips of a 2D array texture without touching the
+    // others.
+    //
+    // There are several hot code paths that create new SubresourceStorage like the tracking of
+    // resource usage per-pass. We don't want to allocate a container for the decompressed data
+    // unless we have to because it would dramatically lower performance. Instead
+    // SubresourceStorage contains an inline array that contains the per-aspect compressed data
+    // and only allocates a per-subresource on aspect decompression.
+    //
+    // T must be a copyable type that supports equality comparison with ==.
+    //
+    // The implementation of functions in this file can have a lot of control flow and corner cases
+    // so each modification should come with extensive tests and ensure 100% code coverage of the
+    // modified functions. See instructions at
+    // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
+    // to run the test with code coverage. A command line that worked in the past (with the right
+    // GN args for the out/coverage directory in a Chromium checkout) is:
+    //
+    /*
+       python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
+           "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
+           third_party/dawn/src/dawn/native
+    */
+    //
+    // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
+    // if recompression can happen or not in Update() and Merge()
+    template <typename T>
+    class SubresourceStorage {
+      public:
+        static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
+        static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
+
+        // Creates the storage with the given "dimensions" and all subresources starting with the
+        // initial value.
+        SubresourceStorage(Aspect aspects,
+                           uint32_t arrayLayerCount,
+                           uint32_t mipLevelCount,
+                           T initialValue = {});
+
+        // Returns the data for a single subresource. Note that the reference returned might be the
+        // same for multiple subresources.
+        const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
+
+        // Given an iterateFunc that's a function or function-like objet that can be called with
+        // arguments of type (const SubresourceRange& range, const T& data) and returns void,
+        // calls it with aggregate ranges if possible, such that each subresource is part of
+        // exactly one of the ranges iterateFunc is called with (and obviously data is the value
+        // stored for that subresource). For example:
+        //
+        //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
+        //       // Do something with range and data.
+        //   });
+        template <typename F>
+        void Iterate(F&& iterateFunc) const;
+
+        // Given an updateFunc that's a function or function-like objet that can be called with
+        // arguments of type (const SubresourceRange& range, T* data) and returns void,
+        // calls it with ranges that in aggregate form `range` and pass for each of the
+        // sub-ranges a pointer to modify the value for that sub-range. For example:
+        //
+        //   subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
+        //       *data |= wgpu::TextureUsage::Stuff;
+        //   });
+        //
+        // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
+        // your code is likely to break when compression happens. Range should only be used for
+        // side effects like using it to compute a Vulkan pipeline barrier.
+        template <typename F>
+        void Update(const SubresourceRange& range, F&& updateFunc);
+
+        // Given a mergeFunc that's a function or a function-like object that can be called with
+        // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
+        // returns void, calls it with ranges that in aggregate form the full resources and pass
+        // for each of the sub-ranges a pointer to modify the value for that sub-range and the
+        // corresponding value from other for that sub-range. For example:
+        //
+        //   subresources.Merge(otherUsages,
+        //       [](const SubresourceRange&, T* data, const T& otherData) {
+        //          *data |= otherData;
+        //       });
+        //
+        // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
+        // your code is likely to break when compression happens. Range should only be used for
+        // side effects like using it to compute a Vulkan pipeline barrier.
+        template <typename U, typename F>
+        void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
+
+        // Other operations to consider:
+        //
+        //  - UpdateTo(Range, T) that updates the range to a constant value.
+
+        // Methods to query the internal state of SubresourceStorage for testing.
+        Aspect GetAspectsForTesting() const;
+        uint32_t GetArrayLayerCountForTesting() const;
+        uint32_t GetMipLevelCountForTesting() const;
+        bool IsAspectCompressedForTesting(Aspect aspect) const;
+        bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
+
+      private:
+        template <typename U>
+        friend class SubresourceStorage;
+
+        void DecompressAspect(uint32_t aspectIndex);
+        void RecompressAspect(uint32_t aspectIndex);
+
+        void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
+        void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
+
+        SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
+
+        // LayerCompressed should never be called when the aspect is compressed otherwise it would
+        // need to check that mLayerCompressed is not null before indexing it.
+        bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
+        bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
+
+        // Return references to the data for a compressed plane / layer or subresource.
+        // Each variant should be called exactly under the correct compression level.
+        T& DataInline(uint32_t aspectIndex);
+        T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
+        const T& DataInline(uint32_t aspectIndex) const;
+        const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
+
+        Aspect mAspects;
+        uint8_t mMipLevelCount;
+        uint16_t mArrayLayerCount;
+
+        // Invariant: if an aspect is marked compressed, then all it's layers are marked as
+        // compressed.
+        static constexpr size_t kMaxAspects = 2;
+        std::array<bool, kMaxAspects> mAspectCompressed;
+        std::array<T, kMaxAspects> mInlineAspectData;
+
+        // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
+        std::unique_ptr<bool[]> mLayerCompressed;
+
+        // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
+        // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
+        // the data for a compressed layer of aspect if in the slot for (aspect, layer, 0).
+        std::unique_ptr<T[]> mData;
+    };
+
+    template <typename T>
+    SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
+                                              uint32_t arrayLayerCount,
+                                              uint32_t mipLevelCount,
+                                              T initialValue)
+        : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
+        ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
+        ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
+
+        uint32_t aspectCount = GetAspectCount(aspects);
+        ASSERT(aspectCount <= kMaxAspects);
+
+        for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
+            mAspectCompressed[aspectIndex] = true;
+            DataInline(aspectIndex) = initialValue;
+        }
+    }
+
+    template <typename T>
+    template <typename F>
+    void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+        bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
+        bool fullAspects =
+            range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
+
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            uint32_t aspectIndex = GetAspectIndex(aspect);
+
+            // Call the updateFunc once for the whole aspect if possible or decompress and fallback
+            // to per-layer handling.
+            if (mAspectCompressed[aspectIndex]) {
+                if (fullAspects) {
+                    SubresourceRange updateRange =
+                        SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+                    updateFunc(updateRange, &DataInline(aspectIndex));
+                    continue;
+                }
+                DecompressAspect(aspectIndex);
+            }
+
+            uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
+            for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
+                // Call the updateFunc once for the whole layer if possible or decompress and
+                // fallback to per-level handling.
+                if (LayerCompressed(aspectIndex, layer)) {
+                    if (fullLayers) {
+                        SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
+                        updateFunc(updateRange, &Data(aspectIndex, layer));
+                        continue;
+                    }
+                    DecompressLayer(aspectIndex, layer);
+                }
+
+                // Worst case: call updateFunc per level.
+                uint32_t levelEnd = range.baseMipLevel + range.levelCount;
+                for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
+                    SubresourceRange updateRange =
+                        SubresourceRange::MakeSingle(aspect, layer, level);
+                    updateFunc(updateRange, &Data(aspectIndex, layer, level));
+                }
+
+                // If the range has fullLayers then it is likely we can recompress after the calls
+                // to updateFunc (this branch is skipped if updateFunc was called for the whole
+                // layer).
+                if (fullLayers) {
+                    RecompressLayer(aspectIndex, layer);
+                }
+            }
+
+            // If the range has fullAspects then it is likely we can recompress after the calls to
+            // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
+            if (fullAspects) {
+                RecompressAspect(aspectIndex);
+            }
+        }
+    }
+
+    template <typename T>
+    template <typename U, typename F>
+    void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+        ASSERT(mAspects == other.mAspects);
+        ASSERT(mArrayLayerCount == other.mArrayLayerCount);
+        ASSERT(mMipLevelCount == other.mMipLevelCount);
+
+        for (Aspect aspect : IterateEnumMask(mAspects)) {
+            uint32_t aspectIndex = GetAspectIndex(aspect);
+
+            // If the other storage's aspect is compressed we don't need to decompress anything
+            // in `this` and can just iterate through it, merging with `other`'s constant value for
+            // the aspect. For code simplicity this can be done with a call to Update().
+            if (other.mAspectCompressed[aspectIndex]) {
+                const U& otherData = other.DataInline(aspectIndex);
+                Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
+                       [&](const SubresourceRange& subrange, T* data) {
+                           mergeFunc(subrange, data, otherData);
+                       });
+                continue;
+            }
+
+            // Other doesn't have the aspect compressed so we must do at least per-layer merging.
+            if (mAspectCompressed[aspectIndex]) {
+                DecompressAspect(aspectIndex);
+            }
+
+            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+                // Similarly to above, use a fast path if other's layer is compressed.
+                if (other.LayerCompressed(aspectIndex, layer)) {
+                    const U& otherData = other.Data(aspectIndex, layer);
+                    Update(GetFullLayerRange(aspect, layer),
+                           [&](const SubresourceRange& subrange, T* data) {
+                               mergeFunc(subrange, data, otherData);
+                           });
+                    continue;
+                }
+
+                // Sad case, other is decompressed for this layer, do per-level merging.
+                if (LayerCompressed(aspectIndex, layer)) {
+                    DecompressLayer(aspectIndex, layer);
+                }
+
+                for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                    SubresourceRange updateRange =
+                        SubresourceRange::MakeSingle(aspect, layer, level);
+                    mergeFunc(updateRange, &Data(aspectIndex, layer, level),
+                              other.Data(aspectIndex, layer, level));
+                }
+
+                RecompressLayer(aspectIndex, layer);
+            }
+
+            RecompressAspect(aspectIndex);
+        }
+    }
+
+    template <typename T>
+    template <typename F>
+    void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+        for (Aspect aspect : IterateEnumMask(mAspects)) {
+            uint32_t aspectIndex = GetAspectIndex(aspect);
+
+            // Fastest path, call iterateFunc on the whole aspect at once.
+            if (mAspectCompressed[aspectIndex]) {
+                SubresourceRange range =
+                    SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+                iterateFunc(range, DataInline(aspectIndex));
+                continue;
+            }
+
+            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+                // Fast path, call iterateFunc on the whole array layer at once.
+                if (LayerCompressed(aspectIndex, layer)) {
+                    SubresourceRange range = GetFullLayerRange(aspect, layer);
+                    iterateFunc(range, Data(aspectIndex, layer));
+                    continue;
+                }
+
+                // Slow path, call iterateFunc for each mip level.
+                for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    iterateFunc(range, Data(aspectIndex, layer, level));
+                }
+            }
+        }
+    }
+
+    template <typename T>
+    const T& SubresourceStorage<T>::Get(Aspect aspect,
+                                        uint32_t arrayLayer,
+                                        uint32_t mipLevel) const {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
+        ASSERT(aspectIndex < GetAspectCount(mAspects));
+        ASSERT(arrayLayer < mArrayLayerCount);
+        ASSERT(mipLevel < mMipLevelCount);
+
+        // Fastest path, the aspect is compressed!
+        if (mAspectCompressed[aspectIndex]) {
+            return DataInline(aspectIndex);
+        }
+
+        // Fast path, the array layer is compressed.
+        if (LayerCompressed(aspectIndex, arrayLayer)) {
+            return Data(aspectIndex, arrayLayer);
+        }
+
+        return Data(aspectIndex, arrayLayer, mipLevel);
+    }
+
+    template <typename T>
+    Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
+        return mAspects;
+    }
+
+    template <typename T>
+    uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
+        return mArrayLayerCount;
+    }
+
+    template <typename T>
+    uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
+        return mMipLevelCount;
+    }
+
+    template <typename T>
+    bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
+        return mAspectCompressed[GetAspectIndex(aspect)];
+    }
+
+    template <typename T>
+    bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
+        return mAspectCompressed[GetAspectIndex(aspect)] ||
+               mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
+    }
+
+    template <typename T>
+    void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
+        ASSERT(mAspectCompressed[aspectIndex]);
+        const T& aspectData = DataInline(aspectIndex);
+        mAspectCompressed[aspectIndex] = false;
+
+        // Extra allocations are only needed when aspects are decompressed. Create them lazily.
+        if (mData == nullptr) {
+            ASSERT(mLayerCompressed == nullptr);
+
+            uint32_t aspectCount = GetAspectCount(mAspects);
+            mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
+            mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
+
+            for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
+                 layerIndex++) {
+                mLayerCompressed[layerIndex] = true;
+            }
+        }
+
+        ASSERT(LayerCompressed(aspectIndex, 0));
+        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+            Data(aspectIndex, layer) = aspectData;
+            ASSERT(LayerCompressed(aspectIndex, layer));
+        }
+    }
+
+    template <typename T>
+    void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        // All layers of the aspect must be compressed for the aspect to possibly recompress.
+        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+            if (!LayerCompressed(aspectIndex, layer)) {
+                return;
+            }
+        }
+
+        T layer0Data = Data(aspectIndex, 0);
+        for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
+            if (!(Data(aspectIndex, layer) == layer0Data)) {
+                return;
+            }
+        }
+
+        mAspectCompressed[aspectIndex] = true;
+        DataInline(aspectIndex) = layer0Data;
+    }
+
+    template <typename T>
+    void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+        ASSERT(LayerCompressed(aspectIndex, layer));
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        const T& layerData = Data(aspectIndex, layer);
+        LayerCompressed(aspectIndex, layer) = false;
+
+        // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
+        // allows starting the iteration at level 1.
+        for (uint32_t level = 1; level < mMipLevelCount; level++) {
+            Data(aspectIndex, layer, level) = layerData;
+        }
+    }
+
+    template <typename T>
+    void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+        ASSERT(!LayerCompressed(aspectIndex, layer));
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        const T& level0Data = Data(aspectIndex, layer, 0);
+
+        for (uint32_t level = 1; level < mMipLevelCount; level++) {
+            if (!(Data(aspectIndex, layer, level) == level0Data)) {
+                return;
+            }
+        }
+
+        LayerCompressed(aspectIndex, layer) = true;
+    }
+
+    template <typename T>
+    SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
+        return {aspect, {layer, 1}, {0, mMipLevelCount}};
+    }
+
+    template <typename T>
+    bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+    }
+
+    template <typename T>
+    bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+    }
+
+    template <typename T>
+    T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
+        ASSERT(mAspectCompressed[aspectIndex]);
+        return mInlineAspectData[aspectIndex];
+    }
+    template <typename T>
+    T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
+        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+    }
+    template <typename T>
+    const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
+        ASSERT(mAspectCompressed[aspectIndex]);
+        return mInlineAspectData[aspectIndex];
+    }
+    template <typename T>
+    const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
+                                         uint32_t layer,
+                                         uint32_t level) const {
+        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+        ASSERT(!mAspectCompressed[aspectIndex]);
+        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+    }
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SUBRESOURCESTORAGE_H_
diff --git a/src/dawn/native/Surface.cpp b/src/dawn/native/Surface.cpp
new file mode 100644
index 0000000..ff6fd07
--- /dev/null
+++ b/src/dawn/native/Surface.cpp
@@ -0,0 +1,270 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Surface.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/SwapChain.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include <windows.ui.core.h>
+#    include <windows.ui.xaml.controls.h>
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+#    include "dawn/common/xlib_with_undefs.h"
+#endif  // defined(DAWN_USE_X11)
+
+namespace dawn::native {
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        Surface::Type value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case Surface::Type::AndroidWindow:
+                s->Append("AndroidWindow");
+                break;
+            case Surface::Type::MetalLayer:
+                s->Append("MetalLayer");
+                break;
+            case Surface::Type::WindowsHWND:
+                s->Append("WindowsHWND");
+                break;
+            case Surface::Type::WindowsCoreWindow:
+                s->Append("WindowsCoreWindow");
+                break;
+            case Surface::Type::WindowsSwapChainPanel:
+                s->Append("WindowsSwapChainPanel");
+                break;
+            case Surface::Type::XlibWindow:
+                s->Append("XlibWindow");
+                break;
+        }
+        return {true};
+    }
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+    bool InheritsFromCAMetalLayer(void* obj);
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+    MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+                                         const SurfaceDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
+                        "Surface cannot be created with %s. nextInChain is not specified.",
+                        descriptor);
+
+        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                     wgpu::SType::SurfaceDescriptorFromAndroidNativeWindow,
+                                     wgpu::SType::SurfaceDescriptorFromMetalLayer,
+                                     wgpu::SType::SurfaceDescriptorFromWindowsHWND,
+                                     wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
+                                     wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
+                                     wgpu::SType::SurfaceDescriptorFromXlibWindow));
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+        const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &metalDesc);
+        if (metalDesc) {
+            // Check that the layer is a CAMetalLayer (or a derived class).
+            DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
+                            "Layer must be a CAMetalLayer");
+            return {};
+        }
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+        const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &androidDesc);
+        // Currently the best validation we can do since it's not possible to check if the pointer
+        // to a ANativeWindow is valid.
+        if (androidDesc) {
+            DAWN_INVALID_IF(androidDesc->window == nullptr, "Android window is not set.");
+            return {};
+        }
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    if defined(DAWN_PLATFORM_WIN32)
+        const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &hwndDesc);
+        if (hwndDesc) {
+            DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
+            return {};
+        }
+#    endif  // defined(DAWN_PLATFORM_WIN32)
+        const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &coreWindowDesc);
+        if (coreWindowDesc) {
+            // Validate the coreWindow by query for ICoreWindow interface
+            ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
+            DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
+                                FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
+                                           ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
+                            "Invalid CoreWindow");
+            return {};
+        }
+        const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+        if (swapChainPanelDesc) {
+            // Validate the swapChainPanel by querying for ISwapChainPanel interface
+            ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
+            DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
+                                FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
+                                           ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
+                            "Invalid SwapChainPanel");
+            return {};
+        }
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+        const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &xDesc);
+        if (xDesc) {
+            // Check the validity of the window by calling a getter function on the window that
+            // returns a status code. If the window is bad the call return a status of zero. We
+            // need to set a temporary X11 error handler while doing this because the default
+            // X11 error handler exits the program on any error.
+            XErrorHandler oldErrorHandler =
+                XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+            XWindowAttributes attributes;
+            int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
+                                              xDesc->window, &attributes);
+            XSetErrorHandler(oldErrorHandler);
+
+            DAWN_INVALID_IF(status == 0, "Invalid X Window");
+            return {};
+        }
+#endif  // defined(DAWN_USE_X11)
+
+        return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)",
+                                            descriptor->nextInChain->sType);
+    }
+
+    Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
+        : mInstance(instance) {
+        ASSERT(descriptor->nextInChain != nullptr);
+        const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+        const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+        const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+        const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+        const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+        const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &androidDesc);
+        FindInChain(descriptor->nextInChain, &metalDesc);
+        FindInChain(descriptor->nextInChain, &hwndDesc);
+        FindInChain(descriptor->nextInChain, &coreWindowDesc);
+        FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+        FindInChain(descriptor->nextInChain, &xDesc);
+        if (metalDesc) {
+            mType = Type::MetalLayer;
+            mMetalLayer = metalDesc->layer;
+        } else if (androidDesc) {
+            mType = Type::AndroidWindow;
+            mAndroidNativeWindow = androidDesc->window;
+        } else if (hwndDesc) {
+            mType = Type::WindowsHWND;
+            mHInstance = hwndDesc->hinstance;
+            mHWND = hwndDesc->hwnd;
+        } else if (coreWindowDesc) {
+#if defined(DAWN_PLATFORM_WINDOWS)
+            mType = Type::WindowsCoreWindow;
+            mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+        } else if (swapChainPanelDesc) {
+#if defined(DAWN_PLATFORM_WINDOWS)
+            mType = Type::WindowsSwapChainPanel;
+            mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+        } else if (xDesc) {
+            mType = Type::XlibWindow;
+            mXDisplay = xDesc->display;
+            mXWindow = xDesc->window;
+        } else {
+            UNREACHABLE();
+        }
+    }
+
+    Surface::~Surface() {
+        if (mSwapChain != nullptr) {
+            mSwapChain->DetachFromSurface();
+            mSwapChain = nullptr;
+        }
+    }
+
+    NewSwapChainBase* Surface::GetAttachedSwapChain() {
+        return mSwapChain.Get();
+    }
+
+    void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
+        mSwapChain = swapChain;
+    }
+
+    InstanceBase* Surface::GetInstance() {
+        return mInstance.Get();
+    }
+
+    Surface::Type Surface::GetType() const {
+        return mType;
+    }
+
+    void* Surface::GetAndroidNativeWindow() const {
+        ASSERT(mType == Type::AndroidWindow);
+        return mAndroidNativeWindow;
+    }
+
+    void* Surface::GetMetalLayer() const {
+        ASSERT(mType == Type::MetalLayer);
+        return mMetalLayer;
+    }
+
+    void* Surface::GetHInstance() const {
+        ASSERT(mType == Type::WindowsHWND);
+        return mHInstance;
+    }
+    void* Surface::GetHWND() const {
+        ASSERT(mType == Type::WindowsHWND);
+        return mHWND;
+    }
+
+    IUnknown* Surface::GetCoreWindow() const {
+        ASSERT(mType == Type::WindowsCoreWindow);
+#if defined(DAWN_PLATFORM_WINDOWS)
+        return mCoreWindow.Get();
+#else
+        return nullptr;
+#endif
+    }
+
+    IUnknown* Surface::GetSwapChainPanel() const {
+        ASSERT(mType == Type::WindowsSwapChainPanel);
+#if defined(DAWN_PLATFORM_WINDOWS)
+        return mSwapChainPanel.Get();
+#else
+        return nullptr;
+#endif
+    }
+
+    void* Surface::GetXDisplay() const {
+        ASSERT(mType == Type::XlibWindow);
+        return mXDisplay;
+    }
+    uint32_t Surface::GetXWindow() const {
+        ASSERT(mType == Type::XlibWindow);
+        return mXWindow;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Surface.h b/src/dawn/native/Surface.h
new file mode 100644
index 0000000..c5d6185
--- /dev/null
+++ b/src/dawn/native/Surface.h
@@ -0,0 +1,124 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SURFACE_H_
+#define DAWNNATIVE_SURFACE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/Platform.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include "dawn/native/d3d12/d3d12_platform.h"
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+// Forward declare IUnknown
+// GetCoreWindow needs to return an IUnknown pointer
+// non-windows platforms don't have this type
+struct IUnknown;
+
+namespace dawn::native {
+
+    MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+                                         const SurfaceDescriptor* descriptor);
+
+    // A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
+    // aren't used because they would cause compilation errors on other OSes (or require
+    // ObjectiveC).
+    // The surface is also used to store the current swapchain so that we can detach it when it is
+    // replaced.
+    class Surface final : public RefCounted {
+      public:
+        Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+
+        void SetAttachedSwapChain(NewSwapChainBase* swapChain);
+        NewSwapChainBase* GetAttachedSwapChain();
+
+        // These are valid to call on all Surfaces.
+        enum class Type {
+            AndroidWindow,
+            MetalLayer,
+            WindowsHWND,
+            WindowsCoreWindow,
+            WindowsSwapChainPanel,
+            XlibWindow,
+        };
+        Type GetType() const;
+        InstanceBase* GetInstance();
+
+        // Valid to call if the type is MetalLayer
+        void* GetMetalLayer() const;
+
+        // Valid to call if the type is Android
+        void* GetAndroidNativeWindow() const;
+
+        // Valid to call if the type is WindowsHWND
+        void* GetHInstance() const;
+        void* GetHWND() const;
+
+        // Valid to call if the type is WindowsCoreWindow
+        IUnknown* GetCoreWindow() const;
+
+        // Valid to call if the type is WindowsSwapChainPanel
+        IUnknown* GetSwapChainPanel() const;
+
+        // Valid to call if the type is WindowsXlib
+        void* GetXDisplay() const;
+        uint32_t GetXWindow() const;
+
+      private:
+        ~Surface() override;
+
+        Ref<InstanceBase> mInstance;
+        Type mType;
+
+        // The swapchain will set this to null when it is destroyed.
+        Ref<NewSwapChainBase> mSwapChain;
+
+        // MetalLayer
+        void* mMetalLayer = nullptr;
+
+        // ANativeWindow
+        void* mAndroidNativeWindow = nullptr;
+
+        // WindowsHwnd
+        void* mHInstance = nullptr;
+        void* mHWND = nullptr;
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+        // WindowsCoreWindow
+        ComPtr<IUnknown> mCoreWindow;
+
+        // WindowsSwapChainPanel
+        ComPtr<IUnknown> mSwapChainPanel;
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+        // Xlib
+        void* mXDisplay = nullptr;
+        uint32_t mXWindow = 0;
+    };
+
+    // Not defined in webgpu_absl_format.h/cpp because you can't forward-declare a nested type.
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        Surface::Type value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SURFACE_H_
diff --git a/src/dawn/native/Surface_metal.mm b/src/dawn/native/Surface_metal.mm
new file mode 100644
index 0000000..ecb5d88
--- /dev/null
+++ b/src/dawn/native/Surface_metal.mm
@@ -0,0 +1,30 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+#    error "Surface_metal.mm requires the Metal backend to be enabled."
+#endif  // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace dawn::native {
+
+    bool InheritsFromCAMetalLayer(void* obj) {
+        id<NSObject> object = static_cast<id>(obj);
+        return [object isKindOfClass:[CAMetalLayer class]];
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/SwapChain.cpp b/src/dawn/native/SwapChain.cpp
new file mode 100644
index 0000000..1bd5fd7
--- /dev/null
+++ b/src/dawn/native/SwapChain.cpp
@@ -0,0 +1,422 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+    namespace {
+
+        class ErrorSwapChain final : public SwapChainBase {
+          public:
+            ErrorSwapChain(DeviceBase* device) : SwapChainBase(device, ObjectBase::kError) {
+            }
+
+          private:
+            void APIConfigure(wgpu::TextureFormat format,
+                              wgpu::TextureUsage allowedUsage,
+                              uint32_t width,
+                              uint32_t height) override {
+                GetDevice()->ConsumedError(
+                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+            }
+
+            TextureViewBase* APIGetCurrentTextureView() override {
+                GetDevice()->ConsumedError(
+                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+                return TextureViewBase::MakeError(GetDevice());
+            }
+
+            void APIPresent() override {
+                GetDevice()->ConsumedError(
+                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+            }
+        };
+
+    }  // anonymous namespace
+
+    MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+                                           const Surface* surface,
+                                           const SwapChainDescriptor* descriptor) {
+        if (descriptor->implementation != 0) {
+            DAWN_INVALID_IF(surface != nullptr,
+                            "Exactly one of surface or implementation must be set");
+
+            DawnSwapChainImplementation* impl =
+                reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+
+            DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
+                                !impl->GetNextTexture || !impl->Present,
+                            "Implementation is incomplete");
+
+        } else {
+            DAWN_INVALID_IF(surface == nullptr,
+                            "At least one of surface or implementation must be set");
+
+            DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
+
+// TODO(crbug.com/dawn/160): Lift this restriction once wgpu::Instance::GetPreferredSurfaceFormat is
+// implemented.
+// TODO(dawn:286):
+#if defined(DAWN_PLATFORM_ANDROID)
+            constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
+#else
+            constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
+#endif  // !defined(DAWN_PLATFORM_ANDROID)
+            DAWN_INVALID_IF(descriptor->format != kRequireSwapChainFormat,
+                            "Format (%s) is not %s, which is (currently) the only accepted format.",
+                            descriptor->format, kRequireSwapChainFormat);
+
+            DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
+                            "Usage (%s) is not %s, which is (currently) the only accepted usage.",
+                            descriptor->usage, wgpu::TextureUsage::RenderAttachment);
+
+            DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
+                            "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
+                            descriptor->height);
+
+            DAWN_INVALID_IF(
+                descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
+                    descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
+                "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
+                "size (width: %u, height: %u).",
+                descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
+                device->GetLimits().v1.maxTextureDimension2D);
+        }
+
+        return {};
+    }
+
+    TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
+        TextureDescriptor desc;
+        desc.usage = swapChain->GetUsage();
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
+        desc.format = swapChain->GetFormat();
+        desc.mipLevelCount = 1;
+        desc.sampleCount = 1;
+
+        return desc;
+    }
+
+    // SwapChainBase
+
+    SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    SwapChainBase::~SwapChainBase() {
+    }
+
+    void SwapChainBase::DestroyImpl() {
+    }
+
+    // static
+    SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
+        return new ErrorSwapChain(device);
+    }
+
+    ObjectType SwapChainBase::GetType() const {
+        return ObjectType::SwapChain;
+    }
+
+    // OldSwapChainBase
+
+    OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
+        : SwapChainBase(device),
+          mImplementation(
+              *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+    }
+
+    OldSwapChainBase::~OldSwapChainBase() {
+        if (!IsError()) {
+            const auto& im = GetImplementation();
+            im.Destroy(im.userData);
+        }
+    }
+
+    void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+                                        wgpu::TextureUsage allowedUsage,
+                                        uint32_t width,
+                                        uint32_t height) {
+        if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
+            return;
+        }
+        ASSERT(!IsError());
+
+        allowedUsage |= wgpu::TextureUsage::Present;
+
+        mFormat = format;
+        mAllowedUsage = allowedUsage;
+        mWidth = width;
+        mHeight = height;
+        mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
+                                  static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+    }
+
+    TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
+        if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+            return TextureViewBase::MakeError(GetDevice());
+        }
+        ASSERT(!IsError());
+
+        // Return the same current texture view until Present is called.
+        if (mCurrentTextureView != nullptr) {
+            // Calling GetCurrentTextureView always returns a new reference so add it even when
+            // reuse the existing texture view.
+            mCurrentTextureView->Reference();
+            return mCurrentTextureView.Get();
+        }
+
+        // Create the backing texture and the view.
+        TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = mWidth;
+        descriptor.size.height = mHeight;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = mFormat;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = mAllowedUsage;
+
+        // Get the texture but remove the external refcount because it is never passed outside
+        // of dawn_native
+        mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
+
+        mCurrentTextureView = mCurrentTexture->APICreateView();
+        return mCurrentTextureView.Get();
+    }
+
+    void OldSwapChainBase::APIPresent() {
+        if (GetDevice()->ConsumedError(ValidatePresent())) {
+            return;
+        }
+        ASSERT(!IsError());
+
+        if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
+            return;
+        }
+
+        mImplementation.Present(mImplementation.userData);
+
+        mCurrentTexture = nullptr;
+        mCurrentTextureView = nullptr;
+    }
+
+    const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
+        ASSERT(!IsError());
+        return mImplementation;
+    }
+
+    MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+                                                   wgpu::TextureUsage allowedUsage,
+                                                   uint32_t width,
+                                                   uint32_t height) const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_TRY(ValidateTextureUsage(allowedUsage));
+        DAWN_TRY(ValidateTextureFormat(format));
+
+        DAWN_INVALID_IF(width == 0 || height == 0,
+                        "Configuration size (width: %u, height: %u) for %s is empty.", width,
+                        height, this);
+
+        return {};
+    }
+
+    MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        // If width is 0, it implies swap chain has never been configured
+        DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.",
+                        this);
+
+        return {};
+    }
+
+    MaybeError OldSwapChainBase::ValidatePresent() const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_INVALID_IF(
+            mCurrentTextureView == nullptr,
+            "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
+            this);
+
+        return {};
+    }
+
+    // Implementation of NewSwapChainBase
+
+    NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
+                                       Surface* surface,
+                                       const SwapChainDescriptor* descriptor)
+        : SwapChainBase(device),
+          mAttached(false),
+          mWidth(descriptor->width),
+          mHeight(descriptor->height),
+          mFormat(descriptor->format),
+          mUsage(descriptor->usage),
+          mPresentMode(descriptor->presentMode),
+          mSurface(surface) {
+    }
+
+    NewSwapChainBase::~NewSwapChainBase() {
+        if (mCurrentTextureView != nullptr) {
+            ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+                   TextureBase::TextureState::Destroyed);
+        }
+
+        ASSERT(!mAttached);
+    }
+
+    void NewSwapChainBase::DetachFromSurface() {
+        if (mAttached) {
+            DetachFromSurfaceImpl();
+            mSurface = nullptr;
+            mAttached = false;
+        }
+    }
+
+    void NewSwapChainBase::SetIsAttached() {
+        mAttached = true;
+    }
+
+    void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+                                        wgpu::TextureUsage allowedUsage,
+                                        uint32_t width,
+                                        uint32_t height) {
+        GetDevice()->ConsumedError(
+            DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
+    }
+
+    TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
+        Ref<TextureViewBase> result;
+        if (GetDevice()->ConsumedError(GetCurrentTextureView(), &result,
+                                       "calling %s.GetCurrentTextureView()", this)) {
+            return TextureViewBase::MakeError(GetDevice());
+        }
+        return result.Detach();
+    }
+
+    ResultOrError<Ref<TextureViewBase>> NewSwapChainBase::GetCurrentTextureView() {
+        DAWN_TRY(ValidateGetCurrentTextureView());
+
+        if (mCurrentTextureView != nullptr) {
+            // Calling GetCurrentTextureView always returns a new reference.
+            return mCurrentTextureView;
+        }
+
+        DAWN_TRY_ASSIGN(mCurrentTextureView, GetCurrentTextureViewImpl());
+
+        // Check that the return texture view matches exactly what was given for this descriptor.
+        ASSERT(mCurrentTextureView->GetTexture()->GetFormat().format == mFormat);
+        ASSERT(IsSubset(mUsage, mCurrentTextureView->GetTexture()->GetUsage()));
+        ASSERT(mCurrentTextureView->GetLevelCount() == 1);
+        ASSERT(mCurrentTextureView->GetLayerCount() == 1);
+        ASSERT(mCurrentTextureView->GetDimension() == wgpu::TextureViewDimension::e2D);
+        ASSERT(mCurrentTextureView->GetTexture()
+                   ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+                   .width == mWidth);
+        ASSERT(mCurrentTextureView->GetTexture()
+                   ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+                   .height == mHeight);
+
+        return mCurrentTextureView;
+    }
+
+    void NewSwapChainBase::APIPresent() {
+        if (GetDevice()->ConsumedError(ValidatePresent())) {
+            return;
+        }
+
+        if (GetDevice()->ConsumedError(PresentImpl())) {
+            return;
+        }
+
+        ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+               TextureBase::TextureState::Destroyed);
+        mCurrentTextureView = nullptr;
+    }
+
+    uint32_t NewSwapChainBase::GetWidth() const {
+        return mWidth;
+    }
+
+    uint32_t NewSwapChainBase::GetHeight() const {
+        return mHeight;
+    }
+
+    wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
+        return mFormat;
+    }
+
+    wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
+        return mUsage;
+    }
+
+    wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
+        return mPresentMode;
+    }
+
+    Surface* NewSwapChainBase::GetSurface() const {
+        return mSurface;
+    }
+
+    bool NewSwapChainBase::IsAttached() const {
+        return mAttached;
+    }
+
+    wgpu::BackendType NewSwapChainBase::GetBackendType() const {
+        return GetDevice()->GetAdapter()->GetBackendType();
+    }
+
+    MaybeError NewSwapChainBase::ValidatePresent() const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
+
+        DAWN_INVALID_IF(
+            mCurrentTextureView == nullptr,
+            "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
+            this);
+
+        return {};
+    }
+
+    MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+
+        return {};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/SwapChain.h b/src/dawn/native/SwapChain.h
new file mode 100644
index 0000000..48b8270
--- /dev/null
+++ b/src/dawn/native/SwapChain.h
@@ -0,0 +1,170 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SWAPCHAIN_H_
+#define DAWNNATIVE_SWAPCHAIN_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+                                           const Surface* surface,
+                                           const SwapChainDescriptor* descriptor);
+
+    TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
+
+    class SwapChainBase : public ApiObjectBase {
+      public:
+        SwapChainBase(DeviceBase* device);
+
+        static SwapChainBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        // Dawn API
+        virtual void APIConfigure(wgpu::TextureFormat format,
+                                  wgpu::TextureUsage allowedUsage,
+                                  uint32_t width,
+                                  uint32_t height) = 0;
+        virtual TextureViewBase* APIGetCurrentTextureView() = 0;
+        virtual void APIPresent() = 0;
+
+      protected:
+        SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+        ~SwapChainBase() override;
+        void DestroyImpl() override;
+    };
+
+    // The base class for implementation-based SwapChains that are deprecated.
+    class OldSwapChainBase : public SwapChainBase {
+      public:
+        OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
+
+        // Dawn API
+        void APIConfigure(wgpu::TextureFormat format,
+                          wgpu::TextureUsage allowedUsage,
+                          uint32_t width,
+                          uint32_t height) override;
+        TextureViewBase* APIGetCurrentTextureView() override;
+        void APIPresent() override;
+
+      protected:
+        ~OldSwapChainBase() override;
+        const DawnSwapChainImplementation& GetImplementation();
+        virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
+        virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
+
+      private:
+        MaybeError ValidateConfigure(wgpu::TextureFormat format,
+                                     wgpu::TextureUsage allowedUsage,
+                                     uint32_t width,
+                                     uint32_t height) const;
+        MaybeError ValidateGetCurrentTextureView() const;
+        MaybeError ValidatePresent() const;
+
+        DawnSwapChainImplementation mImplementation = {};
+        wgpu::TextureFormat mFormat = {};
+        wgpu::TextureUsage mAllowedUsage;
+        uint32_t mWidth = 0;
+        uint32_t mHeight = 0;
+        Ref<TextureBase> mCurrentTexture;
+        Ref<TextureViewBase> mCurrentTextureView;
+    };
+
+    // The base class for surface-based SwapChains that aren't ready yet.
+    class NewSwapChainBase : public SwapChainBase {
+      public:
+        NewSwapChainBase(DeviceBase* device,
+                         Surface* surface,
+                         const SwapChainDescriptor* descriptor);
+
+        // This is called when the swapchain is detached when one of the following happens:
+        //
+        //  - The surface it is attached to is being destroyed.
+        //  - The swapchain is being replaced by another one on the surface.
+        //
+        // Note that the surface has a Ref on the last swapchain that was used on it so the
+        // SwapChain destructor will only be called after one of the things above happens.
+        //
+        // The call for the detaching previous swapchain should be called inside the backend
+        // implementation of SwapChains. This is to allow them to acquire any resources before
+        // calling detach to make a seamless transition from the previous swapchain.
+        //
+        // Likewise the call for the swapchain being destroyed must be done in the backend's
+        // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
+        // destructor.
+        void DetachFromSurface();
+
+        void SetIsAttached();
+
+        // Dawn API
+        void APIConfigure(wgpu::TextureFormat format,
+                          wgpu::TextureUsage allowedUsage,
+                          uint32_t width,
+                          uint32_t height) override;
+        TextureViewBase* APIGetCurrentTextureView() override;
+        void APIPresent() override;
+
+        uint32_t GetWidth() const;
+        uint32_t GetHeight() const;
+        wgpu::TextureFormat GetFormat() const;
+        wgpu::TextureUsage GetUsage() const;
+        wgpu::PresentMode GetPresentMode() const;
+        Surface* GetSurface() const;
+        bool IsAttached() const;
+        wgpu::BackendType GetBackendType() const;
+
+      protected:
+        ~NewSwapChainBase() override;
+
+      private:
+        bool mAttached;
+        uint32_t mWidth;
+        uint32_t mHeight;
+        wgpu::TextureFormat mFormat;
+        wgpu::TextureUsage mUsage;
+        wgpu::PresentMode mPresentMode;
+
+        // This is a weak reference to the surface. If the surface is destroyed it will call
+        // DetachFromSurface and mSurface will be updated to nullptr.
+        Surface* mSurface = nullptr;
+        Ref<TextureViewBase> mCurrentTextureView;
+
+        MaybeError ValidatePresent() const;
+        MaybeError ValidateGetCurrentTextureView() const;
+
+        // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
+        // manner, starting with GetCurrentTextureViewImpl.
+
+        // The returned texture view must match the swapchain descriptor exactly.
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureView();
+        virtual ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() = 0;
+        // The call to present must destroy the current view's texture so further access to it are
+        // invalid.
+        virtual MaybeError PresentImpl() = 0;
+
+        // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
+        // called no other virtual method can be called.
+        virtual void DetachFromSurfaceImpl() = 0;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_SWAPCHAIN_H_
diff --git a/src/dawn/native/Texture.cpp b/src/dawn/native/Texture.cpp
new file mode 100644
index 0000000..f4c4fc5
--- /dev/null
+++ b/src/dawn/native/Texture.cpp
@@ -0,0 +1,866 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Texture.h"
+
+#include <algorithm>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+    namespace {
+
+        MaybeError ValidateTextureViewFormatCompatibility(const DeviceBase* device,
+                                                          const Format& format,
+                                                          wgpu::TextureFormat viewFormatEnum) {
+            const Format* viewFormat;
+            DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(viewFormatEnum));
+
+            DAWN_INVALID_IF(!format.ViewCompatibleWith(*viewFormat),
+                            "The texture view format (%s) is not texture view format compatible "
+                            "with the texture format (%s).",
+                            viewFormatEnum, format.format);
+            return {};
+        }
+
+        MaybeError ValidateCanViewTextureAs(const DeviceBase* device,
+                                            const TextureBase* texture,
+                                            const Format& viewFormat,
+                                            wgpu::TextureAspect aspect) {
+            const Format& format = texture->GetFormat();
+
+            if (aspect != wgpu::TextureAspect::All) {
+                wgpu::TextureFormat aspectFormat = format.GetAspectInfo(aspect).format;
+                if (viewFormat.format == aspectFormat) {
+                    return {};
+                } else {
+                    return DAWN_FORMAT_VALIDATION_ERROR(
+                        "The view format (%s) is not compatible with %s of %s (%s).",
+                        viewFormat.format, aspect, format.format, aspectFormat);
+                }
+            }
+
+            if (format.format == viewFormat.format) {
+                return {};
+            }
+
+            const FormatSet& compatibleViewFormats = texture->GetViewFormats();
+            if (compatibleViewFormats[viewFormat]) {
+                // Validation of this list is done on texture creation, so we don't need to
+                // handle the case where a format is in the list, but not compatible.
+                return {};
+            }
+
+            // |viewFormat| is not in the list. Check compatibility to generate an error message
+            // depending on whether it could be compatible, but needs to be explicitly listed,
+            // or it could never be compatible.
+            if (!format.ViewCompatibleWith(viewFormat)) {
+                // The view format isn't compatible with the format at all. Return an error
+                // that indicates this, in addition to reporting that it's missing from the
+                // list.
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "The texture view format (%s) is not compatible with the "
+                    "texture format (%s)."
+                    "The formats must be compatible, and the view format "
+                    "must be passed in the list of view formats on texture creation.",
+                    viewFormat.format, format.format);
+            } else {
+                // The view format is compatible, but not in the list.
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "%s was not created with the texture view format (%s) "
+                    "in the list of compatible view formats.",
+                    texture, viewFormat.format);
+            }
+            return {};
+        }
+
+        bool IsTextureViewDimensionCompatibleWithTextureDimension(
+            wgpu::TextureViewDimension textureViewDimension,
+            wgpu::TextureDimension textureDimension) {
+            switch (textureViewDimension) {
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e2DArray:
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    return textureDimension == wgpu::TextureDimension::e2D;
+
+                case wgpu::TextureViewDimension::e3D:
+                    return textureDimension == wgpu::TextureDimension::e3D;
+
+                case wgpu::TextureViewDimension::e1D:
+                    return textureDimension == wgpu::TextureDimension::e1D;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        bool IsArrayLayerValidForTextureViewDimension(
+            wgpu::TextureViewDimension textureViewDimension,
+            uint32_t textureViewArrayLayer) {
+            switch (textureViewDimension) {
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e3D:
+                    return textureViewArrayLayer == 1u;
+                case wgpu::TextureViewDimension::e2DArray:
+                    return true;
+                case wgpu::TextureViewDimension::Cube:
+                    return textureViewArrayLayer == 6u;
+                case wgpu::TextureViewDimension::CubeArray:
+                    return textureViewArrayLayer % 6 == 0;
+                case wgpu::TextureViewDimension::e1D:
+                    return textureViewArrayLayer == 1u;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
+                                       wgpu::TextureUsage usage,
+                                       const Format* format) {
+            DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+                            "The sample count (%u) of the texture is not supported.",
+                            descriptor->sampleCount);
+
+            if (descriptor->sampleCount > 1) {
+                DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
+                                "The mip level count (%u) of a multisampled texture is not 1.",
+                                descriptor->mipLevelCount);
+
+                // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
+                // Multisampled 2D array texture is not supported because on Metal it requires the
+                // version of macOS be greater than 10.14.
+                DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                                "The dimension (%s) of a multisampled texture is not 2D.",
+                                descriptor->dimension);
+
+                DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
+                                "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
+                                descriptor->size.depthOrArrayLayers);
+
+                DAWN_INVALID_IF(!format->supportsMultisample,
+                                "The texture format (%s) does not support multisampling.",
+                                format->format);
+
+                // Compressed formats are not renderable. They cannot support multisample.
+                ASSERT(!format->isCompressed);
+
+                DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
+                                "The sample count (%u) of a storage textures is not 1.",
+                                descriptor->sampleCount);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateTextureViewDimensionCompatibility(
+            const TextureBase* texture,
+            const TextureViewDescriptor* descriptor) {
+            DAWN_INVALID_IF(
+                !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
+                                                          descriptor->arrayLayerCount),
+                "The dimension (%s) of the texture view is not compatible with the layer count "
+                "(%u) of %s.",
+                descriptor->dimension, descriptor->arrayLayerCount, texture);
+
+            DAWN_INVALID_IF(
+                !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
+                                                                      texture->GetDimension()),
+                "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
+                "of %s.",
+                descriptor->dimension, texture->GetDimension(), texture);
+
+            DAWN_INVALID_IF(texture->GetSampleCount() > 1 &&
+                                descriptor->dimension != wgpu::TextureViewDimension::e2D,
+                            "The dimension (%s) of the multisampled texture view is not %s.",
+                            descriptor->dimension, wgpu::TextureViewDimension::e2D);
+
+            switch (descriptor->dimension) {
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    DAWN_INVALID_IF(
+                        texture->GetSize().width != texture->GetSize().height,
+                        "A %s texture view is not compatible with %s because the texture's width "
+                        "(%u) and height (%u) are not equal.",
+                        descriptor->dimension, texture, texture->GetSize().width,
+                        texture->GetSize().height);
+                    break;
+
+                case wgpu::TextureViewDimension::e1D:
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e2DArray:
+                case wgpu::TextureViewDimension::e3D:
+                    break;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateTextureSize(const DeviceBase* device,
+                                       const TextureDescriptor* descriptor,
+                                       const Format* format) {
+            ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
+                   descriptor->size.depthOrArrayLayers != 0);
+            const CombinedLimits& limits = device->GetLimits();
+            Extent3D maxExtent;
+            switch (descriptor->dimension) {
+                case wgpu::TextureDimension::e1D:
+                    maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
+                    break;
+                case wgpu::TextureDimension::e2D:
+                    maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
+                                 limits.v1.maxTextureArrayLayers};
+                    break;
+                case wgpu::TextureDimension::e3D:
+                    maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
+                                 limits.v1.maxTextureDimension3D};
+                    break;
+            }
+            DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
+                                descriptor->size.height > maxExtent.height ||
+                                descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
+                            "Texture size (%s) exceeded maximum texture size (%s).",
+                            &descriptor->size, &maxExtent);
+
+            switch (descriptor->dimension) {
+                case wgpu::TextureDimension::e1D:
+                    DAWN_INVALID_IF(
+                        descriptor->mipLevelCount != 1,
+                        "Texture mip level count (%u) is more than 1 when its dimension is %s.",
+                        descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
+                    break;
+                case wgpu::TextureDimension::e2D: {
+                    uint32_t maxMippedDimension =
+                        std::max(descriptor->size.width, descriptor->size.height);
+                    DAWN_INVALID_IF(
+                        Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+                        "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+                        descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+                    break;
+                }
+                case wgpu::TextureDimension::e3D: {
+                    uint32_t maxMippedDimension = std::max(
+                        descriptor->size.width,
+                        std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
+                    DAWN_INVALID_IF(
+                        Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+                        "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+                        descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+                    break;
+                }
+            }
+
+            if (format->isCompressed) {
+                const TexelBlockInfo& blockInfo =
+                    format->GetAspectInfo(wgpu::TextureAspect::All).block;
+                DAWN_INVALID_IF(
+                    descriptor->size.width % blockInfo.width != 0 ||
+                        descriptor->size.height % blockInfo.height != 0,
+                    "The size (%s) of the texture is not a multiple of the block width (%u) and "
+                    "height (%u) of the texture format (%s).",
+                    &descriptor->size, blockInfo.width, blockInfo.height, format->format);
+            }
+
+            return {};
+        }
+
+        MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
+                                        wgpu::TextureUsage usage,
+                                        const Format* format) {
+            DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
+
+            DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
+
+            constexpr wgpu::TextureUsage kValidCompressedUsages =
+                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+                wgpu::TextureUsage::CopyDst;
+            DAWN_INVALID_IF(
+                format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
+                "The texture usage (%s) is incompatible with the compressed texture format (%s).",
+                usage, format->format);
+
+            DAWN_INVALID_IF(
+                !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
+                "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
+                "format (%s).",
+                usage, wgpu::TextureUsage::RenderAttachment, format->format);
+
+            DAWN_INVALID_IF(
+                !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
+                "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
+                usage, wgpu::TextureUsage::StorageBinding, format->format);
+
+            // Only allows simple readonly texture usages.
+            constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
+                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
+            DAWN_INVALID_IF(
+                format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+                "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
+                format->format);
+
+            return {};
+        }
+
+    }  // anonymous namespace
+
+    MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+                                         const TextureDescriptor* descriptor) {
+        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                     wgpu::SType::DawnTextureInternalUsageDescriptor));
+
+        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+        DAWN_INVALID_IF(
+            internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
+            "The dawn-internal-usages feature is not enabled");
+
+        const Format* format;
+        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+
+        for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+            DAWN_TRY_CONTEXT(
+                ValidateTextureViewFormatCompatibility(device, *format, descriptor->viewFormats[i]),
+                "validating viewFormats[%u]", i);
+        }
+
+        wgpu::TextureUsage usage = descriptor->usage;
+        if (internalUsageDesc != nullptr) {
+            usage |= internalUsageDesc->internalUsage;
+        }
+
+        DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
+        DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
+        DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
+
+        DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
+                            descriptor->size.depthOrArrayLayers == 0 ||
+                            descriptor->mipLevelCount == 0,
+                        "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(
+            descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
+            "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
+            descriptor->dimension, format->format);
+
+        // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
+        // doesn't support depth/stencil formats on 3D textures.
+        DAWN_INVALID_IF(
+            descriptor->dimension != wgpu::TextureDimension::e2D &&
+                (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
+            "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
+            descriptor->dimension, format->format);
+
+        DAWN_TRY(ValidateTextureSize(device, descriptor, format));
+
+        // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
+        // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
+        DAWN_INVALID_IF(
+            device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
+                descriptor->mipLevelCount > 1 &&
+                device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
+            "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
+            "disabled on Metal.");
+
+        DAWN_INVALID_IF(
+            device->IsToggleEnabled(Toggle::DisableR8RG8Mipmaps) && descriptor->mipLevelCount > 1 &&
+                (descriptor->format == wgpu::TextureFormat::R8Unorm ||
+                 descriptor->format == wgpu::TextureFormat::RG8Unorm),
+            "https://crbug.com/dawn/1071: r8unorm and rg8unorm textures with more than one mip "
+            "level are disabled on Metal.");
+
+        return {};
+    }
+
+    MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+                                             const TextureBase* texture,
+                                             const TextureViewDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+        // Parent texture should have been already validated.
+        ASSERT(texture);
+        ASSERT(!texture->IsError());
+
+        DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
+        DAWN_TRY(ValidateTextureFormat(descriptor->format));
+        DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+
+        const Format& format = texture->GetFormat();
+        const Format* viewFormat;
+        DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(descriptor->format));
+
+        DAWN_INVALID_IF(
+            SelectFormatAspects(format, descriptor->aspect) == Aspect::None,
+            "Texture format (%s) does not have the texture view's selected aspect (%s).",
+            format.format, descriptor->aspect);
+
+        DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
+                        "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
+                        descriptor->arrayLayerCount, descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(
+            uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
+                uint64_t(texture->GetArrayLayers()),
+            "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
+            "texture's array layer count (%u).",
+            descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
+
+        DAWN_INVALID_IF(
+            uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
+                uint64_t(texture->GetNumMipLevels()),
+            "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
+            "texture's mip level count (%u).",
+            descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
+
+        DAWN_TRY(ValidateCanViewTextureAs(device, texture, *viewFormat, descriptor->aspect));
+        DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
+
+        return {};
+    }
+
+    TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
+        const TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        ASSERT(texture);
+
+        TextureViewDescriptor desc = {};
+        if (descriptor) {
+            desc = *descriptor;
+        }
+
+        // The default value for the view dimension depends on the texture's dimension with a
+        // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
+        if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
+            switch (texture->GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    desc.dimension = wgpu::TextureViewDimension::e1D;
+                    break;
+
+                case wgpu::TextureDimension::e2D:
+                    desc.dimension = wgpu::TextureViewDimension::e2D;
+                    break;
+
+                case wgpu::TextureDimension::e3D:
+                    desc.dimension = wgpu::TextureViewDimension::e3D;
+                    break;
+            }
+        }
+
+        if (desc.format == wgpu::TextureFormat::Undefined) {
+            const Format& format = texture->GetFormat();
+            Aspect aspects = SelectFormatAspects(format, desc.aspect);
+            if (HasOneBit(aspects)) {
+                desc.format = format.GetAspectInfo(aspects).format;
+            } else {
+                desc.format = format.format;
+            }
+        }
+        if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
+            switch (desc.dimension) {
+                case wgpu::TextureViewDimension::e1D:
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e3D:
+                    desc.arrayLayerCount = 1;
+                    break;
+                case wgpu::TextureViewDimension::Cube:
+                    desc.arrayLayerCount = 6;
+                    break;
+                case wgpu::TextureViewDimension::e2DArray:
+                case wgpu::TextureViewDimension::CubeArray:
+                    desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
+                    break;
+                default:
+                    // We don't put UNREACHABLE() here because we validate enums only after this
+                    // function sets default values. Otherwise, the UNREACHABLE() will be hit.
+                    break;
+            }
+        }
+
+        if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
+            desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
+        }
+        return desc;
+    }
+
+    // WebGPU only supports sample counts of 1 and 4. We could expand to more based on
+    // platform support, but it would probably be a feature.
+    bool IsValidSampleCount(uint32_t sampleCount) {
+        switch (sampleCount) {
+            case 1:
+            case 4:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    // TextureBase
+
+    TextureBase::TextureBase(DeviceBase* device,
+                             const TextureDescriptor* descriptor,
+                             TextureState state)
+        : ApiObjectBase(device, descriptor->label),
+          mDimension(descriptor->dimension),
+          mFormat(device->GetValidInternalFormat(descriptor->format)),
+          mSize(descriptor->size),
+          mMipLevelCount(descriptor->mipLevelCount),
+          mSampleCount(descriptor->sampleCount),
+          mUsage(descriptor->usage),
+          mInternalUsage(mUsage),
+          mState(state) {
+        uint32_t subresourceCount =
+            mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
+        mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
+
+        for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+            if (descriptor->viewFormats[i] == descriptor->format) {
+                // Skip our own format, so the backends don't allocate the texture for
+                // reinterpretation if it's not needed.
+                continue;
+            }
+            mViewFormats[device->GetValidInternalFormat(descriptor->viewFormats[i])] = true;
+        }
+
+        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &internalUsageDesc);
+        if (internalUsageDesc != nullptr) {
+            mInternalUsage |= internalUsageDesc->internalUsage;
+        }
+        TrackInDevice();
+    }
+
+    static Format kUnusedFormat;
+
+    TextureBase::TextureBase(DeviceBase* device, TextureState state)
+        : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
+        TrackInDevice();
+    }
+
+    TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
+    }
+
+    void TextureBase::DestroyImpl() {
+        mState = TextureState::Destroyed;
+    }
+
+    // static
+    TextureBase* TextureBase::MakeError(DeviceBase* device) {
+        return new TextureBase(device, ObjectBase::kError);
+    }
+
+    ObjectType TextureBase::GetType() const {
+        return ObjectType::Texture;
+    }
+
+    wgpu::TextureDimension TextureBase::GetDimension() const {
+        ASSERT(!IsError());
+        return mDimension;
+    }
+
+    const Format& TextureBase::GetFormat() const {
+        ASSERT(!IsError());
+        return mFormat;
+    }
+    const FormatSet& TextureBase::GetViewFormats() const {
+        ASSERT(!IsError());
+        return mViewFormats;
+    }
+    const Extent3D& TextureBase::GetSize() const {
+        ASSERT(!IsError());
+        return mSize;
+    }
+    uint32_t TextureBase::GetWidth() const {
+        ASSERT(!IsError());
+        return mSize.width;
+    }
+    uint32_t TextureBase::GetHeight() const {
+        ASSERT(!IsError());
+        return mSize.height;
+    }
+    uint32_t TextureBase::GetDepth() const {
+        ASSERT(!IsError());
+        ASSERT(mDimension == wgpu::TextureDimension::e3D);
+        return mSize.depthOrArrayLayers;
+    }
+    uint32_t TextureBase::GetArrayLayers() const {
+        ASSERT(!IsError());
+        if (mDimension == wgpu::TextureDimension::e3D) {
+            return 1;
+        }
+        return mSize.depthOrArrayLayers;
+    }
+    uint32_t TextureBase::GetNumMipLevels() const {
+        ASSERT(!IsError());
+        return mMipLevelCount;
+    }
+    SubresourceRange TextureBase::GetAllSubresources() const {
+        ASSERT(!IsError());
+        return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
+    }
+    uint32_t TextureBase::GetSampleCount() const {
+        ASSERT(!IsError());
+        return mSampleCount;
+    }
+    uint32_t TextureBase::GetSubresourceCount() const {
+        ASSERT(!IsError());
+        return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
+    }
+    wgpu::TextureUsage TextureBase::GetUsage() const {
+        ASSERT(!IsError());
+        return mUsage;
+    }
+    wgpu::TextureUsage TextureBase::GetInternalUsage() const {
+        ASSERT(!IsError());
+        return mInternalUsage;
+    }
+
+    TextureBase::TextureState TextureBase::GetTextureState() const {
+        ASSERT(!IsError());
+        return mState;
+    }
+
+    uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
+                                              uint32_t arraySlice,
+                                              Aspect aspect) const {
+        ASSERT(HasOneBit(aspect));
+        return mipLevel +
+               GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
+    }
+
+    bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
+        ASSERT(!IsError());
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t arrayLayer = range.baseArrayLayer;
+                 arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+                for (uint32_t mipLevel = range.baseMipLevel;
+                     mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+                    uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+                    ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+                    if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
+                        return false;
+                    }
+                }
+            }
+        }
+        return true;
+    }
+
+    void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
+                                                         const SubresourceRange& range) {
+        ASSERT(!IsError());
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t arrayLayer = range.baseArrayLayer;
+                 arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+                for (uint32_t mipLevel = range.baseMipLevel;
+                     mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+                    uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+                    ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+                    mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+                }
+            }
+        }
+    }
+
+    MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
+        ASSERT(!IsError());
+        DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
+                        this);
+        return {};
+    }
+
+    bool TextureBase::IsMultisampledTexture() const {
+        ASSERT(!IsError());
+        return mSampleCount > 1;
+    }
+
+    Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
+        Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
+        if (mDimension == wgpu::TextureDimension::e1D) {
+            return extent;
+        }
+
+        extent.height = std::max(mSize.height >> level, 1u);
+        if (mDimension == wgpu::TextureDimension::e2D) {
+            return extent;
+        }
+
+        extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+        return extent;
+    }
+
+    Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
+        Extent3D extent = GetMipLevelVirtualSize(level);
+
+        // Compressed Textures will have paddings if their width or height is not a multiple of
+        // 4 at non-zero mipmap levels.
+        if (mFormat.isCompressed && level != 0) {
+            // If |level| is non-zero, then each dimension of |extent| is at most half of
+            // the max texture dimension. Computations here which add the block width/height
+            // to the extent cannot overflow.
+            const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
+            extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
+            extent.height =
+                (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
+        }
+
+        return extent;
+    }
+
+    Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
+                                                     const Origin3D& origin,
+                                                     const Extent3D& extent) const {
+        const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
+        ASSERT(origin.x <= virtualSizeAtLevel.width);
+        ASSERT(origin.y <= virtualSizeAtLevel.height);
+        uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
+                                              ? (virtualSizeAtLevel.width - origin.x)
+                                              : extent.width;
+        uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
+                                               ? (virtualSizeAtLevel.height - origin.y)
+                                               : extent.height;
+        return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
+    }
+
+    ResultOrError<Ref<TextureViewBase>> TextureBase::CreateView(
+        const TextureViewDescriptor* descriptor) {
+        return GetDevice()->CreateTextureView(this, descriptor);
+    }
+
+    TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+
+        Ref<TextureViewBase> result;
+        if (device->ConsumedError(CreateView(descriptor), &result, "calling %s.CreateView(%s).",
+                                  this, descriptor)) {
+            return TextureViewBase::MakeError(device);
+        }
+        return result.Detach();
+    }
+
+    void TextureBase::APIDestroy() {
+        if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
+            return;
+        }
+        ASSERT(!IsError());
+        Destroy();
+    }
+
+    MaybeError TextureBase::ValidateDestroy() const {
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        return {};
+    }
+
+    // TextureViewBase
+
+    TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
+        : ApiObjectBase(texture->GetDevice(), descriptor->label),
+          mTexture(texture),
+          mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
+          mDimension(descriptor->dimension),
+          mRange({ConvertViewAspect(mFormat, descriptor->aspect),
+                  {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
+                  {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
+        TrackInDevice();
+    }
+
+    TextureViewBase::TextureViewBase(TextureBase* texture)
+        : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
+          mTexture(texture),
+          mFormat(kUnusedFormat) {
+        TrackInDevice();
+    }
+
+    TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
+    }
+
+    void TextureViewBase::DestroyImpl() {
+    }
+
+    // static
+    TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
+        return new TextureViewBase(device, ObjectBase::kError);
+    }
+
+    ObjectType TextureViewBase::GetType() const {
+        return ObjectType::TextureView;
+    }
+
+    const TextureBase* TextureViewBase::GetTexture() const {
+        ASSERT(!IsError());
+        return mTexture.Get();
+    }
+
+    TextureBase* TextureViewBase::GetTexture() {
+        ASSERT(!IsError());
+        return mTexture.Get();
+    }
+
+    Aspect TextureViewBase::GetAspects() const {
+        ASSERT(!IsError());
+        return mRange.aspects;
+    }
+
+    const Format& TextureViewBase::GetFormat() const {
+        ASSERT(!IsError());
+        return mFormat;
+    }
+
+    wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
+        ASSERT(!IsError());
+        return mDimension;
+    }
+
+    uint32_t TextureViewBase::GetBaseMipLevel() const {
+        ASSERT(!IsError());
+        return mRange.baseMipLevel;
+    }
+
+    uint32_t TextureViewBase::GetLevelCount() const {
+        ASSERT(!IsError());
+        return mRange.levelCount;
+    }
+
+    uint32_t TextureViewBase::GetBaseArrayLayer() const {
+        ASSERT(!IsError());
+        return mRange.baseArrayLayer;
+    }
+
+    uint32_t TextureViewBase::GetLayerCount() const {
+        ASSERT(!IsError());
+        return mRange.layerCount;
+    }
+
+    const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
+        ASSERT(!IsError());
+        return mRange;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Texture.h b/src/dawn/native/Texture.h
new file mode 100644
index 0000000..4024c82
--- /dev/null
+++ b/src/dawn/native/Texture.h
@@ -0,0 +1,163 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TEXTURE_H_
+#define DAWNNATIVE_TEXTURE_H_
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Subresource.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+    MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+                                         const TextureDescriptor* descriptor);
+    MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+                                             const TextureBase* texture,
+                                             const TextureViewDescriptor* descriptor);
+    TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
+        const TextureBase* texture,
+        const TextureViewDescriptor* descriptor);
+
+    bool IsValidSampleCount(uint32_t sampleCount);
+
+    static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
+        kReadOnlyRenderAttachment;
+
+    class TextureBase : public ApiObjectBase {
+      public:
+        enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
+        enum class ClearValue { Zero, NonZero };
+        TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
+
+        static TextureBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        wgpu::TextureDimension GetDimension() const;
+        const Format& GetFormat() const;
+        const FormatSet& GetViewFormats() const;
+        const Extent3D& GetSize() const;
+        uint32_t GetWidth() const;
+        uint32_t GetHeight() const;
+        uint32_t GetDepth() const;
+        uint32_t GetArrayLayers() const;
+        uint32_t GetNumMipLevels() const;
+        SubresourceRange GetAllSubresources() const;
+        uint32_t GetSampleCount() const;
+        uint32_t GetSubresourceCount() const;
+
+        // |GetUsage| returns the usage with which the texture was created using the base WebGPU
+        // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
+        // returns the union of base usage and the usages added by the extension.
+        wgpu::TextureUsage GetUsage() const;
+        wgpu::TextureUsage GetInternalUsage() const;
+
+        TextureState GetTextureState() const;
+        uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
+        bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
+        void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
+
+        MaybeError ValidateCanUseInSubmitNow() const;
+
+        bool IsMultisampledTexture() const;
+
+        // For a texture with non-block-compressed texture format, its physical size is always equal
+        // to its virtual size. For a texture with block compressed texture format, the physical
+        // size is the one with paddings if necessary, which is always a multiple of the block size
+        // and used in texture copying. The virtual size is the one without paddings, which is not
+        // required to be a multiple of the block size and used in texture sampling.
+        Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
+        Extent3D GetMipLevelVirtualSize(uint32_t level) const;
+        Extent3D ClampToMipLevelVirtualSize(uint32_t level,
+                                            const Origin3D& origin,
+                                            const Extent3D& extent) const;
+
+        ResultOrError<Ref<TextureViewBase>> CreateView(
+            const TextureViewDescriptor* descriptor = nullptr);
+
+        // Dawn API
+        TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
+        void APIDestroy();
+
+      protected:
+        // Constructor used only for mocking and testing.
+        TextureBase(DeviceBase* device, TextureState state);
+        void DestroyImpl() override;
+
+      private:
+        TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        MaybeError ValidateDestroy() const;
+        wgpu::TextureDimension mDimension;
+        const Format& mFormat;
+        FormatSet mViewFormats;
+        Extent3D mSize;
+        uint32_t mMipLevelCount;
+        uint32_t mSampleCount;
+        wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
+        wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
+        TextureState mState;
+
+        // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
+        std::vector<bool> mIsSubresourceContentInitializedAtIndex;
+    };
+
+    class TextureViewBase : public ApiObjectBase {
+      public:
+        TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+        static TextureViewBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        const TextureBase* GetTexture() const;
+        TextureBase* GetTexture();
+
+        Aspect GetAspects() const;
+        const Format& GetFormat() const;
+        wgpu::TextureViewDimension GetDimension() const;
+        uint32_t GetBaseMipLevel() const;
+        uint32_t GetLevelCount() const;
+        uint32_t GetBaseArrayLayer() const;
+        uint32_t GetLayerCount() const;
+        const SubresourceRange& GetSubresourceRange() const;
+
+      protected:
+        // Constructor used only for mocking and testing.
+        TextureViewBase(TextureBase* texture);
+        void DestroyImpl() override;
+
+      private:
+        TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        Ref<TextureBase> mTexture;
+
+        const Format& mFormat;
+        wgpu::TextureViewDimension mDimension;
+        SubresourceRange mRange;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_TEXTURE_H_
diff --git a/src/dawn/native/TintUtils.cpp b/src/dawn/native/TintUtils.cpp
new file mode 100644
index 0000000..d84c982
--- /dev/null
+++ b/src/dawn/native/TintUtils.cpp
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/Device.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native {
+
+    namespace {
+
+        thread_local DeviceBase* tlDevice = nullptr;
+
+        void TintICEReporter(const tint::diag::List& diagnostics) {
+            if (tlDevice) {
+                tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
+            }
+        }
+
+        bool InitializeTintErrorReporter() {
+            tint::SetInternalCompilerErrorReporter(&TintICEReporter);
+            return true;
+        }
+
+    }  // namespace
+
+    ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
+        // Call tint::SetInternalCompilerErrorReporter() the first time
+        // this constructor is called. Static initialization is
+        // guaranteed to be thread-safe, and only occur once.
+        static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
+        (void)init_once_tint_error_reporter;
+
+        // Shouldn't have overlapping instances of this handler.
+        ASSERT(tlDevice == nullptr);
+        tlDevice = device;
+    }
+
+    ScopedTintICEHandler::~ScopedTintICEHandler() {
+        tlDevice = nullptr;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/TintUtils.h b/src/dawn/native/TintUtils.h
new file mode 100644
index 0000000..2dcb8f3
--- /dev/null
+++ b/src/dawn/native/TintUtils.h
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TINTUTILS_H_
+#define DAWNNATIVE_TINTUTILS_H_
+
+#include "dawn/common/NonCopyable.h"
+
+namespace dawn::native {
+
+    class DeviceBase;
+
+    // Indicates that for the lifetime of this object tint internal compiler errors should be
+    // reported to the given device.
+    class ScopedTintICEHandler : public NonCopyable {
+      public:
+        ScopedTintICEHandler(DeviceBase* device);
+        ~ScopedTintICEHandler();
+
+      private:
+        ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_TEXTURE_H_
diff --git a/src/dawn/native/ToBackend.h b/src/dawn/native/ToBackend.h
new file mode 100644
index 0000000..a2a69cb
--- /dev/null
+++ b/src/dawn/native/ToBackend.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TOBACKEND_H_
+#define DAWNNATIVE_TOBACKEND_H_
+
+#include "dawn/native/Forward.h"
+
+namespace dawn::native {
+
+    // ToBackendTraits implements the mapping from base type to member type of BackendTraits
+    template <typename T, typename BackendTraits>
+    struct ToBackendTraits;
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<AdapterBase, BackendTraits> {
+        using BackendType = typename BackendTraits::AdapterType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<BindGroupBase, BackendTraits> {
+        using BackendType = typename BackendTraits::BindGroupType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
+        using BackendType = typename BackendTraits::BindGroupLayoutType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<BufferBase, BackendTraits> {
+        using BackendType = typename BackendTraits::BufferType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<CommandBufferBase, BackendTraits> {
+        using BackendType = typename BackendTraits::CommandBufferType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
+        using BackendType = typename BackendTraits::ComputePipelineType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<DeviceBase, BackendTraits> {
+        using BackendType = typename BackendTraits::DeviceType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
+        using BackendType = typename BackendTraits::PipelineLayoutType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<QuerySetBase, BackendTraits> {
+        using BackendType = typename BackendTraits::QuerySetType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<QueueBase, BackendTraits> {
+        using BackendType = typename BackendTraits::QueueType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
+        using BackendType = typename BackendTraits::RenderPipelineType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
+        using BackendType = typename BackendTraits::ResourceHeapType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<SamplerBase, BackendTraits> {
+        using BackendType = typename BackendTraits::SamplerType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
+        using BackendType = typename BackendTraits::ShaderModuleType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<StagingBufferBase, BackendTraits> {
+        using BackendType = typename BackendTraits::StagingBufferType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<TextureBase, BackendTraits> {
+        using BackendType = typename BackendTraits::TextureType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<SwapChainBase, BackendTraits> {
+        using BackendType = typename BackendTraits::SwapChainType;
+    };
+
+    template <typename BackendTraits>
+    struct ToBackendTraits<TextureViewBase, BackendTraits> {
+        using BackendType = typename BackendTraits::TextureViewType;
+    };
+
+    // ToBackendBase implements conversion to the given BackendTraits
+    // To use it in a backend, use the following:
+    //   template<typename T>
+    //   auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
+    //       return ToBackendBase<MyBackendTraits>(common);
+    //   }
+
+    template <typename BackendTraits, typename T>
+    Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
+        return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
+            common);
+    }
+
+    template <typename BackendTraits, typename T>
+    Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
+        return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
+            common);
+    }
+
+    template <typename BackendTraits, typename T>
+    const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
+        const Ref<T>& common) {
+        return reinterpret_cast<
+            const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
+    }
+
+    template <typename BackendTraits, typename T>
+    typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
+        return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+    }
+
+    template <typename BackendTraits, typename T>
+    const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
+        return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(
+            common);
+    }
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_TOBACKEND_H_
diff --git a/src/dawn/native/Toggles.cpp b/src/dawn/native/Toggles.cpp
new file mode 100644
index 0000000..9b3a655
--- /dev/null
+++ b/src/dawn/native/Toggles.cpp
@@ -0,0 +1,346 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Toggles.h"
+
+namespace dawn::native {
+    namespace {
+
+        struct ToggleEnumAndInfo {
+            Toggle toggle;
+            ToggleInfo info;
+        };
+
+        using ToggleEnumAndInfoList =
+            std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
+
+        static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
+            {Toggle::EmulateStoreAndMSAAResolve,
+             {"emulate_store_and_msaa_resolve",
+              "Emulate storing into multisampled color attachments and doing MSAA resolve "
+              "simultaneously. This workaround is enabled by default on the Metal drivers that do "
+              "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
+              "those platforms, we should do MSAA resolve in another render pass after ending the "
+              "previous one.",
+              "https://crbug.com/dawn/56"}},
+            {Toggle::NonzeroClearResourcesOnCreationForTesting,
+             {"nonzero_clear_resources_on_creation_for_testing",
+              "Clears texture to full 1 bits as soon as they are created, but doesn't update "
+              "the tracking state of the texture. This way we can test the logic of clearing "
+              "textures that use recycled memory.",
+              "https://crbug.com/dawn/145"}},
+            {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+             {"always_resolve_into_zero_level_and_layer",
+              "When the resolve target is a texture view that is created on the non-zero level or "
+              "layer of a texture, we first resolve into a temporarily 2D texture with only one "
+              "mipmap level and one array layer, and copy the result of MSAA resolve into the "
+              "true resolve target. This workaround is enabled by default on the Metal drivers "
+              "that have bugs when setting non-zero resolveLevel or resolveSlice.",
+              "https://crbug.com/dawn/56"}},
+            {Toggle::LazyClearResourceOnFirstUse,
+             {"lazy_clear_resource_on_first_use",
+              "Clears resource to zero on first usage. This initializes the resource "
+              "so that no dirty bits from recycled memory is present in the new resource.",
+              "https://crbug.com/dawn/145"}},
+            {Toggle::TurnOffVsync,
+             {"turn_off_vsync",
+              "Turn off vsync when rendering. In order to do performance test or run perf tests, "
+              "turn off vsync so that the fps can exeed 60.",
+              "https://crbug.com/dawn/237"}},
+            {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+             {"use_temporary_buffer_in_texture_to_texture_copy",
+              "Split texture-to-texture copy into two copies: copy from source texture into a "
+              "temporary buffer, and copy from the temporary buffer into the destination texture "
+              "when copying between compressed textures that don't have block-aligned sizes. This "
+              "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
+              "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
+              "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+              "https://crbug.com/dawn/42"}},
+            {Toggle::UseD3D12ResourceHeapTier2,
+             {"use_d3d12_resource_heap_tier2",
+              "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+              "texture and buffers in the same heap. This allows better heap re-use and reduces "
+              "fragmentation.",
+              "https://crbug.com/dawn/27"}},
+            {Toggle::UseD3D12RenderPass,
+             {"use_d3d12_render_pass",
+              "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+              "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+              "will emulate a render pass.",
+              "https://crbug.com/dawn/36"}},
+            {Toggle::UseD3D12ResidencyManagement,
+             {"use_d3d12_residency_management",
+              "Enable residency management. This allows page-in and page-out of resource heaps in "
+              "GPU memory. This component improves overcommitted performance by keeping the most "
+              "recently used resources local to the GPU. Turning this component off can cause "
+              "allocation failures when application memory exceeds physical device memory.",
+              "https://crbug.com/dawn/193"}},
+            {Toggle::DisableResourceSuballocation,
+             {"disable_resource_suballocation",
+              "Force the backends to not perform resource suballocation. This may expose "
+              "allocation "
+              "patterns which would otherwise only occur with large or specific types of "
+              "resources.",
+              "https://crbug.com/1313172"}},
+            {Toggle::SkipValidation,
+             {"skip_validation", "Skip expensive validation of Dawn commands.",
+              "https://crbug.com/dawn/271"}},
+            {Toggle::VulkanUseD32S8,
+             {"vulkan_use_d32s8",
+              "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
+              "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
+              "use the D24S8 format when possible.",
+              "https://crbug.com/dawn/286"}},
+            {Toggle::VulkanUseS8,
+             {"vulkan_use_s8",
+              "Vulkan has a pure stencil8 format but it is not universally available. When this "
+              "toggle is on, the backend will use S8 for the stencil8 format, otherwise it will "
+              "fallback to D32S8 or D24S8.",
+              "https://crbug.com/dawn/666"}},
+            {Toggle::MetalDisableSamplerCompare,
+             {"metal_disable_sampler_compare",
+              "Disables the use of sampler compare on Metal. This is unsupported before A9 "
+              "processors.",
+              "https://crbug.com/dawn/342"}},
+            {Toggle::MetalUseSharedModeForCounterSampleBuffer,
+             {"metal_use_shared_mode_for_counter_sample_buffer",
+              "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
+              "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
+              "does not work properly on Intel platforms. The workaround is use shared mode "
+              "instead.",
+              "https://crbug.com/dawn/434"}},
+            {Toggle::DisableBaseVertex,
+             {"disable_base_vertex",
+              "Disables the use of non-zero base vertex which is unsupported on some platforms.",
+              "https://crbug.com/dawn/343"}},
+            {Toggle::DisableBaseInstance,
+             {"disable_base_instance",
+              "Disables the use of non-zero base instance which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/343"}},
+            {Toggle::DisableIndexedDrawBuffers,
+             {"disable_indexed_draw_buffers",
+              "Disables the use of indexed draw buffer state which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/582"}},
+            {Toggle::DisableSnormRead,
+             {"disable_snorm_read",
+              "Disables reading from Snorm textures which is unsupported on some platforms.",
+              "https://crbug.com/dawn/667"}},
+            {Toggle::DisableDepthStencilRead,
+             {"disable_depth_stencil_read",
+              "Disables reading from depth/stencil textures which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/667"}},
+            {Toggle::DisableSampleVariables,
+             {"disable_sample_variables",
+              "Disables gl_SampleMask and related functionality which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/673"}},
+            {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+             {"use_d3d12_small_shader_visible_heap",
+              "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
+              "default. This setting is used to test bindgroup encoding.",
+              "https://crbug.com/dawn/155"}},
+            {Toggle::UseDXC,
+             {"use_dxc",
+              "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
+              "is available.",
+              "https://crbug.com/dawn/402"}},
+            {Toggle::DisableRobustness,
+             {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
+            {Toggle::MetalEnableVertexPulling,
+             {"metal_enable_vertex_pulling",
+              "Uses vertex pulling to protect out-of-bounds reads on Metal",
+              "https://crbug.com/dawn/480"}},
+            {Toggle::DisallowUnsafeAPIs,
+             {"disallow_unsafe_apis",
+              "Produces validation errors on API entry points or parameter combinations that "
+              "aren't considered secure yet.",
+              "http://crbug.com/1138528"}},
+            {Toggle::FlushBeforeClientWaitSync,
+             {"flush_before_client_wait_sync",
+              "Call glFlush before glClientWaitSync to work around bugs in the latter",
+              "https://crbug.com/dawn/633"}},
+            {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+             {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
+              "level",
+              "Split texture-to-texture copy into two copies: copy from source texture into a "
+              "temporary buffer, and copy from the temporary buffer into the destination texture "
+              "under specific situations. This workaround is by default enabled on some Intel "
+              "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
+              "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
+              "level to a smaller mip level on D3D12 backends.",
+              "https://crbug.com/1161355"}},
+            {Toggle::EmitHLSLDebugSymbols,
+             {"emit_hlsl_debug_symbols",
+              "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
+              "compiling HLSL code. Enables better shader debugging with external graphics "
+              "debugging tools.",
+              "https://crbug.com/dawn/776"}},
+            {Toggle::DisallowSpirv,
+             {"disallow_spirv",
+              "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. "
+              "This is useful to prevent a Chromium renderer process from successfully sending "
+              "SPIR-V code to be compiled in the GPU process.",
+              "https://crbug.com/1214923"}},
+            {Toggle::DumpShaders,
+             {"dump_shaders",
+              "Dump shaders for debugging purposes. Dumped shaders will be log via "
+              "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
+              "function.",
+              "https://crbug.com/dawn/792"}},
+            {Toggle::DEPRECATED_DumpTranslatedShaders,
+             {"dump_translated_shaders", "Deprecated. Use dump_shaders",
+              "https://crbug.com/dawn/792"}},
+            {Toggle::ForceWGSLStep,
+             {"force_wgsl_step",
+              "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
+              "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
+              "work when the same translation runs in a WASM module in the page.",
+              "https://crbug.com/dawn/960"}},
+            {Toggle::DisableWorkgroupInit,
+             {"disable_workgroup_init",
+              "Disables the workgroup memory zero-initialization for compute shaders.",
+              "https://crbug.com/tint/1003"}},
+            {Toggle::DisableSymbolRenaming,
+             {"disable_symbol_renaming",
+              "Disables the WGSL symbol renaming so that names are preserved.",
+              "https://crbug.com/dawn/1016"}},
+            {Toggle::UseUserDefinedLabelsInBackend,
+             {"use_user_defined_labels_in_backend",
+              "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
+              "objects.",
+              "https://crbug.com/dawn/840"}},
+            {Toggle::DisableR8RG8Mipmaps,
+             {"disable_r8_rg8_mipmaps",
+              "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
+              "to not clear correctly.",
+              "https://crbug.com/dawn/1071"}},
+            {Toggle::UseDummyFragmentInVertexOnlyPipeline,
+             {"use_dummy_fragment_in_vertex_only_pipeline",
+              "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
+              "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
+              "some Metal devices with Intel GPU to ensure the depth result is correct.",
+              "https://crbug.com/dawn/136"}},
+            {Toggle::FxcOptimizations,
+             {"fxc_optimizations",
+              "Enable optimizations when compiling with FXC. Disabled by default because FXC "
+              "miscompiles in many cases when optimizations are enabled.",
+              "https://crbug.com/dawn/1203"}},
+            {Toggle::RecordDetailedTimingInTraceEvents,
+             {"record_detailed_timing_in_trace_events",
+              "Record detailed timing information in trace events at certain point. Currently the "
+              "timing information is recorded right before calling ExecuteCommandLists on a D3D12 "
+              "command queue, and the information includes system time, CPU timestamp, GPU "
+              "timestamp, and their frequency.",
+              "https://crbug.com/dawn/1264"}},
+            {Toggle::DisableTimestampQueryConversion,
+             {"disable_timestamp_query_conversion",
+              "Resolve timestamp queries into ticks instead of nanoseconds.",
+              "https://crbug.com/dawn/1305"}},
+            {Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension,
+             {"use_vulkan_zero_initialize_workgroup_memory_extension",
+              "Initialize workgroup memory with OpConstantNull on Vulkan when the Vulkan extension "
+              "VK_KHR_zero_initialize_workgroup_memory is supported.",
+              "https://crbug.com/dawn/1302"}},
+
+            // Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
+        }};
+    }  // anonymous namespace
+
+    void TogglesSet::Set(Toggle toggle, bool enabled) {
+        if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+            Set(Toggle::DumpShaders, enabled);
+            return;
+        }
+        ASSERT(toggle != Toggle::InvalidEnum);
+        const size_t toggleIndex = static_cast<size_t>(toggle);
+        toggleBitset.set(toggleIndex, enabled);
+    }
+
+    bool TogglesSet::Has(Toggle toggle) const {
+        if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+            return Has(Toggle::DumpShaders);
+        }
+        ASSERT(toggle != Toggle::InvalidEnum);
+        const size_t toggleIndex = static_cast<size_t>(toggle);
+        return toggleBitset.test(toggleIndex);
+    }
+
+    std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
+        std::vector<const char*> togglesNameInUse(toggleBitset.count());
+
+        uint32_t index = 0;
+        for (uint32_t i : IterateBitSet(toggleBitset)) {
+            const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
+            togglesNameInUse[index] = toggleName;
+            ++index;
+        }
+
+        return togglesNameInUse;
+    }
+
+    const char* ToggleEnumToName(Toggle toggle) {
+        ASSERT(toggle != Toggle::InvalidEnum);
+
+        const ToggleEnumAndInfo& toggleNameAndInfo =
+            kToggleNameAndInfoList[static_cast<size_t>(toggle)];
+        ASSERT(toggleNameAndInfo.toggle == toggle);
+        return toggleNameAndInfo.info.name;
+    }
+
+    const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
+        ASSERT(toggleName);
+
+        EnsureToggleNameToEnumMapInitialized();
+
+        const auto& iter = mToggleNameToEnumMap.find(toggleName);
+        if (iter != mToggleNameToEnumMap.cend()) {
+            return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
+        }
+        return nullptr;
+    }
+
+    Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
+        ASSERT(toggleName);
+
+        EnsureToggleNameToEnumMapInitialized();
+
+        const auto& iter = mToggleNameToEnumMap.find(toggleName);
+        if (iter != mToggleNameToEnumMap.cend()) {
+            return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
+        }
+        return Toggle::InvalidEnum;
+    }
+
+    void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
+        if (mToggleNameToEnumMapInitialized) {
+            return;
+        }
+
+        for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
+            const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
+            ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
+            mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
+        }
+
+        mToggleNameToEnumMapInitialized = true;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/Toggles.h b/src/dawn/native/Toggles.h
new file mode 100644
index 0000000..a45a82e
--- /dev/null
+++ b/src/dawn/native/Toggles.h
@@ -0,0 +1,102 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TOGGLES_H_
+#define DAWNNATIVE_TOGGLES_H_
+
+#include <bitset>
+#include <unordered_map>
+#include <vector>
+
+#include "dawn/native/DawnNative.h"
+
+namespace dawn::native {
+
+    enum class Toggle {
+        EmulateStoreAndMSAAResolve,
+        NonzeroClearResourcesOnCreationForTesting,
+        AlwaysResolveIntoZeroLevelAndLayer,
+        LazyClearResourceOnFirstUse,
+        TurnOffVsync,
+        UseTemporaryBufferInCompressedTextureToTextureCopy,
+        UseD3D12ResourceHeapTier2,
+        UseD3D12RenderPass,
+        UseD3D12ResidencyManagement,
+        DisableResourceSuballocation,
+        SkipValidation,
+        VulkanUseD32S8,
+        VulkanUseS8,
+        MetalDisableSamplerCompare,
+        MetalUseSharedModeForCounterSampleBuffer,
+        DisableBaseVertex,
+        DisableBaseInstance,
+        DisableIndexedDrawBuffers,
+        DisableSnormRead,
+        DisableDepthStencilRead,
+        DisableSampleVariables,
+        UseD3D12SmallShaderVisibleHeapForTesting,
+        UseDXC,
+        DisableRobustness,
+        MetalEnableVertexPulling,
+        DisallowUnsafeAPIs,
+        FlushBeforeClientWaitSync,
+        UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+        EmitHLSLDebugSymbols,
+        DisallowSpirv,
+        DumpShaders,
+        DEPRECATED_DumpTranslatedShaders,  // Use DumpShaders
+        ForceWGSLStep,
+        DisableWorkgroupInit,
+        DisableSymbolRenaming,
+        UseUserDefinedLabelsInBackend,
+        DisableR8RG8Mipmaps,
+        UseDummyFragmentInVertexOnlyPipeline,
+        FxcOptimizations,
+        RecordDetailedTimingInTraceEvents,
+        DisableTimestampQueryConversion,
+        VulkanUseZeroInitializeWorkgroupMemoryExtension,
+
+        EnumCount,
+        InvalidEnum = EnumCount,
+    };
+
+    // A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
+    // convenience to convert the enums of enum class Toggle to the indices of a bitset.
+    struct TogglesSet {
+        std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
+
+        void Set(Toggle toggle, bool enabled);
+        bool Has(Toggle toggle) const;
+        std::vector<const char*> GetContainedToggleNames() const;
+    };
+
+    const char* ToggleEnumToName(Toggle toggle);
+
+    class TogglesInfo {
+      public:
+        // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+        // of a toggle supported in Dawn.
+        const ToggleInfo* GetToggleInfo(const char* toggleName);
+        Toggle ToggleNameToEnum(const char* toggleName);
+
+      private:
+        void EnsureToggleNameToEnumMapInitialized();
+
+        bool mToggleNameToEnumMapInitialized = false;
+        std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_TOGGLES_H_
diff --git a/src/dawn/native/VertexFormat.cpp b/src/dawn/native/VertexFormat.cpp
new file mode 100644
index 0000000..2f2ae7f
--- /dev/null
+++ b/src/dawn/native/VertexFormat.cpp
@@ -0,0 +1,69 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/VertexFormat.h"
+
+#include "dawn/common/Assert.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
+        //
+        {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
+        //
+    }};
+
+    const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
+        ASSERT(format != wgpu::VertexFormat::Undefined);
+        ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
+        ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
+        return sVertexFormatTable[static_cast<uint32_t>(format)];
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/VertexFormat.h b/src/dawn/native/VertexFormat.h
new file mode 100644
index 0000000..f88ae28
--- /dev/null
+++ b/src/dawn/native/VertexFormat.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VERTEXFORMAT_H_
+#define DAWNNATIVE_VERTEXFORMAT_H_
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    enum class VertexFormatBaseType {
+        Float,
+        Uint,
+        Sint,
+    };
+
+    struct VertexFormatInfo {
+        wgpu::VertexFormat format;
+        uint32_t byteSize;
+        uint32_t componentCount;
+        uint32_t componentByteSize;
+        VertexFormatBaseType baseType;
+    };
+
+    const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_VERTEXFORMAT_H_
diff --git a/src/dawn/native/XlibXcbFunctions.cpp b/src/dawn/native/XlibXcbFunctions.cpp
new file mode 100644
index 0000000..1b0f6e8
--- /dev/null
+++ b/src/dawn/native/XlibXcbFunctions.cpp
@@ -0,0 +1,31 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/XlibXcbFunctions.h"
+
+namespace dawn::native {
+
+    XlibXcbFunctions::XlibXcbFunctions() {
+        if (!mLib.Open("libX11-xcb.so.1") ||
+            !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
+            mLib.Close();
+        }
+    }
+    XlibXcbFunctions::~XlibXcbFunctions() = default;
+
+    bool XlibXcbFunctions::IsLoaded() const {
+        return xGetXCBConnection != nullptr;
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/XlibXcbFunctions.h b/src/dawn/native/XlibXcbFunctions.h
new file mode 100644
index 0000000..52998a4
--- /dev/null
+++ b/src/dawn/native/XlibXcbFunctions.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_XLIBXCBFUNCTIONS_H_
+#define DAWNNATIVE_XLIBXCBFUNCTIONS_H_
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/common/xlib_with_undefs.h"
+
+class DynamicLib;
+
+namespace dawn::native {
+
+    // A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
+    // (and nothing else). This has to be dynamic because this libraries isn't present on all Linux
+    // deployment platforms that Chromium targets.
+    class XlibXcbFunctions {
+      public:
+        XlibXcbFunctions();
+        ~XlibXcbFunctions();
+
+        bool IsLoaded() const;
+
+        // Functions from x11-xcb
+        decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
+
+      private:
+        DynamicLib mLib;
+    };
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_XLIBXCBFUNCTIONS_H_
diff --git a/src/dawn/native/d3d12/AdapterD3D12.cpp b/src/dawn/native/d3d12/AdapterD3D12.cpp
new file mode 100644
index 0000000..d31b9af
--- /dev/null
+++ b/src/dawn/native/d3d12/AdapterD3D12.cpp
@@ -0,0 +1,425 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/AdapterD3D12.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/WindowsUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+
+    Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
+        : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
+          mHardwareAdapter(hardwareAdapter),
+          mBackend(backend) {
+    }
+
+    Adapter::~Adapter() {
+        CleanUpDebugLayerFilters();
+    }
+
+    bool Adapter::SupportsExternalImages() const {
+        // Via dawn::native::d3d12::ExternalImageDXGI::Create
+        return true;
+    }
+
+    const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
+        return mDeviceInfo;
+    }
+
+    IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
+        return mHardwareAdapter.Get();
+    }
+
+    Backend* Adapter::GetBackend() const {
+        return mBackend;
+    }
+
+    ComPtr<ID3D12Device> Adapter::GetDevice() const {
+        return mD3d12Device;
+    }
+
+    const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
+        return mDriverVersion;
+    }
+
+    MaybeError Adapter::InitializeImpl() {
+        // D3D12 cannot check for feature support without a device.
+        // Create the device to populate the adapter properties then reuse it when needed for actual
+        // rendering.
+        const PlatformFunctions* functions = GetBackend()->GetFunctions();
+        if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
+                                                _uuidof(ID3D12Device), &mD3d12Device))) {
+            return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
+        }
+
+        DAWN_TRY(InitializeDebugLayerFilters());
+
+        DXGI_ADAPTER_DESC1 adapterDesc;
+        mHardwareAdapter->GetDesc1(&adapterDesc);
+
+        mDeviceId = adapterDesc.DeviceId;
+        mVendorId = adapterDesc.VendorId;
+        mName = WCharToUTF8(adapterDesc.Description);
+
+        DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+        if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
+            mAdapterType = wgpu::AdapterType::CPU;
+        } else {
+            mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
+                                               : wgpu::AdapterType::DiscreteGPU;
+        }
+
+        // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
+        LARGE_INTEGER umdVersion;
+        if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
+            DXGI_ERROR_UNSUPPORTED) {
+            uint64_t encodedVersion = umdVersion.QuadPart;
+
+            std::ostringstream o;
+            o << "D3D12 driver version ";
+            for (size_t i = 0; i < mDriverVersion.size(); ++i) {
+                mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
+                o << mDriverVersion[i] << ".";
+            }
+            mDriverDescription = o.str();
+        }
+
+        return {};
+    }
+
+    bool Adapter::AreTimestampQueriesSupported() const {
+        D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+        queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+        queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+        ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
+        HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
+        if (FAILED(hr)) {
+            return false;
+        }
+
+        // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
+        // and vGPU implementations.
+        uint64_t timeStampFrequency;
+        hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
+        if (FAILED(hr)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+        if (AreTimestampQueriesSupported()) {
+            mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+        }
+        mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+        mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+        mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+        mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+        mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+
+        return {};
+    }
+
+    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+        D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
+
+        DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+                                                                &featureData, sizeof(featureData)),
+                              "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
+
+        // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
+        const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
+
+        D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
+        featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
+        featureLevels.pFeatureLevelsRequested = levelsToQuery;
+        DAWN_TRY(
+            CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
+                                                           &featureLevels, sizeof(featureLevels)),
+                         "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
+
+        if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
+            featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
+            return DAWN_VALIDATION_ERROR(
+                "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
+                "devices.");
+        }
+
+        GetDefaultLimits(&limits->v1);
+
+        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
+
+        // Limits that are the same across D3D feature levels
+        limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
+        limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
+        limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
+        limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
+        // Slot values can be 0-15, inclusive:
+        // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
+        limits->v1.maxVertexBuffers = 16;
+        limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
+
+        // Note: WebGPU requires FL11.1+
+        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
+        // Resource Binding Tier:   1      2      3
+
+        // Max(CBV+UAV+SRV)         1M    1M    1M+
+        // Max CBV per stage        14    14   full
+        // Max SRV per stage       128  full   full
+        // Max UAV in all stages    64    64   full
+        // Max Samplers per stage   16  2048   2048
+
+        // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
+        // "full" means the full heap can be used. This is tested
+        // to work for 1 million descriptors, and 1.1M for tier 3.
+        uint32_t maxCBVsPerStage;
+        uint32_t maxSRVsPerStage;
+        uint32_t maxUAVsAllStages;
+        uint32_t maxSamplersPerStage;
+        switch (featureData.ResourceBindingTier) {
+            case D3D12_RESOURCE_BINDING_TIER_1:
+                maxCBVsPerStage = 14;
+                maxSRVsPerStage = 128;
+                maxUAVsAllStages = 64;
+                maxSamplersPerStage = 16;
+                break;
+            case D3D12_RESOURCE_BINDING_TIER_2:
+                maxCBVsPerStage = 14;
+                maxSRVsPerStage = 1'000'000;
+                maxUAVsAllStages = 64;
+                maxSamplersPerStage = 2048;
+                break;
+            case D3D12_RESOURCE_BINDING_TIER_3:
+            default:
+                maxCBVsPerStage = 1'100'000;
+                maxSRVsPerStage = 1'100'000;
+                maxUAVsAllStages = 1'100'000;
+                maxSamplersPerStage = 2048;
+                break;
+        }
+
+        ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
+        ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
+        uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
+
+        limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
+        // Allocate half of the UAVs to storage buffers, and half to storage textures.
+        limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
+        limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
+        limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
+        limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
+
+        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
+        // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
+        static constexpr uint32_t kMaxRootSignatureSize = 64u;
+        // Dawn maps WebGPU's binding model by:
+        //  - (maxBindGroups)
+        //    CBVs/UAVs/SRVs for bind group are a root descriptor table
+        //  - (maxBindGroups)
+        //    Samplers for each bind group are a root descriptor table
+        //  - (2 * maxDynamicBuffers)
+        //    Each dynamic buffer is a root descriptor
+        //  RESERVED:
+        //  - 3 = max of:
+        //    - 2 root constants for the baseVertex/baseInstance constants.
+        //    - 3 root constants for num workgroups X, Y, Z
+        //  - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
+        //  buffer lengths.
+        static constexpr uint32_t kReservedSlots = 7;
+
+        // Available slots after base limits considered.
+        uint32_t availableRootSignatureSlots =
+            kMaxRootSignatureSize - kReservedSlots -
+            2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+                 limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
+
+        // Because we need either:
+        //  - 1 cbv/uav/srv table + 1 sampler table
+        //  - 2 slots for a root descriptor
+        uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
+
+        // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
+        // Distribute evenly.
+        limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
+        limits->v1.maxDynamicUniformBuffersPerPipelineLayout +=
+            availableDynamicBufferOrBindGroup / 3;
+        limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
+            (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
+
+        ASSERT(2 * (limits->v1.maxBindGroups +
+                    limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+                    limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
+               kMaxRootSignatureSize - kReservedSlots);
+
+        // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
+        limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
+        limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
+        limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
+        limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
+
+        // https://docs.maxComputeWorkgroupSizeXmicrosoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
+        limits->v1.maxComputeWorkgroupsPerDimension =
+            D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
+
+        // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
+        // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
+        // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
+        limits->v1.maxComputeWorkgroupStorageSize = 32768;
+
+        // Max number of "constants" where each constant is a 16-byte float4
+        limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
+        // D3D12 has no documented limit on the size of a storage buffer binding.
+        limits->v1.maxStorageBufferBindingSize = 4294967295;
+
+        // TODO(crbug.com/dawn/685):
+        // LIMITS NOT SET:
+        // - maxInterStageShaderComponents
+        // - maxVertexBufferArrayStride
+
+        return {};
+    }
+
+    MaybeError Adapter::InitializeDebugLayerFilters() {
+        if (!GetInstance()->IsBackendValidationEnabled()) {
+            return {};
+        }
+
+        D3D12_MESSAGE_ID denyIds[] = {
+
+            //
+            // Permanent IDs: list of warnings that are not applicable
+            //
+
+            // Resource sub-allocation partially maps pre-allocated heaps. This means the
+            // entire physical addresses space may have no resources or have many resources
+            // assigned the same heap.
+            D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
+            D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
+
+            // The debug layer validates pipeline objects when they are created. Dawn validates
+            // them when them when they are set. Therefore, since the issue is caught at a later
+            // time, we can silence this warnings.
+            D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
+
+            // Adding a clear color during resource creation would require heuristics or delayed
+            // creation.
+            // https://crbug.com/dawn/418
+            D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
+            D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
+
+            // Dawn enforces proper Unmaps at a later time.
+            // https://crbug.com/dawn/422
+            D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
+
+            // WebGPU allows empty scissors without empty viewports.
+            D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
+
+            //
+            // Temporary IDs: list of warnings that should be fixed or promoted
+            //
+
+            // Remove after warning have been addressed
+            // https://crbug.com/dawn/421
+            D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+
+            // For small placed resource alignment, we first request the small alignment, which may
+            // get rejected and generate a debug error. Then, we request 0 to get the allowed
+            // allowed alignment.
+            D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
+
+            // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
+            // behavior.
+            D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
+
+            // WebGPU allows setVertexBuffer with offset that equals to the whole vertex buffer
+            // size.
+            // Even this means that no vertex buffer view has been set in D3D12 backend.
+            // https://crbug.com/dawn/1255
+            D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
+        };
+
+        // Create a retrieval filter with a deny list to suppress messages.
+        // Any messages remaining will be converted to Dawn errors.
+        D3D12_INFO_QUEUE_FILTER filter{};
+        // Filter out info/message and only create errors from warnings or worse.
+        D3D12_MESSAGE_SEVERITY severities[] = {
+            D3D12_MESSAGE_SEVERITY_INFO,
+            D3D12_MESSAGE_SEVERITY_MESSAGE,
+        };
+        filter.DenyList.NumSeverities = ARRAYSIZE(severities);
+        filter.DenyList.pSeverityList = severities;
+        filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
+        filter.DenyList.pIDList = denyIds;
+
+        ComPtr<ID3D12InfoQueue> infoQueue;
+        DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+                              "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+
+        // To avoid flooding the console, a storage-filter is also used to
+        // prevent messages from getting logged.
+        DAWN_TRY(CheckHRESULT(infoQueue->PushStorageFilter(&filter),
+                              "ID3D12InfoQueue::PushStorageFilter"));
+
+        DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
+                              "ID3D12InfoQueue::PushRetrievalFilter"));
+
+        return {};
+    }
+
+    void Adapter::CleanUpDebugLayerFilters() {
+        if (!GetInstance()->IsBackendValidationEnabled()) {
+            return;
+        }
+
+        // The device may not exist if this adapter failed to initialize.
+        if (mD3d12Device == nullptr) {
+            return;
+        }
+
+        // If the debug layer is not installed, return immediately to avoid crashing the process.
+        ComPtr<ID3D12InfoQueue> infoQueue;
+        if (FAILED(mD3d12Device.As(&infoQueue))) {
+            return;
+        }
+
+        infoQueue->PopRetrievalFilter();
+        infoQueue->PopStorageFilter();
+    }
+
+    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+        return Device::Create(this, descriptor);
+    }
+
+    // Resets the backend device and creates a new one. If any D3D12 objects belonging to the
+    // current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
+    // and the subequent call to CreateDevice will return a handle the existing device instead of
+    // creating a new one.
+    MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
+        ASSERT(mD3d12Device.Reset() == 0);
+        DAWN_TRY(Initialize());
+
+        return {};
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/AdapterD3D12.h b/src/dawn/native/d3d12/AdapterD3D12.h
new file mode 100644
index 0000000..3247a13
--- /dev/null
+++ b/src/dawn/native/d3d12/AdapterD3D12.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_ADAPTERD3D12_H_
+#define DAWNNATIVE_D3D12_ADAPTERD3D12_H_
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/d3d12/D3D12Info.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Backend;
+
+    class Adapter : public AdapterBase {
+      public:
+        Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
+        ~Adapter() override;
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override;
+
+        const D3D12DeviceInfo& GetDeviceInfo() const;
+        IDXGIAdapter3* GetHardwareAdapter() const;
+        Backend* GetBackend() const;
+        ComPtr<ID3D12Device> GetDevice() const;
+        const gpu_info::D3DDriverVersion& GetDriverVersion() const;
+
+      private:
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override;
+        MaybeError ResetInternalDeviceForTestingImpl() override;
+
+        bool AreTimestampQueriesSupported() const;
+
+        MaybeError InitializeImpl() override;
+        MaybeError InitializeSupportedFeaturesImpl() override;
+        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+        MaybeError InitializeDebugLayerFilters();
+        void CleanUpDebugLayerFilters();
+
+        ComPtr<IDXGIAdapter3> mHardwareAdapter;
+        ComPtr<ID3D12Device> mD3d12Device;
+        gpu_info::D3DDriverVersion mDriverVersion;
+
+        Backend* mBackend;
+        D3D12DeviceInfo mDeviceInfo = {};
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_ADAPTERD3D12_H_
diff --git a/src/dawn/native/d3d12/BackendD3D12.cpp b/src/dawn/native/d3d12/BackendD3D12.cpp
new file mode 100644
index 0000000..27a9882
--- /dev/null
+++ b/src/dawn/native/d3d12/BackendD3D12.cpp
@@ -0,0 +1,209 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BackendD3D12.h"
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+
+        ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
+                                                           BackendValidationLevel validationLevel,
+                                                           bool beginCaptureOnStartup) {
+            ComPtr<IDXGIFactory4> factory;
+
+            uint32_t dxgiFactoryFlags = 0;
+
+            // Enable the debug layer (requires the Graphics Tools "optional feature").
+            {
+                if (validationLevel != BackendValidationLevel::Disabled) {
+                    ComPtr<ID3D12Debug3> debugController;
+                    if (SUCCEEDED(
+                            functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
+                        ASSERT(debugController != nullptr);
+                        debugController->EnableDebugLayer();
+                        if (validationLevel == BackendValidationLevel::Full) {
+                            debugController->SetEnableGPUBasedValidation(true);
+                        }
+
+                        // Enable additional debug layers.
+                        dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
+                    }
+                }
+
+                if (beginCaptureOnStartup) {
+                    ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
+                    if (functions->dxgiGetDebugInterface1 != nullptr &&
+                        SUCCEEDED(functions->dxgiGetDebugInterface1(
+                            0, IID_PPV_ARGS(&graphicsAnalysis)))) {
+                        graphicsAnalysis->BeginCapture();
+                    }
+                }
+            }
+
+            if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
+                return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
+            }
+
+            ASSERT(factory != nullptr);
+            return std::move(factory);
+        }
+
+        ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(
+            Backend* backend,
+            ComPtr<IDXGIAdapter> dxgiAdapter) {
+            ComPtr<IDXGIAdapter3> dxgiAdapter3;
+            DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
+            Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
+            DAWN_TRY(adapter->Initialize());
+
+            return {std::move(adapter)};
+        }
+
+    }  // anonymous namespace
+
+    Backend::Backend(InstanceBase* instance)
+        : BackendConnection(instance, wgpu::BackendType::D3D12) {
+    }
+
+    MaybeError Backend::Initialize() {
+        mFunctions = std::make_unique<PlatformFunctions>();
+        DAWN_TRY(mFunctions->LoadFunctions());
+
+        const auto instance = GetInstance();
+
+        DAWN_TRY_ASSIGN(mFactory,
+                        CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
+                                      instance->IsBeginCaptureOnStartupEnabled()));
+
+        return {};
+    }
+
+    ComPtr<IDXGIFactory4> Backend::GetFactory() const {
+        return mFactory;
+    }
+
+    MaybeError Backend::EnsureDxcLibrary() {
+        if (mDxcLibrary == nullptr) {
+            DAWN_TRY(CheckHRESULT(
+                mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
+                "DXC create library"));
+            ASSERT(mDxcLibrary != nullptr);
+        }
+        return {};
+    }
+
+    MaybeError Backend::EnsureDxcCompiler() {
+        if (mDxcCompiler == nullptr) {
+            DAWN_TRY(CheckHRESULT(
+                mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
+                "DXC create compiler"));
+            ASSERT(mDxcCompiler != nullptr);
+        }
+        return {};
+    }
+
+    MaybeError Backend::EnsureDxcValidator() {
+        if (mDxcValidator == nullptr) {
+            DAWN_TRY(CheckHRESULT(
+                mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
+                "DXC create validator"));
+            ASSERT(mDxcValidator != nullptr);
+        }
+        return {};
+    }
+
+    ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+        ASSERT(mDxcLibrary != nullptr);
+        return mDxcLibrary;
+    }
+
+    ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+        ASSERT(mDxcCompiler != nullptr);
+        return mDxcCompiler;
+    }
+
+    ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+        ASSERT(mDxcValidator != nullptr);
+        return mDxcValidator;
+    }
+
+    const PlatformFunctions* Backend::GetFunctions() const {
+        return mFunctions.get();
+    }
+
+    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+        AdapterDiscoveryOptions options;
+        auto result = DiscoverAdapters(&options);
+        if (result.IsError()) {
+            GetInstance()->ConsumedError(result.AcquireError());
+            return {};
+        }
+        return result.AcquireSuccess();
+    }
+
+    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) {
+        ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
+        const AdapterDiscoveryOptions* options =
+            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+        std::vector<Ref<AdapterBase>> adapters;
+        if (options->dxgiAdapter != nullptr) {
+            // |dxgiAdapter| was provided. Discover just that adapter.
+            Ref<AdapterBase> adapter;
+            DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
+            adapters.push_back(std::move(adapter));
+            return std::move(adapters);
+        }
+
+        // Enumerate and discover all available adapters.
+        for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+            ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+            if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+                break;  // No more adapters to enumerate.
+            }
+
+            ASSERT(dxgiAdapter != nullptr);
+            ResultOrError<Ref<AdapterBase>> adapter =
+                CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
+            if (adapter.IsError()) {
+                GetInstance()->ConsumedError(adapter.AcquireError());
+                continue;
+            }
+
+            adapters.push_back(adapter.AcquireSuccess());
+        }
+
+        return adapters;
+    }
+
+    BackendConnection* Connect(InstanceBase* instance) {
+        Backend* backend = new Backend(instance);
+
+        if (instance->ConsumedError(backend->Initialize())) {
+            delete backend;
+            return nullptr;
+        }
+
+        return backend;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BackendD3D12.h b/src/dawn/native/d3d12/BackendD3D12.h
new file mode 100644
index 0000000..01ae6bc
--- /dev/null
+++ b/src/dawn/native/d3d12/BackendD3D12.h
@@ -0,0 +1,59 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BACKENDD3D12_H_
+#define DAWNNATIVE_D3D12_BACKENDD3D12_H_
+
+#include "dawn/native/BackendConnection.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class PlatformFunctions;
+
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance);
+
+        MaybeError Initialize();
+
+        ComPtr<IDXGIFactory4> GetFactory() const;
+
+        MaybeError EnsureDxcLibrary();
+        MaybeError EnsureDxcCompiler();
+        MaybeError EnsureDxcValidator();
+        ComPtr<IDxcLibrary> GetDxcLibrary() const;
+        ComPtr<IDxcCompiler> GetDxcCompiler() const;
+        ComPtr<IDxcValidator> GetDxcValidator() const;
+
+        const PlatformFunctions* GetFunctions() const;
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* optionsBase) override;
+
+      private:
+        // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
+        // the D3D12 DLLs are unloaded before we are done using them.
+        std::unique_ptr<PlatformFunctions> mFunctions;
+        ComPtr<IDXGIFactory4> mFactory;
+        ComPtr<IDxcLibrary> mDxcLibrary;
+        ComPtr<IDxcCompiler> mDxcCompiler;
+        ComPtr<IDxcValidator> mDxcValidator;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_BACKENDD3D12_H_
diff --git a/src/dawn/native/d3d12/BindGroupD3D12.cpp b/src/dawn/native/d3d12/BindGroupD3D12.cpp
new file mode 100644
index 0000000..f169345
--- /dev/null
+++ b/src/dawn/native/d3d12/BindGroupD3D12.cpp
@@ -0,0 +1,268 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    // static
+    ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+                                                    const BindGroupDescriptor* descriptor) {
+        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+    }
+
+    BindGroup::BindGroup(Device* device,
+                         const BindGroupDescriptor* descriptor,
+                         uint32_t viewSizeIncrement,
+                         const CPUDescriptorHeapAllocation& viewAllocation)
+        : BindGroupBase(this, device, descriptor) {
+        BindGroupLayout* bgl = ToBackend(GetLayout());
+
+        mCPUViewAllocation = viewAllocation;
+
+        const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
+
+        ID3D12Device* d3d12Device = device->GetD3D12Device();
+
+        // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
+        // This is because they are created as root descriptors which are never heap allocated.
+        // Since dynamic buffers are packed in the front, we can skip over these bindings by
+        // starting from the dynamic buffer count.
+        for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+             bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+
+            // Increment size does not need to be stored and is only used to get a handle
+            // local to the allocation with OffsetFrom().
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+                    ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
+                    if (resource == nullptr) {
+                        // The Buffer was destroyed. Skip creating buffer views since there is no
+                        // resource. This bind group won't be used as it is an error to submit a
+                        // command buffer that references destroyed resources.
+                        continue;
+                    }
+
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform: {
+                            D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
+                            desc.SizeInBytes =
+                                Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
+                            desc.BufferLocation =
+                                ToBackend(binding.buffer)->GetVA() + binding.offset;
+
+                            d3d12Device->CreateConstantBufferView(
+                                &desc, viewAllocation.OffsetFrom(
+                                           viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
+                            break;
+                        }
+                        case wgpu::BufferBindingType::Storage:
+                        case kInternalStorageBufferBinding: {
+                            // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
+                            // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
+                            // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
+                            // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
+                            // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
+                            // byte aligned. Since binding.size and binding.offset are in bytes,
+                            // we need to divide by 4 to obtain the element size.
+                            D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
+                            desc.Buffer.NumElements = binding.size / 4;
+                            desc.Format = DXGI_FORMAT_R32_TYPELESS;
+                            desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
+                            desc.Buffer.FirstElement = binding.offset / 4;
+                            desc.Buffer.StructureByteStride = 0;
+                            desc.Buffer.CounterOffsetInBytes = 0;
+                            desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
+
+                            d3d12Device->CreateUnorderedAccessView(
+                                resource, nullptr, &desc,
+                                viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                          descriptorHeapOffsets[bindingIndex]));
+                            break;
+                        }
+                        case wgpu::BufferBindingType::ReadOnlyStorage: {
+                            // Like StorageBuffer, Tint outputs HLSL shaders for readonly
+                            // storage buffer with ByteAddressBuffer. So we must use
+                            // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
+                            // similar requirement for format, element size, etc.
+                            D3D12_SHADER_RESOURCE_VIEW_DESC desc;
+                            desc.Format = DXGI_FORMAT_R32_TYPELESS;
+                            desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
+                            desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+                            desc.Buffer.FirstElement = binding.offset / 4;
+                            desc.Buffer.NumElements = binding.size / 4;
+                            desc.Buffer.StructureByteStride = 0;
+                            desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
+                            d3d12Device->CreateShaderResourceView(
+                                resource, &desc,
+                                viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                          descriptorHeapOffsets[bindingIndex]));
+                            break;
+                        }
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+
+                    break;
+                }
+
+                case BindingInfoType::Texture: {
+                    auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+                    auto& srv = view->GetSRVDescriptor();
+
+                    ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+                    if (resource == nullptr) {
+                        // The Texture was destroyed. Skip creating the SRV since there is no
+                        // resource. This bind group won't be used as it is an error to submit a
+                        // command buffer that references destroyed resources.
+                        continue;
+                    }
+
+                    d3d12Device->CreateShaderResourceView(
+                        resource, &srv,
+                        viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                  descriptorHeapOffsets[bindingIndex]));
+                    break;
+                }
+
+                case BindingInfoType::StorageTexture: {
+                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                    ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+                    if (resource == nullptr) {
+                        // The Texture was destroyed. Skip creating the SRV/UAV since there is no
+                        // resource. This bind group won't be used as it is an error to submit a
+                        // command buffer that references destroyed resources.
+                        continue;
+                    }
+
+                    switch (bindingInfo.storageTexture.access) {
+                        case wgpu::StorageTextureAccess::WriteOnly: {
+                            D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
+                            d3d12Device->CreateUnorderedAccessView(
+                                resource, nullptr, &uav,
+                                viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                          descriptorHeapOffsets[bindingIndex]));
+                            break;
+                        }
+
+                        case wgpu::StorageTextureAccess::Undefined:
+                            UNREACHABLE();
+                    }
+
+                    break;
+                }
+
+                case BindingInfoType::ExternalTexture: {
+                    UNREACHABLE();
+                }
+
+                case BindingInfoType::Sampler: {
+                    // No-op as samplers will be later initialized by CreateSamplers().
+                    break;
+                }
+            }
+        }
+
+        // Loop through the dynamic storage buffers and build a flat map from the index of the
+        // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
+        // means that it is the i'th buffer that is both dynamic and storage, in increasing order
+        // of BindingNumber.
+        mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+        uint32_t dynamicStorageBufferIndex = 0;
+        for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+             ++bindingIndex) {
+            if (bgl->IsStorageBufferBinding(bindingIndex)) {
+                mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
+                    GetBindingAsBufferBinding(bindingIndex).size;
+            }
+        }
+    }
+
+    BindGroup::~BindGroup() = default;
+
+    void BindGroup::DestroyImpl() {
+        BindGroupBase::DestroyImpl();
+        ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
+        ASSERT(!mCPUViewAllocation.IsValid());
+    }
+
+    bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
+        const BindGroupLayout* bgl = ToBackend(GetLayout());
+
+        const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+        if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
+            return true;
+        }
+
+        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+        // If either failed, return early to re-allocate and switch the heaps.
+        Device* device = ToBackend(GetDevice());
+
+        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+        if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
+                                                   device->GetPendingCommandSerial(),
+                                                   &baseCPUDescriptor, &mGPUViewAllocation)) {
+            return false;
+        }
+
+        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+        // simple copies per bindgroup, a single non-simple copy could be issued.
+        // TODO(dawn:155): Consider doing this optimization.
+        device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+                                                        mCPUViewAllocation.GetBaseDescriptor(),
+                                                        D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+
+        return true;
+    }
+
+    D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
+        return mGPUViewAllocation.GetBaseDescriptor();
+    }
+
+    D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
+        ASSERT(mSamplerAllocationEntry != nullptr);
+        return mSamplerAllocationEntry->GetBaseDescriptor();
+    }
+
+    bool BindGroup::PopulateSamplers(Device* device,
+                                     ShaderVisibleDescriptorAllocator* samplerAllocator) {
+        if (mSamplerAllocationEntry == nullptr) {
+            return true;
+        }
+        return mSamplerAllocationEntry->Populate(device, samplerAllocator);
+    }
+
+    void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
+        mSamplerAllocationEntry = std::move(entry);
+    }
+
+    const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths()
+        const {
+        return mDynamicStorageBufferLengths;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BindGroupD3D12.h b/src/dawn/native/d3d12/BindGroupD3D12.h
new file mode 100644
index 0000000..7fcf782
--- /dev/null
+++ b/src/dawn/native/d3d12/BindGroupD3D12.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
+#define DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+    class SamplerHeapCacheEntry;
+    class ShaderVisibleDescriptorAllocator;
+
+    class BindGroup final : public BindGroupBase, public PlacementAllocated {
+      public:
+        static ResultOrError<Ref<BindGroup>> Create(Device* device,
+                                                    const BindGroupDescriptor* descriptor);
+
+        BindGroup(Device* device,
+                  const BindGroupDescriptor* descriptor,
+                  uint32_t viewSizeIncrement,
+                  const CPUDescriptorHeapAllocation& viewAllocation);
+
+        // Returns true if the BindGroup was successfully populated.
+        bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
+        bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
+
+        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
+        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
+
+        void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
+
+        using DynamicStorageBufferLengths =
+            ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
+        const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
+
+      private:
+        ~BindGroup() override;
+
+        void DestroyImpl() override;
+
+        Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
+
+        GPUDescriptorHeapAllocation mGPUViewAllocation;
+        CPUDescriptorHeapAllocation mCPUViewAllocation;
+
+        DynamicStorageBufferLengths mDynamicStorageBufferLengths;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp b/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
new file mode 100644
index 0000000..4e586a0
--- /dev/null
+++ b/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
@@ -0,0 +1,185 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+    namespace {
+        D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
+            const BindingInfo& bindingInfo) {
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer:
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform:
+                            return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
+                        case wgpu::BufferBindingType::Storage:
+                        case kInternalStorageBufferBinding:
+                            return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+                        case wgpu::BufferBindingType::ReadOnlyStorage:
+                            return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+
+                case BindingInfoType::Sampler:
+                    return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
+
+                case BindingInfoType::Texture:
+                case BindingInfoType::ExternalTexture:
+                    return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+
+                case BindingInfoType::StorageTexture:
+                    switch (bindingInfo.storageTexture.access) {
+                        case wgpu::StorageTextureAccess::WriteOnly:
+                            return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+                        case wgpu::StorageTextureAccess::Undefined:
+                            UNREACHABLE();
+                    }
+            }
+        }
+    }  // anonymous namespace
+
+    // static
+    Ref<BindGroupLayout> BindGroupLayout::Create(
+        Device* device,
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+    }
+
+    BindGroupLayout::BindGroupLayout(Device* device,
+                                     const BindGroupLayoutDescriptor* descriptor,
+                                     PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+          mDescriptorHeapOffsets(GetBindingCount()),
+          mShaderRegisters(GetBindingCount()),
+          mCbvUavSrvDescriptorCount(0),
+          mSamplerDescriptorCount(0),
+          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+        for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+            D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
+                WGPUBindingInfoToDescriptorRangeType(bindingInfo);
+            mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
+
+            // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
+            // need to allocate the descriptor from descriptor heap or create descriptor ranges.
+            if (bindingIndex < GetDynamicBufferCount()) {
+                continue;
+            }
+            ASSERT(!bindingInfo.buffer.hasDynamicOffset);
+
+            mDescriptorHeapOffsets[bindingIndex] =
+                descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+                    ? mSamplerDescriptorCount++
+                    : mCbvUavSrvDescriptorCount++;
+
+            D3D12_DESCRIPTOR_RANGE range;
+            range.RangeType = descriptorRangeType;
+            range.NumDescriptors = 1;
+            range.BaseShaderRegister = GetShaderRegister(bindingIndex);
+            range.RegisterSpace = kRegisterSpacePlaceholder;
+            range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
+
+            std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
+                descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+                    ? mSamplerDescriptorRanges
+                    : mCbvUavSrvDescriptorRanges;
+
+            // Try to join this range with the previous one, if the current range is a continuation
+            // of the previous. This is possible because the binding infos in the base type are
+            // sorted.
+            if (descriptorRanges.size() >= 2) {
+                D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
+                if (previous.RangeType == range.RangeType &&
+                    previous.BaseShaderRegister + previous.NumDescriptors ==
+                        range.BaseShaderRegister) {
+                    previous.NumDescriptors += range.NumDescriptors;
+                    continue;
+                }
+            }
+
+            descriptorRanges.push_back(range);
+        }
+
+        mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
+        mSamplerAllocator =
+            device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+    }
+
+    ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+        Device* device,
+        const BindGroupDescriptor* descriptor) {
+        uint32_t viewSizeIncrement = 0;
+        CPUDescriptorHeapAllocation viewAllocation;
+        if (GetCbvUavSrvDescriptorCount() > 0) {
+            DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
+            viewSizeIncrement = mViewAllocator->GetSizeIncrement();
+        }
+
+        Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
+            mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
+
+        if (GetSamplerDescriptorCount() > 0) {
+            Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
+            DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
+                                                       bindGroup.Get(), mSamplerAllocator));
+            bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
+        }
+
+        return bindGroup;
+    }
+
+    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+                                              CPUDescriptorHeapAllocation* viewAllocation) {
+        if (viewAllocation->IsValid()) {
+            mViewAllocator->Deallocate(viewAllocation);
+        }
+
+        mBindGroupAllocator.Deallocate(bindGroup);
+    }
+
+    ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
+        return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
+    }
+
+    uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
+        return mShaderRegisters[bindingIndex];
+    }
+
+    uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
+        return mCbvUavSrvDescriptorCount;
+    }
+
+    uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
+        return mSamplerDescriptorCount;
+    }
+
+    const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
+        const {
+        return mCbvUavSrvDescriptorRanges;
+    }
+
+    const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
+        return mSamplerDescriptorRanges;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BindGroupLayoutD3D12.h b/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
new file mode 100644
index 0000000..f16b16b
--- /dev/null
+++ b/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
+#define DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class BindGroup;
+    class CPUDescriptorHeapAllocation;
+    class Device;
+    class StagingDescriptorAllocator;
+
+    // A purposefully invalid register space.
+    //
+    // We use the bind group index as the register space, but don't know the bind group index until
+    // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
+    static constexpr uint32_t kRegisterSpacePlaceholder =
+        D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+
+    class BindGroupLayout final : public BindGroupLayoutBase {
+      public:
+        static Ref<BindGroupLayout> Create(Device* device,
+                                           const BindGroupLayoutDescriptor* descriptor,
+                                           PipelineCompatibilityToken pipelineCompatibilityToken);
+
+        ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+                                                        const BindGroupDescriptor* descriptor);
+        void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
+
+        // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
+        // dynamic binding indexes.
+        ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+
+        // The D3D shader register that the Dawn binding index is mapped to by this bind group
+        // layout.
+        uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+
+        // Counts of descriptors in the descriptor tables.
+        uint32_t GetCbvUavSrvDescriptorCount() const;
+        uint32_t GetSamplerDescriptorCount() const;
+
+        const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
+        const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
+
+      private:
+        BindGroupLayout(Device* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+        ~BindGroupLayout() override = default;
+
+        // Contains the offset into the descriptor heap for the given resource view. Samplers and
+        // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
+        // within each group and tightly packed.
+        //
+        // Dynamic resources are not used here since their descriptors are placed directly in root
+        // parameters.
+        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
+
+        // Contains the shader register this binding is mapped to.
+        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
+
+        uint32_t mCbvUavSrvDescriptorCount;
+        uint32_t mSamplerDescriptorCount;
+
+        std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
+        std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
+
+        SlabAllocator<BindGroup> mBindGroupAllocator;
+
+        StagingDescriptorAllocator* mSamplerAllocator = nullptr;
+        StagingDescriptorAllocator* mViewAllocator = nullptr;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
diff --git a/src/dawn/native/d3d12/BufferD3D12.cpp b/src/dawn/native/d3d12/BufferD3D12.cpp
new file mode 100644
index 0000000..27d9991
--- /dev/null
+++ b/src/dawn/native/d3d12/BufferD3D12.cpp
@@ -0,0 +1,493 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BufferD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
+            D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+
+            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+                flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+            }
+
+            return flags;
+        }
+
+        D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
+            D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+
+            if (usage & wgpu::BufferUsage::CopySrc) {
+                resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+            }
+            if (usage & wgpu::BufferUsage::CopyDst) {
+                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+            }
+            if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
+                resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
+            }
+            if (usage & wgpu::BufferUsage::Index) {
+                resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
+            }
+            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+                resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+            }
+            if (usage & kReadOnlyStorageBuffer) {
+                resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+                                  D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+            }
+            if (usage & wgpu::BufferUsage::Indirect) {
+                resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
+            }
+            if (usage & wgpu::BufferUsage::QueryResolve) {
+                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+            }
+
+            return resourceState;
+        }
+
+        D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
+            if (allowedUsage & wgpu::BufferUsage::MapRead) {
+                return D3D12_HEAP_TYPE_READBACK;
+            } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
+                return D3D12_HEAP_TYPE_UPLOAD;
+            } else {
+                return D3D12_HEAP_TYPE_DEFAULT;
+            }
+        }
+
+        size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
+            if ((usage & wgpu::BufferUsage::Uniform) != 0) {
+                // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
+                // forbids binding a CBV to an unaligned size. To prevent, one can always safely
+                // align the buffer size to the CBV data alignment as other buffer usages
+                // ignore it (no size check). The validation will still enforce bound checks with
+                // the unaligned size returned by GetSize().
+                // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
+                return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
+            }
+            return 1;
+        }
+    }  // namespace
+
+    // static
+    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+        return buffer;
+    }
+
+    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+        : BufferBase(device, descriptor) {
+    }
+
+    MaybeError Buffer::Initialize(bool mappedAtCreation) {
+        // Allocate at least 4 bytes so clamped accesses are always in bounds.
+        uint64_t size = std::max(GetSize(), uint64_t(4u));
+        size_t alignment = D3D12BufferSizeAlignment(GetUsage());
+        if (size > std::numeric_limits<uint64_t>::max() - alignment) {
+            // Alignment would overlow.
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+        mAllocatedSize = Align(size, alignment);
+
+        D3D12_RESOURCE_DESC resourceDescriptor;
+        resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+        resourceDescriptor.Alignment = 0;
+        resourceDescriptor.Width = mAllocatedSize;
+        resourceDescriptor.Height = 1;
+        resourceDescriptor.DepthOrArraySize = 1;
+        resourceDescriptor.MipLevels = 1;
+        resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+        resourceDescriptor.SampleDesc.Count = 1;
+        resourceDescriptor.SampleDesc.Quality = 0;
+        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+        // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+        // and robust resource initialization.
+        resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
+
+        auto heapType = D3D12HeapType(GetUsage());
+        auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
+
+        // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
+        // state
+        if (heapType == D3D12_HEAP_TYPE_READBACK) {
+            bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
+            mFixedResourceState = true;
+            mLastUsage = wgpu::BufferUsage::CopyDst;
+        }
+
+        // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
+        // state
+        if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
+            bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
+            mFixedResourceState = true;
+            mLastUsage = wgpu::BufferUsage::CopySrc;
+        }
+
+        DAWN_TRY_ASSIGN(
+            mResourceAllocation,
+            ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+
+        SetLabelImpl();
+
+        // The buffers with mappedAtCreation == true will be initialized in
+        // BufferBase::MapAtCreation().
+        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+            !mappedAtCreation) {
+            CommandRecordingContext* commandRecordingContext;
+            DAWN_TRY_ASSIGN(commandRecordingContext,
+                            ToBackend(GetDevice())->GetPendingCommandContext());
+
+            DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
+        }
+
+        // Initialize the padding bytes to zero.
+        if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
+            !mappedAtCreation) {
+            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+            if (paddingBytes > 0) {
+                CommandRecordingContext* commandRecordingContext;
+                DAWN_TRY_ASSIGN(commandRecordingContext,
+                                ToBackend(GetDevice())->GetPendingCommandContext());
+
+                uint32_t clearSize = paddingBytes;
+                uint64_t clearOffset = GetSize();
+                DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
+            }
+        }
+
+        return {};
+    }
+
+    Buffer::~Buffer() = default;
+
+    ID3D12Resource* Buffer::GetD3D12Resource() const {
+        return mResourceAllocation.GetD3D12Resource();
+    }
+
+    // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+    // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+    // cause subsequent errors.
+    bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                 D3D12_RESOURCE_BARRIER* barrier,
+                                                 wgpu::BufferUsage newUsage) {
+        // Track the underlying heap to ensure residency.
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+
+        // Return the resource barrier.
+        return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
+    }
+
+    void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                            wgpu::BufferUsage newUsage) {
+        D3D12_RESOURCE_BARRIER barrier;
+
+        if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
+            commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+        }
+    }
+
+    // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+    // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+    // cause subsequent errors.
+    bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                      D3D12_RESOURCE_BARRIER* barrier,
+                                                      wgpu::BufferUsage newUsage) {
+        // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
+        if (mFixedResourceState) {
+            ASSERT(mLastUsage == newUsage);
+            return false;
+        }
+
+        D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
+        D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+
+        // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+        // If one of the usages isn't UAV, then other barriers are used.
+        bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+                               newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+
+        if (needsUAVBarrier) {
+            barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+            barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+            barrier->UAV.pResource = GetD3D12Resource();
+
+            mLastUsage = newUsage;
+            return true;
+        }
+
+        // We can skip transitions to already current usages.
+        if (IsSubset(newUsage, mLastUsage)) {
+            return false;
+        }
+
+        mLastUsage = newUsage;
+
+        // The COMMON state represents a state where no write operations can be pending, which makes
+        // it possible to transition to and from some states without synchronizaton (i.e. without an
+        // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
+        // state, or 2) multiple read states. A buffer that is accessed within a command list will
+        // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
+        // completes - this is because all buffer writes are guaranteed to be completed before the
+        // next ExecuteCommandLists call executes.
+        // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+        // To track implicit decays, we must record the pending serial on which a transition will
+        // occur. When that buffer is used again, the previously recorded serial must be compared to
+        // the last completed serial to determine if the buffer has implicity decayed to the common
+        // state.
+        const ExecutionSerial pendingCommandSerial =
+            ToBackend(GetDevice())->GetPendingCommandSerial();
+        if (pendingCommandSerial > mLastUsedSerial) {
+            lastState = D3D12_RESOURCE_STATE_COMMON;
+            mLastUsedSerial = pendingCommandSerial;
+        }
+
+        // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
+        // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
+        // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
+        // destination state cannot be 1) more than one write state, or 2) both a read and write
+        // state. This goes unchecked here because it should not be allowed through render/compute
+        // pass validation.
+        if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+            return false;
+        }
+
+        // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
+        // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
+        // barrier state.
+        if (lastState == newState) {
+            return false;
+        }
+
+        barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+        barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+        barrier->Transition.pResource = GetD3D12Resource();
+        barrier->Transition.StateBefore = lastState;
+        barrier->Transition.StateAfter = newState;
+        barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+
+        return true;
+    }
+
+    D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
+        return mResourceAllocation.GetGPUPointer();
+    }
+
+    bool Buffer::IsCPUWritableAtCreation() const {
+        // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
+        // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
+        // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
+        // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
+        // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
+        // was created. With a staging buffer, the data on the CPU side will first upload to the
+        // staging buffer, and copied from the staging buffer to the GPU memory of the current
+        // buffer in the unmap() call.
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+    }
+
+    MaybeError Buffer::MapInternal(bool isWrite,
+                                   size_t offset,
+                                   size_t size,
+                                   const char* contextInfo) {
+        // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+        // evicted. This buffer should already have been made resident when it was created.
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
+
+        D3D12_RANGE range = {offset, offset + size};
+        // mMappedData is the pointer to the start of the resource, irrespective of offset.
+        // MSDN says (note the weird use of "never"):
+        //
+        //   When ppData is not NULL, the pointer returned is never offset by any values in
+        //   pReadRange.
+        //
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
+        DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
+
+        if (isWrite) {
+            mWrittenMappedRange = range;
+        }
+
+        return {};
+    }
+
+    MaybeError Buffer::MapAtCreationImpl() {
+        // We will use a staging buffer for MapRead buffers instead so we just clear the staging
+        // buffer and initialize the original buffer by copying the staging buffer to the original
+        // buffer one the first time Unmap() is called.
+        ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
+
+        // The buffers with mappedAtCreation == true will be initialized in
+        // BufferBase::MapAtCreation().
+        DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
+
+        return {};
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+        DAWN_TRY(EnsureDataInitialized(commandContext));
+
+        return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+    }
+
+    void Buffer::UnmapImpl() {
+        GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
+        mMappedData = nullptr;
+        mWrittenMappedRange = {0, 0};
+
+        // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+        // them when they are unmapped.
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+    }
+
+    void* Buffer::GetMappedPointerImpl() {
+        // The frontend asks that the pointer returned is from the start of the resource
+        // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
+        return mMappedData;
+    }
+
+    void Buffer::DestroyImpl() {
+        if (mMappedData != nullptr) {
+            // If the buffer is currently mapped, unmap without flushing the writes to the GPU
+            // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
+            // which parts to flush, so we set it to an empty range to prevent flushes.
+            mWrittenMappedRange = {0, 0};
+        }
+        BufferBase::DestroyImpl();
+
+        ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
+    }
+
+    bool Buffer::CheckIsResidentForTesting() const {
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        return heap->IsInList() || heap->IsResidencyLocked();
+    }
+
+    bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
+        return mResourceAllocation.GetInfo().mMethod == allocationMethod;
+    }
+
+    MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+        if (!NeedsInitialization()) {
+            return {};
+        }
+
+        DAWN_TRY(InitializeToZero(commandContext));
+        return {};
+    }
+
+    ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
+        CommandRecordingContext* commandContext,
+        uint64_t offset,
+        uint64_t size) {
+        if (!NeedsInitialization()) {
+            return {false};
+        }
+
+        if (IsFullBufferRange(offset, size)) {
+            SetIsDataInitialized();
+            return {false};
+        }
+
+        DAWN_TRY(InitializeToZero(commandContext));
+        return {true};
+    }
+
+    MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                          const CopyTextureToBufferCmd* copy) {
+        if (!NeedsInitialization()) {
+            return {};
+        }
+
+        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+            SetIsDataInitialized();
+        } else {
+            DAWN_TRY(InitializeToZero(commandContext));
+        }
+
+        return {};
+    }
+
+    void Buffer::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
+                     GetLabel());
+    }
+
+    MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+        ASSERT(NeedsInitialization());
+
+        // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
+        // that has already been zero initialized.
+        DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+        SetIsDataInitialized();
+        GetDevice()->IncrementLazyClearCountForTesting();
+
+        return {};
+    }
+
+    MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+                                   uint8_t clearValue,
+                                   uint64_t offset,
+                                   uint64_t size) {
+        Device* device = ToBackend(GetDevice());
+        size = size > 0 ? size : GetAllocatedSize();
+
+        // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
+        // changed away, so we can only clear such buffer with buffer mapping.
+        if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+            DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
+                                 "D3D12 map at clear buffer"));
+            memset(mMappedData, clearValue, size);
+            UnmapImpl();
+        } else if (clearValue == 0u) {
+            DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
+        } else {
+            // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
+            // includes STORAGE.
+            DynamicUploader* uploader = device->GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(size, device->GetPendingCommandSerial(),
+                                               kCopyBufferToBufferOffsetAlignment));
+
+            memset(uploadHandle.mappedBuffer, clearValue, size);
+
+            device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+                                                uploadHandle.startOffset, this, offset, size);
+        }
+
+        return {};
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BufferD3D12.h b/src/dawn/native/d3d12/BufferD3D12.h
new file mode 100644
index 0000000..253565a
--- /dev/null
+++ b/src/dawn/native/d3d12/BufferD3D12.h
@@ -0,0 +1,91 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BUFFERD3D12_H_
+#define DAWNNATIVE_D3D12_BUFFERD3D12_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class CommandRecordingContext;
+    class Device;
+
+    class Buffer final : public BufferBase {
+      public:
+        static ResultOrError<Ref<Buffer>> Create(Device* device,
+                                                 const BufferDescriptor* descriptor);
+
+        ID3D12Resource* GetD3D12Resource() const;
+        D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
+
+        bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                             D3D12_RESOURCE_BARRIER* barrier,
+                                             wgpu::BufferUsage newUsage);
+        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                        wgpu::BufferUsage newUsage);
+
+        bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
+        bool CheckIsResidentForTesting() const;
+
+        MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+        ResultOrError<bool> EnsureDataInitializedAsDestination(
+            CommandRecordingContext* commandContext,
+            uint64_t offset,
+            uint64_t size);
+        MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                      const CopyTextureToBufferCmd* copy);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        Buffer(Device* device, const BufferDescriptor* descriptor);
+        ~Buffer() override;
+
+        MaybeError Initialize(bool mappedAtCreation);
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+        void UnmapImpl() override;
+        void DestroyImpl() override;
+        bool IsCPUWritableAtCreation() const override;
+        virtual MaybeError MapAtCreationImpl() override;
+        void* GetMappedPointerImpl() override;
+
+        MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
+
+        bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                  D3D12_RESOURCE_BARRIER* barrier,
+                                                  wgpu::BufferUsage newUsage);
+
+        MaybeError InitializeToZero(CommandRecordingContext* commandContext);
+        MaybeError ClearBuffer(CommandRecordingContext* commandContext,
+                               uint8_t clearValue,
+                               uint64_t offset = 0,
+                               uint64_t size = 0);
+
+        ResourceHeapAllocation mResourceAllocation;
+        bool mFixedResourceState = false;
+        wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+        ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
+
+        D3D12_RANGE mWrittenMappedRange = {0, 0};
+        void* mMappedData = nullptr;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_BUFFERD3D12_H_
diff --git a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
new file mode 100644
index 0000000..617c196
--- /dev/null
+++ b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
@@ -0,0 +1,53 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::d3d12 {
+
+    CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
+        D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+        uint32_t heapIndex)
+        : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
+    }
+
+    D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+        ASSERT(IsValid());
+        return mBaseDescriptor;
+    }
+
+    D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
+        uint32_t sizeIncrementInBytes,
+        uint32_t offsetInDescriptorCount) const {
+        ASSERT(IsValid());
+        D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
+        cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
+        return cpuHandle;
+    }
+
+    uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
+        ASSERT(mHeapIndex >= 0);
+        return mHeapIndex;
+    }
+
+    bool CPUDescriptorHeapAllocation::IsValid() const {
+        return mBaseDescriptor.ptr != 0;
+    }
+
+    void CPUDescriptorHeapAllocation::Invalidate() {
+        mBaseDescriptor = {0};
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
new file mode 100644
index 0000000..997d056
--- /dev/null
+++ b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
@@ -0,0 +1,47 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
+#define DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
+
+#include <cstdint>
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    // Wrapper for a handle into a CPU-only descriptor heap.
+    class CPUDescriptorHeapAllocation {
+      public:
+        CPUDescriptorHeapAllocation() = default;
+        CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
+
+        D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+
+        D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
+                                               uint32_t offsetInDescriptorCount) const;
+        uint32_t GetHeapIndex() const;
+
+        bool IsValid() const;
+
+        void Invalidate();
+
+      private:
+        D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+        uint32_t mHeapIndex = -1;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/src/dawn/native/d3d12/CommandAllocatorManager.cpp b/src/dawn/native/d3d12/CommandAllocatorManager.cpp
new file mode 100644
index 0000000..88ac0b8
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandAllocatorManager.cpp
@@ -0,0 +1,72 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+
+namespace dawn::native::d3d12 {
+
+    CommandAllocatorManager::CommandAllocatorManager(Device* device)
+        : device(device), mAllocatorCount(0) {
+        mFreeAllocators.set();
+    }
+
+    ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
+        // If there are no free allocators, get the oldest serial in flight and wait on it
+        if (mFreeAllocators.none()) {
+            const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
+            DAWN_TRY(device->WaitForSerial(firstSerial));
+            DAWN_TRY(Tick(firstSerial));
+        }
+
+        ASSERT(mFreeAllocators.any());
+
+        // Get the index of the first free allocator from the bitset
+        unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
+
+        if (firstFreeIndex >= mAllocatorCount) {
+            ASSERT(firstFreeIndex == mAllocatorCount);
+            mAllocatorCount++;
+            DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
+                                      D3D12_COMMAND_LIST_TYPE_DIRECT,
+                                      IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
+                                  "D3D12 create command allocator"));
+        }
+
+        // Mark the command allocator as used
+        mFreeAllocators.reset(firstFreeIndex);
+
+        // Enqueue the command allocator. It will be scheduled for reset after the next
+        // ExecuteCommandLists
+        mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
+                                           device->GetPendingCommandSerial());
+        return mCommandAllocators[firstFreeIndex].Get();
+    }
+
+    MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
+        // Reset all command allocators that are no longer in flight
+        for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
+            DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
+            mFreeAllocators.set(it.index);
+        }
+        mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
+        return {};
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandAllocatorManager.h b/src/dawn/native/d3d12/CommandAllocatorManager.h
new file mode 100644
index 0000000..1f8cc1e
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandAllocatorManager.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
+#define DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+
+#include <bitset>
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class CommandAllocatorManager {
+      public:
+        CommandAllocatorManager(Device* device);
+
+        // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
+        // otherwise its commands may be reset before execution has completed on the GPU
+        ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
+        MaybeError Tick(ExecutionSerial lastCompletedSerial);
+
+      private:
+        Device* device;
+
+        // This must be at least 2 because the Device and Queue use separate command allocators
+        static constexpr unsigned int kMaxCommandAllocators = 32;
+        unsigned int mAllocatorCount;
+
+        struct IndexedCommandAllocator {
+            ComPtr<ID3D12CommandAllocator> commandAllocator;
+            unsigned int index;
+        };
+
+        ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
+        std::bitset<kMaxCommandAllocators> mFreeAllocators;
+        SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
diff --git a/src/dawn/native/d3d12/CommandBufferD3D12.cpp b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
new file mode 100644
index 0000000..86022c7
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
@@ -0,0 +1,1676 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+#include "dawn/native/d3d12/RenderPassBuilderD3D12.h"
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+
+        DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
+            switch (format) {
+                case wgpu::IndexFormat::Undefined:
+                    return DXGI_FORMAT_UNKNOWN;
+                case wgpu::IndexFormat::Uint16:
+                    return DXGI_FORMAT_R16_UINT;
+                case wgpu::IndexFormat::Uint32:
+                    return DXGI_FORMAT_R32_UINT;
+            }
+        }
+
+        D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
+            switch (type) {
+                case wgpu::QueryType::Occlusion:
+                    return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
+                case wgpu::QueryType::PipelineStatistics:
+                    return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
+                case wgpu::QueryType::Timestamp:
+                    return D3D12_QUERY_TYPE_TIMESTAMP;
+            }
+        }
+
+        bool CanUseCopyResource(const TextureCopy& src,
+                                const TextureCopy& dst,
+                                const Extent3D& copySize) {
+            // Checked by validation
+            ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
+            ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
+            ASSERT(src.aspect == dst.aspect);
+
+            const Extent3D& srcSize = src.texture->GetSize();
+            const Extent3D& dstSize = dst.texture->GetSize();
+
+            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
+            // In order to use D3D12's copy resource, the textures must be the same dimensions, and
+            // the copy must be of the entire resource.
+            // TODO(dawn:129): Support 1D textures.
+            return src.aspect == src.texture->GetFormat().aspects &&
+                   src.texture->GetDimension() == dst.texture->GetDimension() &&  //
+                   dst.texture->GetNumMipLevels() == 1 &&                         //
+                   src.texture->GetNumMipLevels() == 1 &&  // A copy command is of a single mip, so
+                                                           // if a resource has more than one, we
+                                                           // definitely cannot use CopyResource.
+                   copySize.width == dstSize.width &&      //
+                   copySize.width == srcSize.width &&      //
+                   copySize.height == dstSize.height &&    //
+                   copySize.height == srcSize.height &&    //
+                   copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers &&  //
+                   copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
+        }
+
+        void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
+                                     WriteTimestampCmd* cmd) {
+            QuerySet* querySet = ToBackend(cmd->querySet.Get());
+            ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
+            commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
+                                  cmd->queryIndex);
+        }
+
+        void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
+                                      Device* device,
+                                      QuerySet* querySet,
+                                      uint32_t firstQuery,
+                                      uint32_t queryCount,
+                                      Buffer* destination,
+                                      uint64_t destinationOffset) {
+            const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+            auto currentIt = availability.begin() + firstQuery;
+            auto lastIt = availability.begin() + firstQuery + queryCount;
+
+            // Traverse available queries in the range of [firstQuery, firstQuery +  queryCount - 1]
+            while (currentIt != lastIt) {
+                auto firstTrueIt = std::find(currentIt, lastIt, true);
+                // No available query found for resolving
+                if (firstTrueIt == lastIt) {
+                    break;
+                }
+                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+                // The query index of firstTrueIt where the resolving starts
+                uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+                // The queries count between firstTrueIt and nextFalseIt need to be resolved
+                uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+                // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+                uint32_t resolveDestinationOffset =
+                    destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+                // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+                commandList->ResolveQueryData(
+                    querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
+                    resolveQueryIndex, resolveQueryCount, destination->GetD3D12Resource(),
+                    resolveDestinationOffset);
+
+                // Set current iterator to next false
+                currentIt = nextFalseIt;
+            }
+        }
+
+        void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
+                                    RenderPipeline* pipeline,
+                                    uint32_t firstVertex,
+                                    uint32_t firstInstance) {
+            const FirstOffsetInfo& firstOffsetInfo = pipeline->GetFirstOffsetInfo();
+            if (!firstOffsetInfo.usesVertexIndex && !firstOffsetInfo.usesInstanceIndex) {
+                return;
+            }
+            std::array<uint32_t, 2> offsets{};
+            uint32_t count = 0;
+            if (firstOffsetInfo.usesVertexIndex) {
+                offsets[firstOffsetInfo.vertexIndexOffset / sizeof(uint32_t)] = firstVertex;
+                ++count;
+            }
+            if (firstOffsetInfo.usesInstanceIndex) {
+                offsets[firstOffsetInfo.instanceIndexOffset / sizeof(uint32_t)] = firstInstance;
+                ++count;
+            }
+            PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+            commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
+                                                       count, offsets.data(), 0);
+        }
+
+        bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
+                                            const TextureCopy& srcCopy,
+                                            const TextureCopy& dstCopy) {
+            // Currently we only need the workaround for an Intel D3D12 driver issue.
+            if (device->IsToggleEnabled(
+                    Toggle::
+                        UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
+                bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
+                ASSERT(
+                    srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+
+                // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
+                // sizes of depth stencil formats are always no less than 4 bytes.
+                bool isSmallColorFormat =
+                    HasOneBit(srcCopy.aspect) &&
+                    srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
+                if (copyToLesserLevel && isSmallColorFormat) {
+                    return true;
+                }
+            }
+
+            return false;
+        }
+
+        MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+                                                        const TextureCopy& srcCopy,
+                                                        const TextureCopy& dstCopy,
+                                                        const Extent3D& copySize) {
+            ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+            ASSERT(srcCopy.aspect == dstCopy.aspect);
+            dawn::native::Format format = srcCopy.texture->GetFormat();
+            const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+            ASSERT(copySize.width % blockInfo.width == 0);
+            uint32_t widthInBlocks = copySize.width / blockInfo.width;
+            ASSERT(copySize.height % blockInfo.height == 0);
+            uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+            // Create tempBuffer
+            uint32_t bytesPerRow =
+                Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
+            uint32_t rowsPerImage = heightInBlocks;
+
+            // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
+            // need to set mappedAtCreation to be true.
+            auto tempBufferSize =
+                ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
+
+            BufferDescriptor tempBufferDescriptor;
+            tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+            tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
+            Device* device = ToBackend(srcCopy.texture->GetDevice());
+            Ref<BufferBase> tempBufferBase;
+            DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+            Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
+
+            BufferCopy bufferCopy;
+            bufferCopy.buffer = tempBuffer;
+            bufferCopy.offset = 0;
+            bufferCopy.bytesPerRow = bytesPerRow;
+            bufferCopy.rowsPerImage = rowsPerImage;
+
+            // Copy from source texture into tempBuffer
+            tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
+            RecordBufferTextureCopy(BufferTextureCopyDirection::T2B,
+                                    recordingContext->GetCommandList(), bufferCopy, srcCopy,
+                                    copySize);
+
+            // Copy from tempBuffer into destination texture
+            tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
+            RecordBufferTextureCopy(BufferTextureCopyDirection::B2T,
+                                    recordingContext->GetCommandList(), bufferCopy, dstCopy,
+                                    copySize);
+
+            // Save tempBuffer into recordingContext
+            recordingContext->AddToTempBuffers(std::move(tempBuffer));
+
+            return {};
+        }
+
+        void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
+                                            ComputePipeline* pipeline,
+                                            DispatchCmd* dispatch) {
+            if (!pipeline->UsesNumWorkgroups()) {
+                return;
+            }
+
+            PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+            commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3,
+                                                      dispatch, 0);
+        }
+
+        // Records the necessary barriers for a synchronization scope using the resource usage
+        // data pre-computed in the frontend. Also performs lazy initialization if required.
+        // Returns whether any UAV are used in the synchronization scope.
+        bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+                                            const SyncScopeResourceUsage& usages) {
+            std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+            wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+
+            for (size_t i = 0; i < usages.buffers.size(); ++i) {
+                Buffer* buffer = ToBackend(usages.buffers[i]);
+
+                // TODO(crbug.com/dawn/852): clear storage buffers with
+                // ClearUnorderedAccessView*().
+                buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+
+                D3D12_RESOURCE_BARRIER barrier;
+                if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+                                                            usages.bufferUsages[i])) {
+                    barriers.push_back(barrier);
+                }
+                bufferUsages |= usages.bufferUsages[i];
+            }
+
+            wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
+
+            for (size_t i = 0; i < usages.textures.size(); ++i) {
+                Texture* texture = ToBackend(usages.textures[i]);
+
+                // Clear subresources that are not render attachments. Render attachments will be
+                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+                // subresource has not been initialized before the render pass.
+                usages.textureUsages[i].Iterate(
+                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                            texture->EnsureSubresourceContentInitialized(commandContext, range);
+                        }
+                        textureUsages |= usage;
+                    });
+
+                ToBackend(usages.textures[i])
+                    ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+                                                             usages.textureUsages[i]);
+            }
+
+            if (barriers.size()) {
+                commandList->ResourceBarrier(barriers.size(), barriers.data());
+            }
+
+            return (bufferUsages & wgpu::BufferUsage::Storage ||
+                    textureUsages & wgpu::TextureUsage::StorageBinding);
+        }
+
+    }  // anonymous namespace
+
+    class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
+        using Base = BindGroupTrackerBase;
+
+      public:
+        BindGroupStateTracker(Device* device)
+            : BindGroupTrackerBase(),
+              mDevice(device),
+              mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
+              mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
+        }
+
+        void SetInComputePass(bool inCompute_) {
+            mInCompute = inCompute_;
+        }
+
+        MaybeError Apply(CommandRecordingContext* commandContext) {
+            BeforeApply();
+
+            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+            UpdateRootSignatureIfNecessary(commandList);
+
+            // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
+            // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
+            // at any given time. This means that when we switch heaps, all other currently bound
+            // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
+            // the signal to change the bounded heaps.
+            // Re-populating all bindgroups after the last one fails causes duplicated allocations
+            // to occur on overflow.
+            bool didCreateBindGroupViews = true;
+            bool didCreateBindGroupSamplers = true;
+            for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+                BindGroup* group = ToBackend(mBindGroups[index]);
+                didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+                didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
+                if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
+                    break;
+                }
+            }
+
+            if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
+                if (!didCreateBindGroupViews) {
+                    DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
+                }
+
+                if (!didCreateBindGroupSamplers) {
+                    DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
+                }
+
+                mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
+                mDirtyBindGroups |= mBindGroupLayoutsMask;
+
+                // Must be called before applying the bindgroups.
+                SetID3D12DescriptorHeaps(commandList);
+
+                for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
+                    BindGroup* group = ToBackend(mBindGroups[index]);
+                    didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+                    didCreateBindGroupSamplers =
+                        group->PopulateSamplers(mDevice, mSamplerAllocator);
+                    ASSERT(didCreateBindGroupViews);
+                    ASSERT(didCreateBindGroupSamplers);
+                }
+            }
+
+            for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+                BindGroup* group = ToBackend(mBindGroups[index]);
+                ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
+                               mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+            }
+
+            AfterApply();
+
+            return {};
+        }
+
+        void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
+            ASSERT(commandList != nullptr);
+            std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
+                mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
+            ASSERT(descriptorHeaps[0] != nullptr);
+            ASSERT(descriptorHeaps[1] != nullptr);
+            commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
+
+            // Descriptor table state is undefined at the beginning of a command list and after
+            // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
+            // reset the root descriptor table for samplers, otherwise the shader cannot access the
+            // descriptor heaps.
+            mBoundRootSamplerTables = {};
+        }
+
+      private:
+        void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
+            if (mLastAppliedPipelineLayout != mPipelineLayout) {
+                if (mInCompute) {
+                    commandList->SetComputeRootSignature(
+                        ToBackend(mPipelineLayout)->GetRootSignature());
+                } else {
+                    commandList->SetGraphicsRootSignature(
+                        ToBackend(mPipelineLayout)->GetRootSignature());
+                }
+                // Invalidate the root sampler tables previously set in the root signature.
+                mBoundRootSamplerTables = {};
+            }
+        }
+
+        void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
+                            const PipelineLayout* pipelineLayout,
+                            BindGroupIndex index,
+                            BindGroup* group,
+                            uint32_t dynamicOffsetCountIn,
+                            const uint64_t* dynamicOffsetsIn) {
+            ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
+                dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
+            ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
+
+            // Usually, the application won't set the same offsets many times,
+            // so always try to apply dynamic offsets even if the offsets stay the same
+            if (dynamicOffsets.size() != BindingIndex(0)) {
+                // Update dynamic offsets.
+                // Dynamic buffer bindings are packed at the beginning of the layout.
+                for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
+                     ++bindingIndex) {
+                    const BindingInfo& bindingInfo =
+                        group->GetLayout()->GetBindingInfo(bindingIndex);
+                    if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+                        // Skip dynamic buffers that are not visible. D3D12 does not have None
+                        // visibility.
+                        continue;
+                    }
+
+                    uint32_t parameterIndex =
+                        pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
+                    BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+
+                    // Calculate buffer locations that root descriptors links to. The location
+                    // is (base buffer location + initial offset + dynamic offset)
+                    uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
+                    uint64_t offset = binding.offset + dynamicOffset;
+                    D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
+                        ToBackend(binding.buffer)->GetVA() + offset;
+
+                    ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform:
+                            if (mInCompute) {
+                                commandList->SetComputeRootConstantBufferView(parameterIndex,
+                                                                              bufferLocation);
+                            } else {
+                                commandList->SetGraphicsRootConstantBufferView(parameterIndex,
+                                                                               bufferLocation);
+                            }
+                            break;
+                        case wgpu::BufferBindingType::Storage:
+                        case kInternalStorageBufferBinding:
+                            if (mInCompute) {
+                                commandList->SetComputeRootUnorderedAccessView(parameterIndex,
+                                                                               bufferLocation);
+                            } else {
+                                commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
+                                                                                bufferLocation);
+                            }
+                            break;
+                        case wgpu::BufferBindingType::ReadOnlyStorage:
+                            if (mInCompute) {
+                                commandList->SetComputeRootShaderResourceView(parameterIndex,
+                                                                              bufferLocation);
+                            } else {
+                                commandList->SetGraphicsRootShaderResourceView(parameterIndex,
+                                                                               bufferLocation);
+                            }
+                            break;
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+                }
+            }
+
+            // It's not necessary to update descriptor tables if only the dynamic offset changed.
+            if (!mDirtyBindGroups[index]) {
+                return;
+            }
+
+            const uint32_t cbvUavSrvCount =
+                ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+            const uint32_t samplerCount =
+                ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
+
+            if (cbvUavSrvCount > 0) {
+                uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
+                const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+                if (mInCompute) {
+                    commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+                } else {
+                    commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+                }
+            }
+
+            if (samplerCount > 0) {
+                uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
+                const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
+                    group->GetBaseSamplerDescriptor();
+                // Check if the group requires its sampler table to be set in the pipeline.
+                // This because sampler heap allocations could be cached and use the same table.
+                if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
+                    if (mInCompute) {
+                        commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+                    } else {
+                        commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+                    }
+
+                    mBoundRootSamplerTables[index] = baseDescriptor;
+                }
+            }
+
+            const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
+            if (dynamicStorageBufferLengths.size() != 0) {
+                uint32_t parameterIndex =
+                    pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
+                uint32_t firstRegisterOffset =
+                    pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
+
+                if (mInCompute) {
+                    commandList->SetComputeRoot32BitConstants(
+                        parameterIndex, dynamicStorageBufferLengths.size(),
+                        dynamicStorageBufferLengths.data(), firstRegisterOffset);
+                } else {
+                    commandList->SetGraphicsRoot32BitConstants(
+                        parameterIndex, dynamicStorageBufferLengths.size(),
+                        dynamicStorageBufferLengths.data(), firstRegisterOffset);
+                }
+            }
+        }
+
+        Device* mDevice;
+
+        bool mInCompute = false;
+
+        ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
+            mBoundRootSamplerTables = {};
+
+        ShaderVisibleDescriptorAllocator* mViewAllocator;
+        ShaderVisibleDescriptorAllocator* mSamplerAllocator;
+    };
+
+    namespace {
+        class VertexBufferTracker {
+          public:
+            void OnSetVertexBuffer(VertexBufferSlot slot,
+                                   Buffer* buffer,
+                                   uint64_t offset,
+                                   uint64_t size) {
+                mStartSlot = std::min(mStartSlot, slot);
+                mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+
+                auto* d3d12BufferView = &mD3D12BufferViews[slot];
+                d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
+                d3d12BufferView->SizeInBytes = size;
+                // The bufferView stride is set based on the vertex state before a draw.
+            }
+
+            void Apply(ID3D12GraphicsCommandList* commandList,
+                       const RenderPipeline* renderPipeline) {
+                ASSERT(renderPipeline != nullptr);
+
+                VertexBufferSlot startSlot = mStartSlot;
+                VertexBufferSlot endSlot = mEndSlot;
+
+                // If the vertex state has changed, we need to update the StrideInBytes
+                // for the D3D12 buffer views. We also need to extend the dirty range to
+                // touch all these slots because the stride may have changed.
+                if (mLastAppliedRenderPipeline != renderPipeline) {
+                    mLastAppliedRenderPipeline = renderPipeline;
+
+                    for (VertexBufferSlot slot :
+                         IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+                        startSlot = std::min(startSlot, slot);
+                        endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+                        mD3D12BufferViews[slot].StrideInBytes =
+                            renderPipeline->GetVertexBuffer(slot).arrayStride;
+                    }
+                }
+
+                if (endSlot <= startSlot) {
+                    return;
+                }
+
+                // mD3D12BufferViews is kept up to date with the most recent data passed
+                // to SetVertexBuffer. This makes it correct to only track the start
+                // and end of the dirty range. When Apply is called,
+                // we will at worst set non-dirty vertex buffers in duplicate.
+                commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
+                                                static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
+                                                &mD3D12BufferViews[startSlot]);
+
+                mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
+                mEndSlot = VertexBufferSlot(uint8_t(0));
+            }
+
+          private:
+            // startSlot and endSlot indicate the range of dirty vertex buffers.
+            // If there are multiple calls to SetVertexBuffer, the start and end
+            // represent the union of the dirty ranges (the union may have non-dirty
+            // data in the middle of the range).
+            const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
+            VertexBufferSlot mStartSlot{kMaxVertexBuffers};
+            VertexBufferSlot mEndSlot{uint8_t(0)};
+            ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers>
+                mD3D12BufferViews = {};
+        };
+
+        void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
+                                           BeginRenderPassCmd* renderPass) {
+            ASSERT(renderPass != nullptr);
+
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                TextureViewBase* resolveTarget =
+                    renderPass->colorAttachments[i].resolveTarget.Get();
+                if (resolveTarget == nullptr) {
+                    continue;
+                }
+
+                TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
+                Texture* colorTexture = ToBackend(colorView->GetTexture());
+                Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
+
+                // Transition the usages of the color attachment and resolve target.
+                colorTexture->TrackUsageAndTransitionNow(commandContext,
+                                                         D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
+                                                         colorView->GetSubresourceRange());
+                resolveTexture->TrackUsageAndTransitionNow(commandContext,
+                                                           D3D12_RESOURCE_STATE_RESOLVE_DEST,
+                                                           resolveTarget->GetSubresourceRange());
+
+                // Do MSAA resolve with ResolveSubResource().
+                ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
+                ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
+                const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
+                    resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
+                    Aspect::Color);
+                constexpr uint32_t kColorTextureSubresourceIndex = 0;
+                commandContext->GetCommandList()->ResolveSubresource(
+                    resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
+                    kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
+            }
+        }
+
+    }  // anonymous namespace
+
+    // static
+    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                             const CommandBufferDescriptor* descriptor) {
+        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    }
+
+    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+        : CommandBufferBase(encoder, descriptor) {
+    }
+
+    MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
+        Device* device = ToBackend(GetDevice());
+        BindGroupStateTracker bindingTracker(device);
+
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+        // Make sure we use the correct descriptors for this command list. Could be done once per
+        // actual command list but here is ok because there should be few command buffers.
+        bindingTracker.SetID3D12DescriptorHeaps(commandList);
+
+        size_t nextComputePassNumber = 0;
+        size_t nextRenderPassNumber = 0;
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::BeginComputePass: {
+                    mCommands.NextCommand<BeginComputePassCmd>();
+
+                    bindingTracker.SetInComputePass(true);
+                    DAWN_TRY(RecordComputePass(
+                        commandContext, &bindingTracker,
+                        GetResourceUsages().computePasses[nextComputePassNumber]));
+
+                    nextComputePassNumber++;
+                    break;
+                }
+
+                case Command::BeginRenderPass: {
+                    BeginRenderPassCmd* beginRenderPassCmd =
+                        mCommands.NextCommand<BeginRenderPassCmd>();
+
+                    const bool passHasUAV = TransitionAndClearForSyncScope(
+                        commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                    bindingTracker.SetInComputePass(false);
+
+                    LazyClearRenderPassAttachments(beginRenderPassCmd);
+                    DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
+                                              passHasUAV));
+
+                    nextRenderPassNumber++;
+                    break;
+                }
+
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                    if (copy->size == 0) {
+                        // Skip no-op copies.
+                        break;
+                    }
+                    Buffer* srcBuffer = ToBackend(copy->source.Get());
+                    Buffer* dstBuffer = ToBackend(copy->destination.Get());
+
+                    DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
+                    bool cleared;
+                    DAWN_TRY_ASSIGN(cleared,
+                                    dstBuffer->EnsureDataInitializedAsDestination(
+                                        commandContext, copy->destinationOffset, copy->size));
+                    DAWN_UNUSED(cleared);
+
+                    srcBuffer->TrackUsageAndTransitionNow(commandContext,
+                                                          wgpu::BufferUsage::CopySrc);
+                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
+                                                          wgpu::BufferUsage::CopyDst);
+
+                    commandList->CopyBufferRegion(
+                        dstBuffer->GetD3D12Resource(), copy->destinationOffset,
+                        srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
+                    break;
+                }
+
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    Buffer* buffer = ToBackend(copy->source.buffer.Get());
+                    Texture* texture = ToBackend(copy->destination.texture.Get());
+
+                    DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
+
+                    SubresourceRange subresources =
+                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                    if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
+                                                      copy->destination.mipLevel)) {
+                        texture->SetIsSubresourceContentInitialized(true, subresources);
+                    } else {
+                        texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                    }
+
+                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+                                                        subresources);
+
+                    RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList,
+                                            copy->source, copy->destination, copy->copySize);
+
+                    break;
+                }
+
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    Texture* texture = ToBackend(copy->source.texture.Get());
+                    Buffer* buffer = ToBackend(copy->destination.buffer.Get());
+
+                    DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
+
+                    SubresourceRange subresources =
+                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
+                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+
+                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+                                                        subresources);
+                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+                    RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
+                                            copy->destination, copy->source, copy->copySize);
+
+                    break;
+                }
+
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        mCommands.NextCommand<CopyTextureToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    Texture* source = ToBackend(copy->source.texture.Get());
+                    Texture* destination = ToBackend(copy->destination.texture.Get());
+
+                    SubresourceRange srcRange =
+                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+                    SubresourceRange dstRange =
+                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                    source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+                    if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
+                                                      copy->destination.mipLevel)) {
+                        destination->SetIsSubresourceContentInitialized(true, dstRange);
+                    } else {
+                        destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+                    }
+
+                    if (copy->source.texture.Get() == copy->destination.texture.Get() &&
+                        copy->source.mipLevel == copy->destination.mipLevel) {
+                        // When there are overlapped subresources, the layout of the overlapped
+                        // subresources should all be COMMON instead of what we set now. Currently
+                        // it is not allowed to copy with overlapped subresources, but we still
+                        // add the ASSERT here as a reminder for this possible misuse.
+                        ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
+                                                  copy->copySize.depthOrArrayLayers));
+                    }
+                    source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+                                                       srcRange);
+                    destination->TrackUsageAndTransitionNow(commandContext,
+                                                            wgpu::TextureUsage::CopyDst, dstRange);
+
+                    ASSERT(srcRange.aspects == dstRange.aspects);
+                    if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
+                                                       copy->destination)) {
+                        DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
+                            commandContext, copy->source, copy->destination, copy->copySize));
+                        break;
+                    }
+
+                    if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
+                        commandList->CopyResource(destination->GetD3D12Resource(),
+                                                  source->GetD3D12Resource());
+                    } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
+                               destination->GetDimension() == wgpu::TextureDimension::e3D) {
+                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+                            D3D12_TEXTURE_COPY_LOCATION srcLocation =
+                                ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
+                                                                     0, aspect);
+                            D3D12_TEXTURE_COPY_LOCATION dstLocation =
+                                ComputeTextureCopyLocationForTexture(
+                                    destination, copy->destination.mipLevel, 0, aspect);
+
+                            D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
+                                copy->source.origin, copy->copySize);
+
+                            commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
+                                                           copy->destination.origin.y,
+                                                           copy->destination.origin.z, &srcLocation,
+                                                           &sourceRegion);
+                        }
+                    } else {
+                        const dawn::native::Extent3D copyExtentOneSlice = {
+                            copy->copySize.width, copy->copySize.height, 1u};
+
+                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+                            for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+                                uint32_t sourceLayer = 0;
+                                uint32_t sourceZ = 0;
+                                switch (source->GetDimension()) {
+                                    case wgpu::TextureDimension::e1D:
+                                        ASSERT(copy->source.origin.z == 0);
+                                        break;
+                                    case wgpu::TextureDimension::e2D:
+                                        sourceLayer = copy->source.origin.z + z;
+                                        break;
+                                    case wgpu::TextureDimension::e3D:
+                                        sourceZ = copy->source.origin.z + z;
+                                        break;
+                                }
+
+                                uint32_t destinationLayer = 0;
+                                uint32_t destinationZ = 0;
+                                switch (destination->GetDimension()) {
+                                    case wgpu::TextureDimension::e1D:
+                                        ASSERT(copy->destination.origin.z == 0);
+                                        break;
+                                    case wgpu::TextureDimension::e2D:
+                                        destinationLayer = copy->destination.origin.z + z;
+                                        break;
+                                    case wgpu::TextureDimension::e3D:
+                                        destinationZ = copy->destination.origin.z + z;
+                                        break;
+                                }
+                                D3D12_TEXTURE_COPY_LOCATION srcLocation =
+                                    ComputeTextureCopyLocationForTexture(
+                                        source, copy->source.mipLevel, sourceLayer, aspect);
+
+                                D3D12_TEXTURE_COPY_LOCATION dstLocation =
+                                    ComputeTextureCopyLocationForTexture(destination,
+                                                                         copy->destination.mipLevel,
+                                                                         destinationLayer, aspect);
+
+                                Origin3D sourceOriginInSubresource = copy->source.origin;
+                                sourceOriginInSubresource.z = sourceZ;
+                                D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
+                                    sourceOriginInSubresource, copyExtentOneSlice);
+
+                                commandList->CopyTextureRegion(
+                                    &dstLocation, copy->destination.origin.x,
+                                    copy->destination.origin.y, destinationZ, &srcLocation,
+                                    &sourceRegion);
+                            }
+                        }
+                    }
+                    break;
+                }
+
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                    if (cmd->size == 0) {
+                        // Skip no-op fills.
+                        break;
+                    }
+                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+
+                    bool clearedToZero;
+                    DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
+                                                       commandContext, cmd->offset, cmd->size));
+
+                    if (!clearedToZero) {
+                        DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
+                                                           cmd->offset, cmd->size));
+                    }
+
+                    break;
+                }
+
+                case Command::ResolveQuerySet: {
+                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                    uint32_t firstQuery = cmd->firstQuery;
+                    uint32_t queryCount = cmd->queryCount;
+                    Buffer* destination = ToBackend(cmd->destination.Get());
+                    uint64_t destinationOffset = cmd->destinationOffset;
+
+                    bool cleared;
+                    DAWN_TRY_ASSIGN(cleared, destination->EnsureDataInitializedAsDestination(
+                                                 commandContext, destinationOffset,
+                                                 queryCount * sizeof(uint64_t)));
+                    DAWN_UNUSED(cleared);
+
+                    // Resolving unavailable queries is undefined behaviour on D3D12, we only can
+                    // resolve the available part of sparse queries. In order to resolve the
+                    // unavailables as 0s, we need to clear the resolving region of the destination
+                    // buffer to 0s.
+                    auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
+                    auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
+                    bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+                    if (hasUnavailableQueries) {
+                        DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
+                                                           destinationOffset,
+                                                           queryCount * sizeof(uint64_t)));
+                    }
+
+                    destination->TrackUsageAndTransitionNow(commandContext,
+                                                            wgpu::BufferUsage::QueryResolve);
+
+                    RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
+                                             destination, destinationOffset);
+
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    RecordWriteTimestampCmd(commandList, cmd);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixEndEventOnCommandList(commandList);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                    const uint64_t offset = write->offset;
+                    const uint64_t size = write->size;
+                    if (size == 0) {
+                        continue;
+                    }
+
+                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                    uint8_t* data = mCommands.NextData<uint8_t>(size);
+                    Device* device = ToBackend(GetDevice());
+
+                    UploadHandle uploadHandle;
+                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                      size, device->GetPendingCommandSerial(),
+                                                      kCopyBufferToBufferOffsetAlignment));
+                    ASSERT(uploadHandle.mappedBuffer != nullptr);
+                    memcpy(uploadHandle.mappedBuffer, data, size);
+
+                    bool cleared;
+                    DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+                                                 commandContext, offset, size));
+                    DAWN_UNUSED(cleared);
+                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
+                                                          wgpu::BufferUsage::CopyDst);
+                    commandList->CopyBufferRegion(
+                        dstBuffer->GetD3D12Resource(), offset,
+                        ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+                        uploadHandle.startOffset, size);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
+                                                BindGroupStateTracker* bindingTracker,
+                                                const ComputePassResourceUsage& resourceUsages) {
+        uint64_t currentDispatch = 0;
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+        Command type;
+        ComputePipeline* lastPipeline = nullptr;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                    // Skip noop dispatches, it can cause D3D12 warning from validation layers and
+                    // leads to device lost.
+                    if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+                        break;
+                    }
+
+                    TransitionAndClearForSyncScope(commandContext,
+                                                   resourceUsages.dispatchUsages[currentDispatch]);
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                    RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
+                    commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
+                    currentDispatch++;
+                    break;
+                }
+
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+                    TransitionAndClearForSyncScope(commandContext,
+                                                   resourceUsages.dispatchUsages[currentDispatch]);
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                    ComPtr<ID3D12CommandSignature> signature =
+                        lastPipeline->GetDispatchIndirectCommandSignature();
+                    commandList->ExecuteIndirect(
+                        signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
+                        dispatch->indirectOffset, nullptr, 0);
+                    currentDispatch++;
+                    break;
+                }
+
+                case Command::EndComputePass: {
+                    mCommands.NextCommand<EndComputePassCmd>();
+                    return {};
+                }
+
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                    ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                    commandList->SetPipelineState(pipeline->GetPipelineState());
+
+                    bindingTracker->OnSetPipeline(pipeline);
+                    lastPipeline = pipeline;
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                    BindGroup* group = ToBackend(cmd->group.Get());
+                    uint32_t* dynamicOffsets = nullptr;
+
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                                   dynamicOffsets);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixEndEventOnCommandList(commandList);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    RecordWriteTimestampCmd(commandList, cmd);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
+                                              BeginRenderPassCmd* renderPass,
+                                              RenderPassBuilder* renderPassBuilder) {
+        Device* device = ToBackend(GetDevice());
+
+        CPUDescriptorHeapAllocation nullRTVAllocation;
+        D3D12_CPU_DESCRIPTOR_HANDLE nullRTV;
+
+        const auto& colorAttachmentsMaskBitSet =
+            renderPass->attachmentState->GetColorAttachmentsMask();
+        for (ColorAttachmentIndex i(uint8_t(0)); i < ColorAttachmentIndex(kMaxColorAttachments);
+             i++) {
+            if (colorAttachmentsMaskBitSet.test(i)) {
+                RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
+                TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+                // Set view attachment.
+                CPUDescriptorHeapAllocation rtvAllocation;
+                DAWN_TRY_ASSIGN(
+                    rtvAllocation,
+                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+
+                const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
+                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
+                    rtvAllocation.GetBaseDescriptor();
+
+                device->GetD3D12Device()->CreateRenderTargetView(
+                    ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+
+                renderPassBuilder->SetRenderTargetView(i, baseDescriptor, false);
+
+                // Set color load operation.
+                renderPassBuilder->SetRenderTargetBeginningAccess(
+                    i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
+
+                // Set color store operation.
+                if (attachmentInfo.resolveTarget != nullptr) {
+                    TextureView* resolveDestinationView =
+                        ToBackend(attachmentInfo.resolveTarget.Get());
+                    Texture* resolveDestinationTexture =
+                        ToBackend(resolveDestinationView->GetTexture());
+
+                    resolveDestinationTexture->TrackUsageAndTransitionNow(
+                        commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
+                        resolveDestinationView->GetSubresourceRange());
+
+                    renderPassBuilder->SetRenderTargetEndingAccessResolve(
+                        i, attachmentInfo.storeOp, view, resolveDestinationView);
+                } else {
+                    renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
+                }
+            } else {
+                if (!nullRTVAllocation.IsValid()) {
+                    DAWN_TRY_ASSIGN(
+                        nullRTVAllocation,
+                        device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+                    nullRTV = nullRTVAllocation.GetBaseDescriptor();
+                    D3D12_RENDER_TARGET_VIEW_DESC nullRTVDesc;
+                    nullRTVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+                    nullRTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
+                    nullRTVDesc.Texture2D.MipSlice = 0;
+                    nullRTVDesc.Texture2D.PlaneSlice = 0;
+                    device->GetD3D12Device()->CreateRenderTargetView(nullptr, &nullRTVDesc,
+                                                                     nullRTV);
+                }
+
+                renderPassBuilder->SetRenderTargetView(i, nullRTV, true);
+            }
+        }
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            RenderPassDepthStencilAttachmentInfo& attachmentInfo =
+                renderPass->depthStencilAttachment;
+            TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
+
+            // Set depth attachment.
+            CPUDescriptorHeapAllocation dsvAllocation;
+            DAWN_TRY_ASSIGN(
+                dsvAllocation,
+                device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+
+            const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc = view->GetDSVDescriptor(
+                attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
+            const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
+
+            device->GetD3D12Device()->CreateDepthStencilView(
+                ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+
+            renderPassBuilder->SetDepthStencilView(baseDescriptor);
+
+            const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
+            const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+
+            // Set depth/stencil load operations.
+            if (hasDepth) {
+                renderPassBuilder->SetDepthAccess(
+                    attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+                    attachmentInfo.clearDepth, view->GetD3D12Format());
+            } else {
+                renderPassBuilder->SetDepthNoAccess();
+            }
+
+            if (hasStencil) {
+                renderPassBuilder->SetStencilAccess(
+                    attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                    attachmentInfo.clearStencil, view->GetD3D12Format());
+            } else {
+                renderPassBuilder->SetStencilNoAccess();
+            }
+
+        } else {
+            renderPassBuilder->SetDepthStencilNoAccess();
+        }
+
+        return {};
+    }
+
+    void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                               const RenderPassBuilder* renderPassBuilder) const {
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+        // Clear framebuffer attachments as needed.
+        {
+            for (const auto& attachment :
+                 renderPassBuilder->GetRenderPassRenderTargetDescriptors()) {
+                // Load op - color
+                if (attachment.cpuDescriptor.ptr != 0 &&
+                    attachment.BeginningAccess.Type ==
+                        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                    commandList->ClearRenderTargetView(
+                        attachment.cpuDescriptor, attachment.BeginningAccess.Clear.ClearValue.Color,
+                        0, nullptr);
+                }
+            }
+
+            if (renderPassBuilder->HasDepthOrStencil()) {
+                D3D12_CLEAR_FLAGS clearFlags = {};
+                float depthClear = 0.0f;
+                uint8_t stencilClear = 0u;
+
+                if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                        ->DepthBeginningAccess.Type ==
+                    D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                    clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+                    depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                                     ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
+                }
+                if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                        ->StencilBeginningAccess.Type ==
+                    D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                    clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+                    stencilClear =
+                        renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                            ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
+                }
+
+                if (clearFlags) {
+                    commandList->ClearDepthStencilView(
+                        renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
+                        clearFlags, depthClear, stencilClear, 0, nullptr);
+                }
+            }
+        }
+
+        commandList->OMSetRenderTargets(
+            static_cast<uint8_t>(renderPassBuilder->GetHighestColorAttachmentIndexPlusOne()),
+            renderPassBuilder->GetRenderTargetViews(), FALSE,
+            renderPassBuilder->HasDepthOrStencil()
+                ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
+                : nullptr);
+    }
+
+    MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
+                                               BindGroupStateTracker* bindingTracker,
+                                               BeginRenderPassCmd* renderPass,
+                                               const bool passHasUAV) {
+        Device* device = ToBackend(GetDevice());
+        const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+
+        // renderPassBuilder must be scoped to RecordRenderPass because any underlying
+        // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
+        // valid until after EndRenderPass() has been called.
+        RenderPassBuilder renderPassBuilder(passHasUAV);
+
+        DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
+
+        // Use D3D12's native render pass API if it's available, otherwise emulate the
+        // beginning and ending access operations.
+        if (useRenderPass) {
+            commandContext->GetCommandList4()->BeginRenderPass(
+                static_cast<uint8_t>(renderPassBuilder.GetHighestColorAttachmentIndexPlusOne()),
+                renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
+                renderPassBuilder.HasDepthOrStencil()
+                    ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
+                    : nullptr,
+                renderPassBuilder.GetRenderPassFlags());
+        } else {
+            EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+        }
+
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+        // Set up default dynamic state
+        {
+            uint32_t width = renderPass->width;
+            uint32_t height = renderPass->height;
+            D3D12_VIEWPORT viewport = {
+                0.f, 0.f, static_cast<float>(width), static_cast<float>(height), 0.f, 1.f};
+            D3D12_RECT scissorRect = {0, 0, static_cast<long>(width), static_cast<long>(height)};
+            commandList->RSSetViewports(1, &viewport);
+            commandList->RSSetScissorRects(1, &scissorRect);
+
+            static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
+            commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
+
+            commandList->OMSetStencilRef(0);
+        }
+
+        RenderPipeline* lastPipeline = nullptr;
+        VertexBufferTracker vertexBufferTracker = {};
+
+        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+                    vertexBufferTracker.Apply(commandList, lastPipeline);
+                    RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
+                                           draw->firstInstance);
+                    commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
+                                               draw->firstVertex, draw->firstInstance);
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+                    vertexBufferTracker.Apply(commandList, lastPipeline);
+                    RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
+                                           draw->firstInstance);
+                    commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
+                                                      draw->firstIndex, draw->baseVertex,
+                                                      draw->firstInstance);
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+                    vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                    // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
+                    // Zero the index offset values to avoid reusing values from the previous draw
+                    RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    ComPtr<ID3D12CommandSignature> signature =
+                        ToBackend(GetDevice())->GetDrawIndirectSignature();
+                    commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                                 draw->indirectOffset, nullptr, 0);
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                    DAWN_TRY(bindingTracker->Apply(commandContext));
+                    vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                    // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
+                    // Zero the index offset values to avoid reusing values from the previous draw
+                    RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(buffer != nullptr);
+
+                    ComPtr<ID3D12CommandSignature> signature =
+                        ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
+                    commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                                 draw->indirectOffset, nullptr, 0);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = iter->NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    iter->NextCommand<PopDebugGroupCmd>();
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixEndEventOnCommandList(commandList);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                    const char* label = iter->NextData<char>(cmd->length + 1);
+
+                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                        // PIX color is 1 byte per channel in ARGB format
+                        constexpr uint64_t kPIXBlackColor = 0xff000000;
+                        ToBackend(GetDevice())
+                            ->GetFunctions()
+                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                    }
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                    commandList->SetPipelineState(pipeline->GetPipelineState());
+                    commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
+
+                    bindingTracker->OnSetPipeline(pipeline);
+
+                    lastPipeline = pipeline;
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    BindGroup* group = ToBackend(cmd->group.Get());
+                    uint32_t* dynamicOffsets = nullptr;
+
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                                   dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                    D3D12_INDEX_BUFFER_VIEW bufferView;
+                    bufferView.Format = DXGIIndexFormat(cmd->format);
+                    bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
+                    bufferView.SizeInBytes = cmd->size;
+
+                    commandList->IASetIndexBuffer(&bufferView);
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                    vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                          cmd->offset, cmd->size);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+            return {};
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+                    if (useRenderPass) {
+                        commandContext->GetCommandList4()->EndRenderPass();
+                    } else if (renderPass->attachmentState->GetSampleCount() > 1) {
+                        ResolveMultisampledRenderPass(commandContext, renderPass);
+                    }
+                    return {};
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+
+                    commandList->OMSetStencilRef(cmd->reference);
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    D3D12_VIEWPORT viewport;
+                    viewport.TopLeftX = cmd->x;
+                    viewport.TopLeftY = cmd->y;
+                    viewport.Width = cmd->width;
+                    viewport.Height = cmd->height;
+                    viewport.MinDepth = cmd->minDepth;
+                    viewport.MaxDepth = cmd->maxDepth;
+
+                    commandList->RSSetViewports(1, &viewport);
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    D3D12_RECT rect;
+                    rect.left = cmd->x;
+                    rect.top = cmd->y;
+                    rect.right = cmd->x + cmd->width;
+                    rect.bottom = cmd->y + cmd->height;
+
+                    commandList->RSSetScissorRects(1, &rect);
+                    break;
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
+                    commandList->OMSetBlendFactor(color.data());
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            DAWN_TRY(EncodeRenderBundleCommand(iter, type));
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                    ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                           D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                    commandList->BeginQuery(querySet->GetQueryHeap(),
+                                            D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
+                    break;
+                }
+
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                    ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                           D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                    commandList->EndQuery(querySet->GetQueryHeap(),
+                                          D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    RecordWriteTimestampCmd(commandList, cmd);
+                    break;
+                }
+
+                default: {
+                    DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
+                    break;
+                }
+            }
+        }
+        return {};
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandBufferD3D12.h b/src/dawn/native/d3d12/CommandBufferD3D12.h
new file mode 100644
index 0000000..d6d4438
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandBufferD3D12.h
@@ -0,0 +1,57 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
+#define DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+    struct BeginRenderPassCmd;
+}  // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
+    class BindGroupStateTracker;
+    class CommandRecordingContext;
+    class RenderPassBuilder;
+
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor);
+
+        MaybeError RecordCommands(CommandRecordingContext* commandContext);
+
+      private:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        MaybeError RecordComputePass(CommandRecordingContext* commandContext,
+                                     BindGroupStateTracker* bindingTracker,
+                                     const ComputePassResourceUsage& resourceUsages);
+        MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
+                                    BindGroupStateTracker* bindingTracker,
+                                    BeginRenderPassCmd* renderPass,
+                                    bool passHasUAV);
+        MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
+                                   BeginRenderPassCmd* renderPass,
+                                   RenderPassBuilder* renderPassBuilder);
+        void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                    const RenderPassBuilder* renderPassBuilder) const;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
diff --git a/src/dawn/native/d3d12/CommandRecordingContext.cpp b/src/dawn/native/d3d12/CommandRecordingContext.cpp
new file mode 100644
index 0000000..bb8ef81
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandRecordingContext.cpp
@@ -0,0 +1,175 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <profileapi.h>
+#include <sysinfoapi.h>
+
+namespace dawn::native::d3d12 {
+
+    void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
+        ASSERT(IsOpen());
+        mSharedTextures.insert(texture);
+    }
+
+    MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
+                                             CommandAllocatorManager* commandAllocationManager) {
+        ASSERT(!IsOpen());
+        ID3D12CommandAllocator* commandAllocator;
+        DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
+        if (mD3d12CommandList != nullptr) {
+            MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
+                                            "D3D12 resetting command list");
+            if (error.IsError()) {
+                mD3d12CommandList.Reset();
+                DAWN_TRY(std::move(error));
+            }
+        } else {
+            ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
+            DAWN_TRY(CheckHRESULT(
+                d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
+                                               nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
+                "D3D12 creating direct command list"));
+            mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+            // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
+            // pass APIs introduced in Windows build 1809.
+            mD3d12CommandList.As(&mD3d12CommandList4);
+        }
+
+        mIsOpen = true;
+
+        return {};
+    }
+
+    MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
+        if (IsOpen()) {
+            // Shared textures must be transitioned to common state after the last usage in order
+            // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
+            // common state right before command list submission. TransitionUsageNow itself ensures
+            // no unnecessary transitions happen if the resources is already in the common state.
+            for (Texture* texture : mSharedTextures) {
+                DAWN_TRY(texture->AcquireKeyedMutex());
+                texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
+            }
+
+            MaybeError error =
+                CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+            if (error.IsError()) {
+                Release();
+                DAWN_TRY(std::move(error));
+            }
+            DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
+                mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
+
+            if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
+                uint64_t gpuTimestamp;
+                uint64_t cpuTimestamp;
+                FILETIME fileTimeNonPrecise;
+                SYSTEMTIME systemTimeNonPrecise;
+
+                // Both supported since Windows 2000, have a accuracy of 1ms
+                GetSystemTimeAsFileTime(&fileTimeNonPrecise);
+                GetSystemTime(&systemTimeNonPrecise);
+                // Query CPU and GPU timestamps at almost the same time
+                device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
+
+                uint64_t gpuFrequency;
+                uint64_t cpuFrequency;
+                LARGE_INTEGER cpuFrequencyLargeInteger;
+                device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
+                QueryPerformanceFrequency(
+                    &cpuFrequencyLargeInteger);  // Supported since Windows 2000
+                cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
+
+                std::string timingInfo = absl::StrFormat(
+                    "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
+                    "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
+                    "%u",
+                    systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth,
+                    systemTimeNonPrecise.wDay, systemTimeNonPrecise.wHour,
+                    systemTimeNonPrecise.wMinute, systemTimeNonPrecise.wSecond,
+                    systemTimeNonPrecise.wMilliseconds,
+                    (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
+                        fileTimeNonPrecise.dwLowDateTime,
+                    cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
+
+                TRACE_EVENT_INSTANT1(
+                    device->GetPlatform(), General,
+                    "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
+                    timingInfo.c_str());
+            }
+
+            ID3D12CommandList* d3d12CommandList = GetCommandList();
+            device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
+
+            for (Texture* texture : mSharedTextures) {
+                texture->ReleaseKeyedMutex();
+            }
+
+            mIsOpen = false;
+            mSharedTextures.clear();
+            mHeapsPendingUsage.clear();
+        }
+        return {};
+    }
+
+    void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
+        // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
+        // tracking it more than once.
+        if (heap->GetLastUsage() < serial) {
+            heap->SetLastUsage(serial);
+            mHeapsPendingUsage.push_back(heap);
+        }
+    }
+
+    ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
+        ASSERT(mD3d12CommandList != nullptr);
+        ASSERT(IsOpen());
+        return mD3d12CommandList.Get();
+    }
+
+    // This function will fail on Windows versions prior to 1809. Support must be queried through
+    // the device before calling.
+    ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
+        ASSERT(IsOpen());
+        ASSERT(mD3d12CommandList != nullptr);
+        return mD3d12CommandList4.Get();
+    }
+
+    void CommandRecordingContext::Release() {
+        mD3d12CommandList.Reset();
+        mD3d12CommandList4.Reset();
+        mIsOpen = false;
+        mSharedTextures.clear();
+        mHeapsPendingUsage.clear();
+        mTempBuffers.clear();
+    }
+
+    bool CommandRecordingContext::IsOpen() const {
+        return mIsOpen;
+    }
+
+    void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
+        mTempBuffers.emplace_back(tempBuffer);
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandRecordingContext.h b/src/dawn/native/d3d12/CommandRecordingContext.h
new file mode 100644
index 0000000..21a60f2
--- /dev/null
+++ b/src/dawn/native/d3d12/CommandRecordingContext.h
@@ -0,0 +1,58 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <set>
+
+namespace dawn::native::d3d12 {
+    class CommandAllocatorManager;
+    class Device;
+    class Heap;
+    class Texture;
+
+    class CommandRecordingContext {
+      public:
+        void AddToSharedTextureList(Texture* texture);
+        MaybeError Open(ID3D12Device* d3d12Device,
+                        CommandAllocatorManager* commandAllocationManager);
+
+        ID3D12GraphicsCommandList* GetCommandList() const;
+        ID3D12GraphicsCommandList4* GetCommandList4() const;
+        void Release();
+        bool IsOpen() const;
+
+        MaybeError ExecuteCommandList(Device* device);
+
+        void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
+
+        void AddToTempBuffers(Ref<Buffer> tempBuffer);
+
+      private:
+        ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+        ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
+        bool mIsOpen = false;
+        std::set<Texture*> mSharedTextures;
+        std::vector<Heap*> mHeapsPendingUsage;
+
+        std::vector<Ref<Buffer>> mTempBuffers;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
diff --git a/src/dawn/native/d3d12/ComputePipelineD3D12.cpp b/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
new file mode 100644
index 0000000..6df1049
--- /dev/null
+++ b/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
@@ -0,0 +1,105 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(device, descriptor));
+    }
+
+    MaybeError ComputePipeline::Initialize() {
+        Device* device = ToBackend(GetDevice());
+        uint32_t compileFlags = 0;
+
+        if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+            !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+            compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+        }
+
+        if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+            compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+        }
+
+        // SPRIV-cross does matrix multiplication expecting row major matrices
+        compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+
+        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+        ShaderModule* module = ToBackend(computeStage.module.Get());
+
+        D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
+        d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
+
+        CompiledShader compiledShader;
+        DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
+                                                        ToBackend(GetLayout()), compileFlags));
+        d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
+        auto* d3d12Device = device->GetD3D12Device();
+        DAWN_TRY(CheckHRESULT(
+            d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
+            "D3D12 creating pipeline state"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    ComputePipeline::~ComputePipeline() = default;
+
+    void ComputePipeline::DestroyImpl() {
+        ComputePipelineBase::DestroyImpl();
+        ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+    }
+
+    ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
+        return mPipelineState.Get();
+    }
+
+    void ComputePipeline::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
+                     GetLabel());
+    }
+
+    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                          WGPUCreateComputePipelineAsyncCallback callback,
+                                          void* userdata) {
+        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                             userdata);
+        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+    bool ComputePipeline::UsesNumWorkgroups() const {
+        return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
+    }
+
+    ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
+        if (UsesNumWorkgroups()) {
+            return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+        }
+        return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ComputePipelineD3D12.h b/src/dawn/native/d3d12/ComputePipelineD3D12.h
new file mode 100644
index 0000000..03a0259
--- /dev/null
+++ b/src/dawn/native/d3d12/ComputePipelineD3D12.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
+#define DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class ComputePipeline final : public ComputePipelineBase {
+      public:
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                    void* userdata);
+        ComputePipeline() = delete;
+
+        ID3D12PipelineState* GetPipelineState() const;
+
+        MaybeError Initialize() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        bool UsesNumWorkgroups() const;
+
+        ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
+
+      private:
+        ~ComputePipeline() override;
+
+        void DestroyImpl() override;
+
+        using ComputePipelineBase::ComputePipelineBase;
+        ComPtr<ID3D12PipelineState> mPipelineState;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
diff --git a/src/dawn/native/d3d12/D3D11on12Util.cpp b/src/dawn/native/d3d12/D3D11on12Util.cpp
new file mode 100644
index 0000000..d48d41f
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D11on12Util.cpp
@@ -0,0 +1,187 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// D3D12Backend.cpp: contains the definition of symbols exported by D3D12Backend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/d3d12/D3D11on12Util.h"
+
+#include "dawn/common/HashUtils.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include <dawn/native/D3D12Backend.h>
+
+namespace dawn::native::d3d12 {
+
+    void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
+        if (d3d11on12Device == nullptr) {
+            return;
+        }
+
+        ComPtr<ID3D11Device> d3d11Device;
+        if (FAILED(d3d11on12Device.As(&d3d11Device))) {
+            return;
+        }
+
+        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+        d3d11Device->GetImmediateContext(&d3d11DeviceContext);
+
+        ASSERT(d3d11DeviceContext != nullptr);
+
+        // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+        // are not released until work is submitted to the device context and flushed.
+        // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+
+        // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+        // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+        ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+        if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
+            return;
+        }
+
+        d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
+        d3d11DeviceContext2->Flush();
+    }
+
+    D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
+        ComPtr<ID3D11On12Device> d3d11On12Device)
+        : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
+    }
+
+    D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+        ComPtr<ID3D11On12Device> d3d11On12Device)
+        : mD3D11on12Device(std::move(d3d11On12Device)) {
+    }
+
+    D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
+        if (mDXGIKeyedMutex == nullptr) {
+            return;
+        }
+
+        if (mAcquireCount > 0) {
+            mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+        }
+
+        ComPtr<ID3D11Resource> d3d11Resource;
+        if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
+            return;
+        }
+
+        ASSERT(mD3D11on12Device != nullptr);
+
+        ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+        mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+
+        d3d11Resource.Reset();
+        mDXGIKeyedMutex.Reset();
+
+        Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+    }
+
+    MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
+        ASSERT(mDXGIKeyedMutex != nullptr);
+        ASSERT(mAcquireCount >= 0);
+        if (mAcquireCount == 0) {
+            DAWN_TRY(CheckHRESULT(
+                mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
+                "D3D12 acquiring shared mutex"));
+        }
+        mAcquireCount++;
+        return {};
+    }
+
+    void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
+        ASSERT(mDXGIKeyedMutex != nullptr);
+        ASSERT(mAcquireCount > 0);
+        mAcquireCount--;
+        if (mAcquireCount == 0) {
+            mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+        }
+    }
+
+    size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
+        const Ref<D3D11on12ResourceCacheEntry> a) const {
+        size_t hash = 0;
+        HashCombine(&hash, a->mD3D11on12Device.Get());
+        return hash;
+    }
+
+    bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
+        const Ref<D3D11on12ResourceCacheEntry> a,
+        const Ref<D3D11on12ResourceCacheEntry> b) const {
+        return a->mD3D11on12Device == b->mD3D11on12Device;
+    }
+
+    D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
+
+    D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
+
+    Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
+        WGPUDevice device,
+        ID3D12Resource* d3d12Resource) {
+        Device* backendDevice = reinterpret_cast<Device*>(device);
+        // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
+        // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
+        // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
+        // using the 11on12 device as the cache key.
+        ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
+        if (d3d11on12Device == nullptr) {
+            dawn::ErrorLog() << "Unable to create 11on12 device for external image";
+            return nullptr;
+        }
+
+        D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
+        auto iter = mCache.find(&blueprint);
+        if (iter != mCache.end()) {
+            return *iter;
+        }
+
+        // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+        // are a viable alternative but are, unfortunately, not available on all versions of Windows
+        // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+        // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+        ComPtr<ID3D11Texture2D> d3d11Texture;
+        D3D11_RESOURCE_FLAGS resourceFlags;
+        resourceFlags.BindFlags = 0;
+        resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+        resourceFlags.CPUAccessFlags = 0;
+        resourceFlags.StructureByteStride = 0;
+        if (FAILED(d3d11on12Device->CreateWrappedResource(
+                d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
+                D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
+            return nullptr;
+        }
+
+        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+        if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
+            return nullptr;
+        }
+
+        // Keep this cache from growing unbounded.
+        // TODO(dawn:625): Consider using a replacement policy based cache.
+        if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
+            mCache.clear();
+        }
+
+        Ref<D3D11on12ResourceCacheEntry> entry =
+            AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
+        mCache.insert(entry);
+
+        return entry;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D11on12Util.h b/src/dawn/native/d3d12/D3D11on12Util.h
new file mode 100644
index 0000000..af7e680
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D11on12Util.h
@@ -0,0 +1,92 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D11ON12UTIL_H_
+#define DAWNNATIVE_D3D11ON12UTIL_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <dawn/native/DawnNative.h>
+#include <memory>
+#include <unordered_set>
+
+struct ID3D11On12Device;
+struct IDXGIKeyedMutex;
+
+namespace dawn::native::d3d12 {
+
+    // Wraps 11 wrapped resources in a cache.
+    class D3D11on12ResourceCacheEntry : public RefCounted {
+      public:
+        D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
+        D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
+                                    ComPtr<ID3D11On12Device> d3d11on12Device);
+        ~D3D11on12ResourceCacheEntry();
+
+        MaybeError AcquireKeyedMutex();
+        void ReleaseKeyedMutex();
+
+        // Functors necessary for the
+        // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
+        struct HashFunc {
+            size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
+        };
+
+        struct EqualityFunc {
+            bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
+                            const Ref<D3D11on12ResourceCacheEntry> b) const;
+        };
+
+      private:
+        ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
+        ComPtr<ID3D11On12Device> mD3D11on12Device;
+        int64_t mAcquireCount = 0;
+    };
+
+    // |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
+    // Each entry represents a 11 resource that is exclusively accessed by Dawn device.
+    // Since each Dawn device creates and stores a 11on12 device, the 11on12 device
+    // is used as the key for the cache entry which ensures only the same 11 wrapped
+    // resource is re-used and also fully released.
+    //
+    // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
+    // and special release code per ProduceTexture(device).
+    class D3D11on12ResourceCache {
+      public:
+        D3D11on12ResourceCache();
+        ~D3D11on12ResourceCache();
+
+        Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
+            WGPUDevice device,
+            ID3D12Resource* d3d12Resource);
+
+      private:
+        // TODO(dawn:625): Figure out a large enough cache size.
+        static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
+
+        // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
+        // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
+        // waiting until Dawn device to shutdown.
+        using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
+                                         D3D11on12ResourceCacheEntry::HashFunc,
+                                         D3D11on12ResourceCacheEntry::EqualityFunc>;
+
+        Cache mCache;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D11ON12UTIL_H_
diff --git a/src/dawn/native/d3d12/D3D12Backend.cpp b/src/dawn/native/d3d12/D3D12Backend.cpp
new file mode 100644
index 0000000..18d7145
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D12Backend.cpp
@@ -0,0 +1,179 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// D3D12Backend.cpp: contains the definition of symbols exported by D3D12Backend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/D3D12Backend.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/NativeSwapChainImplD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
+        return ToBackend(FromAPI(device))->GetD3D12Device();
+    }
+
+    DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+
+        DawnSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+        impl.textureUsage = WGPUTextureUsage_Present;
+
+        return impl;
+    }
+
+    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+        const DawnSwapChainImplementation* swapChain) {
+        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+    }
+
+    ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
+        : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
+    }
+
+    ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
+                                         const WGPUTextureDescriptor* descriptor)
+        : mD3D12Resource(std::move(d3d12Resource)),
+          mUsage(descriptor->usage),
+          mDimension(descriptor->dimension),
+          mSize(descriptor->size),
+          mFormat(descriptor->format),
+          mMipLevelCount(descriptor->mipLevelCount),
+          mSampleCount(descriptor->sampleCount) {
+        ASSERT(!descriptor->nextInChain ||
+               descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
+        if (descriptor->nextInChain) {
+            mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
+                                 descriptor->nextInChain)
+                                 ->internalUsage;
+        }
+        mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
+    }
+
+    ExternalImageDXGI::~ExternalImageDXGI() = default;
+
+    WGPUTexture ExternalImageDXGI::ProduceTexture(
+        WGPUDevice device,
+        const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+
+        // Ensure the texture usage is allowed
+        if (!IsSubset(descriptor->usage, mUsage)) {
+            dawn::ErrorLog() << "Texture usage is not valid for external image";
+            return nullptr;
+        }
+
+        TextureDescriptor textureDescriptor = {};
+        textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
+        textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
+        textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
+        textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
+        textureDescriptor.mipLevelCount = mMipLevelCount;
+        textureDescriptor.sampleCount = mSampleCount;
+
+        DawnTextureInternalUsageDescriptor internalDesc = {};
+        if (mUsageInternal) {
+            textureDescriptor.nextInChain = &internalDesc;
+            internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
+            internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+        }
+
+        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
+            mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
+        if (d3d11on12Resource == nullptr) {
+            dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
+            return nullptr;
+        }
+
+        Ref<TextureBase> texture = backendDevice->CreateD3D12ExternalTexture(
+            &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
+            descriptor->isSwapChainTexture, descriptor->isInitialized);
+
+        return ToAPI(texture.Detach());
+    }
+
+    // static
+    std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
+        WGPUDevice device,
+        const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+
+        Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
+        if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
+                descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
+            return nullptr;
+        }
+
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        if (backendDevice->ConsumedError(
+                ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
+            return nullptr;
+        }
+
+        if (backendDevice->ConsumedError(
+                ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
+                "validating that a D3D12 external image can be wrapped with %s",
+                textureDescriptor)) {
+            return nullptr;
+        }
+
+        if (backendDevice->ConsumedError(
+                ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
+            return nullptr;
+        }
+
+        // Shared handle is assumed to support resource sharing capability. The resource
+        // shared capability tier must agree to share resources between D3D devices.
+        const Format* format =
+            backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
+        if (format->IsMultiPlanar()) {
+            if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
+                    backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
+                return nullptr;
+            }
+        }
+
+        std::unique_ptr<ExternalImageDXGI> result(
+            new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
+        return result;
+    }
+
+    uint64_t SetExternalMemoryReservation(WGPUDevice device,
+                                          uint64_t requestedReservationSize,
+                                          MemorySegment memorySegment) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+
+        return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
+            memorySegment, requestedReservationSize);
+    }
+
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {
+    }
+
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Error.cpp b/src/dawn/native/d3d12/D3D12Error.cpp
new file mode 100644
index 0000000..23a9556
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D12Error.cpp
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/D3D12Error.h"
+
+#include <iomanip>
+#include <sstream>
+#include <string>
+
+namespace dawn::native::d3d12 {
+    MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
+        if (DAWN_LIKELY(SUCCEEDED(result))) {
+            return {};
+        }
+
+        std::ostringstream messageStream;
+        messageStream << context << " failed with ";
+        if (result == E_FAKE_ERROR_FOR_TESTING) {
+            messageStream << "E_FAKE_ERROR_FOR_TESTING";
+        } else {
+            messageStream << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex
+                          << result;
+        }
+
+        if (result == DXGI_ERROR_DEVICE_REMOVED) {
+            return DAWN_DEVICE_LOST_ERROR(messageStream.str());
+        } else {
+            return DAWN_INTERNAL_ERROR(messageStream.str());
+        }
+    }
+
+    MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
+        if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
+            return DAWN_OUT_OF_MEMORY_ERROR(context);
+        }
+
+        return CheckHRESULTImpl(result, context);
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Error.h b/src/dawn/native/d3d12/D3D12Error.h
new file mode 100644
index 0000000..f70690a
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D12Error.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12ERROR_H_
+#define DAWNNATIVE_D3D12_D3D12ERROR_H_
+
+#include <d3d12.h>
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorInjector.h"
+
+namespace dawn::native::d3d12 {
+
+    constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
+    constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
+        MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
+
+    // Returns a success only if result of HResult is success
+    MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
+
+    // Uses CheckRESULT but returns OOM specific error when recoverable.
+    MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
+
+#define CheckHRESULT(resultIn, contextIn)    \
+    ::dawn::native::d3d12::CheckHRESULTImpl( \
+        INJECT_ERROR_OR_RUN(resultIn, E_FAKE_ERROR_FOR_TESTING), contextIn)
+#define CheckOutOfMemoryHRESULT(resultIn, contextIn)                        \
+    ::dawn::native::d3d12::CheckOutOfMemoryHRESULTImpl(                     \
+        INJECT_ERROR_OR_RUN(resultIn, E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING, \
+                            E_FAKE_ERROR_FOR_TESTING),                      \
+        contextIn)
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_D3D12ERROR_H_
diff --git a/src/dawn/native/d3d12/D3D12Info.cpp b/src/dawn/native/d3d12/D3D12Info.cpp
new file mode 100644
index 0000000..ebd629b
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D12Info.cpp
@@ -0,0 +1,122 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/D3D12Info.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+namespace dawn::native::d3d12 {
+
+    ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+        D3D12DeviceInfo info = {};
+
+        // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
+        // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
+        // for backwards compat.
+        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
+        D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
+        DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
+                                                                       &arch, sizeof(arch)),
+                              "ID3D12Device::CheckFeatureSupport"));
+
+        info.isUMA = arch.UMA;
+
+        D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
+        DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+                                                                       &options, sizeof(options)),
+                              "ID3D12Device::CheckFeatureSupport"));
+
+        info.resourceHeapTier = options.ResourceHeapTier;
+
+        // Windows builds 1809 and above can use the D3D12 render pass API. If we query
+        // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
+        // the render pass API.
+        info.supportsRenderPass = false;
+        D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
+        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+                D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
+            // Performance regressions been observed when using a render pass on Intel graphics
+            // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
+            // pass on these platforms.
+            if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
+                !gpu_info::IsIntel(adapter.GetVendorId())) {
+                info.supportsRenderPass = true;
+            }
+        }
+
+        // Used to share resources cross-API. If we query CheckFeatureSupport for
+        // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
+        info.supportsSharedResourceCapabilityTier1 = false;
+        D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
+        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+                D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
+            // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
+            // is used by Dawn, check for Tier 1.
+            if (featureOptions4.SharedResourceCompatibilityTier >=
+                D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
+                info.supportsSharedResourceCapabilityTier1 = true;
+            }
+        }
+
+        D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
+                                                               {D3D_SHADER_MODEL_6_1},
+                                                               {D3D_SHADER_MODEL_6_0},
+                                                               {D3D_SHADER_MODEL_5_1}};
+        uint32_t driverShaderModel = 0;
+        for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
+            if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+                    D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
+                driverShaderModel = shaderModel.HighestShaderModel;
+                break;
+            }
+        }
+
+        if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
+            return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+        }
+
+        // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
+        ASSERT(driverShaderModel <= 0xFF);
+        uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
+        uint32_t shaderModelMinor = (driverShaderModel & 0xF);
+
+        ASSERT(shaderModelMajor < 10);
+        ASSERT(shaderModelMinor < 10);
+        info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
+
+        // Profiles are always <stage>s_<minor>_<major> so we build the s_<minor>_major and add
+        // it to each of the stage's suffix.
+        std::wstring profileSuffix = L"s_M_n";
+        profileSuffix[2] = wchar_t('0' + shaderModelMajor);
+        profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+
+        info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
+        info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
+        info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
+
+        D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
+        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+                D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
+            info.supportsShaderFloat16 = driverShaderModel >= D3D_SHADER_MODEL_6_2 &&
+                                         featureData4.Native16BitShaderOpsSupported;
+        }
+
+        return std::move(info);
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Info.h b/src/dawn/native/d3d12/D3D12Info.h
new file mode 100644
index 0000000..83ee837
--- /dev/null
+++ b/src/dawn/native/d3d12/D3D12Info.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12INFO_H_
+#define DAWNNATIVE_D3D12_D3D12INFO_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Adapter;
+
+    struct D3D12DeviceInfo {
+        bool isUMA;
+        uint32_t resourceHeapTier;
+        bool supportsRenderPass;
+        bool supportsShaderFloat16;
+        // shaderModel indicates the maximum supported shader model, for example, the value 62
+        // indicates that current driver supports the maximum shader model is shader model 6.2.
+        uint32_t shaderModel;
+        PerStage<std::wstring> shaderProfiles;
+        bool supportsSharedResourceCapabilityTier1;
+    };
+
+    ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_D3D12INFO_H_
diff --git a/src/dawn/native/d3d12/DeviceD3D12.cpp b/src/dawn/native/d3d12/DeviceD3D12.cpp
new file mode 100644
index 0000000..415b486
--- /dev/null
+++ b/src/dawn/native/d3d12/DeviceD3D12.cpp
@@ -0,0 +1,744 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+#include "dawn/native/d3d12/QueueD3D12.h"
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include "dawn/native/d3d12/SamplerD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/SwapChainD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+
+    // TODO(dawn:155): Figure out these values.
+    static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
+    static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
+
+    // Value may change in the future to better accomodate large clears.
+    static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4;  // 4 Mb
+
+    static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
+
+    // static
+    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+                                              const DeviceDescriptor* descriptor) {
+        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    MaybeError Device::Initialize() {
+        InitTogglesFromDriver();
+
+        mD3d12Device = ToBackend(GetAdapter())->GetDevice();
+
+        ASSERT(mD3d12Device != nullptr);
+
+        // Create device-global objects
+        D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+        queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+        queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+        DAWN_TRY(
+            CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
+                         "D3D12 create command queue"));
+
+        if (IsFeatureEnabled(Feature::TimestampQuery) &&
+            !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+            // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
+            // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
+            // always support timestamps except where there are bugs in Windows container and vGPU
+            // implementations.
+            uint64_t frequency;
+            DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
+                                  "D3D12 get timestamp frequency"));
+            // Calculate the period in nanoseconds by the frequency.
+            mTimestampPeriod = static_cast<float>(1e9) / frequency;
+        }
+
+        // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
+        // value.
+        mCommandQueue.As(&mD3d12SharingContract);
+
+        DAWN_TRY(
+            CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
+                                                   D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
+                         "D3D12 create fence"));
+
+        mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+        ASSERT(mFenceEvent != nullptr);
+
+        // Initialize backend services
+        mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
+
+        // Zero sized allocator is never requested and does not need to exist.
+        for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
+            mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+                this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+                D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+        }
+
+        for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
+            mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+                this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+                D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+        }
+
+        mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+            this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
+
+        mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+            this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
+
+        mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
+
+        mResidencyManager = std::make_unique<ResidencyManager>(this);
+        mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
+
+        // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
+        DAWN_TRY_ASSIGN(
+            mSamplerShaderVisibleDescriptorAllocator,
+            ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+
+        DAWN_TRY_ASSIGN(
+            mViewShaderVisibleDescriptorAllocator,
+            ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+
+        // Initialize indirect commands
+        D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
+        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+        programDesc.ByteStride = 3 * sizeof(uint32_t);
+        programDesc.NumArgumentDescs = 1;
+        programDesc.pArgumentDescs = &argumentDesc;
+
+        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                                 IID_PPV_ARGS(&mDispatchIndirectSignature));
+
+        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+        programDesc.ByteStride = 4 * sizeof(uint32_t);
+
+        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                                 IID_PPV_ARGS(&mDrawIndirectSignature));
+
+        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+        programDesc.ByteStride = 5 * sizeof(uint32_t);
+
+        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                                 IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
+
+        DAWN_TRY(DeviceBase::Initialize(new Queue(this)));
+        // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
+        // device initialization to call NextSerial
+        DAWN_TRY(NextSerial());
+
+        // The environment can only use DXC when it's available. Override the decision if it is not
+        // applicable.
+        DAWN_TRY(ApplyUseDxcToggle());
+
+        DAWN_TRY(CreateZeroBuffer());
+
+        return {};
+    }
+
+    Device::~Device() {
+        Destroy();
+    }
+
+    ID3D12Device* Device::GetD3D12Device() const {
+        return mD3d12Device.Get();
+    }
+
+    ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
+        return mCommandQueue;
+    }
+
+    ID3D12SharingContract* Device::GetSharingContract() const {
+        return mD3d12SharingContract.Get();
+    }
+
+    ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
+        return mDispatchIndirectSignature;
+    }
+
+    ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
+        return mDrawIndirectSignature;
+    }
+
+    ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
+        return mDrawIndexedIndirectSignature;
+    }
+
+    ComPtr<IDXGIFactory4> Device::GetFactory() const {
+        return ToBackend(GetAdapter())->GetBackend()->GetFactory();
+    }
+
+    MaybeError Device::ApplyUseDxcToggle() {
+        if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
+            ForceSetToggle(Toggle::UseDXC, false);
+        } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+            // Currently we can only use DXC to compile HLSL shaders using float16.
+            ForceSetToggle(Toggle::UseDXC, true);
+        }
+
+        if (IsToggleEnabled(Toggle::UseDXC)) {
+            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
+            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
+            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
+        }
+
+        return {};
+    }
+
+    ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
+        return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
+    }
+
+    ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
+        return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
+    }
+
+    ComPtr<IDxcValidator> Device::GetDxcValidator() const {
+        return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
+    }
+
+    const PlatformFunctions* Device::GetFunctions() const {
+        return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
+    }
+
+    CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
+        return mCommandAllocatorManager.get();
+    }
+
+    ResidencyManager* Device::GetResidencyManager() const {
+        return mResidencyManager.get();
+    }
+
+    ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
+        // Callers of GetPendingCommandList do so to record commands. Only reserve a command
+        // allocator when it is needed so we don't submit empty command lists
+        if (!mPendingCommands.IsOpen()) {
+            DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
+        }
+        return &mPendingCommands;
+    }
+
+    MaybeError Device::CreateZeroBuffer() {
+        BufferDescriptor zeroBufferDescriptor;
+        zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        zeroBufferDescriptor.size = kZeroBufferSize;
+        zeroBufferDescriptor.label = "ZeroBuffer_Internal";
+        DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
+
+        return {};
+    }
+
+    MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
+                                         BufferBase* destination,
+                                         uint64_t offset,
+                                         uint64_t size) {
+        // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
+        // the allocation of the staging buffer causes various end2end tests that monitor heap usage
+        // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
+        // used to avoid that.
+        if (!mZeroBuffer->IsDataInitialized()) {
+            DynamicUploader* uploader = GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
+                                               kCopyBufferToBufferOffsetAlignment));
+
+            memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
+
+            CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+                                        uploadHandle.startOffset, mZeroBuffer.Get(), 0,
+                                        kZeroBufferSize);
+
+            mZeroBuffer->SetIsDataInitialized();
+        }
+
+        Buffer* dstBuffer = ToBackend(destination);
+
+        // Necessary to ensure residency of the zero buffer.
+        mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+        dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+        while (size > 0) {
+            uint64_t copySize = std::min(kZeroBufferSize, size);
+            commandContext->GetCommandList()->CopyBufferRegion(
+                dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0,
+                copySize);
+
+            offset += copySize;
+            size -= copySize;
+        }
+
+        return {};
+    }
+
+    MaybeError Device::TickImpl() {
+        // Perform cleanup operations to free unused objects
+        ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+        mResourceAllocatorManager->Tick(completedSerial);
+        DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
+        mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
+        mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
+        mRenderTargetViewAllocator->Tick(completedSerial);
+        mDepthStencilViewAllocator->Tick(completedSerial);
+        mUsedComObjectRefs.ClearUpTo(completedSerial);
+
+        if (mPendingCommands.IsOpen()) {
+            DAWN_TRY(ExecutePendingCommandContext());
+            DAWN_TRY(NextSerial());
+        }
+
+        DAWN_TRY(CheckDebugLayerAndGenerateErrors());
+
+        return {};
+    }
+
+    MaybeError Device::NextSerial() {
+        IncrementLastSubmittedCommandSerial();
+
+        return CheckHRESULT(
+            mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
+            "D3D12 command queue signal fence");
+    }
+
+    MaybeError Device::WaitForSerial(ExecutionSerial serial) {
+        DAWN_TRY(CheckPassedSerials());
+        if (GetCompletedCommandSerial() < serial) {
+            DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
+                                  "D3D12 set event on completion"));
+            WaitForSingleObject(mFenceEvent, INFINITE);
+            DAWN_TRY(CheckPassedSerials());
+        }
+        return {};
+    }
+
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
+        if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
+            // GetCompletedValue returns UINT64_MAX if the device was removed.
+            // Try to query the failure reason.
+            DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
+                                  "ID3D12Device::GetDeviceRemovedReason"));
+            // Otherwise, return a generic device lost error.
+            return DAWN_DEVICE_LOST_ERROR("Device lost");
+        }
+
+        if (completedSerial <= GetCompletedCommandSerial()) {
+            return ExecutionSerial(0);
+        }
+
+        return completedSerial;
+    }
+
+    void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
+        mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
+    }
+
+    MaybeError Device::ExecutePendingCommandContext() {
+        return mPendingCommands.ExecuteCommandList(this);
+    }
+
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        return BindGroup::Create(this, descriptor);
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        return Buffer::Create(this, descriptor);
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return CommandBuffer::Create(encoder, descriptor);
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return ComputePipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return PipelineLayout::Create(this, descriptor);
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return QuerySet::Create(this, descriptor);
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return RenderPipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return Sampler::Create(this, descriptor);
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        return ShaderModule::Create(this, descriptor, parseResult);
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return OldSwapChain::Create(this, descriptor);
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return Texture::Create(this, descriptor);
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return TextureView::Create(texture, descriptor);
+    }
+    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata) {
+        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+    }
+    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata) {
+        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+    }
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer =
+            std::make_unique<StagingBuffer>(size, this);
+        DAWN_TRY(stagingBuffer->Initialize());
+        return std::move(stagingBuffer);
+    }
+
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        CommandRecordingContext* commandRecordingContext;
+        DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+
+        Buffer* dstBuffer = ToBackend(destination);
+
+        bool cleared;
+        DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+                                     commandRecordingContext, destinationOffset, size));
+        DAWN_UNUSED(cleared);
+
+        CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
+                                    destinationOffset, size);
+
+        return {};
+    }
+
+    void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+                                             StagingBufferBase* source,
+                                             uint64_t sourceOffset,
+                                             BufferBase* destination,
+                                             uint64_t destinationOffset,
+                                             uint64_t size) {
+        ASSERT(commandContext != nullptr);
+        Buffer* dstBuffer = ToBackend(destination);
+        StagingBuffer* srcBuffer = ToBackend(source);
+        dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+        commandContext->GetCommandList()->CopyBufferRegion(
+            dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
+            sourceOffset, size);
+    }
+
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+        Texture* texture = ToBackend(dst->texture.Get());
+
+        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+        if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+            texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            texture->EnsureSubresourceContentInitialized(commandContext, range);
+        }
+
+        texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+
+        RecordBufferTextureCopyWithBufferHandle(
+            BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
+            ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
+            copySizePixels);
+
+        return {};
+    }
+
+    void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
+        mResourceAllocatorManager->DeallocateMemory(allocation);
+    }
+
+    ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        D3D12_RESOURCE_STATES initialUsage) {
+        return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
+                                                         initialUsage);
+    }
+
+    Ref<TextureBase> Device::CreateD3D12ExternalTexture(
+        const TextureDescriptor* descriptor,
+        ComPtr<ID3D12Resource> d3d12Texture,
+        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+        bool isSwapChainTexture,
+        bool isInitialized) {
+        Ref<Texture> dawnTexture;
+        if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+                                                       std::move(d3d11on12Resource),
+                                                       isSwapChainTexture, isInitialized),
+                          &dawnTexture)) {
+            return nullptr;
+        }
+        return {dawnTexture};
+    }
+
+    ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
+        if (mD3d11On12Device == nullptr) {
+            ComPtr<ID3D11Device> d3d11Device;
+            D3D_FEATURE_LEVEL d3dFeatureLevel;
+            IUnknown* const iUnknownQueue = mCommandQueue.Get();
+            if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
+                                                             &iUnknownQueue, 1, 1, &d3d11Device,
+                                                             nullptr, &d3dFeatureLevel))) {
+                return nullptr;
+            }
+
+            ComPtr<ID3D11On12Device> d3d11on12Device;
+            HRESULT hr = d3d11Device.As(&d3d11on12Device);
+            ASSERT(SUCCEEDED(hr));
+
+            mD3d11On12Device = std::move(d3d11on12Device);
+        }
+        return mD3d11On12Device;
+    }
+
+    const D3D12DeviceInfo& Device::GetDeviceInfo() const {
+        return ToBackend(GetAdapter())->GetDeviceInfo();
+    }
+
+    void Device::InitTogglesFromDriver() {
+        const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
+        SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
+        SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+        SetToggle(Toggle::UseD3D12ResidencyManagement, true);
+        SetToggle(Toggle::UseDXC, false);
+
+        // Disable optimizations when using FXC
+        // See https://crbug.com/dawn/1203
+        SetToggle(Toggle::FxcOptimizations, false);
+
+        // By default use the maximum shader-visible heap size allowed.
+        SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+
+        uint32_t deviceId = GetAdapter()->GetDeviceId();
+        uint32_t vendorId = GetAdapter()->GetVendorId();
+
+        // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
+        // See http://crbug.com/1161355 for more information.
+        if (gpu_info::IsIntel(vendorId) &&
+            (gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
+             gpu_info::IsCoffeelake(deviceId))) {
+            constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
+            if (gpu_info::CompareD3DDriverVersion(vendorId,
+                                                  ToBackend(GetAdapter())->GetDriverVersion(),
+                                                  kFirstDriverVersionWithFix) < 0) {
+                SetToggle(
+                    Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+                    true);
+            }
+        }
+    }
+
+    MaybeError Device::WaitForIdleForDestruction() {
+        // Immediately forget about all pending commands
+        mPendingCommands.Release();
+
+        DAWN_TRY(NextSerial());
+        // Wait for all in-flight commands to finish executing
+        DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
+
+        return {};
+    }
+
+    MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+        if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+            return {};
+        }
+
+        ComPtr<ID3D12InfoQueue> infoQueue;
+        DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+                              "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+        uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+
+        // Check if any errors have occurred otherwise we would be creating an empty error. Note
+        // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
+        // because we only convert WARNINGS or higher messages to dawn errors.
+        if (totalErrors == 0) {
+            return {};
+        }
+
+        std::ostringstream messages;
+        uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
+        for (uint64_t i = 0; i < errorsToPrint; ++i) {
+            SIZE_T messageLength = 0;
+            HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
+            if (FAILED(hr)) {
+                messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+                continue;
+            }
+
+            std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
+            D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
+            hr = infoQueue->GetMessage(i, message, &messageLength);
+            if (FAILED(hr)) {
+                messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+                continue;
+            }
+
+            messages << message->pDescription << " (" << message->ID << ")\n";
+        }
+        if (errorsToPrint < totalErrors) {
+            messages << (totalErrors - errorsToPrint) << " messages silenced\n";
+        }
+        // We only print up to the first kMaxDebugMessagesToPrint errors
+        infoQueue->ClearStoredMessages();
+
+        return DAWN_INTERNAL_ERROR(messages.str());
+    }
+
+    void Device::DestroyImpl() {
+        ASSERT(GetState() == State::Disconnected);
+
+        // Immediately forget about all pending commands for the case where device is lost on its
+        // own and WaitForIdleForDestruction isn't called.
+        mPendingCommands.Release();
+
+        if (mFenceEvent != nullptr) {
+            ::CloseHandle(mFenceEvent);
+        }
+
+        // Release recycled resource heaps.
+        if (mResourceAllocatorManager != nullptr) {
+            mResourceAllocatorManager->DestroyPool();
+        }
+
+        // We need to handle clearing up com object refs that were enqeued after TickImpl
+        mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
+
+        ASSERT(mUsedComObjectRefs.Empty());
+        ASSERT(!mPendingCommands.IsOpen());
+    }
+
+    ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
+        return mViewShaderVisibleDescriptorAllocator.get();
+    }
+
+    ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
+        return mSamplerShaderVisibleDescriptorAllocator.get();
+    }
+
+    StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
+        uint32_t descriptorCount) const {
+        ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
+        // This is Log2 of the next power of two, plus 1.
+        uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+        return mViewAllocators[allocatorIndex].get();
+    }
+
+    StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
+        uint32_t descriptorCount) const {
+        ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
+        // This is Log2 of the next power of two, plus 1.
+        uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+        return mSamplerAllocators[allocatorIndex].get();
+    }
+
+    StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
+        return mRenderTargetViewAllocator.get();
+    }
+
+    StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
+        return mDepthStencilViewAllocator.get();
+    }
+
+    SamplerHeapCache* Device::GetSamplerHeapCache() {
+        return mSamplerHeapCache.get();
+    }
+
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
+    }
+
+    // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
+    // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
+    // Current implementations would try to allocate additional 511 bytes,
+    // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
+    float Device::GetTimestampPeriodInNS() const {
+        return mTimestampPeriod;
+    }
+
+    bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+        ComputePipelineBase* computePipeline) const {
+        return ToBackend(computePipeline)->UsesNumWorkgroups();
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/DeviceD3D12.h b/src/dawn/native/d3d12/DeviceD3D12.h
new file mode 100644
index 0000000..1a83792
--- /dev/null
+++ b/src/dawn/native/d3d12/DeviceD3D12.h
@@ -0,0 +1,265 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_DEVICED3D12_H_
+#define DAWNNATIVE_D3D12_DEVICED3D12_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Info.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    class CommandAllocatorManager;
+    class PlatformFunctions;
+    class ResidencyManager;
+    class ResourceAllocatorManager;
+    class SamplerHeapCache;
+    class ShaderVisibleDescriptorAllocator;
+    class StagingDescriptorAllocator;
+
+#define ASSERT_SUCCESS(hr)            \
+    do {                              \
+        HRESULT succeeded = hr;       \
+        ASSERT(SUCCEEDED(succeeded)); \
+    } while (0)
+
+    // Definition of backend types
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+                                                 const DeviceDescriptor* descriptor);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+
+        MaybeError TickImpl() override;
+
+        ID3D12Device* GetD3D12Device() const;
+        ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
+        ID3D12SharingContract* GetSharingContract() const;
+
+        ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
+        ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
+        ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
+
+        CommandAllocatorManager* GetCommandAllocatorManager() const;
+        ResidencyManager* GetResidencyManager() const;
+
+        const PlatformFunctions* GetFunctions() const;
+        ComPtr<IDXGIFactory4> GetFactory() const;
+        ComPtr<IDxcLibrary> GetDxcLibrary() const;
+        ComPtr<IDxcCompiler> GetDxcCompiler() const;
+        ComPtr<IDxcValidator> GetDxcValidator() const;
+
+        ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
+
+        MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+                                     BufferBase* destination,
+                                     uint64_t destinationOffset,
+                                     uint64_t size);
+
+        const D3D12DeviceInfo& GetDeviceInfo() const;
+
+        MaybeError NextSerial();
+        MaybeError WaitForSerial(ExecutionSerial serial);
+
+        void ReferenceUntilUnused(ComPtr<IUnknown> object);
+
+        MaybeError ExecutePendingCommandContext();
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+
+        void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+                                         StagingBufferBase* source,
+                                         uint64_t sourceOffset,
+                                         BufferBase* destination,
+                                         uint64_t destinationOffset,
+                                         uint64_t size);
+
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        ResultOrError<ResourceHeapAllocation> AllocateMemory(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& resourceDescriptor,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+        ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
+        ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
+
+        // Returns nullptr when descriptor count is zero.
+        StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(
+            uint32_t descriptorCount) const;
+
+        StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
+            uint32_t descriptorCount) const;
+
+        SamplerHeapCache* GetSamplerHeapCache();
+
+        StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
+
+        StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
+
+        Ref<TextureBase> CreateD3D12ExternalTexture(
+            const TextureDescriptor* descriptor,
+            ComPtr<ID3D12Resource> d3d12Texture,
+            Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+            bool isSwapChainTexture,
+            bool isInitialized);
+
+        ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
+
+        void InitTogglesFromDriver();
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+        bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+            ComputePipelineBase* computePipeline) const override;
+
+      private:
+        using DeviceBase::DeviceBase;
+
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) override;
+        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) override;
+
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+
+        MaybeError CheckDebugLayerAndGenerateErrors();
+
+        MaybeError ApplyUseDxcToggle();
+
+        MaybeError CreateZeroBuffer();
+
+        ComPtr<ID3D12Fence> mFence;
+        HANDLE mFenceEvent = nullptr;
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+        ComPtr<ID3D12Device> mD3d12Device;  // Device is owned by adapter and will not be outlived.
+        ComPtr<ID3D12CommandQueue> mCommandQueue;
+        ComPtr<ID3D12SharingContract> mD3d12SharingContract;
+
+        // 11on12 device corresponding to mCommandQueue
+        ComPtr<ID3D11On12Device> mD3d11On12Device;
+
+        ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
+        ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
+        ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
+
+        CommandRecordingContext mPendingCommands;
+
+        SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
+
+        std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
+        std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
+        std::unique_ptr<ResidencyManager> mResidencyManager;
+
+        static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
+            3 * kMaxSamplersPerShaderStage;
+        static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
+            kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
+
+        static constexpr uint32_t kNumSamplerDescriptorAllocators =
+            ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
+        static constexpr uint32_t kNumViewDescriptorAllocators =
+            ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
+
+        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+        // the range [0, kMaxSamplerDescriptorsPerBindGroup].
+        std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
+            mViewAllocators;
+
+        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+        // the range [0, kMaxViewDescriptorsPerBindGroup].
+        std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
+            mSamplerAllocators;
+
+        std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
+
+        std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
+
+        std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
+
+        std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
+
+        // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
+        // release is called.
+        std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
+
+        // A buffer filled with zeros that is used to copy into other buffers when they need to be
+        // cleared.
+        Ref<Buffer> mZeroBuffer;
+
+        // The number of nanoseconds required for a timestamp query to be incremented by 1
+        float mTimestampPeriod = 1.0f;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_DEVICED3D12_H_
diff --git a/src/dawn/native/d3d12/Forward.h b/src/dawn/native/d3d12/Forward.h
new file mode 100644
index 0000000..a7aedb7
--- /dev/null
+++ b/src/dawn/native/d3d12/Forward.h
@@ -0,0 +1,69 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_FORWARD_H_
+#define DAWNNATIVE_D3D12_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::d3d12 {
+
+    class Adapter;
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class ComputePipeline;
+    class Device;
+    class Heap;
+    class PipelineLayout;
+    class QuerySet;
+    class Queue;
+    class RenderPipeline;
+    class Sampler;
+    class ShaderModule;
+    class StagingBuffer;
+    class SwapChain;
+    class Texture;
+    class TextureView;
+
+    struct D3D12BackendTraits {
+        using AdapterType = Adapter;
+        using BindGroupType = BindGroup;
+        using BindGroupLayoutType = BindGroupLayout;
+        using BufferType = Buffer;
+        using CommandBufferType = CommandBuffer;
+        using ComputePipelineType = ComputePipeline;
+        using DeviceType = Device;
+        using PipelineLayoutType = PipelineLayout;
+        using QuerySetType = QuerySet;
+        using QueueType = Queue;
+        using RenderPipelineType = RenderPipeline;
+        using ResourceHeapType = Heap;
+        using SamplerType = Sampler;
+        using ShaderModuleType = ShaderModule;
+        using StagingBufferType = StagingBuffer;
+        using SwapChainType = SwapChain;
+        using TextureType = Texture;
+        using TextureViewType = TextureView;
+    };
+
+    template <typename T>
+    auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
+        return ToBackendBase<D3D12BackendTraits>(common);
+    }
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_FORWARD_H_
diff --git a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
new file mode 100644
index 0000000..e5d4fb9
--- /dev/null
+++ b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(
+        D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+        ExecutionSerial lastUsageSerial,
+        HeapVersionID heapSerial)
+        : mBaseDescriptor(baseDescriptor),
+          mLastUsageSerial(lastUsageSerial),
+          mHeapSerial(heapSerial) {
+    }
+
+    D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+        return mBaseDescriptor;
+    }
+
+    ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
+        return mLastUsageSerial;
+    }
+
+    HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
+        return mHeapSerial;
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
new file mode 100644
index 0000000..7f7ce1e
--- /dev/null
+++ b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
+#define DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
+
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    // Wrapper for a handle into a GPU-only descriptor heap.
+    class GPUDescriptorHeapAllocation {
+      public:
+        GPUDescriptorHeapAllocation() = default;
+        GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                    ExecutionSerial lastUsageSerial,
+                                    HeapVersionID heapSerial);
+
+        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+        ExecutionSerial GetLastUsageSerial() const;
+        HeapVersionID GetHeapSerial() const;
+
+      private:
+        D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+        ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
+        HeapVersionID mHeapSerial = HeapVersionID(0);
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp b/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
new file mode 100644
index 0000000..5a26be3
--- /dev/null
+++ b/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
@@ -0,0 +1,71 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    HeapAllocator::HeapAllocator(Device* device,
+                                 D3D12_HEAP_TYPE heapType,
+                                 D3D12_HEAP_FLAGS heapFlags,
+                                 MemorySegment memorySegment)
+        : mDevice(device),
+          mHeapType(heapType),
+          mHeapFlags(heapFlags),
+          mMemorySegment(memorySegment) {
+    }
+
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
+        uint64_t size) {
+        D3D12_HEAP_DESC heapDesc;
+        heapDesc.SizeInBytes = size;
+        heapDesc.Properties.Type = mHeapType;
+        heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+        heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+        heapDesc.Properties.CreationNodeMask = 0;
+        heapDesc.Properties.VisibleNodeMask = 0;
+        // It is preferred to use a size that is a multiple of the alignment.
+        // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
+        // if the heap size is too small, the VMM would fragment.
+        // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
+        heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+        heapDesc.Flags = mHeapFlags;
+
+        // CreateHeap will implicitly make the created heap resident. We must ensure enough free
+        // memory exists before allocating to avoid an out-of-memory error when overcommitted.
+        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
+
+        ComPtr<ID3D12Heap> d3d12Heap;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
+            "ID3D12Device::CreateHeap"));
+
+        std::unique_ptr<ResourceHeapBase> heapBase =
+            std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
+
+        // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
+        // avoid calling MakeResident a second time.
+        mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
+        return std::move(heapBase);
+    }
+
+    void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
+        mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/HeapAllocatorD3D12.h b/src/dawn/native/d3d12/HeapAllocatorD3D12.h
new file mode 100644
index 0000000..055f739
--- /dev/null
+++ b/src/dawn/native/d3d12/HeapAllocatorD3D12.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    // Wrapper to allocate a D3D12 heap.
+    class HeapAllocator : public ResourceHeapAllocator {
+      public:
+        HeapAllocator(Device* device,
+                      D3D12_HEAP_TYPE heapType,
+                      D3D12_HEAP_FLAGS heapFlags,
+                      MemorySegment memorySegment);
+        ~HeapAllocator() override = default;
+
+        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+            uint64_t size) override;
+        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+      private:
+        Device* mDevice;
+        D3D12_HEAP_TYPE mHeapType;
+        D3D12_HEAP_FLAGS mHeapFlags;
+        MemorySegment mMemorySegment;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
diff --git a/src/dawn/native/d3d12/HeapD3D12.cpp b/src/dawn/native/d3d12/HeapD3D12.cpp
new file mode 100644
index 0000000..7426757
--- /dev/null
+++ b/src/dawn/native/d3d12/HeapD3D12.cpp
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+namespace dawn::native::d3d12 {
+    Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+        : Pageable(std::move(d3d12Pageable), memorySegment, size) {
+        mD3d12Pageable.As(&mD3d12Heap);
+    }
+
+    // This function should only be used when mD3D12Pageable was initialized from a
+    // ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
+    // ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
+    // use GetD3D12Pageable().
+    ID3D12Heap* Heap::GetD3D12Heap() const {
+        return mD3d12Heap.Get();
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/HeapD3D12.h b/src/dawn/native/d3d12/HeapD3D12.h
new file mode 100644
index 0000000..c160366
--- /dev/null
+++ b/src/dawn/native/d3d12/HeapD3D12.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPD3D12_H_
+
+#include "dawn/native/ResourceHeap.h"
+#include "dawn/native/d3d12/PageableD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
+    // representing a directly allocated resource. It inherits from Pageable because each Heap must
+    // be represented in the ResidencyManager.
+    class Heap : public ResourceHeapBase, public Pageable {
+      public:
+        Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+
+        ID3D12Heap* GetD3D12Heap() const;
+
+      private:
+        ComPtr<ID3D12Heap> mD3d12Heap;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_HEAPD3D12_H_
diff --git a/src/dawn/native/d3d12/IntegerTypes.h b/src/dawn/native/d3d12/IntegerTypes.h
new file mode 100644
index 0000000..1e3dbfb
--- /dev/null
+++ b/src/dawn/native/d3d12/IntegerTypes.h
@@ -0,0 +1,31 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_INTEGERTYPES_H_
+#define DAWNNATIVE_D3D12_INTEGERTYPES_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/TypedInteger.h"
+
+#include <cstdint>
+
+namespace dawn::native::d3d12 {
+
+    // An ID used to desambiguate between multiple uses of the same descriptor heap in the
+    // BindGroup allocations.
+    using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_INTEGERTYPES_H_
diff --git a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
new file mode 100644
index 0000000..5156af5
--- /dev/null
+++ b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -0,0 +1,120 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/NativeSwapChainImplD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
+            DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
+            if (allowedUsages & WGPUTextureUsage_TextureBinding) {
+                usage |= DXGI_USAGE_SHADER_INPUT;
+            }
+            if (allowedUsages & WGPUTextureUsage_StorageBinding) {
+                usage |= DXGI_USAGE_UNORDERED_ACCESS;
+            }
+            if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
+                usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+            }
+            return usage;
+        }
+
+        static constexpr unsigned int kFrameCount = 3;
+    }  // anonymous namespace
+
+    NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
+        : mWindow(window), mDevice(device), mInterval(1) {
+    }
+
+    NativeSwapChainImpl::~NativeSwapChainImpl() {
+    }
+
+    void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                      WGPUTextureUsage usage,
+                                                      uint32_t width,
+                                                      uint32_t height) {
+        ASSERT(width > 0);
+        ASSERT(height > 0);
+        ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+
+        ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
+        ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
+
+        mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
+
+        // Create the D3D12 swapchain, assuming only two buffers for now
+        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+        swapChainDesc.Width = width;
+        swapChainDesc.Height = height;
+        swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
+        swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
+        swapChainDesc.BufferCount = kFrameCount;
+        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+        swapChainDesc.SampleDesc.Count = 1;
+        swapChainDesc.SampleDesc.Quality = 0;
+
+        ComPtr<IDXGISwapChain1> swapChain1;
+        ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc,
+                                                       nullptr, nullptr, &swapChain1));
+
+        ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
+
+        // Gather the resources that will be used to present to the swapchain
+        mBuffers.resize(kFrameCount);
+        for (uint32_t i = 0; i < kFrameCount; ++i) {
+            ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
+        }
+
+        // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
+        // used
+        mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+        mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
+        nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
+
+        // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
+        // with the buffer. Ideally the synchronization should be all done on the GPU.
+        ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Present() {
+        // This assumes the texture has already been transition to the PRESENT state.
+
+        ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
+        // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
+        ASSERT(mDevice->NextSerial().IsSuccess());
+
+        mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+        return wgpu::TextureFormat::RGBA8Unorm;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
new file mode 100644
index 0000000..8ed5ee2
--- /dev/null
+++ b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
@@ -0,0 +1,60 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
+#define DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <vector>
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class NativeSwapChainImpl {
+      public:
+        using WSIContext = DawnWSIContextD3D12;
+
+        NativeSwapChainImpl(Device* device, HWND window);
+        ~NativeSwapChainImpl();
+
+        void Init(DawnWSIContextD3D12* context);
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage,
+                                     uint32_t width,
+                                     uint32_t height);
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+        DawnSwapChainError Present();
+
+        wgpu::TextureFormat GetPreferredFormat() const;
+
+      private:
+        HWND mWindow = nullptr;
+        Device* mDevice = nullptr;
+        UINT mInterval;
+
+        ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
+        std::vector<ComPtr<ID3D12Resource>> mBuffers;
+        std::vector<ExecutionSerial> mBufferSerials;
+        uint32_t mCurrentBuffer;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
diff --git a/src/dawn/native/d3d12/PageableD3D12.cpp b/src/dawn/native/d3d12/PageableD3D12.cpp
new file mode 100644
index 0000000..1394209
--- /dev/null
+++ b/src/dawn/native/d3d12/PageableD3D12.cpp
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PageableD3D12.h"
+
+namespace dawn::native::d3d12 {
+    Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
+                       MemorySegment memorySegment,
+                       uint64_t size)
+        : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
+    }
+
+    // When a pageable is destroyed, it no longer resides in resident memory, so we must evict
+    // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
+    // ResidencyManager will attempt to use it after it has been deallocated.
+    Pageable::~Pageable() {
+        if (IsInResidencyLRUCache()) {
+            RemoveFromList();
+        }
+    }
+
+    ID3D12Pageable* Pageable::GetD3D12Pageable() const {
+        return mD3d12Pageable.Get();
+    }
+
+    ExecutionSerial Pageable::GetLastUsage() const {
+        return mLastUsage;
+    }
+
+    void Pageable::SetLastUsage(ExecutionSerial serial) {
+        mLastUsage = serial;
+    }
+
+    ExecutionSerial Pageable::GetLastSubmission() const {
+        return mLastSubmission;
+    }
+
+    void Pageable::SetLastSubmission(ExecutionSerial serial) {
+        mLastSubmission = serial;
+    }
+
+    MemorySegment Pageable::GetMemorySegment() const {
+        return mMemorySegment;
+    }
+
+    uint64_t Pageable::GetSize() const {
+        return mSize;
+    }
+
+    bool Pageable::IsInResidencyLRUCache() const {
+        return IsInList();
+    }
+
+    void Pageable::IncrementResidencyLock() {
+        mResidencyLockRefCount++;
+    }
+
+    void Pageable::DecrementResidencyLock() {
+        mResidencyLockRefCount--;
+    }
+
+    bool Pageable::IsResidencyLocked() const {
+        return mResidencyLockRefCount != 0;
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PageableD3D12.h b/src/dawn/native/d3d12/PageableD3D12.h
new file mode 100644
index 0000000..19355dc
--- /dev/null
+++ b/src/dawn/native/d3d12/PageableD3D12.h
@@ -0,0 +1,80 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+#define DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+    // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
+    // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+    // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+    // LRU cache when it is evicted from resident memory due to budget constraints, or when the
+    // pageable allocation is released.
+    class Pageable : public LinkNode<Pageable> {
+      public:
+        Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+        ~Pageable();
+
+        ID3D12Pageable* GetD3D12Pageable() const;
+
+        // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
+        // used. We must check this serial against the current serial when recording usages to
+        // ensure we do not process residency for this pageable multiple times.
+        ExecutionSerial GetLastUsage() const;
+        void SetLastUsage(ExecutionSerial serial);
+
+        // The residency manager must know the last serial that any portion of the pageable was
+        // submitted to be used so that we can ensure this pageable stays resident in memory at
+        // least until that serial has completed.
+        ExecutionSerial GetLastSubmission() const;
+        void SetLastSubmission(ExecutionSerial serial);
+
+        MemorySegment GetMemorySegment() const;
+
+        uint64_t GetSize() const;
+
+        bool IsInResidencyLRUCache() const;
+
+        // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
+        // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
+        // mapped in a single heap, we must track the number of resources currently locked.
+        void IncrementResidencyLock();
+        void DecrementResidencyLock();
+        bool IsResidencyLocked() const;
+
+      protected:
+        ComPtr<ID3D12Pageable> mD3d12Pageable;
+
+      private:
+        // mLastUsage denotes the last time this pageable was recorded for use.
+        ExecutionSerial mLastUsage = ExecutionSerial(0);
+        // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
+        // although this variable often contains the same value as mLastUsage, it can differ in some
+        // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
+        // updated upon the call, but the backend operation is deferred until the next submission
+        // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
+        // accurately identify when a pageable can be evicted.
+        ExecutionSerial mLastSubmission = ExecutionSerial(0);
+        MemorySegment mMemorySegment;
+        uint32_t mResidencyLockRefCount = 0;
+        uint64_t mSize = 0;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif
diff --git a/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp b/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
new file mode 100644
index 0000000..794a763
--- /dev/null
+++ b/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
@@ -0,0 +1,377 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include <sstream>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace dawn::native::d3d12 {
+    namespace {
+
+        // Reserve register names for internal use. This registers map to bindings in the shader,
+        // but are not directly related to allocation of the root signature.
+        // In the root signature, it the index of the root parameter where these registers are
+        // used that determines the layout of the root signature.
+        static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
+        static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
+
+        static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
+        static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
+
+        static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
+            std::numeric_limits<uint32_t>::max();
+
+        D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
+            ASSERT(visibility != wgpu::ShaderStage::None);
+
+            if (visibility == wgpu::ShaderStage::Vertex) {
+                return D3D12_SHADER_VISIBILITY_VERTEX;
+            }
+
+            if (visibility == wgpu::ShaderStage::Fragment) {
+                return D3D12_SHADER_VISIBILITY_PIXEL;
+            }
+
+            // For compute or any two combination of stages, visibility must be ALL
+            return D3D12_SHADER_VISIBILITY_ALL;
+        }
+
+        D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
+            switch (type) {
+                case wgpu::BufferBindingType::Uniform:
+                    return D3D12_ROOT_PARAMETER_TYPE_CBV;
+                case wgpu::BufferBindingType::Storage:
+                case kInternalStorageBufferBinding:
+                    return D3D12_ROOT_PARAMETER_TYPE_UAV;
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    return D3D12_ROOT_PARAMETER_TYPE_SRV;
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+    }  // anonymous namespace
+
+    ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+        Device* device,
+        const PipelineLayoutDescriptor* descriptor) {
+        Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+        DAWN_TRY(layout->Initialize());
+        return layout;
+    }
+
+    MaybeError PipelineLayout::Initialize() {
+        Device* device = ToBackend(GetDevice());
+        // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
+        // descriptor.
+        std::vector<D3D12_ROOT_PARAMETER> rootParameters;
+
+        size_t rangesCount = 0;
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+            rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
+                           bindGroupLayout->GetSamplerDescriptorRanges().size();
+        }
+
+        // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
+        std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
+
+        uint32_t rangeIndex = 0;
+
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+
+            // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
+            // bind group index Returns whether or not the parameter was set. A root parameter is
+            // not set if the number of ranges is 0
+            auto SetRootDescriptorTable =
+                [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
+                auto rangeCount = descriptorRanges.size();
+                if (rangeCount == 0) {
+                    return false;
+                }
+
+                D3D12_ROOT_PARAMETER rootParameter = {};
+                rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+                rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+                rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
+                rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
+
+                for (auto& range : descriptorRanges) {
+                    ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
+                    ranges[rangeIndex] = range;
+                    ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
+                    rangeIndex++;
+                }
+
+                rootParameters.emplace_back(rootParameter);
+
+                return true;
+            };
+
+            if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
+                mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
+            }
+            if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
+                mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
+            }
+
+            // Init root descriptors in root signatures for dynamic buffer bindings.
+            // These are packed at the beginning of the layout binding info.
+            for (BindingIndex dynamicBindingIndex{0};
+                 dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
+                 ++dynamicBindingIndex) {
+                const BindingInfo& bindingInfo =
+                    bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
+
+                if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+                    // Skip dynamic buffers that are not visible. D3D12 does not have None
+                    // visibility.
+                    continue;
+                }
+
+                D3D12_ROOT_PARAMETER rootParameter = {};
+
+                // Setup root descriptor.
+                D3D12_ROOT_DESCRIPTOR rootDescriptor;
+                rootDescriptor.ShaderRegister =
+                    bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
+                rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
+
+                // Set root descriptors in root signatures.
+                rootParameter.Descriptor = rootDescriptor;
+                mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
+
+                // Set parameter types according to bind group layout descriptor.
+                rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
+
+                // Set visibilities according to bind group layout descriptor.
+                rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+
+                rootParameters.emplace_back(rootParameter);
+            }
+        }
+
+        // Make sure that we added exactly the number of elements we expected. If we added more,
+        // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
+        ASSERT(rangeIndex == rangesCount);
+
+        D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
+        renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+        renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+        // Always allocate 3 constants for either:
+        //  - vertex_index and instance_index
+        //  - num_workgroups_x, num_workgroups_y and num_workgroups_z
+        // NOTE: We should consider delaying root signature creation until we know how many values
+        // we need
+        renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
+        renderOrComputeInternalConstants.Constants.RegisterSpace =
+            kRenderOrComputeInternalRegisterSpace;
+        renderOrComputeInternalConstants.Constants.ShaderRegister =
+            kRenderOrComputeInternalBaseRegister;
+        mFirstIndexOffsetParameterIndex = rootParameters.size();
+        mNumWorkgroupsParameterIndex = rootParameters.size();
+        // NOTE: We should consider moving this entry to earlier in the root signature since offsets
+        // would need to be updated often
+        rootParameters.emplace_back(renderOrComputeInternalConstants);
+
+        // Loops over all of the dynamic storage buffer bindings in the layout and build
+        // a mapping from the binding to the next offset into the root constant array where
+        // that dynamic storage buffer's binding size will be stored. The next register offset
+        // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
+        // This data will be used by shader translation to emit a load from the root constant
+        // array to use as the binding's size in runtime array calculations.
+        // Each bind group's length data is stored contiguously in the root constant array,
+        // so the loop also computes the first register offset for each group where the
+        // data should start.
+        uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+
+            mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
+                dynamicStorageBufferLengthsShaderRegisterOffset;
+            mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
+                bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+
+            for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+                 ++bindingIndex) {
+                if (bgl->IsStorageBufferBinding(bindingIndex)) {
+                    mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
+                        {bgl->GetBindingInfo(bindingIndex).binding,
+                         dynamicStorageBufferLengthsShaderRegisterOffset++});
+                }
+            }
+
+            ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
+                   bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+        }
+        ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
+               kMaxDynamicStorageBuffersPerPipelineLayout);
+
+        if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
+            D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
+            dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+            dynamicStorageBufferLengthConstants.ParameterType =
+                D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+            dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
+                dynamicStorageBufferLengthsShaderRegisterOffset;
+            dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
+                kDynamicStorageBufferLengthsRegisterSpace;
+            dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
+                kDynamicStorageBufferLengthsBaseRegister;
+            mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
+            rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
+        } else {
+            mDynamicStorageBufferLengthsParameterIndex =
+                kInvalidDynamicStorageBufferLengthsParameterIndex;
+        }
+
+        D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
+        rootSignatureDescriptor.NumParameters = rootParameters.size();
+        rootSignatureDescriptor.pParameters = rootParameters.data();
+        rootSignatureDescriptor.NumStaticSamplers = 0;
+        rootSignatureDescriptor.pStaticSamplers = nullptr;
+        rootSignatureDescriptor.Flags =
+            D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
+
+        ComPtr<ID3DBlob> signature;
+        ComPtr<ID3DBlob> error;
+        HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
+            &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
+        if (DAWN_UNLIKELY(FAILED(hr))) {
+            std::ostringstream messageStream;
+            if (error) {
+                messageStream << static_cast<const char*>(error->GetBufferPointer());
+
+                // |error| is observed to always end with a \n, but is not
+                // specified to do so, so we add an extra newline just in case.
+                messageStream << std::endl;
+            }
+            messageStream << "D3D12 serialize root signature";
+            DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
+        }
+        DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
+                                  0, signature->GetBufferPointer(), signature->GetBufferSize(),
+                                  IID_PPV_ARGS(&mRootSignature)),
+                              "D3D12 create root signature"));
+        return {};
+    }
+
+    uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
+        ASSERT(group < kMaxBindGroupsTyped);
+        return mCbvUavSrvRootParameterInfo[group];
+    }
+
+    uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
+        ASSERT(group < kMaxBindGroupsTyped);
+        return mSamplerRootParameterInfo[group];
+    }
+
+    ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
+        return mRootSignature.Get();
+    }
+
+    const PipelineLayout::DynamicStorageBufferLengthInfo&
+    PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
+        return mDynamicStorageBufferLengthInfo;
+    }
+
+    uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
+                                                          BindingIndex bindingIndex) const {
+        ASSERT(group < kMaxBindGroupsTyped);
+        ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
+        ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
+        ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
+               wgpu::ShaderStage::None);
+        return mDynamicRootParameterIndices[group][bindingIndex];
+    }
+
+    uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
+        return kRenderOrComputeInternalRegisterSpace;
+    }
+
+    uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
+        return kRenderOrComputeInternalBaseRegister;
+    }
+
+    uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
+        return mFirstIndexOffsetParameterIndex;
+    }
+
+    uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
+        return kRenderOrComputeInternalRegisterSpace;
+    }
+
+    uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
+        return kRenderOrComputeInternalBaseRegister;
+    }
+
+    uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
+        return mNumWorkgroupsParameterIndex;
+    }
+
+    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
+        return kDynamicStorageBufferLengthsRegisterSpace;
+    }
+
+    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
+        return kDynamicStorageBufferLengthsBaseRegister;
+    }
+
+    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
+        ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
+               kInvalidDynamicStorageBufferLengthsParameterIndex);
+        return mDynamicStorageBufferLengthsParameterIndex;
+    }
+
+    ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
+        // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
+        if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
+            return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+        }
+
+        D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+        argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+        argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
+        argumentDescs[0].Constant.Num32BitValuesToSet = 3;
+        argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+        // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+        // command. That command must come last.
+        argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+        programDesc.ByteStride = 6 * sizeof(uint32_t);
+        programDesc.NumArgumentDescs = 2;
+        programDesc.pArgumentDescs = argumentDescs;
+
+        // The root signature must be specified if and only if the command signature changes one of
+        // the root arguments.
+        ToBackend(GetDevice())
+            ->GetD3D12Device()
+            ->CreateCommandSignature(
+                &programDesc, GetRootSignature(),
+                IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
+        return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PipelineLayoutD3D12.h b/src/dawn/native/d3d12/PipelineLayoutD3D12.h
new file mode 100644
index 0000000..d1e8453
--- /dev/null
+++ b/src/dawn/native/d3d12/PipelineLayoutD3D12.h
@@ -0,0 +1,100 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class PipelineLayout final : public PipelineLayoutBase {
+      public:
+        static ResultOrError<Ref<PipelineLayout>> Create(
+            Device* device,
+            const PipelineLayoutDescriptor* descriptor);
+
+        uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
+        uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
+
+        // Returns the index of the root parameter reserved for a dynamic buffer binding
+        uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
+                                              BindingIndex bindingIndex) const;
+
+        uint32_t GetFirstIndexOffsetRegisterSpace() const;
+        uint32_t GetFirstIndexOffsetShaderRegister() const;
+        uint32_t GetFirstIndexOffsetParameterIndex() const;
+
+        uint32_t GetNumWorkgroupsRegisterSpace() const;
+        uint32_t GetNumWorkgroupsShaderRegister() const;
+        uint32_t GetNumWorkgroupsParameterIndex() const;
+
+        uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
+        uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
+        uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
+
+        ID3D12RootSignature* GetRootSignature() const;
+
+        ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+
+        struct PerBindGroupDynamicStorageBufferLengthInfo {
+            // First register offset for a bind group's dynamic storage buffer lengths.
+            // This is the index into the array of root constants where this bind group's
+            // lengths start.
+            uint32_t firstRegisterOffset;
+
+            struct BindingAndRegisterOffset {
+                BindingNumber binding;
+                uint32_t registerOffset;
+            };
+            // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
+            // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
+            // into the root constant array which holds the dynamic storage buffer lengths.
+            std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
+        };
+
+        // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
+        // Each pair is used in shader translation to
+        using DynamicStorageBufferLengthInfo =
+            ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
+
+        const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
+
+      private:
+        ~PipelineLayout() override = default;
+        using PipelineLayoutBase::PipelineLayoutBase;
+        MaybeError Initialize();
+        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
+        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
+        ityp::array<BindGroupIndex,
+                    ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
+                    kMaxBindGroups>
+            mDynamicRootParameterIndices;
+        DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
+        uint32_t mFirstIndexOffsetParameterIndex;
+        uint32_t mNumWorkgroupsParameterIndex;
+        uint32_t mDynamicStorageBufferLengthsParameterIndex;
+        ComPtr<ID3D12RootSignature> mRootSignature;
+        ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
diff --git a/src/dawn/native/d3d12/PlatformFunctions.cpp b/src/dawn/native/d3d12/PlatformFunctions.cpp
new file mode 100644
index 0000000..786ae5a
--- /dev/null
+++ b/src/dawn/native/d3d12/PlatformFunctions.cpp
@@ -0,0 +1,271 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+#include "dawn/common/DynamicLib.h"
+
+#include <comdef.h>
+#include <array>
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+    namespace {
+        // Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
+        uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
+            constexpr char kPrefix[] = "10.0.";
+            constexpr char kPostfix[] = ".0";
+
+            constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
+            constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
+            const uint32_t directoryNameLen = strlen(directoryName);
+
+            if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
+                return 0;
+            }
+
+            // Check if directoryName starts with "10.0.".
+            if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
+                return 0;
+            }
+
+            // Check if directoryName ends with ".0".
+            if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) !=
+                0) {
+                return 0;
+            }
+
+            // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
+            return atoi(directoryName + kPrefixLen);
+        }
+
+        class ScopedFileHandle final {
+          public:
+            explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {
+            }
+            ~ScopedFileHandle() {
+                if (mHandle != INVALID_HANDLE_VALUE) {
+                    ASSERT(FindClose(mHandle));
+                }
+            }
+            HANDLE GetHandle() const {
+                return mHandle;
+            }
+
+          private:
+            HANDLE mHandle;
+        };
+
+        std::string GetWindowsSDKBasePath() {
+            const char* kDefaultWindowsSDKPath =
+                "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
+            WIN32_FIND_DATAA fileData;
+            ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
+            if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
+                return "";
+            }
+
+            uint32_t highestWindowsSDKVersion = 0;
+            do {
+                if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+                    continue;
+                }
+
+                highestWindowsSDKVersion =
+                    std::max(highestWindowsSDKVersion,
+                             GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
+            } while (FindNextFileA(handle.GetHandle(), &fileData));
+
+            if (highestWindowsSDKVersion == 0) {
+                return "";
+            }
+
+            // Currently we only support using DXC on x64.
+            std::ostringstream ostream;
+            ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0."
+                    << highestWindowsSDKVersion << ".0\\x64\\";
+
+            return ostream.str();
+        }
+    }  // anonymous namespace
+
+    PlatformFunctions::PlatformFunctions() = default;
+    PlatformFunctions::~PlatformFunctions() = default;
+
+    MaybeError PlatformFunctions::LoadFunctions() {
+        DAWN_TRY(LoadD3D12());
+        DAWN_TRY(LoadDXGI());
+        LoadDXCLibraries();
+        DAWN_TRY(LoadFXCompiler());
+        DAWN_TRY(LoadD3D11());
+        LoadPIXRuntime();
+        return {};
+    }
+
+    MaybeError PlatformFunctions::LoadD3D12() {
+#if DAWN_PLATFORM_WINUWP
+        d3d12CreateDevice = &D3D12CreateDevice;
+        d3d12GetDebugInterface = &D3D12GetDebugInterface;
+        d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
+        d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
+        d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
+        d3d12CreateVersionedRootSignatureDeserializer =
+            &D3D12CreateVersionedRootSignatureDeserializer;
+#else
+        std::string error;
+        if (!mD3D12Lib.Open("d3d12.dll", &error) ||
+            !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
+            !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
+            !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature",
+                               &error) ||
+            !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
+                               "D3D12CreateRootSignatureDeserializer", &error) ||
+            !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
+                               "D3D12SerializeVersionedRootSignature", &error) ||
+            !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
+                               "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
+            return DAWN_INTERNAL_ERROR(error.c_str());
+        }
+#endif
+
+        return {};
+    }
+
+    MaybeError PlatformFunctions::LoadD3D11() {
+#if DAWN_PLATFORM_WINUWP
+        d3d11on12CreateDevice = &D3D11On12CreateDevice;
+#else
+        std::string error;
+        if (!mD3D11Lib.Open("d3d11.dll", &error) ||
+            !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
+            return DAWN_INTERNAL_ERROR(error.c_str());
+        }
+#endif
+
+        return {};
+    }
+
+    MaybeError PlatformFunctions::LoadDXGI() {
+#if DAWN_PLATFORM_WINUWP
+#    if defined(_DEBUG)
+        // DXGIGetDebugInterface1 is tagged as a development-only capability
+        // which implies that linking to this function will cause
+        // the application to fail Windows store certification
+        // But we need it when debuging using VS Graphics Diagnostics or PIX
+        // So we only link to it in debug build
+        dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
+#    endif
+        createDxgiFactory2 = &CreateDXGIFactory2;
+#else
+        std::string error;
+        if (!mDXGILib.Open("dxgi.dll", &error) ||
+            !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
+            !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
+            return DAWN_INTERNAL_ERROR(error.c_str());
+        }
+#endif
+
+        return {};
+    }
+
+    void PlatformFunctions::LoadDXCLibraries() {
+        // TODO(dawn:766)
+        // Statically linked with dxcompiler.lib in UWP
+        // currently linked with dxcompiler.lib making CoreApp unable to activate
+        // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
+        // successfully executed.
+
+        const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
+
+        LoadDXIL(windowsSDKBasePath);
+        LoadDXCompiler(windowsSDKBasePath);
+    }
+
+    void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
+        const char* dxilDLLName = "dxil.dll";
+        const std::array<std::string, 2> kDxilDLLPaths = {
+            {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
+
+        for (const std::string& dxilDLLPath : kDxilDLLPaths) {
+            if (mDXILLib.Open(dxilDLLPath, nullptr)) {
+                return;
+            }
+        }
+        ASSERT(!mDXILLib.Valid());
+    }
+
+    void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
+        // DXIL must be loaded before DXC, otherwise shader signing is unavailable
+        if (!mDXILLib.Valid()) {
+            return;
+        }
+
+        const char* dxCompilerDLLName = "dxcompiler.dll";
+        const std::array<std::string, 2> kDxCompilerDLLPaths = {
+            {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
+
+        DynamicLib dxCompilerLib;
+        for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
+            if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
+                break;
+            }
+        }
+
+        if (dxCompilerLib.Valid() &&
+            dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
+            mDXCompilerLib = std::move(dxCompilerLib);
+        } else {
+            mDXILLib.Close();
+        }
+    }
+
+    MaybeError PlatformFunctions::LoadFXCompiler() {
+#if DAWN_PLATFORM_WINUWP
+        d3dCompile = &D3DCompile;
+        d3dDisassemble = &D3DDisassemble;
+#else
+        std::string error;
+        if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
+            !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
+            !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
+            return DAWN_INTERNAL_ERROR(error.c_str());
+        }
+#endif
+        return {};
+    }
+
+    bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
+        return mPIXEventRuntimeLib.Valid();
+    }
+
+    bool PlatformFunctions::IsDXCAvailable() const {
+        return mDXILLib.Valid() && mDXCompilerLib.Valid();
+    }
+
+    void PlatformFunctions::LoadPIXRuntime() {
+        // TODO(dawn:766):
+        // In UWP PIX should be statically linked WinPixEventRuntime_UAP.lib
+        // So maybe we should put WinPixEventRuntime as a third party package
+        // Currently PIX is not going to be loaded in UWP since the following
+        // mPIXEventRuntimeLib.Open will fail.
+        if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
+            !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
+                                         "PIXBeginEventOnCommandList") ||
+            !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
+            !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
+            mPIXEventRuntimeLib.Close();
+        }
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PlatformFunctions.h b/src/dawn/native/d3d12/PlatformFunctions.h
new file mode 100644
index 0000000..a236b1a
--- /dev/null
+++ b/src/dawn/native/d3d12/PlatformFunctions.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
+#define DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/Error.h"
+
+#include <d3dcompiler.h>
+
+namespace dawn::native::d3d12 {
+
+    // Loads the functions required from the platform dynamically so that we don't need to rely on
+    // them being present in the system. For example linking against d3d12.lib would prevent
+    // dawn_native from loading on Windows 7 system where d3d12.dll doesn't exist.
+    class PlatformFunctions {
+      public:
+        PlatformFunctions();
+        ~PlatformFunctions();
+
+        MaybeError LoadFunctions();
+        bool IsPIXEventRuntimeLoaded() const;
+        bool IsDXCAvailable() const;
+
+        // Functions from d3d12.dll
+        PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
+        PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
+
+        PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
+        PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
+        PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
+        PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
+        d3d12CreateVersionedRootSignatureDeserializer = nullptr;
+
+        // Functions from dxgi.dll
+        using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
+                                                               REFIID riid,
+                                                               _COM_Outptr_ void** pDebug);
+        PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
+
+        using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
+                                                          REFIID riid,
+                                                          _COM_Outptr_ void** ppFactory);
+        PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
+
+        // Functions from dxcompiler.dll
+        using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
+                                                         REFIID riid,
+                                                         _COM_Outptr_ void** ppCompiler);
+        PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
+
+        // Functions from d3d3compiler.dll
+        pD3DCompile d3dCompile = nullptr;
+        pD3DDisassemble d3dDisassemble = nullptr;
+
+        // Functions from WinPixEventRuntime.dll
+        using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
+            HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
+
+        PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
+
+        using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
+            WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+
+        PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
+
+        using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(
+            WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+
+        PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
+
+        // Functions from D3D11.dll
+        PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
+
+      private:
+        MaybeError LoadD3D12();
+        MaybeError LoadD3D11();
+        MaybeError LoadDXGI();
+        void LoadDXCLibraries();
+        void LoadDXIL(const std::string& baseWindowsSDKPath);
+        void LoadDXCompiler(const std::string& baseWindowsSDKPath);
+        MaybeError LoadFXCompiler();
+        void LoadPIXRuntime();
+
+        DynamicLib mD3D12Lib;
+        DynamicLib mD3D11Lib;
+        DynamicLib mDXGILib;
+        DynamicLib mDXILLib;
+        DynamicLib mDXCompilerLib;
+        DynamicLib mFXCompilerLib;
+        DynamicLib mPIXEventRuntimeLib;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
diff --git a/src/dawn/native/d3d12/QuerySetD3D12.cpp b/src/dawn/native/d3d12/QuerySetD3D12.cpp
new file mode 100644
index 0000000..458c23d
--- /dev/null
+++ b/src/dawn/native/d3d12/QuerySetD3D12.cpp
@@ -0,0 +1,75 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
+            switch (type) {
+                case wgpu::QueryType::Occlusion:
+                    return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
+                case wgpu::QueryType::PipelineStatistics:
+                    return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
+                case wgpu::QueryType::Timestamp:
+                    return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
+            }
+        }
+    }  // anonymous namespace
+
+    // static
+    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                                  const QuerySetDescriptor* descriptor) {
+        Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
+        DAWN_TRY(querySet->Initialize());
+        return querySet;
+    }
+
+    MaybeError QuerySet::Initialize() {
+        D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
+        queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
+        queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
+
+        ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
+            "ID3D12Device::CreateQueryHeap"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
+        return mQueryHeap.Get();
+    }
+
+    QuerySet::~QuerySet() = default;
+
+    void QuerySet::DestroyImpl() {
+        QuerySetBase::DestroyImpl();
+        ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
+        mQueryHeap = nullptr;
+    }
+
+    void QuerySet::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/QuerySetD3D12.h b/src/dawn/native/d3d12/QuerySetD3D12.h
new file mode 100644
index 0000000..5ace792
--- /dev/null
+++ b/src/dawn/native/d3d12/QuerySetD3D12.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+#define DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class QuerySet : public QuerySetBase {
+      public:
+        static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                                   const QuerySetDescriptor* descriptor);
+
+        ID3D12QueryHeap* GetQueryHeap() const;
+
+      private:
+        ~QuerySet() override;
+        using QuerySetBase::QuerySetBase;
+        MaybeError Initialize();
+
+        // Dawn API
+        void DestroyImpl() override;
+        void SetLabelImpl() override;
+
+        ComPtr<ID3D12QueryHeap> mQueryHeap;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_QUERYSETD3D12_H_
diff --git a/src/dawn/native/d3d12/QueueD3D12.cpp b/src/dawn/native/d3d12/QueueD3D12.cpp
new file mode 100644
index 0000000..cb92f21
--- /dev/null
+++ b/src/dawn/native/d3d12/QueueD3D12.cpp
@@ -0,0 +1,54 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/QueueD3D12.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::d3d12 {
+
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+        Device* device = ToBackend(GetDevice());
+
+        DAWN_TRY(device->Tick());
+
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+                           "CommandBufferD3D12::RecordCommands");
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
+        }
+        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
+                         "CommandBufferD3D12::RecordCommands");
+
+        DAWN_TRY(device->ExecutePendingCommandContext());
+
+        DAWN_TRY(device->NextSerial());
+        return {};
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/QueueD3D12.h b/src/dawn/native/d3d12/QueueD3D12.h
new file mode 100644
index 0000000..6f15a7d
--- /dev/null
+++ b/src/dawn/native/d3d12/QueueD3D12.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_QUEUED3D12_H_
+#define DAWNNATIVE_D3D12_QUEUED3D12_H_
+
+#include "dawn/native/Queue.h"
+
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class Queue final : public QueueBase {
+      public:
+        Queue(Device* device);
+
+      private:
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_QUEUED3D12_H_
diff --git a/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp b/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
new file mode 100644
index 0000000..fc41331
--- /dev/null
+++ b/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
@@ -0,0 +1,250 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/RenderPassBuilderD3D12.h"
+
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
+            switch (loadOp) {
+                case wgpu::LoadOp::Clear:
+                    return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
+                case wgpu::LoadOp::Load:
+                    return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
+                case wgpu::LoadOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+
+        D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
+            switch (storeOp) {
+                case wgpu::StoreOp::Discard:
+                    return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
+                case wgpu::StoreOp::Store:
+                    return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+
+        D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
+            wgpu::StoreOp storeOp,
+            TextureView* resolveSource,
+            TextureView* resolveDestination) {
+            D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
+
+            resolveParameters.Format = resolveDestination->GetD3D12Format();
+            resolveParameters.pSrcResource =
+                ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
+            resolveParameters.pDstResource =
+                ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
+
+            // Clear or preserve the resolve source.
+            if (storeOp == wgpu::StoreOp::Discard) {
+                resolveParameters.PreserveResolveSource = false;
+            } else if (storeOp == wgpu::StoreOp::Store) {
+                resolveParameters.PreserveResolveSource = true;
+            }
+
+            // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
+            // TODO: Investigate and determine how integer format resolves should work in WebGPU.
+            switch (resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                case wgpu::TextureComponentType::Sint:
+                case wgpu::TextureComponentType::Uint:
+                    resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_MAX;
+                    break;
+                case wgpu::TextureComponentType::Float:
+                    resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
+                    break;
+
+                case wgpu::TextureComponentType::DepthComparison:
+                    UNREACHABLE();
+            }
+
+            resolveParameters.SubresourceCount = 1;
+
+            return resolveParameters;
+        }
+
+        D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
+        D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
+            D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
+            Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+            ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
+
+            subresourceParameters.DstX = 0;
+            subresourceParameters.DstY = 0;
+            subresourceParameters.SrcSubresource = 0;
+            subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
+                resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
+                Aspect::Color);
+            // Resolving a specified sub-rect is only valid on hardware that supports sample
+            // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
+            // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
+            // "empty" to resolve the entire region.
+            subresourceParameters.SrcRect = {0, 0, 0, 0};
+
+            return subresourceParameters;
+        }
+    }  // anonymous namespace
+
+    RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
+        if (hasUAV) {
+            mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
+        }
+    }
+
+    void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+                                                D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                                bool isNullRTV) {
+        mRenderTargetViews[attachmentIndex] = baseDescriptor;
+        mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
+        if (!isNullRTV) {
+            mHighestColorAttachmentIndexPlusOne =
+                std::max(mHighestColorAttachmentIndexPlusOne,
+                         ColorAttachmentIndex{
+                             static_cast<uint8_t>(static_cast<uint8_t>(attachmentIndex) + 1u)});
+        }
+    }
+
+    void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
+        mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
+    }
+
+    ColorAttachmentIndex RenderPassBuilder::GetHighestColorAttachmentIndexPlusOne() const {
+        return mHighestColorAttachmentIndexPlusOne;
+    }
+
+    bool RenderPassBuilder::HasDepthOrStencil() const {
+        return mHasDepthOrStencil;
+    }
+
+    ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+    RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
+        return {mRenderPassRenderTargetDescriptors.data(), mHighestColorAttachmentIndexPlusOne};
+    }
+
+    const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
+    RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
+        return &mRenderPassDepthStencilDesc;
+    }
+
+    D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
+        return mRenderPassFlags;
+    }
+
+    const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
+        return mRenderTargetViews.data();
+    }
+
+    void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+                                                           wgpu::LoadOp loadOp,
+                                                           dawn::native::Color clearColor,
+                                                           DXGI_FORMAT format) {
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
+            D3D12BeginningAccessType(loadOp);
+        if (loadOp == wgpu::LoadOp::Clear) {
+            mRenderPassRenderTargetDescriptors[attachment]
+                .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
+            mRenderPassRenderTargetDescriptors[attachment]
+                .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
+            mRenderPassRenderTargetDescriptors[attachment]
+                .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
+            mRenderPassRenderTargetDescriptors[attachment]
+                .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
+            mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
+                format;
+        }
+    }
+
+    void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
+                                                        wgpu::StoreOp storeOp) {
+        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+            D3D12EndingAccessType(storeOp);
+    }
+
+    void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+                                                               wgpu::StoreOp storeOp,
+                                                               TextureView* resolveSource,
+                                                               TextureView* resolveDestination) {
+        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
+        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
+            D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
+
+        mSubresourceParams[attachment] =
+            D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
+
+        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
+            &mSubresourceParams[attachment];
+    }
+
+    void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
+                                           wgpu::StoreOp storeOp,
+                                           float clearDepth,
+                                           DXGI_FORMAT format) {
+        mHasDepthOrStencil = true;
+        mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+        if (loadOp == wgpu::LoadOp::Clear) {
+            mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
+                clearDepth;
+            mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
+        }
+        mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
+    }
+
+    void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
+                                             wgpu::StoreOp storeOp,
+                                             uint8_t clearStencil,
+                                             DXGI_FORMAT format) {
+        mHasDepthOrStencil = true;
+        mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+        if (loadOp == wgpu::LoadOp::Clear) {
+            mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
+                .Stencil = clearStencil;
+            mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
+        }
+        mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
+    }
+
+    void RenderPassBuilder::SetDepthNoAccess() {
+        mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
+            D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+        mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
+            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+    }
+
+    void RenderPassBuilder::SetDepthStencilNoAccess() {
+        SetDepthNoAccess();
+        SetStencilNoAccess();
+    }
+
+    void RenderPassBuilder::SetStencilNoAccess() {
+        mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
+            D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+        mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
+            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/RenderPassBuilderD3D12.h b/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
new file mode 100644
index 0000000..5731c52
--- /dev/null
+++ b/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
@@ -0,0 +1,101 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+#define DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native::d3d12 {
+
+    class TextureView;
+
+    // RenderPassBuilder stores parameters related to render pass load and store operations.
+    // When the D3D12 render pass API is available, the needed descriptors can be fetched
+    // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
+    // descriptors are still fetched and any information necessary to emulate the load and store
+    // operations is extracted from the descriptors.
+    class RenderPassBuilder {
+      public:
+        RenderPassBuilder(bool hasUAV);
+
+        // Returns the highest color attachment index + 1. If there is no color attachment, returns
+        // 0. Range: [0, kMaxColorAttachments + 1)
+        ColorAttachmentIndex GetHighestColorAttachmentIndexPlusOne() const;
+
+        // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
+        // storage if D3D12 render pass API is unavailable.
+        ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+        GetRenderPassRenderTargetDescriptors() const;
+        const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
+
+        D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
+
+        // Returns attachment RTVs to use with OMSetRenderTargets.
+        const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
+
+        bool HasDepthOrStencil() const;
+
+        // Functions that set the appropriate values in the render pass descriptors.
+        void SetDepthAccess(wgpu::LoadOp loadOp,
+                            wgpu::StoreOp storeOp,
+                            float clearDepth,
+                            DXGI_FORMAT format);
+        void SetDepthNoAccess();
+        void SetDepthStencilNoAccess();
+        void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+                                            wgpu::LoadOp loadOp,
+                                            dawn::native::Color clearColor,
+                                            DXGI_FORMAT format);
+        void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
+        void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+                                                wgpu::StoreOp storeOp,
+                                                TextureView* resolveSource,
+                                                TextureView* resolveDestination);
+        void SetStencilAccess(wgpu::LoadOp loadOp,
+                              wgpu::StoreOp storeOp,
+                              uint8_t clearStencil,
+                              DXGI_FORMAT format);
+        void SetStencilNoAccess();
+
+        void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+                                 D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                 bool isNullRTV);
+        void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
+
+      private:
+        ColorAttachmentIndex mHighestColorAttachmentIndexPlusOne{uint8_t(0)};
+        bool mHasDepthOrStencil = false;
+        D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
+        D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
+        ityp::
+            array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
+                mRenderPassRenderTargetDescriptors;
+        ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
+            mRenderTargetViews;
+        ityp::array<ColorAttachmentIndex,
+                    D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
+                    kMaxColorAttachments>
+            mSubresourceParams;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
diff --git a/src/dawn/native/d3d12/RenderPipelineD3D12.cpp b/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
new file mode 100644
index 0000000..7168454
--- /dev/null
+++ b/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
@@ -0,0 +1,505 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include <d3dcompiler.h>
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return DXGI_FORMAT_R8G8_UINT;
+                case wgpu::VertexFormat::Uint8x4:
+                    return DXGI_FORMAT_R8G8B8A8_UINT;
+                case wgpu::VertexFormat::Sint8x2:
+                    return DXGI_FORMAT_R8G8_SINT;
+                case wgpu::VertexFormat::Sint8x4:
+                    return DXGI_FORMAT_R8G8B8A8_SINT;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return DXGI_FORMAT_R8G8_UNORM;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return DXGI_FORMAT_R8G8B8A8_UNORM;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return DXGI_FORMAT_R8G8_SNORM;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return DXGI_FORMAT_R8G8B8A8_SNORM;
+                case wgpu::VertexFormat::Uint16x2:
+                    return DXGI_FORMAT_R16G16_UINT;
+                case wgpu::VertexFormat::Uint16x4:
+                    return DXGI_FORMAT_R16G16B16A16_UINT;
+                case wgpu::VertexFormat::Sint16x2:
+                    return DXGI_FORMAT_R16G16_SINT;
+                case wgpu::VertexFormat::Sint16x4:
+                    return DXGI_FORMAT_R16G16B16A16_SINT;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return DXGI_FORMAT_R16G16_UNORM;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return DXGI_FORMAT_R16G16B16A16_UNORM;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return DXGI_FORMAT_R16G16_SNORM;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return DXGI_FORMAT_R16G16B16A16_SNORM;
+                case wgpu::VertexFormat::Float16x2:
+                    return DXGI_FORMAT_R16G16_FLOAT;
+                case wgpu::VertexFormat::Float16x4:
+                    return DXGI_FORMAT_R16G16B16A16_FLOAT;
+                case wgpu::VertexFormat::Float32:
+                    return DXGI_FORMAT_R32_FLOAT;
+                case wgpu::VertexFormat::Float32x2:
+                    return DXGI_FORMAT_R32G32_FLOAT;
+                case wgpu::VertexFormat::Float32x3:
+                    return DXGI_FORMAT_R32G32B32_FLOAT;
+                case wgpu::VertexFormat::Float32x4:
+                    return DXGI_FORMAT_R32G32B32A32_FLOAT;
+                case wgpu::VertexFormat::Uint32:
+                    return DXGI_FORMAT_R32_UINT;
+                case wgpu::VertexFormat::Uint32x2:
+                    return DXGI_FORMAT_R32G32_UINT;
+                case wgpu::VertexFormat::Uint32x3:
+                    return DXGI_FORMAT_R32G32B32_UINT;
+                case wgpu::VertexFormat::Uint32x4:
+                    return DXGI_FORMAT_R32G32B32A32_UINT;
+                case wgpu::VertexFormat::Sint32:
+                    return DXGI_FORMAT_R32_SINT;
+                case wgpu::VertexFormat::Sint32x2:
+                    return DXGI_FORMAT_R32G32_SINT;
+                case wgpu::VertexFormat::Sint32x3:
+                    return DXGI_FORMAT_R32G32B32_SINT;
+                case wgpu::VertexFormat::Sint32x4:
+                    return DXGI_FORMAT_R32G32B32A32_SINT;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
+            switch (mode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
+                case wgpu::VertexStepMode::Instance:
+                    return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
+            }
+        }
+
+        D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
+                case wgpu::PrimitiveTopology::LineList:
+                    return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
+            }
+        }
+
+        D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
+            wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
+                case wgpu::PrimitiveTopology::LineList:
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
+                case wgpu::PrimitiveTopology::TriangleList:
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+            }
+        }
+
+        D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
+            switch (mode) {
+                case wgpu::CullMode::None:
+                    return D3D12_CULL_MODE_NONE;
+                case wgpu::CullMode::Front:
+                    return D3D12_CULL_MODE_FRONT;
+                case wgpu::CullMode::Back:
+                    return D3D12_CULL_MODE_BACK;
+            }
+        }
+
+        D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return D3D12_BLEND_ZERO;
+                case wgpu::BlendFactor::One:
+                    return D3D12_BLEND_ONE;
+                case wgpu::BlendFactor::Src:
+                    return D3D12_BLEND_SRC_COLOR;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return D3D12_BLEND_INV_SRC_COLOR;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return D3D12_BLEND_SRC_ALPHA;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return D3D12_BLEND_INV_SRC_ALPHA;
+                case wgpu::BlendFactor::Dst:
+                    return D3D12_BLEND_DEST_COLOR;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return D3D12_BLEND_INV_DEST_COLOR;
+                case wgpu::BlendFactor::DstAlpha:
+                    return D3D12_BLEND_DEST_ALPHA;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return D3D12_BLEND_INV_DEST_ALPHA;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return D3D12_BLEND_SRC_ALPHA_SAT;
+                case wgpu::BlendFactor::Constant:
+                    return D3D12_BLEND_BLEND_FACTOR;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return D3D12_BLEND_INV_BLEND_FACTOR;
+            }
+        }
+
+        // When a blend factor is defined for the alpha channel, any of the factors that don't
+        // explicitly state that they apply to alpha should be treated as their explicitly-alpha
+        // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
+        D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
+            switch (factor) {
+                case wgpu::BlendFactor::Src:
+                    return D3D12_BLEND_SRC_ALPHA;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return D3D12_BLEND_INV_SRC_ALPHA;
+                case wgpu::BlendFactor::Dst:
+                    return D3D12_BLEND_DEST_ALPHA;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return D3D12_BLEND_INV_DEST_ALPHA;
+
+                // Other blend factors translate to the same D3D12 enum as the color blend factors.
+                default:
+                    return D3D12Blend(factor);
+            }
+        }
+
+        D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return D3D12_BLEND_OP_ADD;
+                case wgpu::BlendOperation::Subtract:
+                    return D3D12_BLEND_OP_SUBTRACT;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return D3D12_BLEND_OP_REV_SUBTRACT;
+                case wgpu::BlendOperation::Min:
+                    return D3D12_BLEND_OP_MIN;
+                case wgpu::BlendOperation::Max:
+                    return D3D12_BLEND_OP_MAX;
+            }
+        }
+
+        uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
+            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
+                              D3D12_COLOR_WRITE_ENABLE_RED,
+                          "ColorWriteMask values must match");
+            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
+                              D3D12_COLOR_WRITE_ENABLE_GREEN,
+                          "ColorWriteMask values must match");
+            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
+                              D3D12_COLOR_WRITE_ENABLE_BLUE,
+                          "ColorWriteMask values must match");
+            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
+                              D3D12_COLOR_WRITE_ENABLE_ALPHA,
+                          "ColorWriteMask values must match");
+            return static_cast<uint8_t>(writeMask);
+        }
+
+        D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
+            D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
+            blendDesc.BlendEnable = state->blend != nullptr;
+            if (blendDesc.BlendEnable) {
+                blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
+                blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
+                blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
+                blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
+                blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
+                blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
+            }
+            blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
+            blendDesc.LogicOpEnable = false;
+            blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
+            return blendDesc;
+        }
+
+        D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
+            switch (op) {
+                case wgpu::StencilOperation::Keep:
+                    return D3D12_STENCIL_OP_KEEP;
+                case wgpu::StencilOperation::Zero:
+                    return D3D12_STENCIL_OP_ZERO;
+                case wgpu::StencilOperation::Replace:
+                    return D3D12_STENCIL_OP_REPLACE;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return D3D12_STENCIL_OP_INCR_SAT;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return D3D12_STENCIL_OP_DECR_SAT;
+                case wgpu::StencilOperation::Invert:
+                    return D3D12_STENCIL_OP_INVERT;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return D3D12_STENCIL_OP_INCR;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return D3D12_STENCIL_OP_DECR;
+            }
+        }
+
+        D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
+            D3D12_DEPTH_STENCILOP_DESC desc;
+
+            desc.StencilFailOp = StencilOp(descriptor.failOp);
+            desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
+            desc.StencilPassOp = StencilOp(descriptor.passOp);
+            desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
+
+            return desc;
+        }
+
+        D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+            D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
+            mDepthStencilDescriptor.DepthEnable =
+                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+                 !descriptor->depthWriteEnabled)
+                    ? FALSE
+                    : TRUE;
+            mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
+                                                         ? D3D12_DEPTH_WRITE_MASK_ALL
+                                                         : D3D12_DEPTH_WRITE_MASK_ZERO;
+            mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
+
+            mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
+            mDepthStencilDescriptor.StencilReadMask =
+                static_cast<UINT8>(descriptor->stencilReadMask);
+            mDepthStencilDescriptor.StencilWriteMask =
+                static_cast<UINT8>(descriptor->stencilWriteMask);
+
+            mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
+            mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
+            return mDepthStencilDescriptor;
+        }
+
+        D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
+            wgpu::PrimitiveTopology primitiveTopology,
+            wgpu::IndexFormat indexFormat) {
+            if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
+                primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
+                return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+            }
+
+            switch (indexFormat) {
+                case wgpu::IndexFormat::Uint16:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
+                case wgpu::IndexFormat::Uint32:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
+                case wgpu::IndexFormat::Undefined:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+            }
+        }
+
+    }  // anonymous namespace
+
+    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    MaybeError RenderPipeline::Initialize() {
+        Device* device = ToBackend(GetDevice());
+        uint32_t compileFlags = 0;
+
+        if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+            !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+            compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+        }
+
+        if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+            compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+        }
+
+        // SPRIV-cross does matrix multiplication expecting row major matrices
+        compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+
+        // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
+        // strictness is not enabled. See crbug.com/tint/976.
+        compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
+
+        D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
+
+        PerStage<ProgrammableStage> pipelineStages = GetAllStages();
+
+        PerStage<D3D12_SHADER_BYTECODE*> shaders;
+        shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
+        shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
+
+        PerStage<CompiledShader> compiledShader;
+
+        for (auto stage : IterateStages(GetStageMask())) {
+            DAWN_TRY_ASSIGN(
+                compiledShader[stage],
+                ToBackend(pipelineStages[stage].module)
+                    ->Compile(pipelineStages[stage], stage, ToBackend(GetLayout()), compileFlags));
+            *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
+        }
+
+        mFirstOffsetInfo = compiledShader[SingleShaderStage::Vertex].firstOffsetInfo;
+
+        PipelineLayout* layout = ToBackend(GetLayout());
+
+        descriptorD3D12.pRootSignature = layout->GetRootSignature();
+
+        // D3D12 logs warnings if any empty input state is used
+        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
+        if (GetAttributeLocationsUsed().any()) {
+            descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
+        }
+
+        descriptorD3D12.IBStripCutValue =
+            ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
+
+        descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
+        descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
+        descriptorD3D12.RasterizerState.FrontCounterClockwise =
+            (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
+        descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
+        descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
+        descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
+        descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
+        descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
+        descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
+        descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
+        descriptorD3D12.RasterizerState.ConservativeRaster =
+            D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
+
+        if (HasDepthStencilAttachment()) {
+            descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
+        }
+
+        static_assert(kMaxColorAttachments == 8);
+        for (uint8_t i = 0; i < kMaxColorAttachments; i++) {
+            descriptorD3D12.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
+            descriptorD3D12.BlendState.RenderTarget[i].BlendEnable = false;
+            descriptorD3D12.BlendState.RenderTarget[i].RenderTargetWriteMask = 0;
+            descriptorD3D12.BlendState.RenderTarget[i].LogicOpEnable = false;
+            descriptorD3D12.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP;
+        }
+        ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+            GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+            descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
+                D3D12TextureFormat(GetColorAttachmentFormat(i));
+            descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
+                ComputeColorDesc(GetColorTargetState(i));
+        }
+        ASSERT(highestColorAttachmentIndexPlusOne <= kMaxColorAttachmentsTyped);
+        descriptorD3D12.NumRenderTargets = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+
+        descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
+        descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
+
+        descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
+
+        descriptorD3D12.SampleMask = GetSampleMask();
+        descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
+        descriptorD3D12.SampleDesc.Count = GetSampleCount();
+        descriptorD3D12.SampleDesc.Quality = 0;
+
+        mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
+
+        DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
+                                  &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
+                              "D3D12 create graphics pipeline state"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    RenderPipeline::~RenderPipeline() = default;
+
+    void RenderPipeline::DestroyImpl() {
+        RenderPipelineBase::DestroyImpl();
+        ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+    }
+
+    D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
+        return mD3d12PrimitiveTopology;
+    }
+
+    ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
+        return mPipelineState.Get();
+    }
+
+    const FirstOffsetInfo& RenderPipeline::GetFirstOffsetInfo() const {
+        return mFirstOffsetInfo;
+    }
+
+    void RenderPipeline::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+    }
+
+    D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
+        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
+        unsigned int count = 0;
+        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+            D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
+
+            const VertexAttributeInfo& attribute = GetAttribute(loc);
+
+            // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
+            // SemanticIndex N
+            inputElementDescriptor.SemanticName = "TEXCOORD";
+            inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
+            inputElementDescriptor.Format = VertexFormatType(attribute.format);
+            inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
+
+            const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
+
+            inputElementDescriptor.AlignedByteOffset = attribute.offset;
+            inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
+            if (inputElementDescriptor.InputSlotClass ==
+                D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
+                inputElementDescriptor.InstanceDataStepRate = 0;
+            } else {
+                inputElementDescriptor.InstanceDataStepRate = 1;
+            }
+        }
+
+        D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
+        inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
+        inputLayoutDescriptor.NumElements = count;
+        return inputLayoutDescriptor;
+    }
+
+    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata) {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                            userdata);
+        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/RenderPipelineD3D12.h b/src/dawn/native/d3d12/RenderPipelineD3D12.h
new file mode 100644
index 0000000..13d4a1a
--- /dev/null
+++ b/src/dawn/native/d3d12/RenderPipelineD3D12.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
+#define DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+        RenderPipeline() = delete;
+
+        MaybeError Initialize() override;
+
+        D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
+        ID3D12PipelineState* GetPipelineState() const;
+
+        const FirstOffsetInfo& GetFirstOffsetInfo() const;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~RenderPipeline() override;
+
+        void DestroyImpl() override;
+
+        using RenderPipelineBase::RenderPipelineBase;
+        D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
+            std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
+
+        D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
+        ComPtr<ID3D12PipelineState> mPipelineState;
+        FirstOffsetInfo mFirstOffsetInfo;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
diff --git a/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp b/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
new file mode 100644
index 0000000..b7aab2c
--- /dev/null
+++ b/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
@@ -0,0 +1,371 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    ResidencyManager::ResidencyManager(Device* device)
+        : mDevice(device),
+          mResidencyManagementEnabled(
+              device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
+        UpdateVideoMemoryInfo();
+    }
+
+    // Increments number of locks on a heap to ensure the heap remains resident.
+    MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
+        if (!mResidencyManagementEnabled) {
+            return {};
+        }
+
+        // If the heap isn't already resident, make it resident.
+        if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
+            ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
+            uint64_t size = pageable->GetSize();
+
+            DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
+                                             size, 1, &d3d12Pageable));
+        }
+
+        // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
+        if (pageable->IsInResidencyLRUCache()) {
+            pageable->RemoveFromList();
+        }
+
+        pageable->IncrementResidencyLock();
+
+        return {};
+    }
+
+    // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
+    // inserted into the LRU cache and becomes eligible for eviction.
+    void ResidencyManager::UnlockAllocation(Pageable* pageable) {
+        if (!mResidencyManagementEnabled) {
+            return;
+        }
+
+        ASSERT(pageable->IsResidencyLocked());
+        ASSERT(!pageable->IsInResidencyLRUCache());
+        pageable->DecrementResidencyLock();
+
+        // If another lock still exists on the heap, nothing further should be done.
+        if (pageable->IsResidencyLocked()) {
+            return;
+        }
+
+        // When all locks have been removed, the resource remains resident and becomes tracked in
+        // the corresponding LRU.
+        TrackResidentAllocation(pageable);
+    }
+
+    // Returns the appropriate MemorySegmentInfo for a given MemorySegment.
+    ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
+        MemorySegment memorySegment) {
+        switch (memorySegment) {
+            case MemorySegment::Local:
+                return &mVideoMemoryInfo.local;
+            case MemorySegment::NonLocal:
+                ASSERT(!mDevice->GetDeviceInfo().isUMA);
+                return &mVideoMemoryInfo.nonLocal;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
+    // competition for device memory. Returns the amount of memory reserved, which may be less
+    // that the requested reservation when under pressure.
+    uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
+                                                            uint64_t requestedReservationSize) {
+        MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
+
+        segmentInfo->externalRequest = requestedReservationSize;
+
+        UpdateMemorySegmentInfo(segmentInfo);
+
+        return segmentInfo->externalReservation;
+    }
+
+    void ResidencyManager::UpdateVideoMemoryInfo() {
+        UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
+        if (!mDevice->GetDeviceInfo().isUMA) {
+            UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
+        }
+    }
+
+    void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
+        DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
+
+        ToBackend(mDevice->GetAdapter())
+            ->GetHardwareAdapter()
+            ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
+
+        // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
+        // system, and may be lower than expected in certain scenarios. Under memory pressure, we
+        // cap the external reservation to half the available budget, which prevents the external
+        // component from consuming a disproportionate share of memory and ensures that Dawn can
+        // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
+        // and subject to future experimentation.
+        segmentInfo->externalReservation =
+            std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
+
+        segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
+
+        // If we're restricting the budget for testing, leave the budget as is.
+        if (mRestrictBudgetForTesting) {
+            return;
+        }
+
+        // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
+        // decreases fluctuations in the operating-system-defined budget, which improves stability
+        // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
+        // chosen and subject to future experimentation.
+        static constexpr float kBudgetCap = 0.95;
+        segmentInfo->budget =
+            (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
+    }
+
+    // Removes a heap from the LRU and returns the least recently used heap when possible. Returns
+    // nullptr when nothing further can be evicted.
+    ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
+        MemorySegmentInfo* memorySegment) {
+        // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
+        // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
+        // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
+        // the process budget and starving Dawn, which will cause thrash.
+        if (memorySegment->lruCache.empty()) {
+            return nullptr;
+        }
+
+        Pageable* pageable = memorySegment->lruCache.head()->value();
+
+        ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
+
+        // If the next candidate for eviction was inserted into the LRU during the current serial,
+        // it is because more memory is being used in a single command list than is available.
+        // In this scenario, we cannot make any more resources resident and thrashing must occur.
+        if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
+            return nullptr;
+        }
+
+        // We must ensure that any previous use of a resource has completed before the resource can
+        // be evicted.
+        if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
+            DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
+        }
+
+        pageable->RemoveFromList();
+        return pageable;
+    }
+
+    MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
+                                                   MemorySegment memorySegment) {
+        if (!mResidencyManagementEnabled) {
+            return {};
+        }
+
+        uint64_t bytesEvicted;
+        DAWN_TRY_ASSIGN(bytesEvicted,
+                        EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
+        DAWN_UNUSED(bytesEvicted);
+
+        return {};
+    }
+
+    // Any time we need to make something resident, we must check that we have enough free memory to
+    // make the new object resident while also staying within budget. If there isn't enough
+    // memory, we should evict until there is. Returns the number of bytes evicted.
+    ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
+        uint64_t sizeToMakeResident,
+        MemorySegmentInfo* memorySegment) {
+        ASSERT(mResidencyManagementEnabled);
+
+        UpdateMemorySegmentInfo(memorySegment);
+
+        uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
+
+        // Return when we can call MakeResident and remain under budget.
+        if (memoryUsageAfterMakeResident < memorySegment->budget) {
+            return 0;
+        }
+
+        std::vector<ID3D12Pageable*> resourcesToEvict;
+        uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
+        uint64_t sizeEvicted = 0;
+        while (sizeEvicted < sizeNeededToBeUnderBudget) {
+            Pageable* pageable;
+            DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
+
+            // If no heap was returned, then nothing more can be evicted.
+            if (pageable == nullptr) {
+                break;
+            }
+
+            sizeEvicted += pageable->GetSize();
+            resourcesToEvict.push_back(pageable->GetD3D12Pageable());
+        }
+
+        if (resourcesToEvict.size() > 0) {
+            DAWN_TRY(CheckHRESULT(
+                mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
+                "Evicting resident heaps to free memory"));
+        }
+
+        return sizeEvicted;
+    }
+
+    // Given a list of heaps that are pending usage, this function will estimate memory needed,
+    // evict resources until enough space is available, then make resident any heaps scheduled for
+    // usage.
+    MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
+        if (!mResidencyManagementEnabled) {
+            return {};
+        }
+
+        std::vector<ID3D12Pageable*> localHeapsToMakeResident;
+        std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
+        uint64_t localSizeToMakeResident = 0;
+        uint64_t nonLocalSizeToMakeResident = 0;
+
+        ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
+        for (size_t i = 0; i < heapCount; i++) {
+            Heap* heap = heaps[i];
+
+            // Heaps that are locked resident are not tracked in the LRU cache.
+            if (heap->IsResidencyLocked()) {
+                continue;
+            }
+
+            if (heap->IsInResidencyLRUCache()) {
+                // If the heap is already in the LRU, we must remove it and append again below to
+                // update its position in the LRU.
+                heap->RemoveFromList();
+            } else {
+                if (heap->GetMemorySegment() == MemorySegment::Local) {
+                    localSizeToMakeResident += heap->GetSize();
+                    localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+                } else {
+                    nonLocalSizeToMakeResident += heap->GetSize();
+                    nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+                }
+            }
+
+            // If we submit a command list to the GPU, we must ensure that heaps referenced by that
+            // command list stay resident at least until that command list has finished execution.
+            // Setting this serial unnecessarily can leave the LRU in a state where nothing is
+            // eligible for eviction, even though some evictions may be possible.
+            heap->SetLastSubmission(pendingCommandSerial);
+
+            // Insert the heap into the appropriate LRU.
+            TrackResidentAllocation(heap);
+        }
+
+        if (localSizeToMakeResident > 0) {
+            return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
+                                           localHeapsToMakeResident.size(),
+                                           localHeapsToMakeResident.data());
+        }
+
+        if (nonLocalSizeToMakeResident > 0) {
+            ASSERT(!mDevice->GetDeviceInfo().isUMA);
+            return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
+                                           nonLocalHeapsToMakeResident.size(),
+                                           nonLocalHeapsToMakeResident.data());
+        }
+
+        return {};
+    }
+
+    MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
+                                                         uint64_t sizeToMakeResident,
+                                                         uint64_t numberOfObjectsToMakeResident,
+                                                         ID3D12Pageable** allocations) {
+        uint64_t bytesEvicted;
+        DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
+        DAWN_UNUSED(bytesEvicted);
+
+        // Note that MakeResident is a synchronous function and can add a significant
+        // overhead to command recording. In the future, it may be possible to decrease this
+        // overhead by using MakeResident on a secondary thread, or by instead making use of
+        // the EnqueueMakeResident function (which is not available on all Windows 10
+        // platforms).
+        HRESULT hr =
+            mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+
+        // A MakeResident call can fail if there's not enough available memory. This
+        // could occur when there's significant fragmentation or if the allocation size
+        // estimates are incorrect. We may be able to continue execution by evicting some
+        // more memory and calling MakeResident again.
+        while (FAILED(hr)) {
+            constexpr uint32_t kAdditonalSizeToEvict = 50000000;  // 50MB
+
+            uint64_t sizeEvicted = 0;
+
+            DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
+
+            // If nothing can be evicted after MakeResident has failed, we cannot continue
+            // execution and must throw a fatal error.
+            if (sizeEvicted == 0) {
+                return DAWN_OUT_OF_MEMORY_ERROR(
+                    "MakeResident has failed due to excessive video memory usage.");
+            }
+
+            hr =
+                mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+        }
+
+        return {};
+    }
+
+    // Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
+    // become resident within the current serial. Failing to call this function when an allocation
+    // is implicitly made resident will cause the residency manager to view the allocation as
+    // non-resident and call MakeResident - which will make D3D12's internal residency refcount on
+    // the allocation out of sync with Dawn.
+    void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
+        if (!mResidencyManagementEnabled) {
+            return;
+        }
+
+        ASSERT(pageable->IsInList() == false);
+        GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+    }
+
+    // Places an artifical cap on Dawn's budget so we can test in a predictable manner. If used,
+    // this function must be called before any resources have been created.
+    void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
+        ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
+        ASSERT(!mRestrictBudgetForTesting);
+
+        mRestrictBudgetForTesting = true;
+        UpdateVideoMemoryInfo();
+
+        // Dawn has a non-zero memory usage even before any resources have been created, and this
+        // value can vary depending on the environment Dawn is running in. By adding this in
+        // addition to the artificial budget cap, we can create a predictable and reproducible
+        // budget for testing.
+        mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
+        if (!mDevice->GetDeviceInfo().isUMA) {
+            mVideoMemoryInfo.nonLocal.budget =
+                mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
+        }
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResidencyManagerD3D12.h b/src/dawn/native/d3d12/ResidencyManagerD3D12.h
new file mode 100644
index 0000000..26d9cf0
--- /dev/null
+++ b/src/dawn/native/d3d12/ResidencyManagerD3D12.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+    class Heap;
+    class Pageable;
+
+    class ResidencyManager {
+      public:
+        ResidencyManager(Device* device);
+
+        MaybeError LockAllocation(Pageable* pageable);
+        void UnlockAllocation(Pageable* pageable);
+
+        MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
+        MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
+
+        uint64_t SetExternalMemoryReservation(MemorySegment segment,
+                                              uint64_t requestedReservationSize);
+
+        void TrackResidentAllocation(Pageable* pageable);
+
+        void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
+
+      private:
+        struct MemorySegmentInfo {
+            const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
+            LinkedList<Pageable> lruCache = {};
+            uint64_t budget = 0;
+            uint64_t usage = 0;
+            uint64_t externalReservation = 0;
+            uint64_t externalRequest = 0;
+        };
+
+        struct VideoMemoryInfo {
+            MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
+            MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
+        };
+
+        MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
+        ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
+                                                      MemorySegmentInfo* memorySegment);
+        ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+        MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
+                                           uint64_t sizeToMakeResident,
+                                           uint64_t numberOfObjectsToMakeResident,
+                                           ID3D12Pageable** allocations);
+        void UpdateVideoMemoryInfo();
+        void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
+
+        Device* mDevice;
+        bool mResidencyManagementEnabled = false;
+        bool mRestrictBudgetForTesting = false;
+        VideoMemoryInfo mVideoMemoryInfo = {};
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
diff --git a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
new file mode 100644
index 0000000..5ed9c1d
--- /dev/null
+++ b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -0,0 +1,417 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+    namespace {
+        MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
+            if (device->GetDeviceInfo().isUMA) {
+                return MemorySegment::Local;
+            }
+
+            D3D12_HEAP_PROPERTIES heapProperties =
+                device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
+
+            if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
+                return MemorySegment::Local;
+            }
+
+            return MemorySegment::NonLocal;
+        }
+
+        D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
+            switch (resourceHeapKind) {
+                case Readback_OnlyBuffers:
+                case Readback_AllBuffersAndTextures:
+                    return D3D12_HEAP_TYPE_READBACK;
+                case Default_AllBuffersAndTextures:
+                case Default_OnlyBuffers:
+                case Default_OnlyNonRenderableOrDepthTextures:
+                case Default_OnlyRenderableOrDepthTextures:
+                    return D3D12_HEAP_TYPE_DEFAULT;
+                case Upload_OnlyBuffers:
+                case Upload_AllBuffersAndTextures:
+                    return D3D12_HEAP_TYPE_UPLOAD;
+                case EnumCount:
+                    UNREACHABLE();
+            }
+        }
+
+        D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
+            switch (resourceHeapKind) {
+                case Default_AllBuffersAndTextures:
+                case Readback_AllBuffersAndTextures:
+                case Upload_AllBuffersAndTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+                case Default_OnlyBuffers:
+                case Readback_OnlyBuffers:
+                case Upload_OnlyBuffers:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+                case Default_OnlyNonRenderableOrDepthTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+                case Default_OnlyRenderableOrDepthTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+                case EnumCount:
+                    UNREACHABLE();
+            }
+        }
+
+        ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
+                                             D3D12_HEAP_TYPE heapType,
+                                             D3D12_RESOURCE_FLAGS flags,
+                                             uint32_t resourceHeapTier) {
+            if (resourceHeapTier >= 2) {
+                switch (heapType) {
+                    case D3D12_HEAP_TYPE_UPLOAD:
+                        return Upload_AllBuffersAndTextures;
+                    case D3D12_HEAP_TYPE_DEFAULT:
+                        return Default_AllBuffersAndTextures;
+                    case D3D12_HEAP_TYPE_READBACK:
+                        return Readback_AllBuffersAndTextures;
+                    default:
+                        UNREACHABLE();
+                }
+            }
+
+            switch (dimension) {
+                case D3D12_RESOURCE_DIMENSION_BUFFER: {
+                    switch (heapType) {
+                        case D3D12_HEAP_TYPE_UPLOAD:
+                            return Upload_OnlyBuffers;
+                        case D3D12_HEAP_TYPE_DEFAULT:
+                            return Default_OnlyBuffers;
+                        case D3D12_HEAP_TYPE_READBACK:
+                            return Readback_OnlyBuffers;
+                        default:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+                case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+                case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+                case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
+                    switch (heapType) {
+                        case D3D12_HEAP_TYPE_DEFAULT: {
+                            if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+                                (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
+                                return Default_OnlyRenderableOrDepthTextures;
+                            }
+                            return Default_OnlyNonRenderableOrDepthTextures;
+                        }
+
+                        default:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
+                                               uint32_t sampleCount,
+                                               uint64_t requestedAlignment) {
+            switch (resourceHeapKind) {
+                // Small resources can take advantage of smaller alignments. For example,
+                // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
+                // Must be non-depth or without render-target to use small resource alignment.
+                // This also applies to MSAA textures (4MB => 64KB).
+                //
+                // Note: Only known to be used for small textures; however, MSDN suggests
+                // it could be extended for more cases. If so, this could default to always
+                // attempt small resource placement.
+                // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+                case Default_OnlyNonRenderableOrDepthTextures:
+                    return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
+                                             : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
+                default:
+                    return requestedAlignment;
+            }
+        }
+
+        bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
+            // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
+            // textures, or typeless resources
+            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
+            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+            return !IsTypeless(resourceDescriptor.Format) &&
+                   resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
+                   (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
+                                                D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
+        }
+
+    }  // namespace
+
+    ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+        mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
+                                ? mDevice->GetDeviceInfo().resourceHeapTier
+                                : 1;
+
+        for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
+            const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
+            mHeapAllocators[i] = std::make_unique<HeapAllocator>(
+                mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
+                GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
+            mPooledHeapAllocators[i] =
+                std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
+            mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
+                kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+        }
+    }
+
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        D3D12_RESOURCE_STATES initialUsage) {
+        // In order to suppress a warning in the D3D12 debug layer, we need to specify an
+        // optimized clear value. As there are no negative consequences when picking a mismatched
+        // clear value, we use zero as the optimized clear value. This also enables fast clears on
+        // some architectures.
+        D3D12_CLEAR_VALUE zero{};
+        D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
+        if (IsClearValueOptimizable(resourceDescriptor)) {
+            zero.Format = resourceDescriptor.Format;
+            optimizedClearValue = &zero;
+        }
+
+        // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
+        // For very large resources, there is no benefit to suballocate.
+        // For very small resources, it is inefficent to suballocate given the min. heap
+        // size could be much larger then the resource allocation.
+        // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
+        if (!mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+            ResourceHeapAllocation subAllocation;
+            DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
+                                                                optimizedClearValue, initialUsage));
+            if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+                return std::move(subAllocation);
+            }
+        }
+
+        // If sub-allocation fails, fall-back to direct allocation (committed resource).
+        ResourceHeapAllocation directAllocation;
+        DAWN_TRY_ASSIGN(directAllocation,
+                        CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
+                                                initialUsage));
+        if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+            return std::move(directAllocation);
+        }
+
+        // If direct allocation fails, the system is probably out of memory.
+        return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
+    }
+
+    void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
+        for (ResourceHeapAllocation& allocation :
+             mAllocationsToDelete.IterateUpTo(completedSerial)) {
+            if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
+                FreeMemory(allocation);
+            }
+        }
+        mAllocationsToDelete.ClearUpTo(completedSerial);
+        mHeapsToDelete.ClearUpTo(completedSerial);
+    }
+
+    void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
+        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return;
+        }
+
+        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+
+        // Directly allocated ResourceHeapAllocations are created with a heap object that must be
+        // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
+        // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
+        // to delete. It cannot be deleted immediately because it may be in use by in-flight or
+        // pending commands.
+        if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
+            mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
+                                   mDevice->GetPendingCommandSerial());
+        }
+
+        // Invalidate the allocation immediately in case one accidentally
+        // calls DeallocateMemory again using the same allocation.
+        allocation.Invalidate();
+
+        ASSERT(allocation.GetD3D12Resource() == nullptr);
+    }
+
+    void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
+        ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+
+        D3D12_HEAP_PROPERTIES heapProp;
+        allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+
+        const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
+
+        const size_t resourceHeapKindIndex =
+            GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
+                                resourceDescriptor.Flags, mResourceHeapTier);
+
+        mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
+    }
+
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage) {
+        const ResourceHeapKind resourceHeapKind =
+            GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
+                                requestedResourceDescriptor.Flags, mResourceHeapTier);
+
+        D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
+        resourceDescriptor.Alignment = GetResourcePlacementAlignment(
+            resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
+            requestedResourceDescriptor.Alignment);
+
+        // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
+        // twice.
+        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+            mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+
+        // If the requested resource alignment was rejected, let D3D tell us what the
+        // required alignment is for this resource.
+        if (resourceDescriptor.Alignment != 0 &&
+            resourceDescriptor.Alignment != resourceInfo.Alignment) {
+            resourceDescriptor.Alignment = 0;
+            resourceInfo =
+                mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+        }
+
+        // If d3d tells us the resource size is invalid, treat the error as OOM.
+        // Otherwise, creating the resource could cause a device loss (too large).
+        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+        // incorrectly allocate a mismatched size.
+        if (resourceInfo.SizeInBytes == 0 ||
+            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+            return DAWN_OUT_OF_MEMORY_ERROR(absl::StrFormat(
+                "Resource allocation size (%u) was invalid.", resourceInfo.SizeInBytes));
+        }
+
+        BuddyMemoryAllocator* allocator =
+            mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
+
+        ResourceMemoryAllocation allocation;
+        DAWN_TRY_ASSIGN(allocation,
+                        allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
+        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return ResourceHeapAllocation{};  // invalid
+        }
+
+        Heap* heap = ToBackend(allocation.GetResourceHeap());
+
+        // Before calling CreatePlacedResource, we must ensure the target heap is resident.
+        // CreatePlacedResource will fail if it is not.
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
+
+        // With placed resources, a single heap can be reused.
+        // The resource placed at an offset is only reclaimed
+        // upon Tick or after the last command list using the resource has completed
+        // on the GPU. This means the same physical memory is not reused
+        // within the same command-list and does not require additional synchronization (aliasing
+        // barrier).
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+        ComPtr<ID3D12Resource> placedResource;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            mDevice->GetD3D12Device()->CreatePlacedResource(
+                heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
+                optimizedClearValue, IID_PPV_ARGS(&placedResource)),
+            "ID3D12Device::CreatePlacedResource"));
+
+        // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
+        // will insert it into the residency LRU.
+        mDevice->GetResidencyManager()->UnlockAllocation(heap);
+
+        return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
+                                      std::move(placedResource), heap};
+    }
+
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage) {
+        D3D12_HEAP_PROPERTIES heapProperties;
+        heapProperties.Type = heapType;
+        heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+        heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+        heapProperties.CreationNodeMask = 0;
+        heapProperties.VisibleNodeMask = 0;
+
+        // If d3d tells us the resource size is invalid, treat the error as OOM.
+        // Otherwise, creating the resource could cause a device loss (too large).
+        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+        // incorrectly allocate a mismatched size.
+        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+            mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+        if (resourceInfo.SizeInBytes == 0 ||
+            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+        }
+
+        if (resourceInfo.SizeInBytes > kMaxHeapSize) {
+            return ResourceHeapAllocation{};  // Invalid
+        }
+
+        // CreateCommittedResource will implicitly make the created resource resident. We must
+        // ensure enough free memory exists before allocating to avoid an out-of-memory error when
+        // overcommitted.
+        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
+            resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
+
+        // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
+        // provided to CreateCommittedResource.
+        ComPtr<ID3D12Resource> committedResource;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            mDevice->GetD3D12Device()->CreateCommittedResource(
+                &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
+                optimizedClearValue, IID_PPV_ARGS(&committedResource)),
+            "ID3D12Device::CreateCommittedResource"));
+
+        // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
+        // resource allocation. Because Dawn's memory residency management occurs at the resource
+        // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
+        // object. This object is created manually, and must be deleted manually upon deallocation
+        // of the committed resource.
+        Heap* heap = new Heap(committedResource, GetMemorySegment(mDevice, heapType),
+                              resourceInfo.SizeInBytes);
+
+        // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
+        // track this to avoid calling MakeResident a second time.
+        mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
+
+        AllocationInfo info;
+        info.mMethod = AllocationMethod::kDirect;
+
+        return ResourceHeapAllocation{info,
+                                      /*offset*/ 0, std::move(committedResource), heap};
+    }
+
+    void ResourceAllocatorManager::DestroyPool() {
+        for (auto& alloc : mPooledHeapAllocators) {
+            alloc->DestroyPool();
+        }
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
new file mode 100644
index 0000000..331c982
--- /dev/null
+++ b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/BuddyMemoryAllocator.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+
+#include <array>
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    // Resource heap types + flags combinations are named after the D3D constants.
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
+    enum ResourceHeapKind {
+
+        // Resource heap tier 2
+        // Allows resource heaps to contain all buffer and textures types.
+        // This enables better heap re-use by avoiding the need for separate heaps and
+        // also reduces fragmentation.
+        Readback_AllBuffersAndTextures,
+        Upload_AllBuffersAndTextures,
+        Default_AllBuffersAndTextures,
+
+        // Resource heap tier 1
+        // Resource heaps only support types from a single resource category.
+        Readback_OnlyBuffers,
+        Upload_OnlyBuffers,
+        Default_OnlyBuffers,
+
+        Default_OnlyNonRenderableOrDepthTextures,
+        Default_OnlyRenderableOrDepthTextures,
+
+        EnumCount,
+        InvalidEnum = EnumCount,
+    };
+
+    // Manages a list of resource allocators used by the device to create resources using
+    // multiple allocation methods.
+    class ResourceAllocatorManager {
+      public:
+        ResourceAllocatorManager(Device* device);
+
+        ResultOrError<ResourceHeapAllocation> AllocateMemory(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& resourceDescriptor,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+        void Tick(ExecutionSerial lastCompletedSerial);
+
+        void DestroyPool();
+
+      private:
+        void FreeMemory(ResourceHeapAllocation& allocation);
+
+        ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+            const D3D12_CLEAR_VALUE* optimizedClearValue,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& resourceDescriptor,
+            const D3D12_CLEAR_VALUE* optimizedClearValue,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        Device* mDevice;
+        uint32_t mResourceHeapTier;
+
+        static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll;  // 32GB
+        static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll;            // 4MB
+
+        std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
+            mSubAllocatedResourceAllocators;
+        std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
+
+        std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
+            mPooledHeapAllocators;
+
+        SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
+        SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
diff --git a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
new file mode 100644
index 0000000..910e4fb
--- /dev/null
+++ b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -0,0 +1,43 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+#include <utility>
+
+namespace dawn::native::d3d12 {
+    ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
+                                                   uint64_t offset,
+                                                   ComPtr<ID3D12Resource> resource,
+                                                   Heap* heap)
+        : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
+        ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
+    }
+
+    void ResourceHeapAllocation::Invalidate() {
+        ResourceMemoryAllocation::Invalidate();
+        mResource.Reset();
+    }
+
+    ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
+        return mResource.Get();
+    }
+
+    D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
+        return mResource->GetGPUVirtualAddress();
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
new file mode 100644
index 0000000..c9de601
--- /dev/null
+++ b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Heap;
+
+    class ResourceHeapAllocation : public ResourceMemoryAllocation {
+      public:
+        ResourceHeapAllocation() = default;
+        ResourceHeapAllocation(const AllocationInfo& info,
+                               uint64_t offset,
+                               ComPtr<ID3D12Resource> resource,
+                               Heap* heap);
+        ~ResourceHeapAllocation() override = default;
+        ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
+        ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
+
+        void Invalidate() override;
+
+        ID3D12Resource* GetD3D12Resource() const;
+        D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
+
+      private:
+        ComPtr<ID3D12Resource> mResource;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
diff --git a/src/dawn/native/d3d12/SamplerD3D12.cpp b/src/dawn/native/d3d12/SamplerD3D12.cpp
new file mode 100644
index 0000000..c656931
--- /dev/null
+++ b/src/dawn/native/d3d12/SamplerD3D12.cpp
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SamplerD3D12.h"
+
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
+                case wgpu::AddressMode::ClampToEdge:
+                    return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+            }
+        }
+    }  // namespace
+
+    // static
+    Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+        return AcquireRef(new Sampler(device, descriptor));
+    }
+
+    Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+        : SamplerBase(device, descriptor) {
+        D3D12_FILTER_TYPE minFilter;
+        switch (descriptor->minFilter) {
+            case wgpu::FilterMode::Nearest:
+                minFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                minFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        D3D12_FILTER_TYPE magFilter;
+        switch (descriptor->magFilter) {
+            case wgpu::FilterMode::Nearest:
+                magFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                magFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        D3D12_FILTER_TYPE mipmapFilter;
+        switch (descriptor->mipmapFilter) {
+            case wgpu::FilterMode::Nearest:
+                mipmapFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        D3D12_FILTER_REDUCTION_TYPE reduction =
+            descriptor->compare == wgpu::CompareFunction::Undefined
+                ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
+                : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
+
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
+        mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+        if (mSamplerDesc.MaxAnisotropy > 1) {
+            mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
+        } else {
+            mSamplerDesc.Filter =
+                D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
+        }
+
+        mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
+        mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
+        mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
+        mSamplerDesc.MipLODBias = 0.f;
+
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
+        } else {
+            // Still set the function so it's not garbage.
+            mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
+        }
+        mSamplerDesc.MinLOD = descriptor->lodMinClamp;
+        mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+    }
+
+    const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
+        return mSamplerDesc;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SamplerD3D12.h b/src/dawn/native/d3d12/SamplerD3D12.h
new file mode 100644
index 0000000..e296afb
--- /dev/null
+++ b/src/dawn/native/d3d12/SamplerD3D12.h
@@ -0,0 +1,40 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SAMPLERD3D12_H_
+#define DAWNNATIVE_D3D12_SAMPLERD3D12_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class Sampler final : public SamplerBase {
+      public:
+        static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
+
+        const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
+
+      private:
+        Sampler(Device* device, const SamplerDescriptor* descriptor);
+        ~Sampler() override = default;
+        D3D12_SAMPLER_DESC mSamplerDesc = {};
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SAMPLERD3D12_H_
diff --git a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
new file mode 100644
index 0000000..4659b36
--- /dev/null
+++ b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -0,0 +1,166 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/SamplerD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
+        : mSamplers(std::move(samplers)) {
+    }
+
+    SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
+                                                 StagingDescriptorAllocator* allocator,
+                                                 std::vector<Sampler*> samplers,
+                                                 CPUDescriptorHeapAllocation allocation)
+        : mCPUAllocation(std::move(allocation)),
+          mSamplers(std::move(samplers)),
+          mAllocator(allocator),
+          mCache(cache) {
+        ASSERT(mCache != nullptr);
+        ASSERT(mCPUAllocation.IsValid());
+        ASSERT(!mSamplers.empty());
+    }
+
+    std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
+        return std::move(mSamplers);
+    }
+
+    SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
+        // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
+        if (mCPUAllocation.IsValid()) {
+            mCache->RemoveCacheEntry(this);
+            mAllocator->Deallocate(&mCPUAllocation);
+        }
+
+        ASSERT(!mCPUAllocation.IsValid());
+    }
+
+    bool SamplerHeapCacheEntry::Populate(Device* device,
+                                         ShaderVisibleDescriptorAllocator* allocator) {
+        if (allocator->IsAllocationStillValid(mGPUAllocation)) {
+            return true;
+        }
+
+        ASSERT(!mSamplers.empty());
+
+        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+        // If either failed, return early to re-allocate and switch the heaps.
+        const uint32_t descriptorCount = mSamplers.size();
+        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+        if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+                                               &baseCPUDescriptor, &mGPUAllocation)) {
+            return false;
+        }
+
+        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+        // simple copies per bindgroup, a single non-simple copy could be issued.
+        // TODO(dawn:155): Consider doing this optimization.
+        device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+                                                        mCPUAllocation.GetBaseDescriptor(),
+                                                        D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+
+        return true;
+    }
+
+    D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
+        return mGPUAllocation.GetBaseDescriptor();
+    }
+
+    ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
+        const BindGroup* group,
+        StagingDescriptorAllocator* samplerAllocator) {
+        const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+
+        // If a previously created bindgroup used the same samplers, the backing sampler heap
+        // allocation can be reused. The packed list of samplers acts as the key to lookup the
+        // allocation in a cache.
+        // TODO(dawn:155): Avoid re-allocating the vector each lookup.
+        std::vector<Sampler*> samplers;
+        samplers.reserve(bgl->GetSamplerDescriptorCount());
+
+        for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+             bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+            if (bindingInfo.bindingType == BindingInfoType::Sampler) {
+                samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
+            }
+        }
+
+        // Check the cache if there exists a sampler heap allocation that corresponds to the
+        // samplers.
+        SamplerHeapCacheEntry blueprint(std::move(samplers));
+        auto iter = mCache.find(&blueprint);
+        if (iter != mCache.end()) {
+            return Ref<SamplerHeapCacheEntry>(*iter);
+        }
+
+        // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
+        // real entry below.
+        samplers = std::move(blueprint.AcquireSamplers());
+
+        CPUDescriptorHeapAllocation allocation;
+        DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
+
+        const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
+        ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
+
+        for (uint32_t i = 0; i < samplers.size(); ++i) {
+            const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
+            d3d12Device->CreateSampler(&samplerDesc,
+                                       allocation.OffsetFrom(samplerSizeIncrement, i));
+        }
+
+        Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
+            this, samplerAllocator, std::move(samplers), std::move(allocation)));
+        mCache.insert(entry.Get());
+        return std::move(entry);
+    }
+
+    SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
+    }
+
+    SamplerHeapCache::~SamplerHeapCache() {
+        ASSERT(mCache.empty());
+    }
+
+    void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
+        ASSERT(entry->GetRefCountForTesting() == 0);
+        size_t removedCount = mCache.erase(entry);
+        ASSERT(removedCount == 1);
+    }
+
+    size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
+        size_t hash = 0;
+        for (const Sampler* sampler : entry->mSamplers) {
+            HashCombine(&hash, sampler);
+        }
+        return hash;
+    }
+
+    bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
+                                                         const SamplerHeapCacheEntry* b) const {
+        return a->mSamplers == b->mSamplers;
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
new file mode 100644
index 0000000..be38d21
--- /dev/null
+++ b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
@@ -0,0 +1,107 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+#define DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+#include <unordered_set>
+
+// |SamplerHeapCacheEntry| maintains a cache of sampler descriptor heap allocations.
+// Each entry represents one or more sampler descriptors that co-exist in a CPU and
+// GPU descriptor heap. The CPU-side allocation is deallocated once the final reference
+// has been released while the GPU-side allocation is deallocated when the GPU is finished.
+//
+// The BindGroupLayout hands out these entries upon constructing the bindgroup. If the entry is not
+// invalid, it will allocate and initialize so it may be reused by another bindgroup.
+//
+// The cache is primary needed for the GPU sampler heap, which is much smaller than the view heap
+// and switches incur expensive pipeline flushes.
+namespace dawn::native::d3d12 {
+
+    class BindGroup;
+    class Device;
+    class Sampler;
+    class SamplerHeapCache;
+    class StagingDescriptorAllocator;
+    class ShaderVisibleDescriptorAllocator;
+
+    // Wraps sampler descriptor heap allocations in a cache.
+    class SamplerHeapCacheEntry : public RefCounted {
+      public:
+        SamplerHeapCacheEntry() = default;
+        SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
+        SamplerHeapCacheEntry(SamplerHeapCache* cache,
+                              StagingDescriptorAllocator* allocator,
+                              std::vector<Sampler*> samplers,
+                              CPUDescriptorHeapAllocation allocation);
+        ~SamplerHeapCacheEntry() override;
+
+        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+
+        std::vector<Sampler*>&& AcquireSamplers();
+
+        bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
+
+        // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
+        struct HashFunc {
+            size_t operator()(const SamplerHeapCacheEntry* entry) const;
+        };
+
+        struct EqualityFunc {
+            bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
+        };
+
+      private:
+        CPUDescriptorHeapAllocation mCPUAllocation;
+        GPUDescriptorHeapAllocation mGPUAllocation;
+
+        // Storing raw pointer because the sampler object will be already hashed
+        // by the device and will already be unique.
+        std::vector<Sampler*> mSamplers;
+
+        StagingDescriptorAllocator* mAllocator = nullptr;
+        SamplerHeapCache* mCache = nullptr;
+    };
+
+    // Cache descriptor heap allocations so that we don't create duplicate ones for every
+    // BindGroup.
+    class SamplerHeapCache {
+      public:
+        SamplerHeapCache(Device* device);
+        ~SamplerHeapCache();
+
+        ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
+            const BindGroup* group,
+            StagingDescriptorAllocator* samplerAllocator);
+
+        void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
+
+      private:
+        Device* mDevice;
+
+        using Cache = std::unordered_set<SamplerHeapCacheEntry*,
+                                         SamplerHeapCacheEntry::HashFunc,
+                                         SamplerHeapCacheEntry::EqualityFunc>;
+
+        Cache mCache;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
diff --git a/src/dawn/native/d3d12/ShaderModuleD3D12.cpp b/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
new file mode 100644
index 0000000..0dea76e
--- /dev/null
+++ b/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
@@ -0,0 +1,846 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/WindowsUtils.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <d3dcompiler.h>
+
+#include <tint/tint.h>
+#include <map>
+#include <sstream>
+#include <unordered_map>
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
+            ComPtr<IDxcVersionInfo> versionInfo;
+            DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
+                                  "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
+
+            uint32_t compilerMajor, compilerMinor;
+            DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
+                                  "IDxcVersionInfo::GetVersion"));
+
+            // Pack both into a single version number.
+            return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
+        }
+
+        uint64_t GetD3DCompilerVersion() {
+            return D3D_COMPILER_VERSION;
+        }
+
+        struct CompareBindingPoint {
+            constexpr bool operator()(const tint::transform::BindingPoint& lhs,
+                                      const tint::transform::BindingPoint& rhs) const {
+                if (lhs.group != rhs.group) {
+                    return lhs.group < rhs.group;
+                } else {
+                    return lhs.binding < rhs.binding;
+                }
+            }
+        };
+
+        void Serialize(std::stringstream& output, const tint::ast::Access& access) {
+            output << access;
+        }
+
+        void Serialize(std::stringstream& output,
+                       const tint::transform::BindingPoint& binding_point) {
+            output << "(BindingPoint";
+            output << " group=" << binding_point.group;
+            output << " binding=" << binding_point.binding;
+            output << ")";
+        }
+
+        template <typename T,
+                  typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
+        void Serialize(std::stringstream& output, const T& val) {
+            output << val;
+        }
+
+        template <typename T>
+        void Serialize(std::stringstream& output,
+                       const std::unordered_map<tint::transform::BindingPoint, T>& map) {
+            output << "(map";
+
+            std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
+                                                                                   map.end());
+            for (auto& [bindingPoint, value] : sorted) {
+                output << " ";
+                Serialize(output, bindingPoint);
+                output << "=";
+                Serialize(output, value);
+            }
+            output << ")";
+        }
+
+        void Serialize(std::stringstream& output,
+                       const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
+            output << "(ArrayLengthFromUniformOptions";
+            output << " ubo_binding=";
+            Serialize(output, arrayLengthFromUniform.ubo_binding);
+            output << " bindpoint_to_size_index=";
+            Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
+            output << ")";
+        }
+
+        // 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
+        std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
+            std::ostringstream out;
+            out.precision(n);
+            out << std::fixed << v;
+            return out.str();
+        }
+
+        std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
+                                       const OverridableConstantScalar* entry,
+                                       double value = 0) {
+            switch (dawnType) {
+                case EntryPointMetadata::OverridableConstant::Type::Boolean:
+                    return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
+                case EntryPointMetadata::OverridableConstant::Type::Float32:
+                    return FloatToStringWithPrecision(entry ? entry->f32
+                                                            : static_cast<float>(value));
+                case EntryPointMetadata::OverridableConstant::Type::Int32:
+                    return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
+                case EntryPointMetadata::OverridableConstant::Type::Uint32:
+                    return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
+
+        void GetOverridableConstantsDefines(
+            std::vector<std::pair<std::string, std::string>>* defineStrings,
+            const PipelineConstantEntries* pipelineConstantEntries,
+            const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
+            std::unordered_set<std::string> overriddenConstants;
+
+            // Set pipeline overridden values
+            for (const auto& [name, value] : *pipelineConstantEntries) {
+                overriddenConstants.insert(name);
+
+                // This is already validated so `name` must exist
+                const auto& moduleConstant = shaderEntryPointConstants->at(name);
+
+                defineStrings->emplace_back(
+                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+                    GetHLSLValueString(moduleConstant.type, nullptr, value));
+            }
+
+            // Set shader initialized default values
+            for (const auto& iter : *shaderEntryPointConstants) {
+                const std::string& name = iter.first;
+                if (overriddenConstants.count(name) != 0) {
+                    // This constant already has overridden value
+                    continue;
+                }
+
+                const auto& moduleConstant = shaderEntryPointConstants->at(name);
+
+                // Uninitialized default values are okay since they ar only defined to pass
+                // compilation but not used
+                defineStrings->emplace_back(
+                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+                    GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
+            }
+        }
+
+        // The inputs to a shader compilation. These have been intentionally isolated from the
+        // device to help ensure that the pipeline cache key contains all inputs for compilation.
+        struct ShaderCompilationRequest {
+            enum Compiler { FXC, DXC };
+
+            // Common inputs
+            Compiler compiler;
+            const tint::Program* program;
+            const char* entryPointName;
+            SingleShaderStage stage;
+            uint32_t compileFlags;
+            bool disableSymbolRenaming;
+            tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
+            tint::transform::BindingRemapper::AccessControls remappedAccessControls;
+            bool isRobustnessEnabled;
+            bool usesNumWorkgroups;
+            uint32_t numWorkgroupsRegisterSpace;
+            uint32_t numWorkgroupsShaderRegister;
+            tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+            std::vector<std::pair<std::string, std::string>> defineStrings;
+
+            // FXC/DXC common inputs
+            bool disableWorkgroupInit;
+
+            // FXC inputs
+            uint64_t fxcVersion;
+
+            // DXC inputs
+            uint64_t dxcVersion;
+            const D3D12DeviceInfo* deviceInfo;
+            bool hasShaderFloat16Feature;
+
+            static ResultOrError<ShaderCompilationRequest> Create(
+                const char* entryPointName,
+                SingleShaderStage stage,
+                const PipelineLayout* layout,
+                uint32_t compileFlags,
+                const Device* device,
+                const tint::Program* program,
+                const EntryPointMetadata& entryPoint,
+                const ProgrammableStage& programmableStage) {
+                Compiler compiler;
+                uint64_t dxcVersion = 0;
+                if (device->IsToggleEnabled(Toggle::UseDXC)) {
+                    compiler = Compiler::DXC;
+                    DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
+                } else {
+                    compiler = Compiler::FXC;
+                }
+
+                using tint::transform::BindingPoint;
+                using tint::transform::BindingRemapper;
+
+                BindingRemapper::BindingPoints remappedBindingPoints;
+                BindingRemapper::AccessControls remappedAccessControls;
+
+                tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+                arrayLengthFromUniform.ubo_binding = {
+                    layout->GetDynamicStorageBufferLengthsRegisterSpace(),
+                    layout->GetDynamicStorageBufferLengthsShaderRegister()};
+
+                const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
+                for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+                    const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+                    const auto& groupBindingInfo = moduleBindingInfo[group];
+
+                    // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
+                    // the Tint AST to make the "bindings" decoration match the offset chosen by
+                    // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
+                    // assigned to each interface variable.
+                    for (const auto& [binding, bindingInfo] : groupBindingInfo) {
+                        BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+                        BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                                     static_cast<uint32_t>(binding)};
+                        BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+                                                     bgl->GetShaderRegister(bindingIndex)};
+                        if (srcBindingPoint != dstBindingPoint) {
+                            remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                        }
+
+                        // Declaring a read-only storage buffer in HLSL but specifying a storage
+                        // buffer in the BGL produces the wrong output. Force read-only storage
+                        // buffer bindings to be treated as UAV instead of SRV. Internal storage
+                        // buffer is a storage buffer used in the internal pipeline.
+                        const bool forceStorageBufferAsUAV =
+                            (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
+                             (bgl->GetBindingInfo(bindingIndex).buffer.type ==
+                                  wgpu::BufferBindingType::Storage ||
+                              bgl->GetBindingInfo(bindingIndex).buffer.type ==
+                                  kInternalStorageBufferBinding));
+                        if (forceStorageBufferAsUAV) {
+                            remappedAccessControls.emplace(srcBindingPoint,
+                                                           tint::ast::Access::kReadWrite);
+                        }
+                    }
+
+                    // Add arrayLengthFromUniform options
+                    {
+                        for (const auto& bindingAndRegisterOffset :
+                             layout->GetDynamicStorageBufferLengthInfo()[group]
+                                 .bindingAndRegisterOffsets) {
+                            BindingNumber binding = bindingAndRegisterOffset.binding;
+                            uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
+
+                            BindingPoint bindingPoint{static_cast<uint32_t>(group),
+                                                      static_cast<uint32_t>(binding)};
+                            // Get the renamed binding point if it was remapped.
+                            auto it = remappedBindingPoints.find(bindingPoint);
+                            if (it != remappedBindingPoints.end()) {
+                                bindingPoint = it->second;
+                            }
+
+                            arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
+                                                                                   registerOffset);
+                        }
+                    }
+                }
+
+                ShaderCompilationRequest request;
+                request.compiler = compiler;
+                request.program = program;
+                request.entryPointName = entryPointName;
+                request.stage = stage;
+                request.compileFlags = compileFlags;
+                request.disableSymbolRenaming =
+                    device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
+                request.remappedBindingPoints = std::move(remappedBindingPoints);
+                request.remappedAccessControls = std::move(remappedAccessControls);
+                request.isRobustnessEnabled = device->IsRobustnessEnabled();
+                request.disableWorkgroupInit =
+                    device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+                request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
+                request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
+                request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
+                request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
+                request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
+                request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
+                request.deviceInfo = &device->GetDeviceInfo();
+                request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
+
+                GetOverridableConstantsDefines(
+                    &request.defineStrings, &programmableStage.constants,
+                    &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
+                         .overridableConstants);
+
+                return std::move(request);
+            }
+
+            ResultOrError<PersistentCacheKey> CreateCacheKey() const {
+                // Generate the WGSL from the Tint program so it's normalized.
+                // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
+                // compact representation.
+                auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
+                if (!result.success) {
+                    std::ostringstream errorStream;
+                    errorStream << "Tint WGSL failure:" << std::endl;
+                    errorStream << "Generator: " << result.error << std::endl;
+                    return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
+                }
+
+                std::stringstream stream;
+
+                // Prefix the key with the type to avoid collisions from another type that could
+                // have the same key.
+                stream << static_cast<uint32_t>(PersistentKeyType::Shader);
+                stream << "\n";
+
+                stream << result.wgsl.length();
+                stream << "\n";
+
+                stream << result.wgsl;
+                stream << "\n";
+
+                stream << "(ShaderCompilationRequest";
+                stream << " compiler=" << compiler;
+                stream << " entryPointName=" << entryPointName;
+                stream << " stage=" << uint32_t(stage);
+                stream << " compileFlags=" << compileFlags;
+                stream << " disableSymbolRenaming=" << disableSymbolRenaming;
+
+                stream << " remappedBindingPoints=";
+                Serialize(stream, remappedBindingPoints);
+
+                stream << " remappedAccessControls=";
+                Serialize(stream, remappedAccessControls);
+
+                stream << " useNumWorkgroups=" << usesNumWorkgroups;
+                stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
+                stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
+
+                stream << " arrayLengthFromUniform=";
+                Serialize(stream, arrayLengthFromUniform);
+
+                stream << " shaderModel=" << deviceInfo->shaderModel;
+                stream << " disableWorkgroupInit=" << disableWorkgroupInit;
+                stream << " isRobustnessEnabled=" << isRobustnessEnabled;
+                stream << " fxcVersion=" << fxcVersion;
+                stream << " dxcVersion=" << dxcVersion;
+                stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
+
+                stream << " defines={";
+                for (const auto& [name, value] : defineStrings) {
+                    stream << " <" << name << "," << value << ">";
+                }
+                stream << " }";
+
+                stream << ")";
+                stream << "\n";
+
+                return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
+                                          std::istreambuf_iterator<char>{});
+            }
+        };
+
+        std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
+            std::vector<const wchar_t*> arguments;
+            if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
+                arguments.push_back(L"/Gec");
+            }
+            if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
+                arguments.push_back(L"/Gis");
+            }
+            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+            if (compileFlags & d3dCompileFlagsBits) {
+                switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
+                    case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+                        arguments.push_back(L"/O0");
+                        break;
+                    case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+                        arguments.push_back(L"/O2");
+                        break;
+                    case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+                        arguments.push_back(L"/O3");
+                        break;
+                }
+            }
+            if (compileFlags & D3DCOMPILE_DEBUG) {
+                arguments.push_back(L"/Zi");
+            }
+            if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
+                arguments.push_back(L"/Zpr");
+            }
+            if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
+                arguments.push_back(L"/Zpc");
+            }
+            if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
+                arguments.push_back(L"/Gfa");
+            }
+            if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
+                arguments.push_back(L"/Gfp");
+            }
+            if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
+                arguments.push_back(L"/res_may_alias");
+            }
+
+            if (enable16BitTypes) {
+                // enable-16bit-types are only allowed in -HV 2018 (default)
+                arguments.push_back(L"/enable-16bit-types");
+            }
+
+            arguments.push_back(L"-HV");
+            arguments.push_back(L"2018");
+
+            return arguments;
+        }
+
+        ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
+                                                         IDxcCompiler* dxcCompiler,
+                                                         const ShaderCompilationRequest& request,
+                                                         const std::string& hlslSource) {
+            ComPtr<IDxcBlobEncoding> sourceBlob;
+            DAWN_TRY(
+                CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
+                                 hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
+                             "DXC create blob"));
+
+            std::wstring entryPointW;
+            DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
+
+            std::vector<const wchar_t*> arguments =
+                GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
+
+            // Build defines for overridable constants
+            std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
+            defineStrings.reserve(request.defineStrings.size());
+            for (const auto& [name, value] : request.defineStrings) {
+                defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
+            }
+
+            std::vector<DxcDefine> dxcDefines;
+            dxcDefines.reserve(defineStrings.size());
+            for (const auto& [name, value] : defineStrings) {
+                dxcDefines.push_back({name.c_str(), value.c_str()});
+            }
+
+            ComPtr<IDxcOperationResult> result;
+            DAWN_TRY(CheckHRESULT(
+                dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
+                                     request.deviceInfo->shaderProfiles[request.stage].c_str(),
+                                     arguments.data(), arguments.size(), dxcDefines.data(),
+                                     dxcDefines.size(), nullptr, &result),
+                "DXC compile"));
+
+            HRESULT hr;
+            DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+
+            if (FAILED(hr)) {
+                ComPtr<IDxcBlobEncoding> errors;
+                DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+
+                return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
+                                                    static_cast<char*>(errors->GetBufferPointer()));
+            }
+
+            ComPtr<IDxcBlob> compiledShader;
+            DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
+            return std::move(compiledShader);
+        }
+
+        std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
+            struct Flag {
+                uint32_t value;
+                const char* name;
+            };
+            constexpr Flag flags[] = {
+            // Populated from d3dcompiler.h
+#define F(f) Flag{f, #f}
+                F(D3DCOMPILE_DEBUG),
+                F(D3DCOMPILE_SKIP_VALIDATION),
+                F(D3DCOMPILE_SKIP_OPTIMIZATION),
+                F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
+                F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
+                F(D3DCOMPILE_PARTIAL_PRECISION),
+                F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
+                F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
+                F(D3DCOMPILE_NO_PRESHADER),
+                F(D3DCOMPILE_AVOID_FLOW_CONTROL),
+                F(D3DCOMPILE_PREFER_FLOW_CONTROL),
+                F(D3DCOMPILE_ENABLE_STRICTNESS),
+                F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
+                F(D3DCOMPILE_IEEE_STRICTNESS),
+                F(D3DCOMPILE_RESERVED16),
+                F(D3DCOMPILE_RESERVED17),
+                F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
+                F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
+                F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
+                F(D3DCOMPILE_ALL_RESOURCES_BOUND),
+                F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
+                F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
+#undef F
+            };
+
+            std::string result;
+            for (const Flag& f : flags) {
+                if ((compileFlags & f.value) != 0) {
+                    result += f.name + std::string("\n");
+                }
+            }
+
+            // Optimization level must be handled separately as two bits are used, and the values
+            // don't map neatly to 0-3.
+            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+            switch (compileFlags & d3dCompileFlagsBits) {
+                case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
+                    break;
+                case D3DCOMPILE_OPTIMIZATION_LEVEL1:
+                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
+                    break;
+                case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
+                    break;
+                case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
+                    break;
+            }
+            result += std::string("\n");
+
+            return result;
+        }
+
+        ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
+                                                         const ShaderCompilationRequest& request,
+                                                         const std::string& hlslSource) {
+            const char* targetProfile = nullptr;
+            switch (request.stage) {
+                case SingleShaderStage::Vertex:
+                    targetProfile = "vs_5_1";
+                    break;
+                case SingleShaderStage::Fragment:
+                    targetProfile = "ps_5_1";
+                    break;
+                case SingleShaderStage::Compute:
+                    targetProfile = "cs_5_1";
+                    break;
+            }
+
+            ComPtr<ID3DBlob> compiledShader;
+            ComPtr<ID3DBlob> errors;
+
+            // Build defines for overridable constants
+            const D3D_SHADER_MACRO* pDefines = nullptr;
+            std::vector<D3D_SHADER_MACRO> fxcDefines;
+            if (request.defineStrings.size() > 0) {
+                fxcDefines.reserve(request.defineStrings.size() + 1);
+                for (const auto& [name, value] : request.defineStrings) {
+                    fxcDefines.push_back({name.c_str(), value.c_str()});
+                }
+                // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
+                fxcDefines.push_back({nullptr, nullptr});
+                pDefines = fxcDefines.data();
+            }
+
+            DAWN_INVALID_IF(FAILED(functions->d3dCompile(
+                                hlslSource.c_str(), hlslSource.length(), nullptr, pDefines, nullptr,
+                                request.entryPointName, targetProfile, request.compileFlags, 0,
+                                &compiledShader, &errors)),
+                            "D3D compile failed with: %s",
+                            static_cast<char*>(errors->GetBufferPointer()));
+
+            return std::move(compiledShader);
+        }
+
+        ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
+                                                   const ShaderCompilationRequest& request,
+                                                   std::string* remappedEntryPointName) {
+            std::ostringstream errorStream;
+            errorStream << "Tint HLSL failure:" << std::endl;
+
+            tint::transform::Manager transformManager;
+            tint::transform::DataMap transformInputs;
+
+            if (request.isRobustnessEnabled) {
+                transformManager.Add<tint::transform::Robustness>();
+            }
+
+            transformManager.Add<tint::transform::BindingRemapper>();
+
+            transformManager.Add<tint::transform::SingleEntryPoint>();
+            transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
+
+            transformManager.Add<tint::transform::Renamer>();
+
+            if (request.disableSymbolRenaming) {
+                // We still need to rename HLSL reserved keywords
+                transformInputs.Add<tint::transform::Renamer::Config>(
+                    tint::transform::Renamer::Target::kHlslKeywords);
+            }
+
+            // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
+            // the remapping but should not be considered a collision because they have
+            // different types.
+            const bool mayCollide = true;
+            transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
+                std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
+                mayCollide);
+
+            tint::Program transformedProgram;
+            tint::transform::DataMap transformOutputs;
+            {
+                TRACE_EVENT0(platform, General, "RunTransforms");
+                DAWN_TRY_ASSIGN(transformedProgram,
+                                RunTransforms(&transformManager, request.program, transformInputs,
+                                              &transformOutputs, nullptr));
+            }
+
+            if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+                auto it = data->remappings.find(request.entryPointName);
+                if (it != data->remappings.end()) {
+                    *remappedEntryPointName = it->second;
+                } else {
+                    DAWN_INVALID_IF(!request.disableSymbolRenaming,
+                                    "Could not find remapped name for entry point.");
+
+                    *remappedEntryPointName = request.entryPointName;
+                }
+            } else {
+                return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+            }
+
+            tint::writer::hlsl::Options options;
+            options.disable_workgroup_init = request.disableWorkgroupInit;
+            if (request.usesNumWorkgroups) {
+                options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
+                options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
+            }
+            // TODO(dawn:549): HLSL generation outputs the indices into the
+            // array_length_from_uniform buffer that were actually used. When the blob cache can
+            // store more than compiled shaders, we should reflect these used indices and store
+            // them as well. This would allow us to only upload root constants that are actually
+            // read by the shader.
+            options.array_length_from_uniform = request.arrayLengthFromUniform;
+            TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
+            auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
+            DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s",
+                            result.error);
+
+            return std::move(result.hlsl);
+        }
+
+        template <typename F>
+        MaybeError CompileShader(dawn::platform::Platform* platform,
+                                 const PlatformFunctions* functions,
+                                 IDxcLibrary* dxcLibrary,
+                                 IDxcCompiler* dxcCompiler,
+                                 ShaderCompilationRequest&& request,
+                                 bool dumpShaders,
+                                 F&& DumpShadersEmitLog,
+                                 CompiledShader* compiledShader) {
+            // Compile the source shader to HLSL.
+            std::string hlslSource;
+            std::string remappedEntryPoint;
+            DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
+            if (dumpShaders) {
+                std::ostringstream dumpedMsg;
+                dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
+                DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+            }
+            request.entryPointName = remappedEntryPoint.c_str();
+            switch (request.compiler) {
+                case ShaderCompilationRequest::Compiler::DXC: {
+                    TRACE_EVENT0(platform, General, "CompileShaderDXC");
+                    DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
+                                    CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
+                    break;
+                }
+                case ShaderCompilationRequest::Compiler::FXC: {
+                    TRACE_EVENT0(platform, General, "CompileShaderFXC");
+                    DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
+                                    CompileShaderFXC(functions, request, hlslSource));
+                    break;
+                }
+            }
+
+            if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
+                std::ostringstream dumpedMsg;
+                dumpedMsg << "/* FXC compile flags */ " << std::endl
+                          << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
+
+                dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
+
+                ComPtr<ID3DBlob> disassembly;
+                if (FAILED(functions->d3dDisassemble(
+                        compiledShader->compiledFXCShader->GetBufferPointer(),
+                        compiledShader->compiledFXCShader->GetBufferSize(), 0, nullptr,
+                        &disassembly))) {
+                    dumpedMsg << "D3D disassemble failed" << std::endl;
+                } else {
+                    dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
+                }
+                DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+            }
+
+            return {};
+        }
+
+    }  // anonymous namespace
+
+    // static
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor) {
+    }
+
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+        return InitializeBase(parseResult);
+    }
+
+    ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
+                                                        SingleShaderStage stage,
+                                                        const PipelineLayout* layout,
+                                                        uint32_t compileFlags) {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
+        ASSERT(!IsError());
+
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        Device* device = ToBackend(GetDevice());
+
+        CompiledShader compiledShader = {};
+
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        const tint::Program* program = GetTintProgram();
+        tint::Program programAsValue;
+
+        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+        if (stage == SingleShaderStage::Vertex) {
+            transformManager.Add<tint::transform::FirstIndexOffset>();
+            transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
+                layout->GetFirstIndexOffsetShaderRegister(),
+                layout->GetFirstIndexOffsetRegisterSpace());
+        }
+
+        tint::transform::DataMap transformOutputs;
+        DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
+                                                      &transformOutputs, nullptr));
+        program = &programAsValue;
+
+        if (stage == SingleShaderStage::Vertex) {
+            if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
+                // TODO(dawn:549): Consider adding this information to the pipeline cache once we
+                // can store more than the shader blob in it.
+                compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
+                if (compiledShader.firstOffsetInfo.usesVertexIndex) {
+                    compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
+                }
+                compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
+                if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
+                    compiledShader.firstOffsetInfo.instanceIndexOffset =
+                        data->first_instance_offset;
+                }
+            }
+        }
+
+        ShaderCompilationRequest request;
+        DAWN_TRY_ASSIGN(
+            request, ShaderCompilationRequest::Create(
+                         programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
+                         program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
+
+        PersistentCacheKey shaderCacheKey;
+        DAWN_TRY_ASSIGN(shaderCacheKey, request.CreateCacheKey());
+
+        DAWN_TRY_ASSIGN(
+            compiledShader.cachedShader,
+            device->GetPersistentCache()->GetOrCreate(
+                shaderCacheKey, [&](auto doCache) -> MaybeError {
+                    DAWN_TRY(CompileShader(
+                        device->GetPlatform(), device->GetFunctions(),
+                        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get()
+                                                                : nullptr,
+                        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get()
+                                                                : nullptr,
+                        std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
+                        [&](WGPULoggingType loggingType, const char* message) {
+                            GetDevice()->EmitLog(loggingType, message);
+                        },
+                        &compiledShader));
+                    const D3D12_SHADER_BYTECODE shader = compiledShader.GetD3D12ShaderBytecode();
+                    doCache(shader.pShaderBytecode, shader.BytecodeLength);
+                    return {};
+                }));
+
+        return std::move(compiledShader);
+    }
+
+    D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
+        if (cachedShader.buffer != nullptr) {
+            return {cachedShader.buffer.get(), cachedShader.bufferSize};
+        } else if (compiledFXCShader != nullptr) {
+            return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
+        } else if (compiledDXCShader != nullptr) {
+            return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+        }
+        UNREACHABLE();
+        return {};
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ShaderModuleD3D12.h b/src/dawn/native/d3d12/ShaderModuleD3D12.h
new file mode 100644
index 0000000..2fd3a80
--- /dev/null
+++ b/src/dawn/native/d3d12/ShaderModuleD3D12.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
+#define DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
+
+#include "dawn/native/PersistentCache.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native {
+    struct ProgrammableStage;
+}  // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+    class PipelineLayout;
+
+    struct FirstOffsetInfo {
+        bool usesVertexIndex;
+        uint32_t vertexIndexOffset;
+        bool usesInstanceIndex;
+        uint32_t instanceIndexOffset;
+    };
+
+    // Manages a ref to one of the various representations of shader blobs and information used to
+    // emulate vertex/instance index starts
+    struct CompiledShader {
+        ScopedCachedBlob cachedShader;
+        ComPtr<ID3DBlob> compiledFXCShader;
+        ComPtr<IDxcBlob> compiledDXCShader;
+        D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
+
+        FirstOffsetInfo firstOffsetInfo;
+    };
+
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
+                                              SingleShaderStage stage,
+                                              const PipelineLayout* layout,
+                                              uint32_t compileFlags);
+
+      private:
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override = default;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
diff --git a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
new file mode 100644
index 0000000..32d6cd6
--- /dev/null
+++ b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -0,0 +1,254 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    // Limits the min/max heap size to always be some known value for testing.
+    // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
+    // We change the value from {1024, 512} to {32, 16} because we use blending
+    // for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
+    // and low precision at big integer.
+    static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
+
+    uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+                                              bool useSmallSize) {
+        if (useSmallSize) {
+            return kShaderVisibleSmallHeapSizes[heapType];
+        }
+
+        // Minimum heap size must be large enough to satisfy the largest descriptor allocation
+        // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
+        // memory should only a tiny fraction ever be used.
+        // TODO(dawn:155): Figure out these values.
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+                return 4096;
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return 256;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+                                              bool useSmallSize) {
+        if (useSmallSize) {
+            return kShaderVisibleSmallHeapSizes[heapType];
+        }
+
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+                return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // static
+    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
+    ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+        std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
+            std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
+        DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
+        return std::move(allocator);
+    }
+
+    ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
+        Device* device,
+        D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+        : mHeapType(heapType),
+          mDevice(device),
+          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+          mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
+              heapType,
+              mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
+        ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+               heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+    }
+
+    bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
+        uint32_t descriptorCount,
+        ExecutionSerial pendingSerial,
+        D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+        GPUDescriptorHeapAllocation* allocation) {
+        ASSERT(mHeap != nullptr);
+        const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
+        if (startOffset == RingBufferAllocator::kInvalidOffset) {
+            return false;
+        }
+
+        ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
+
+        const uint64_t heapOffset = mSizeIncrement * startOffset;
+
+        // Check for 32-bit overflow since CPU heap start handle uses size_t.
+        const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
+
+        ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
+
+        *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
+
+        const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
+            descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
+
+        // Record both the device and heap serials to determine later if the allocations are
+        // still valid.
+        *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
+
+        return true;
+    }
+
+    ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
+        return mHeap->GetD3D12DescriptorHeap();
+    }
+
+    void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+        mAllocator.Deallocate(completedSerial);
+    }
+
+    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
+    ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
+        // The size in bytes of a descriptor heap is best calculated by the increment size
+        // multiplied by the number of descriptors. In practice, this is only an estimate and
+        // the actual size may vary depending on the driver.
+        const uint64_t kSize = mSizeIncrement * descriptorCount;
+
+        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+
+        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
+        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+        heapDescriptor.Type = mHeapType;
+        heapDescriptor.NumDescriptors = descriptorCount;
+        heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
+        heapDescriptor.NodeMask = 0;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+                                             &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+                                         "ID3D12Device::CreateDescriptorHeap"));
+
+        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
+            std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
+
+        // We must track the allocation in the LRU when it is created, otherwise the residency
+        // manager will see the allocation as non-resident in the later call to LockAllocation.
+        mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
+
+        return std::move(descriptorHeap);
+    }
+
+    // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
+    MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
+        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
+        // Dynamically allocate using a two-phase allocation strategy.
+        // The first phase increasingly grows a small heap in binary sizes for light users while the
+        // second phase pool-allocates largest sized heaps for heavy users.
+        if (mHeap != nullptr) {
+            mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
+
+            const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
+                mHeapType,
+                mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+            if (mDescriptorCount < maxDescriptorCount) {
+                // Phase #1. Grow the heaps in powers-of-two.
+                mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
+                mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
+            } else {
+                // Phase #2. Pool-allocate heaps.
+                // Return the switched out heap to the pool and retrieve the oldest heap that is no
+                // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
+                // heaps for heavy users.
+                // TODO(dawn:256): Consider periodically triming to avoid OOM.
+                mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
+                if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+                    descriptorHeap = std::move(mPool.front().heap);
+                    mPool.pop_front();
+                }
+            }
+        }
+
+        if (descriptorHeap == nullptr) {
+            DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
+        }
+
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
+
+        // Create a FIFO buffer from the recently created heap.
+        mHeap = std::move(descriptorHeap);
+        mAllocator = RingBufferAllocator(mDescriptorCount);
+
+        // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
+        // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
+        // heap serial.
+        mHeapSerial++;
+
+        return {};
+    }
+
+    HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
+        return mHeapSerial;
+    }
+
+    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
+        return mAllocator.GetSize();
+    }
+
+    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
+        return mPool.size();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
+        return mHeap->IsResidencyLocked();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
+        ASSERT(!mPool.empty());
+        return mPool.back().heap->IsInResidencyLRUCache();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
+        const GPUDescriptorHeapAllocation& allocation) const {
+        // Consider valid if allocated for the pending submit and the shader visible heaps
+        // have not switched over.
+        return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
+                allocation.GetHeapSerial() == mHeapSerial);
+    }
+
+    ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
+        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+        uint64_t size)
+        : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
+          mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
+    }
+
+    ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
+        return mD3d12DescriptorHeap.Get();
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
new file mode 100644
index 0000000..dca8b29
--- /dev/null
+++ b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -0,0 +1,105 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+#define DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/PageableD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <list>
+
+// |ShaderVisibleDescriptorAllocator| allocates a variable-sized block of descriptors from a GPU
+// descriptor heap pool.
+// Internally, it manages a list of heaps using a ringbuffer block allocator. The heap is in one
+// of two states: switched in or out. Only a switched in heap can be bound to the pipeline. If
+// the heap is full, the caller must switch-in a new heap before re-allocating and the old one
+// is returned to the pool.
+namespace dawn::native::d3d12 {
+
+    class Device;
+    class GPUDescriptorHeapAllocation;
+
+    class ShaderVisibleDescriptorHeap : public Pageable {
+      public:
+        ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+                                    uint64_t size);
+        ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
+
+      private:
+        ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+    };
+
+    class ShaderVisibleDescriptorAllocator {
+      public:
+        static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
+            Device* device,
+            D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+        ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+        // Returns true if the allocation was successful, when false is returned the current heap is
+        // full and AllocateAndSwitchShaderVisibleHeap() must be called.
+        bool AllocateGPUDescriptors(uint32_t descriptorCount,
+                                    ExecutionSerial pendingSerial,
+                                    D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+                                    GPUDescriptorHeapAllocation* allocation);
+
+        void Tick(ExecutionSerial completedSerial);
+
+        ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
+        MaybeError AllocateAndSwitchShaderVisibleHeap();
+
+        // For testing purposes only.
+        HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
+        uint64_t GetShaderVisibleHeapSizeForTesting() const;
+        uint64_t GetShaderVisiblePoolSizeForTesting() const;
+        bool IsShaderVisibleHeapLockedResidentForTesting() const;
+        bool IsLastShaderVisibleHeapInLRUForTesting() const;
+
+        bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
+
+      private:
+        struct SerialDescriptorHeap {
+            ExecutionSerial heapSerial;
+            std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
+        };
+
+        ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
+            uint32_t descriptorCount) const;
+
+        std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
+        RingBufferAllocator mAllocator;
+        std::list<SerialDescriptorHeap> mPool;
+        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+
+        Device* mDevice;
+
+        // The serial value of 0 means the shader-visible heaps have not been allocated.
+        // This value is never returned in the GPUDescriptorHeapAllocation after
+        // AllocateGPUDescriptors() is called.
+        HeapVersionID mHeapSerial = HeapVersionID(0);
+
+        uint32_t mSizeIncrement;
+
+        // The descriptor count is the current size of the heap in number of descriptors.
+        // This is stored on the allocator to avoid extra conversions.
+        uint32_t mDescriptorCount = 0;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
diff --git a/src/dawn/native/d3d12/StagingBufferD3D12.cpp b/src/dawn/native/d3d12/StagingBufferD3D12.cpp
new file mode 100644
index 0000000..e608a14
--- /dev/null
+++ b/src/dawn/native/d3d12/StagingBufferD3D12.cpp
@@ -0,0 +1,77 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    MaybeError StagingBuffer::Initialize() {
+        D3D12_RESOURCE_DESC resourceDescriptor;
+        resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+        resourceDescriptor.Alignment = 0;
+        resourceDescriptor.Width = GetSize();
+        resourceDescriptor.Height = 1;
+        resourceDescriptor.DepthOrArraySize = 1;
+        resourceDescriptor.MipLevels = 1;
+        resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+        resourceDescriptor.SampleDesc.Count = 1;
+        resourceDescriptor.SampleDesc.Quality = 0;
+        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+        resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
+
+        DAWN_TRY_ASSIGN(mUploadHeap,
+                        mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
+                                                D3D12_RESOURCE_STATE_GENERIC_READ));
+
+        // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+        // evicted. This buffer should already have been made resident when it was created.
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
+            ToBackend(mUploadHeap.GetResourceHeap())));
+
+        SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
+
+        return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
+    }
+
+    StagingBuffer::~StagingBuffer() {
+        // Always check if the allocation is valid before Unmap.
+        // The resource would not exist had it failed to allocate.
+        if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return;
+        }
+
+        // The underlying heap was locked in residency upon creation. We must unlock it when this
+        // buffer becomes unmapped.
+        mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
+
+        // Invalidate the CPU virtual address & flush cache (if needed).
+        GetResource()->Unmap(0, nullptr);
+        mMappedPointer = nullptr;
+
+        mDevice->DeallocateMemory(mUploadHeap);
+    }
+
+    ID3D12Resource* StagingBuffer::GetResource() const {
+        return mUploadHeap.GetD3D12Resource();
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/StagingBufferD3D12.h b/src/dawn/native/d3d12/StagingBufferD3D12.h
new file mode 100644
index 0000000..b810541
--- /dev/null
+++ b/src/dawn/native/d3d12/StagingBufferD3D12.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERD3D12_H_
+#define DAWNNATIVE_STAGINGBUFFERD3D12_H_
+
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+        ~StagingBuffer() override;
+
+        ID3D12Resource* GetResource() const;
+
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        ResourceHeapAllocation mUploadHeap;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_STAGINGBUFFERD3D12_H_
diff --git a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
new file mode 100644
index 0000000..b64da30
--- /dev/null
+++ b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
@@ -0,0 +1,152 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Math.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
+                                                           uint32_t descriptorCount,
+                                                           uint32_t heapSize,
+                                                           D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+        : mDevice(device),
+          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+          mBlockSize(descriptorCount * mSizeIncrement),
+          mHeapSize(RoundUp(heapSize, descriptorCount)),
+          mHeapType(heapType) {
+        ASSERT(descriptorCount <= heapSize);
+    }
+
+    StagingDescriptorAllocator::~StagingDescriptorAllocator() {
+        const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+        for (auto& buffer : mPool) {
+            ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
+        }
+        ASSERT(mAvailableHeaps.size() == mPool.size());
+    }
+
+    ResultOrError<CPUDescriptorHeapAllocation>
+    StagingDescriptorAllocator::AllocateCPUDescriptors() {
+        if (mAvailableHeaps.empty()) {
+            DAWN_TRY(AllocateCPUHeap());
+        }
+
+        ASSERT(!mAvailableHeaps.empty());
+
+        const uint32_t heapIndex = mAvailableHeaps.back();
+        NonShaderVisibleBuffer& buffer = mPool[heapIndex];
+
+        ASSERT(!buffer.freeBlockIndices.empty());
+
+        const Index blockIndex = buffer.freeBlockIndices.back();
+
+        buffer.freeBlockIndices.pop_back();
+
+        if (buffer.freeBlockIndices.empty()) {
+            mAvailableHeaps.pop_back();
+        }
+
+        const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
+            buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
+
+        return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+    }
+
+    MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
+        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+        heapDescriptor.Type = mHeapType;
+        heapDescriptor.NumDescriptors = mHeapSize;
+        heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+        heapDescriptor.NodeMask = 0;
+
+        ComPtr<ID3D12DescriptorHeap> heap;
+        DAWN_TRY(CheckHRESULT(
+            mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
+            "ID3D12Device::CreateDescriptorHeap"));
+
+        NonShaderVisibleBuffer newBuffer;
+        newBuffer.heap = std::move(heap);
+
+        const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+        newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
+
+        for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
+            newBuffer.freeBlockIndices.push_back(blockIndex);
+        }
+
+        mAvailableHeaps.push_back(mPool.size());
+        mPool.emplace_back(std::move(newBuffer));
+
+        return {};
+    }
+
+    void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
+        ASSERT(allocation->IsValid());
+
+        const uint32_t heapIndex = allocation->GetHeapIndex();
+
+        ASSERT(heapIndex < mPool.size());
+
+        // Insert the deallocated block back into the free-list. Order does not matter. However,
+        // having blocks be non-contigious could slow down future allocations due to poor cache
+        // locality.
+        // TODO(dawn:155): Consider more optimization.
+        std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
+        if (freeBlockIndices.empty()) {
+            mAvailableHeaps.emplace_back(heapIndex);
+        }
+
+        const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
+            mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
+
+        const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
+
+        const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
+
+        freeBlockIndices.emplace_back(blockIndex);
+
+        // Invalidate the handle in case the developer accidentally uses it again.
+        allocation->Invalidate();
+    }
+
+    uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
+        return mSizeIncrement;
+    }
+
+    StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
+        return ((mHeapSize * mSizeIncrement) / mBlockSize);
+    }
+
+    ResultOrError<CPUDescriptorHeapAllocation>
+    StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
+        CPUDescriptorHeapAllocation allocation;
+        DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
+        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+        return allocation;
+    }
+
+    void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+        for (CPUDescriptorHeapAllocation& allocation :
+             mAllocationsToDelete.IterateUpTo(completedSerial)) {
+            Deallocate(&allocation);
+        }
+
+        mAllocationsToDelete.ClearUpTo(completedSerial);
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
new file mode 100644
index 0000000..454aa09
--- /dev/null
+++ b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
@@ -0,0 +1,85 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
+#define DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+
+#include <vector>
+
+// |StagingDescriptorAllocator| allocates a fixed-size block of descriptors from a CPU
+// descriptor heap pool.
+// Internally, it manages a list of heaps using a fixed-size block allocator. The fixed-size
+// block allocator is backed by a list of free blocks (free-list). The heap is in one of two
+// states: AVAILABLE or not. To allocate, the next free block is removed from the free-list
+// and the corresponding heap offset is returned. The AVAILABLE heap always has room for
+// at-least one free block. If no AVAILABLE heap exists, a new heap is created and inserted
+// back into the pool to be immediately used. To deallocate, the block corresponding to the
+// offset is inserted back into the free-list.
+namespace dawn::native::d3d12 {
+
+    class Device;
+
+    class StagingDescriptorAllocator {
+      public:
+        StagingDescriptorAllocator() = default;
+        StagingDescriptorAllocator(Device* device,
+                                   uint32_t descriptorCount,
+                                   uint32_t heapSize,
+                                   D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+        ~StagingDescriptorAllocator();
+
+        ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
+
+        // Will call Deallocate when the serial is passed.
+        ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
+
+        void Deallocate(CPUDescriptorHeapAllocation* allocation);
+
+        uint32_t GetSizeIncrement() const;
+
+        void Tick(ExecutionSerial completedSerial);
+
+      private:
+        using Index = uint16_t;
+
+        struct NonShaderVisibleBuffer {
+            ComPtr<ID3D12DescriptorHeap> heap;
+            std::vector<Index> freeBlockIndices;
+        };
+
+        MaybeError AllocateCPUHeap();
+
+        Index GetFreeBlockIndicesSize() const;
+
+        std::vector<uint32_t> mAvailableHeaps;  // Indices into the pool.
+        std::vector<NonShaderVisibleBuffer> mPool;
+
+        Device* mDevice;
+
+        uint32_t mSizeIncrement;  // Size of the descriptor (in bytes).
+        uint32_t mBlockSize;      // Size of the block of descriptors (in bytes).
+        uint32_t mHeapSize;       // Size of the heap (in number of descriptors).
+
+        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+
+        SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
diff --git a/src/dawn/native/d3d12/SwapChainD3D12.cpp b/src/dawn/native/d3d12/SwapChainD3D12.cpp
new file mode 100644
index 0000000..0c23a01
--- /dev/null
+++ b/src/dawn/native/d3d12/SwapChainD3D12.cpp
@@ -0,0 +1,375 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SwapChainD3D12.h"
+
+#include "dawn/native/Surface.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include <dawn/dawn_wsi.h>
+
+#include <windows.ui.xaml.media.dxinterop.h>
+
+namespace dawn::native::d3d12 {
+    namespace {
+
+        uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
+            switch (mode) {
+                case wgpu::PresentMode::Immediate:
+                case wgpu::PresentMode::Fifo:
+                    return 2;
+                case wgpu::PresentMode::Mailbox:
+                    return 3;
+            }
+        }
+
+        uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
+            switch (mode) {
+                case wgpu::PresentMode::Immediate:
+                case wgpu::PresentMode::Mailbox:
+                    return 0;
+                case wgpu::PresentMode::Fifo:
+                    return 1;
+            }
+        }
+
+        UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
+            UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
+
+            if (mode == wgpu::PresentMode::Immediate) {
+                flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
+            }
+
+            return flags;
+        }
+
+        DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
+            DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
+            if (usage & wgpu::TextureUsage::TextureBinding) {
+                dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
+            }
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
+            }
+            if (usage & wgpu::TextureUsage::RenderAttachment) {
+                dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+            }
+            return dxgiUsage;
+        }
+
+    }  // namespace
+
+    // OldSwapChain
+
+    // static
+    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new OldSwapChain(device, descriptor));
+    }
+
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        DawnWSIContextD3D12 wsiContext = {};
+        wsiContext.device = ToAPI(GetDevice());
+        im.Init(im.userData, &wsiContext);
+
+        ASSERT(im.textureUsage != WGPUTextureUsage_None);
+        mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+    }
+
+    OldSwapChain::~OldSwapChain() = default;
+
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+        if (error) {
+            device->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+
+        ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
+        Ref<Texture> dawnTexture;
+        if (device->ConsumedError(
+                Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
+                &dawnTexture)) {
+            return nullptr;
+        }
+
+        return dawnTexture.Detach();
+    }
+
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+        Device* device = ToBackend(GetDevice());
+
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+        // Perform the necessary transition for the texture to be presented.
+        ToBackend(view->GetTexture())
+            ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
+                                         view->GetSubresourceRange());
+
+        DAWN_TRY(device->ExecutePendingCommandContext());
+
+        return {};
+    }
+
+    // SwapChain
+
+    // static
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    SwapChain::~SwapChain() = default;
+
+    void SwapChain::DestroyImpl() {
+        SwapChainBase::DestroyImpl();
+        DetachFromSurface();
+    }
+
+    // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
+    // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
+    // surface and that we have a chance to reuse it's underlying IDXGISwapChain and "buffers".
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
+
+        // Precompute the configuration parameters we want for the DXGI swapchain.
+        mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
+        mConfig.format = D3D12TextureFormat(GetFormat());
+        mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
+        mConfig.usage = ToDXGIUsage(GetUsage());
+
+        // There is no previous swapchain so we can create one directly and don't have anything else
+        // to do.
+        if (previousSwapChain == nullptr) {
+            return InitializeSwapChainFromScratch();
+        }
+
+        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+        // multiple backends one after the other. It probably needs to block until the backend
+        // and GPU are completely finished with the previous swapchain.
+        DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
+                        "D3D12 SwapChain cannot switch backend types from %s to %s.",
+                        previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
+
+        // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+        SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+        // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
+        // require just losing the reference to the swapchain, but might also need to wait for
+        // all previous operations to complete.
+        DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
+                        "D3D12 SwapChain cannot switch between D3D Devices");
+
+        // The previous swapchain is on the same device so we want to reuse it but it is still not
+        // always possible. Because DXGI requires that a new swapchain be created if the
+        // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
+        bool canReuseSwapChain =
+            ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
+             DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
+
+        // We can't reuse the previous swapchain, so we destroy it and wait for all of its reference
+        // to be forgotten (otherwise DXGI complains that there are outstanding references).
+        if (!canReuseSwapChain) {
+            DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+            return InitializeSwapChainFromScratch();
+        }
+
+        // After all this we know we can reuse the swapchain, see if it is possible to also reuse
+        // the buffers.
+        mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
+
+        bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
+                               GetHeight() == previousSwapChain->GetHeight() &&
+                               GetFormat() == previousSwapChain->GetFormat() &&
+                               GetPresentMode() == previousSwapChain->GetPresentMode();
+        if (canReuseBuffers) {
+            mBuffers = std::move(previousD3D12SwapChain->mBuffers);
+            mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
+            mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
+            return {};
+        }
+
+        // We can't reuse the buffers so we need to resize, IDXGSwapChain->ResizeBuffers requires
+        // that all references to buffers are lost before it is called. Contrary to D3D11, the
+        // application is responsible for keeping references to the buffers until the GPU is done
+        // using them so we have no choice but to synchrounously wait for all operations to complete
+        // on the previous swapchain and then lose references to its buffers.
+        DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+        DAWN_TRY(
+            CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
+                                                       mConfig.format, mConfig.swapChainFlags),
+                         "IDXGISwapChain::ResizeBuffer"));
+        return CollectSwapChainBuffers();
+    }
+
+    MaybeError SwapChain::InitializeSwapChainFromScratch() {
+        ASSERT(mDXGISwapChain == nullptr);
+
+        Device* device = ToBackend(GetDevice());
+
+        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+        swapChainDesc.Width = GetWidth();
+        swapChainDesc.Height = GetHeight();
+        swapChainDesc.Format = mConfig.format;
+        swapChainDesc.Stereo = false;
+        swapChainDesc.SampleDesc.Count = 1;
+        swapChainDesc.SampleDesc.Quality = 0;
+        swapChainDesc.BufferUsage = mConfig.usage;
+        swapChainDesc.BufferCount = mConfig.bufferCount;
+        swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
+        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+        swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+        swapChainDesc.Flags = mConfig.swapChainFlags;
+
+        ComPtr<IDXGIFactory2> factory2 = nullptr;
+        DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
+                              "Getting IDXGIFactory2"));
+
+        ComPtr<IDXGISwapChain1> swapChain1;
+        switch (GetSurface()->GetType()) {
+            case Surface::Type::WindowsHWND: {
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
+                                                     static_cast<HWND>(GetSurface()->GetHWND()),
+                                                     &swapChainDesc, nullptr, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                break;
+            }
+            case Surface::Type::WindowsCoreWindow: {
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
+                                                           GetSurface()->GetCoreWindow(),
+                                                           &swapChainDesc, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                break;
+            }
+            case Surface::Type::WindowsSwapChainPanel: {
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
+                                                            &swapChainDesc, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                ComPtr<ISwapChainPanelNative> swapChainPanelNative;
+                DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
+                                          IID_PPV_ARGS(&swapChainPanelNative)),
+                                      "Getting ISwapChainPanelNative"));
+                DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
+                                      "Setting SwapChain"));
+                break;
+            }
+            default:
+                UNREACHABLE();
+        }
+
+        DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Gettting IDXGISwapChain1"));
+
+        return CollectSwapChainBuffers();
+    }
+
+    MaybeError SwapChain::CollectSwapChainBuffers() {
+        ASSERT(mDXGISwapChain != nullptr);
+        ASSERT(mBuffers.empty());
+
+        mBuffers.resize(mConfig.bufferCount);
+        for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
+            DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
+                                  "Getting IDXGISwapChain buffer"));
+        }
+
+        // Pretend all the buffers were last used at the beginning of time.
+        mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+        return {};
+    }
+
+    MaybeError SwapChain::PresentImpl() {
+        Device* device = ToBackend(GetDevice());
+
+        // Transition the texture to the present state as required by IDXGISwapChain1::Present()
+        // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+        // presentable texture to present at the end of submits that use them.
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+        mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
+                                                mApiTexture->GetAllSubresources());
+        DAWN_TRY(device->ExecutePendingCommandContext());
+
+        // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
+        // message to the application that it could stop rendering.
+        HRESULT presentResult =
+            mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
+        if (presentResult != DXGI_STATUS_OCCLUDED) {
+            DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
+        }
+
+        // Record that "new" is the last time the buffer has been used.
+        DAWN_TRY(device->NextSerial());
+        mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+
+        mApiTexture->APIDestroy();
+        mApiTexture = nullptr;
+
+        return {};
+    }
+
+    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+        Device* device = ToBackend(GetDevice());
+
+        // Synchronously wait until previous operations on the next swapchain buffer are finished.
+        // This is the logic that performs frame pacing.
+        // TODO(crbug.com/dawn/269): Consider whether this should  be lifted for Mailbox so that
+        // there is not frame pacing.
+        mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
+        DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
+
+        // Create the API side objects for this use of the swapchain's buffer.
+        TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
+        DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
+                                                     mBuffers[mCurrentBuffer]));
+        return mApiTexture->CreateView();
+    }
+
+    MaybeError SwapChain::DetachAndWaitForDeallocation() {
+        DetachFromSurface();
+
+        // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
+        // SerialQueue with the current "pending serial" so that we don't destroy the texture
+        // before it is finished being used. Flush the commands and wait for that serial to be
+        // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(device->NextSerial());
+        DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
+        return device->TickImpl();
+    }
+
+    void SwapChain::DetachFromSurfaceImpl() {
+        if (mApiTexture != nullptr) {
+            mApiTexture->APIDestroy();
+            mApiTexture = nullptr;
+        }
+
+        mDXGISwapChain = nullptr;
+        mBuffers.clear();
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SwapChainD3D12.h b/src/dawn/native/d3d12/SwapChainD3D12.h
new file mode 100644
index 0000000..1e8a7d9
--- /dev/null
+++ b/src/dawn/native/d3d12/SwapChainD3D12.h
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
+#define DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class Device;
+    class Texture;
+
+    class OldSwapChain final : public OldSwapChainBase {
+      public:
+        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        ~OldSwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* view) override;
+
+        wgpu::TextureUsage mTextureUsage;
+    };
+
+    class SwapChain final : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+
+      private:
+        ~SwapChain() override;
+
+        void DestroyImpl() override;
+
+        using NewSwapChainBase::NewSwapChainBase;
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+        struct Config {
+            // Information that's passed to the D3D12 swapchain creation call.
+            UINT bufferCount;
+            UINT swapChainFlags;
+            DXGI_FORMAT format;
+            DXGI_USAGE usage;
+        };
+
+        // NewSwapChainBase implementation
+        MaybeError PresentImpl() override;
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+
+        // Does the swapchain initialization steps assuming there is nothing we can reuse.
+        MaybeError InitializeSwapChainFromScratch();
+        // Does the swapchain initialization step of gathering the buffers.
+        MaybeError CollectSwapChainBuffers();
+        // Calls DetachFromSurface but also synchronously waits until all references to the
+        // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
+        MaybeError DetachAndWaitForDeallocation();
+
+        Config mConfig;
+
+        ComPtr<IDXGISwapChain3> mDXGISwapChain;
+        std::vector<ComPtr<ID3D12Resource>> mBuffers;
+        std::vector<ExecutionSerial> mBufferLastUsedSerials;
+        uint32_t mCurrentBuffer = 0;
+
+        Ref<Texture> mApiTexture;
+    };
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SWAPCHAIN_D3D12_H_
diff --git a/src/dawn/native/d3d12/TextureCopySplitter.cpp b/src/dawn/native/d3d12/TextureCopySplitter.cpp
new file mode 100644
index 0000000..83e55fd
--- /dev/null
+++ b/src/dawn/native/d3d12/TextureCopySplitter.cpp
@@ -0,0 +1,539 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+        Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
+                                     uint32_t offset,
+                                     uint32_t bytesPerRow) {
+            ASSERT(bytesPerRow != 0);
+            uint32_t byteOffsetX = offset % bytesPerRow;
+            uint32_t byteOffsetY = offset - byteOffsetX;
+
+            return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
+                    byteOffsetY / bytesPerRow * blockInfo.height, 0};
+        }
+
+        uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
+                                          uint32_t bytesPerRow,
+                                          uint64_t alignedOffset,
+                                          Origin3D bufferOffset) {
+            ASSERT(bufferOffset.z == 0);
+            return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
+                   bufferOffset.y * bytesPerRow / blockInfo.height;
+        }
+
+        uint64_t AlignDownForDataPlacement(uint32_t offset) {
+            return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
+        }
+    }  // namespace
+
+    TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
+        ASSERT(this->count < kMaxTextureCopyRegions);
+        return &this->copies[this->count++];
+    }
+
+    TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+                                                           Extent3D copySize,
+                                                           const TexelBlockInfo& blockInfo,
+                                                           uint64_t offset,
+                                                           uint32_t bytesPerRow) {
+        TextureCopySubresource copy;
+
+        ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+
+        // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
+        // preceding our data.
+        uint64_t alignedOffset = AlignDownForDataPlacement(offset);
+
+        // If the provided offset to the data was already 512-aligned, we can simply copy the data
+        // without further translation.
+        if (offset == alignedOffset) {
+            copy.count = 1;
+
+            copy.copies[0].alignedOffset = alignedOffset;
+            copy.copies[0].textureOffset = origin;
+            copy.copies[0].copySize = copySize;
+            copy.copies[0].bufferOffset = {0, 0, 0};
+            copy.copies[0].bufferSize = copySize;
+
+            return copy;
+        }
+
+        ASSERT(alignedOffset < offset);
+        ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
+
+        // We must reinterpret our aligned offset into X and Y offsets with respect to the row
+        // pitch.
+        //
+        // You can visualize the data in the buffer like this:
+        // |-----------------------++++++++++++++++++++++++++++++++|
+        // ^ 512-aligned address   ^ Aligned offset               ^ End of copy data
+        //
+        // Now when you consider the row pitch, you can visualize the data like this:
+        // |~~~~~~~~~~~~~~~~|
+        // |~~~~~+++++++++++|
+        // |++++++++++++++++|
+        // |+++++~~~~~~~~~~~|
+        // |<---row pitch-->|
+        //
+        // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
+        // |YYYYYYYYYYYYYYYY|
+        // |XXXXXX++++++++++|
+        // |++++++++++++++++|
+        // |++++++~~~~~~~~~~|
+        // |<---row pitch-->|
+        Origin3D texelOffset = ComputeTexelOffsets(
+            blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
+
+        ASSERT(texelOffset.y <= blockInfo.height);
+        ASSERT(texelOffset.z == 0);
+
+        uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
+        uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
+        if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
+            // The region's rows fit inside the bytes per row. In this case, extend the width of the
+            // PlacedFootprint and copy the buffer with an offset location
+            //  |<------------- bytes per row ------------->|
+            //
+            //  |-------------------------------------------|
+            //  |                                           |
+            //  |                 +++++++++++++++++~~~~~~~~~|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++         |
+            //  |-------------------------------------------|
+
+            // Copy 0:
+            //  |----------------------------------|
+            //  |                                  |
+            //  |                 +++++++++++++++++|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+            //  |----------------------------------|
+
+            copy.count = 1;
+
+            copy.copies[0].alignedOffset = alignedOffset;
+            copy.copies[0].textureOffset = origin;
+            copy.copies[0].copySize = copySize;
+            copy.copies[0].bufferOffset = texelOffset;
+
+            copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
+            copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
+            copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+            return copy;
+        }
+
+        // The region's rows straddle the bytes per row. Split the copy into two copies
+        //  |<------------- bytes per row ------------->|
+        //
+        //  |-------------------------------------------|
+        //  |                                           |
+        //  |                                   ++++++++|
+        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |+++++++++                                  |
+        //  |-------------------------------------------|
+
+        //  Copy 0:
+        //  |-------------------------------------------|
+        //  |                                           |
+        //  |                                   ++++++++|
+        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+        //  |-------------------------------------------|
+
+        //  Copy 1:
+        //  |---------|
+        //  |         |
+        //  |         |
+        //  |+++++++++|
+        //  |+++++++++|
+        //  |+++++++++|
+        //  |+++++++++|
+        //  |+++++++++|
+        //  |---------|
+
+        copy.count = 2;
+
+        copy.copies[0].alignedOffset = alignedOffset;
+        copy.copies[0].textureOffset = origin;
+
+        ASSERT(bytesPerRow > byteOffsetInRowPitch);
+        uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
+        copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
+        copy.copies[0].copySize.height = copySize.height;
+        copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+        copy.copies[0].bufferOffset = texelOffset;
+        copy.copies[0].bufferSize.width = texelsPerRow;
+        copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
+        copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+        uint64_t offsetForCopy1 =
+            offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
+        uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
+        Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
+            blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
+
+        ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
+        ASSERT(texelOffsetForCopy1.z == 0);
+
+        copy.copies[1].alignedOffset = alignedOffsetForCopy1;
+        copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
+        copy.copies[1].textureOffset.y = origin.y;
+        copy.copies[1].textureOffset.z = origin.z;
+
+        ASSERT(copySize.width > copy.copies[0].copySize.width);
+        copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
+        copy.copies[1].copySize.height = copySize.height;
+        copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+        copy.copies[1].bufferOffset = texelOffsetForCopy1;
+        copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
+        copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
+        copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+        return copy;
+    }
+
+    TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+                                                 Extent3D copySize,
+                                                 const TexelBlockInfo& blockInfo,
+                                                 uint64_t offset,
+                                                 uint32_t bytesPerRow,
+                                                 uint32_t rowsPerImage) {
+        TextureCopySplits copies;
+
+        const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+        // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
+        // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+        // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
+        // Each layer of a 2D array might need to be split, but because of the WebGPU
+        // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
+        // will be at an offset multiple of 512 of each other, which means they will all result in
+        // the same 2D split. Thus we can just compute the copy splits for the first and second
+        // layers, and reuse them for the remaining layers by adding the related offset of each
+        // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
+        // share the same copy split, so in this situation we just need to compute copy split once
+        // and reuse it for all the layers.
+        Extent3D copyOneLayerSize = copySize;
+        Origin3D copyFirstLayerOrigin = origin;
+        copyOneLayerSize.depthOrArrayLayers = 1;
+        copyFirstLayerOrigin.z = 0;
+
+        copies.copySubresources[0] = Compute2DTextureCopySubresource(
+            copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
+
+        // When the copy only refers one texture 2D array layer,
+        // copies.copySubresources[1] will never be used so we can safely early return here.
+        if (copySize.depthOrArrayLayers == 1) {
+            return copies;
+        }
+
+        if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
+            copies.copySubresources[1] = copies.copySubresources[0];
+            copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
+            copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
+        } else {
+            const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
+            copies.copySubresources[1] =
+                Compute2DTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
+                                                bufferOffsetNextLayer, bytesPerRow);
+        }
+
+        return copies;
+    }
+
+    void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
+        Origin3D origin,
+        Extent3D copySize,
+        const TexelBlockInfo& blockInfo,
+        uint32_t bytesPerRow,
+        uint32_t rowsPerImage,
+        TextureCopySubresource& copy,
+        uint32_t i) {
+        // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
+        // is incorrect if there is an empty row at the beginning of the copy block.
+        // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
+        // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
+        // as below:
+        //
+        //               |<----- bytes per row ------>|
+        //
+        //               |----------------------------|
+        //  row (N - 1)  |                            |
+        //  row N        |                 ++~~~~~~~~~|
+        //  row (N + 1)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 2)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 3)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 4)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 5)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 6)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 7)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 8)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 9)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+        //  row (N + 11) |~~~~~~~~~~~~~~~~~++         |
+        //               |----------------------------|
+
+        // The copy we mean to do is the following:
+        //
+        //   - image 0: row N to row (N + 3),
+        //   - image 1: row (N + 4) to row (N + 7),
+        //   - image 2: row (N + 8) to row (N + 11).
+        //
+        // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
+        // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
+        //
+        // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
+        // the following:
+        //
+        //              |-------------------|
+        //  row (N - 1) |                   |
+        //  row N       |                 ++|
+        //  row (N + 1) |~~~~~~~~~~~~~~~~~++|
+        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|
+        //  row (N + 3) |~~~~~~~~~~~~~~~~~++|
+        //              |-------------------|
+        //
+        // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
+        // texture, we will copy 5 rows every time, and every first row of each slice will be
+        // skipped. As a result, the copied data will be:
+        //
+        //   - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
+        //   - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
+        //
+        // Likewise, all other image followed will be incorrect because we wrongly keep skipping
+        // one row for each depth slice.
+        //
+        // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
+        // expand to all depth slices in the first copy. 3 rows + one skipped rows = 4 rows, which
+        // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
+        // block of the last row of the last image may out-of-bound (see the details below), so
+        // we need an extra copy for the very last row.
+
+        // Copy 0: copy 3 rows, not 4 rows.
+        //                _____________________
+        //               /                    /|
+        //              /                    / |
+        //              |-------------------|  |
+        //  row (N - 1) |                   |  |
+        //  row N       |                 ++|  |
+        //  row (N + 1) |~~~~~~~~~~~~~~~~~++| /
+        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|/
+        //              |-------------------|
+
+        // Copy 1: move down two rows and copy the last row on image 0, and expand to
+        // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
+        // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
+        // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
+        //                _____________________
+        //               /                    /|
+        //              /                    / |
+        //              |-------------------|  |
+        //  row (N + 1) |                   |  |
+        //  row (N + 2) |                   |  |
+        //  row (N + 3) |                 ++| /
+        //  row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
+        //              |-------------------|
+        //
+        //  copy 2: copy the last row of the last image.
+        //              |-------------------|
+        //  row (N + 11)|                 ++|
+        //              |-------------------|
+
+        // Copy 0: copy copySize.height - 1 rows
+        TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+        copy0.copySize.height = copySize.height - blockInfo.height;
+        copy0.bufferSize.height = rowsPerImage * blockInfo.height;  // rowsPerImageInTexels
+
+        // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
+        // but the last one.
+        TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+        *copy1 = copy0;
+        copy1->alignedOffset += 2 * bytesPerRow;
+        copy1->textureOffset.y += copySize.height - blockInfo.height;
+        // Offset two rows from the copy height for the bufferOffset (See the figure above):
+        //   - one for the row we advanced in the buffer: row (N + 4).
+        //   - one for the last row we want to copy: row (N + 3) itself.
+        copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
+        copy1->copySize.height = blockInfo.height;
+        copy1->copySize.depthOrArrayLayers--;
+        copy1->bufferSize.depthOrArrayLayers--;
+
+        // Copy 2: copy the last row of the last image.
+        uint64_t offsetForCopy0 = OffsetToFirstCopiedTexel(blockInfo, bytesPerRow,
+                                                           copy0.alignedOffset, copy0.bufferOffset);
+        uint64_t offsetForLastRowOfLastImage =
+            offsetForCopy0 + bytesPerRow * (copy0.copySize.height +
+                                            rowsPerImage * (copySize.depthOrArrayLayers - 1));
+        uint64_t alignedOffsetForLastRowOfLastImage =
+            AlignDownForDataPlacement(offsetForLastRowOfLastImage);
+        Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
+            blockInfo,
+            static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
+            bytesPerRow);
+
+        TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
+        copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
+        copy2->textureOffset = copy1->textureOffset;
+        copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
+        copy2->copySize = copy1->copySize;
+        copy2->copySize.depthOrArrayLayers = 1;
+        copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
+        copy2->bufferSize.width = copy1->bufferSize.width;
+        ASSERT(copy2->copySize.height == 1);
+        copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
+        copy2->bufferSize.depthOrArrayLayers = 1;
+    }
+
+    void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
+                                                                       uint32_t bytesPerRow,
+                                                                       TextureCopySubresource& copy,
+                                                                       uint32_t i) {
+        // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
+        // the reason why it is incorrect if we simply extend the copy region to all depth slices
+        // when there is an empty first row at the copy region.
+        //
+        // If the copy height is odd, we can use two copies to make it correct:
+        //   - copy 0: only copy the first depth slice. Keep other arguments the same.
+        //   - copy 1: copy all rest depth slices because it will start without an empty row if
+        //     copy height is odd. Odd height + one (empty row) is even. An even row number times
+        //     bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+
+        // Copy 0: copy the first depth slice (image 0)
+        TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+        copy0.copySize.depthOrArrayLayers = 1;
+        copy0.bufferSize.depthOrArrayLayers = 1;
+
+        // Copy 1: copy the rest depth slices in one shot
+        TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+        *copy1 = copy0;
+        ASSERT(copySize.height % 2 == 1);
+        copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
+        ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
+        // textureOffset.z should add one because the first slice has already been copied in copy0.
+        copy1->textureOffset.z++;
+        // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
+        // row in this copy region.
+        copy1->bufferOffset.y = 0;
+        copy1->copySize.height = copySize.height;
+        copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+        copy1->bufferSize.height = copySize.height;
+        copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+    }
+
+    TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+                                                      Extent3D copySize,
+                                                      const TexelBlockInfo& blockInfo,
+                                                      uint64_t offset,
+                                                      uint32_t bytesPerRow,
+                                                      uint32_t rowsPerImage) {
+        // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
+        // and get copy region(s) for the first slice of the copy, then extend to all depth slices
+        // and become a 3D copy. However, this doesn't work as easily as that due to some corner
+        // cases.
+        //
+        // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
+        // region and we simply extend the 2D copy region to all copied depth slices, copied data
+        // will be incorrectly offset for each depth slice except the first one.
+        //
+        // For these special cases, we need to recompute the copy regions for 3D textures via
+        // split the incorrect copy region to a couple more copy regions.
+
+        // Call Compute2DTextureCopySubresource and get copy regions. This function has already
+        // forwarded "copySize.depthOrArrayLayers" to all depth slices.
+        TextureCopySubresource copySubresource =
+            Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
+
+        ASSERT(copySubresource.count <= 2);
+        // If copySize.depth is 1, we can return copySubresource. Because we don't need to extend
+        // the copy region(s) to other depth slice(s).
+        if (copySize.depthOrArrayLayers == 1) {
+            return copySubresource;
+        }
+
+        uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
+        // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
+        // However, we may append a couple more copy regions in the for loop below. We don't need
+        // to revise these new added copy regions.
+        uint32_t originalCopyCount = copySubresource.count;
+        for (uint32_t i = 0; i < originalCopyCount; ++i) {
+            // There can be one empty row at most in a copy region.
+            ASSERT(copySubresource.copies[i].bufferSize.height <=
+                   rowsPerImageInTexels + blockInfo.height);
+            Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
+
+            if (bufferSize.height == rowsPerImageInTexels) {
+                // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
+                // this copy region without any modification.
+                continue;
+            }
+
+            if (bufferSize.height < rowsPerImageInTexels) {
+                // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
+                // for each slice even though we only copy partial rows in each slice sometimes.
+                bufferSize.height = rowsPerImageInTexels;
+            } else {
+                // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
+                // region due to alignment adjustment.
+
+                // bytesPerRow is definitely 256, and it is definitely a full copy on height.
+                // Otherwise, bufferSize.height wount be greater than rowsPerImageInTexels and
+                // there won't be an empty row at the beginning of this copy region.
+                ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
+                ASSERT(copySize.height == rowsPerImageInTexels);
+
+                if (copySize.height % 2 == 0) {
+                    // If copySize.height is even and there is an empty row at the beginning of the
+                    // first slice of the copy region, the offset of all depth slices will never be
+                    // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
+                    // an empty row at each depth slice. We need a totally different approach to
+                    // split the copy region.
+                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
+                        origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
+                } else {
+                    // If copySize.height is odd and there is an empty row at the beginning of the
+                    // first slice of the copy region, we can split the copy region into two copies:
+                    // copy0 to copy the first slice, copy1 to copy the rest slices because the
+                    // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+                    // without an empty row. This is an easier case relative to cases with even copy
+                    // height.
+                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(
+                        copySize, bytesPerRow, copySubresource, i);
+                }
+            }
+        }
+
+        return copySubresource;
+    }
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/TextureCopySplitter.h b/src/dawn/native/d3d12/TextureCopySplitter.h
new file mode 100644
index 0000000..d549b90
--- /dev/null
+++ b/src/dawn/native/d3d12/TextureCopySplitter.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
+#define DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    struct TexelBlockInfo;
+
+}  // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
+    struct TextureCopySubresource {
+        static constexpr unsigned int kMaxTextureCopyRegions = 4;
+
+        struct CopyInfo {
+            uint64_t alignedOffset = 0;
+            Origin3D textureOffset;
+            Origin3D bufferOffset;
+            Extent3D bufferSize;
+
+            Extent3D copySize;
+        };
+
+        CopyInfo* AddCopy();
+
+        uint32_t count = 0;
+        std::array<CopyInfo, kMaxTextureCopyRegions> copies;
+    };
+
+    struct TextureCopySplits {
+        static constexpr uint32_t kMaxTextureCopySubresources = 2;
+
+        std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
+    };
+
+    // This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
+    // 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
+    // details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
+    // and 3D textures based on this function.
+    // The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
+    // regions defines by the arguments of TextureCopySubresource returned by this function and its
+    // counterparts. These arguments should strictly conform to particular invariants. Otherwise,
+    // D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
+    // invariants are listed below. For more details
+    // of these invariants, see src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
+    //   - Inside each copy region: 1) its buffer offset plus copy size should be less than its
+    //     buffer size, 2) its buffer offset on y-axis should be less than copy format's
+    //     blockInfo.height, 3) its buffer offset on z-axis should be 0.
+    //   - Each copy region has an offset (aka alignedOffset) aligned to
+    //     D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
+    //   - The buffer footprint of each copy region should be entirely within the copied buffer,
+    //     which means that the last "texel" of the buffer footprint doesn't go past the end of
+    //     the buffer even though the last "texel" might not be copied.
+    //   - If there are multiple copy regions, each copy region should not overlap with the others.
+    //   - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
+    //   - Every pixel accessed by every copy region should not be out of the bound of the copied
+    //     texture and buffer.
+    TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+                                                           Extent3D copySize,
+                                                           const TexelBlockInfo& blockInfo,
+                                                           uint64_t offset,
+                                                           uint32_t bytesPerRow);
+
+    TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+                                                 Extent3D copySize,
+                                                 const TexelBlockInfo& blockInfo,
+                                                 uint64_t offset,
+                                                 uint32_t bytesPerRow,
+                                                 uint32_t rowsPerImage);
+
+    TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+                                                      Extent3D copySize,
+                                                      const TexelBlockInfo& blockInfo,
+                                                      uint64_t offset,
+                                                      uint32_t bytesPerRow,
+                                                      uint32_t rowsPerImage);
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
diff --git a/src/dawn/native/d3d12/TextureD3D12.cpp b/src/dawn/native/d3d12/TextureD3D12.cpp
new file mode 100644
index 0000000..5e79417
--- /dev/null
+++ b/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -0,0 +1,1388 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    namespace {
+
+        D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
+            D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+
+            if (usage & kPresentTextureUsage) {
+                // The present usage is only used internally by the swapchain and is never used in
+                // combination with other usages.
+                ASSERT(usage == kPresentTextureUsage);
+                return D3D12_RESOURCE_STATE_PRESENT;
+            }
+
+            if (usage & wgpu::TextureUsage::CopySrc) {
+                resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+            }
+            if (usage & wgpu::TextureUsage::CopyDst) {
+                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+            }
+            if (usage & (wgpu::TextureUsage::TextureBinding)) {
+                resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+                                  D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+            }
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+            }
+            if (usage & wgpu::TextureUsage::RenderAttachment) {
+                if (format.HasDepthOrStencil()) {
+                    resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
+                } else {
+                    resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
+                }
+            }
+
+            if (usage & kReadOnlyRenderAttachment) {
+                // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
+                resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
+            }
+
+            return resourceState;
+        }
+
+        D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
+                                                const Format& format,
+                                                bool isMultisampledTexture) {
+            D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+            }
+
+            // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
+            // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
+            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
+            if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
+                if (format.HasDepthOrStencil()) {
+                    flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+                } else {
+                    flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
+                }
+            }
+
+            ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+                   flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
+            return flags;
+        }
+
+        D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
+            switch (dimension) {
+                case wgpu::TextureDimension::e1D:
+                    return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
+                case wgpu::TextureDimension::e2D:
+                    return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+                case wgpu::TextureDimension::e3D:
+                    return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
+            }
+        }
+
+        DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
+            switch (format) {
+                case wgpu::TextureFormat::R8Unorm:
+                case wgpu::TextureFormat::R8Snorm:
+                case wgpu::TextureFormat::R8Uint:
+                case wgpu::TextureFormat::R8Sint:
+                    return DXGI_FORMAT_R8_TYPELESS;
+
+                case wgpu::TextureFormat::R16Uint:
+                case wgpu::TextureFormat::R16Sint:
+                case wgpu::TextureFormat::R16Float:
+                case wgpu::TextureFormat::Depth16Unorm:
+                    return DXGI_FORMAT_R16_TYPELESS;
+
+                case wgpu::TextureFormat::RG8Unorm:
+                case wgpu::TextureFormat::RG8Snorm:
+                case wgpu::TextureFormat::RG8Uint:
+                case wgpu::TextureFormat::RG8Sint:
+                    return DXGI_FORMAT_R8G8_TYPELESS;
+
+                case wgpu::TextureFormat::R32Uint:
+                case wgpu::TextureFormat::R32Sint:
+                case wgpu::TextureFormat::R32Float:
+                    return DXGI_FORMAT_R32_TYPELESS;
+
+                case wgpu::TextureFormat::RG16Uint:
+                case wgpu::TextureFormat::RG16Sint:
+                case wgpu::TextureFormat::RG16Float:
+                    return DXGI_FORMAT_R16G16_TYPELESS;
+
+                case wgpu::TextureFormat::RGBA8Unorm:
+                case wgpu::TextureFormat::RGBA8UnormSrgb:
+                case wgpu::TextureFormat::RGBA8Snorm:
+                case wgpu::TextureFormat::RGBA8Uint:
+                case wgpu::TextureFormat::RGBA8Sint:
+                    return DXGI_FORMAT_R8G8B8A8_TYPELESS;
+
+                case wgpu::TextureFormat::BGRA8Unorm:
+                case wgpu::TextureFormat::BGRA8UnormSrgb:
+                    return DXGI_FORMAT_B8G8R8A8_TYPELESS;
+
+                case wgpu::TextureFormat::RGB10A2Unorm:
+                    return DXGI_FORMAT_R10G10B10A2_TYPELESS;
+
+                case wgpu::TextureFormat::RG11B10Ufloat:
+                    return DXGI_FORMAT_R11G11B10_FLOAT;
+                case wgpu::TextureFormat::RGB9E5Ufloat:
+                    return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+                case wgpu::TextureFormat::RG32Uint:
+                case wgpu::TextureFormat::RG32Sint:
+                case wgpu::TextureFormat::RG32Float:
+                    return DXGI_FORMAT_R32G32_TYPELESS;
+
+                case wgpu::TextureFormat::RGBA16Uint:
+                case wgpu::TextureFormat::RGBA16Sint:
+                case wgpu::TextureFormat::RGBA16Float:
+                    return DXGI_FORMAT_R16G16B16A16_TYPELESS;
+
+                case wgpu::TextureFormat::RGBA32Uint:
+                case wgpu::TextureFormat::RGBA32Sint:
+                case wgpu::TextureFormat::RGBA32Float:
+                    return DXGI_FORMAT_R32G32B32A32_TYPELESS;
+
+                case wgpu::TextureFormat::Depth32Float:
+                case wgpu::TextureFormat::Depth24Plus:
+                    return DXGI_FORMAT_R32_TYPELESS;
+
+                // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+                case wgpu::TextureFormat::Stencil8:
+                case wgpu::TextureFormat::Depth24UnormStencil8:
+                    return DXGI_FORMAT_R24G8_TYPELESS;
+                case wgpu::TextureFormat::Depth24PlusStencil8:
+                case wgpu::TextureFormat::Depth32FloatStencil8:
+                    return DXGI_FORMAT_R32G8X24_TYPELESS;
+
+                case wgpu::TextureFormat::BC1RGBAUnorm:
+                case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                    return DXGI_FORMAT_BC1_TYPELESS;
+
+                case wgpu::TextureFormat::BC2RGBAUnorm:
+                case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                    return DXGI_FORMAT_BC2_TYPELESS;
+
+                case wgpu::TextureFormat::BC3RGBAUnorm:
+                case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                    return DXGI_FORMAT_BC3_TYPELESS;
+
+                case wgpu::TextureFormat::BC4RSnorm:
+                case wgpu::TextureFormat::BC4RUnorm:
+                    return DXGI_FORMAT_BC4_TYPELESS;
+
+                case wgpu::TextureFormat::BC5RGSnorm:
+                case wgpu::TextureFormat::BC5RGUnorm:
+                    return DXGI_FORMAT_BC5_TYPELESS;
+
+                case wgpu::TextureFormat::BC6HRGBFloat:
+                case wgpu::TextureFormat::BC6HRGBUfloat:
+                    return DXGI_FORMAT_BC6H_TYPELESS;
+
+                case wgpu::TextureFormat::BC7RGBAUnorm:
+                case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                    return DXGI_FORMAT_BC7_TYPELESS;
+
+                case wgpu::TextureFormat::ETC2RGB8Unorm:
+                case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+                case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+                case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                case wgpu::TextureFormat::ETC2RGBA8Unorm:
+                case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+                case wgpu::TextureFormat::EACR11Unorm:
+                case wgpu::TextureFormat::EACR11Snorm:
+                case wgpu::TextureFormat::EACRG11Unorm:
+                case wgpu::TextureFormat::EACRG11Snorm:
+
+                case wgpu::TextureFormat::ASTC4x4Unorm:
+                case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                case wgpu::TextureFormat::ASTC5x4Unorm:
+                case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+                case wgpu::TextureFormat::ASTC5x5Unorm:
+                case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                case wgpu::TextureFormat::ASTC6x5Unorm:
+                case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+                case wgpu::TextureFormat::ASTC6x6Unorm:
+                case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                case wgpu::TextureFormat::ASTC8x5Unorm:
+                case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+                case wgpu::TextureFormat::ASTC8x6Unorm:
+                case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+                case wgpu::TextureFormat::ASTC8x8Unorm:
+                case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                case wgpu::TextureFormat::ASTC10x5Unorm:
+                case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+                case wgpu::TextureFormat::ASTC10x6Unorm:
+                case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+                case wgpu::TextureFormat::ASTC10x8Unorm:
+                case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+                case wgpu::TextureFormat::ASTC10x10Unorm:
+                case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                case wgpu::TextureFormat::ASTC12x10Unorm:
+                case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+                case wgpu::TextureFormat::ASTC12x12Unorm:
+                case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+                case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                case wgpu::TextureFormat::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+    }  // namespace
+
+    DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8Unorm:
+                return DXGI_FORMAT_R8_UNORM;
+            case wgpu::TextureFormat::R8Snorm:
+                return DXGI_FORMAT_R8_SNORM;
+            case wgpu::TextureFormat::R8Uint:
+                return DXGI_FORMAT_R8_UINT;
+            case wgpu::TextureFormat::R8Sint:
+                return DXGI_FORMAT_R8_SINT;
+
+            case wgpu::TextureFormat::R16Uint:
+                return DXGI_FORMAT_R16_UINT;
+            case wgpu::TextureFormat::R16Sint:
+                return DXGI_FORMAT_R16_SINT;
+            case wgpu::TextureFormat::R16Float:
+                return DXGI_FORMAT_R16_FLOAT;
+            case wgpu::TextureFormat::RG8Unorm:
+                return DXGI_FORMAT_R8G8_UNORM;
+            case wgpu::TextureFormat::RG8Snorm:
+                return DXGI_FORMAT_R8G8_SNORM;
+            case wgpu::TextureFormat::RG8Uint:
+                return DXGI_FORMAT_R8G8_UINT;
+            case wgpu::TextureFormat::RG8Sint:
+                return DXGI_FORMAT_R8G8_SINT;
+
+            case wgpu::TextureFormat::R32Uint:
+                return DXGI_FORMAT_R32_UINT;
+            case wgpu::TextureFormat::R32Sint:
+                return DXGI_FORMAT_R32_SINT;
+            case wgpu::TextureFormat::R32Float:
+                return DXGI_FORMAT_R32_FLOAT;
+            case wgpu::TextureFormat::RG16Uint:
+                return DXGI_FORMAT_R16G16_UINT;
+            case wgpu::TextureFormat::RG16Sint:
+                return DXGI_FORMAT_R16G16_SINT;
+            case wgpu::TextureFormat::RG16Float:
+                return DXGI_FORMAT_R16G16_FLOAT;
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return DXGI_FORMAT_R8G8B8A8_UNORM;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return DXGI_FORMAT_R8G8B8A8_SNORM;
+            case wgpu::TextureFormat::RGBA8Uint:
+                return DXGI_FORMAT_R8G8B8A8_UINT;
+            case wgpu::TextureFormat::RGBA8Sint:
+                return DXGI_FORMAT_R8G8B8A8_SINT;
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return DXGI_FORMAT_B8G8R8A8_UNORM;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return DXGI_FORMAT_R10G10B10A2_UNORM;
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return DXGI_FORMAT_R11G11B10_FLOAT;
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+                return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+            case wgpu::TextureFormat::RG32Uint:
+                return DXGI_FORMAT_R32G32_UINT;
+            case wgpu::TextureFormat::RG32Sint:
+                return DXGI_FORMAT_R32G32_SINT;
+            case wgpu::TextureFormat::RG32Float:
+                return DXGI_FORMAT_R32G32_FLOAT;
+            case wgpu::TextureFormat::RGBA16Uint:
+                return DXGI_FORMAT_R16G16B16A16_UINT;
+            case wgpu::TextureFormat::RGBA16Sint:
+                return DXGI_FORMAT_R16G16B16A16_SINT;
+            case wgpu::TextureFormat::RGBA16Float:
+                return DXGI_FORMAT_R16G16B16A16_FLOAT;
+
+            case wgpu::TextureFormat::RGBA32Uint:
+                return DXGI_FORMAT_R32G32B32A32_UINT;
+            case wgpu::TextureFormat::RGBA32Sint:
+                return DXGI_FORMAT_R32G32B32A32_SINT;
+            case wgpu::TextureFormat::RGBA32Float:
+                return DXGI_FORMAT_R32G32B32A32_FLOAT;
+
+            case wgpu::TextureFormat::Depth16Unorm:
+                return DXGI_FORMAT_D16_UNORM;
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth24Plus:
+                return DXGI_FORMAT_D32_FLOAT;
+            // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+            case wgpu::TextureFormat::Stencil8:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                return DXGI_FORMAT_D24_UNORM_S8_UINT;
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+                return DXGI_FORMAT_BC1_UNORM;
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return DXGI_FORMAT_BC1_UNORM_SRGB;
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+                return DXGI_FORMAT_BC2_UNORM;
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return DXGI_FORMAT_BC2_UNORM_SRGB;
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+                return DXGI_FORMAT_BC3_UNORM;
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return DXGI_FORMAT_BC3_UNORM_SRGB;
+            case wgpu::TextureFormat::BC4RSnorm:
+                return DXGI_FORMAT_BC4_SNORM;
+            case wgpu::TextureFormat::BC4RUnorm:
+                return DXGI_FORMAT_BC4_UNORM;
+            case wgpu::TextureFormat::BC5RGSnorm:
+                return DXGI_FORMAT_BC5_SNORM;
+            case wgpu::TextureFormat::BC5RGUnorm:
+                return DXGI_FORMAT_BC5_UNORM;
+            case wgpu::TextureFormat::BC6HRGBFloat:
+                return DXGI_FORMAT_BC6H_SF16;
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return DXGI_FORMAT_BC6H_UF16;
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return DXGI_FORMAT_BC7_UNORM;
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return DXGI_FORMAT_BC7_UNORM_SRGB;
+
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return DXGI_FORMAT_NV12;
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+            case wgpu::TextureFormat::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "Texture dimension (%s) is not %s.", descriptor->dimension,
+                        wgpu::TextureDimension::e2D);
+
+        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                        descriptor->sampleCount);
+
+        return {};
+    }
+
+    MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+                                                const TextureDescriptor* dawnDescriptor) {
+        const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
+        DAWN_INVALID_IF(
+            (dawnDescriptor->size.width != d3dDescriptor.Width) ||
+                (dawnDescriptor->size.height != d3dDescriptor.Height) ||
+                (dawnDescriptor->size.depthOrArrayLayers != 1),
+            "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
+            "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
+            d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
+            dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
+
+        const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
+        DAWN_INVALID_IF(
+            dxgiFormatFromDescriptor != d3dDescriptor.Format,
+            "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
+            d3dDescriptor.Format, dawnDescriptor->format);
+
+        DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
+                        "D3D12 texture number of miplevels (%u) is not 1.",
+                        d3dDescriptor.MipLevels);
+
+        DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1,
+                        "D3D12 texture array size (%u) is not 1.", d3dDescriptor.DepthOrArraySize);
+
+        // Shared textures cannot be multi-sample so no need to check those.
+        ASSERT(d3dDescriptor.SampleDesc.Count == 1);
+        ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
+
+        return {};
+    }
+
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
+    MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
+        const bool supportsSharedResourceCapabilityTier1 =
+            device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
+        switch (textureFormat) {
+            // MSDN docs are not correct, NV12 requires at-least tier 1.
+            case DXGI_FORMAT_NV12:
+                if (supportsSharedResourceCapabilityTier1) {
+                    return {};
+                }
+                break;
+            default:
+                break;
+        }
+
+        return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                                const TextureDescriptor* descriptor) {
+        Ref<Texture> dawnTexture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+
+        DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
+                        "Cannot create a multi-planar formatted texture directly");
+
+        DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
+        return std::move(dawnTexture);
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
+        Device* device,
+        const TextureDescriptor* descriptor,
+        ComPtr<ID3D12Resource> d3d12Texture,
+        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+        bool isSwapChainTexture,
+        bool isInitialized) {
+        Ref<Texture> dawnTexture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+        DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
+            descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
+
+        // Importing a multi-planar format must be initialized. This is required because
+        // a shared multi-planar format cannot be initialized by Dawn.
+        DAWN_INVALID_IF(
+            !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
+            "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
+            dawnTexture->GetFormat().format);
+
+        dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
+                                                        dawnTexture->GetAllSubresources());
+        return std::move(dawnTexture);
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                                const TextureDescriptor* descriptor,
+                                                ComPtr<ID3D12Resource> d3d12Texture) {
+        Ref<Texture> dawnTexture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+        DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
+        return std::move(dawnTexture);
+    }
+
+    MaybeError Texture::InitializeAsExternalTexture(
+        const TextureDescriptor* descriptor,
+        ComPtr<ID3D12Resource> d3d12Texture,
+        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+        bool isSwapChainTexture) {
+        mD3D11on12Resource = std::move(d3d11on12Resource);
+        mSwapChainTexture = isSwapChainTexture;
+
+        D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
+        mD3D12ResourceFlags = desc.Flags;
+
+        AllocationInfo info;
+        info.mMethod = AllocationMethod::kExternal;
+        // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+        // texture is owned externally. The texture's owning entity must remain responsible for
+        // memory management.
+        mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+        SetLabelHelper("Dawn_ExternalTexture");
+
+        return {};
+    }
+
+    MaybeError Texture::InitializeAsInternalTexture() {
+        D3D12_RESOURCE_DESC resourceDescriptor;
+        resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
+        resourceDescriptor.Alignment = 0;
+
+        const Extent3D& size = GetSize();
+        resourceDescriptor.Width = size.width;
+        resourceDescriptor.Height = size.height;
+        resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
+
+        // This will need to be much more nuanced when WebGPU has
+        // texture view compatibility rules.
+        const bool needsTypelessFormat =
+            GetFormat().HasDepthOrStencil() &&
+            (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
+
+        DXGI_FORMAT dxgiFormat = needsTypelessFormat
+                                     ? D3D12TypelessTextureFormat(GetFormat().format)
+                                     : D3D12TextureFormat(GetFormat().format);
+
+        resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
+        resourceDescriptor.Format = dxgiFormat;
+        resourceDescriptor.SampleDesc.Count = GetSampleCount();
+        resourceDescriptor.SampleDesc.Quality = 0;
+        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
+        resourceDescriptor.Flags =
+            D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
+        mD3D12ResourceFlags = resourceDescriptor.Flags;
+
+        DAWN_TRY_ASSIGN(mResourceAllocation,
+                        ToBackend(GetDevice())
+                            ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
+                                             D3D12_RESOURCE_STATE_COMMON));
+
+        SetLabelImpl();
+
+        Device* device = ToBackend(GetDevice());
+
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            CommandRecordingContext* commandContext;
+            DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+            DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
+                                  TextureBase::ClearValue::NonZero));
+        }
+
+        return {};
+    }
+
+    MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
+        AllocationInfo info;
+        info.mMethod = AllocationMethod::kExternal;
+        // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+        // texture is owned externally. The texture's owning entity must remain responsible for
+        // memory management.
+        mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+        SetLabelHelper("Dawn_SwapChainTexture");
+
+        return {};
+    }
+
+    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+        : TextureBase(device, descriptor, state),
+          mSubresourceStateAndDecay(
+              GetFormat().aspects,
+              GetArrayLayers(),
+              GetNumMipLevels(),
+              {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {
+    }
+
+    Texture::~Texture() {
+    }
+
+    void Texture::DestroyImpl() {
+        TextureBase::DestroyImpl();
+
+        Device* device = ToBackend(GetDevice());
+
+        // In PIX's D3D12-only mode, there is no way to determine frame boundaries
+        // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
+        // PIX will wait forever for a present that never happens.
+        // If we know we're dealing with a swapbuffer texture, inform PIX we've
+        // "presented" the texture so it can determine frame boundaries and use its
+        // contents for the UI.
+        if (mSwapChainTexture) {
+            ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
+            if (d3dSharingContract != nullptr) {
+                d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
+            }
+        }
+
+        device->DeallocateMemory(mResourceAllocation);
+
+        // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
+        // We can set mSwapChainTexture to false to avoid passing a nullptr to
+        // ID3D12SharingContract::Present.
+        mSwapChainTexture = false;
+
+        // Now that the texture has been destroyed. It should release the refptr
+        // of the d3d11on12 resource.
+        mD3D11on12Resource = nullptr;
+    }
+
+    DXGI_FORMAT Texture::GetD3D12Format() const {
+        return D3D12TextureFormat(GetFormat().format);
+    }
+
+    ID3D12Resource* Texture::GetD3D12Resource() const {
+        return mResourceAllocation.GetD3D12Resource();
+    }
+
+    DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
+        ASSERT(GetFormat().aspects & aspect);
+
+        switch (GetFormat().format) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+            case wgpu::TextureFormat::Stencil8:
+                switch (aspect) {
+                    case Aspect::Depth:
+                        return DXGI_FORMAT_R32_FLOAT;
+                    case Aspect::Stencil:
+                        return DXGI_FORMAT_R8_UINT;
+                    default:
+                        UNREACHABLE();
+                }
+            default:
+                ASSERT(HasOneBit(GetFormat().aspects));
+                return GetD3D12Format();
+        }
+    }
+
+    MaybeError Texture::AcquireKeyedMutex() {
+        ASSERT(mD3D11on12Resource != nullptr);
+        return mD3D11on12Resource->AcquireKeyedMutex();
+    }
+
+    void Texture::ReleaseKeyedMutex() {
+        ASSERT(mD3D11on12Resource != nullptr);
+        mD3D11on12Resource->ReleaseKeyedMutex();
+    }
+
+    void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                             wgpu::TextureUsage usage,
+                                             const SubresourceRange& range) {
+        TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
+    }
+
+    void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                                wgpu::TextureUsage usage) {
+        TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
+                                   GetAllSubresources());
+    }
+
+    void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                                D3D12_RESOURCE_STATES newState) {
+        TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
+    }
+
+    void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                             D3D12_RESOURCE_STATES newState,
+                                             const SubresourceRange& range) {
+        if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+            // Track the underlying heap to ensure residency.
+            Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+            commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+        }
+
+        std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+        // TODO(enga): Consider adding a Count helper.
+        uint32_t aspectCount = 0;
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            aspectCount++;
+            DAWN_UNUSED(aspect);
+        }
+
+        barriers.reserve(range.levelCount * range.layerCount * aspectCount);
+
+        TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
+        if (barriers.size()) {
+            commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
+        }
+    }
+
+    void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                             const SubresourceRange& range,
+                                             StateAndDecay* state,
+                                             D3D12_RESOURCE_STATES newState,
+                                             ExecutionSerial pendingCommandSerial) const {
+        // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
+        // return false.
+        if (state->lastState == newState) {
+            return;
+        }
+
+        D3D12_RESOURCE_STATES lastState = state->lastState;
+
+        // The COMMON state represents a state where no write operations can be pending, and
+        // where all pixels are uncompressed. This makes it possible to transition to and
+        // from some states without synchronization (i.e. without an explicit
+        // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
+        // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
+        // state when all of the following are true: 1) the texture is accessed on a command
+        // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
+        // 3) the texture was promoted implicitly to a read-only state and is still in that
+        // state.
+        // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+        // To track implicit decays, we must record the pending serial on which that
+        // transition will occur. When that texture is used again, the previously recorded
+        // serial must be compared to the last completed serial to determine if the texture
+        // has implicity decayed to the common state.
+        if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
+            lastState = D3D12_RESOURCE_STATE_COMMON;
+        }
+
+        // Update the tracked state.
+        state->lastState = newState;
+
+        // Destination states that qualify for an implicit promotion for a
+        // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
+        // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
+        {
+            static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
+                D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+                D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
+
+            if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+                if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
+                    // Implicit texture state decays can only occur when the texture was implicitly
+                    // transitioned to a read-only state. isValidToDecay is needed to differentiate
+                    // between resources that were implictly or explicitly transitioned to a
+                    // read-only state.
+                    state->isValidToDecay = true;
+                    state->lastDecaySerial = pendingCommandSerial;
+                    return;
+                } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
+                    state->isValidToDecay = false;
+                    return;
+                }
+            }
+        }
+
+        D3D12_RESOURCE_BARRIER barrier;
+        barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+        barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+        barrier.Transition.pResource = GetD3D12Resource();
+        barrier.Transition.StateBefore = lastState;
+        barrier.Transition.StateAfter = newState;
+
+        bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
+                           range.layerCount == GetArrayLayers() &&
+                           range.levelCount == GetNumMipLevels() &&
+                           range.aspects == GetFormat().aspects;
+
+        // Use a single transition for all subresources if possible.
+        if (isFullRange) {
+            barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+            barriers->push_back(barrier);
+        } else {
+            for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+                    for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+                        barrier.Transition.Subresource =
+                            GetSubresourceIndex(range.baseMipLevel + mipLevel,
+                                                range.baseArrayLayer + arrayLayer, aspect);
+                        barriers->push_back(barrier);
+                    }
+                }
+            }
+        }
+
+        state->isValidToDecay = false;
+    }
+
+    void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
+        // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+        // must be acquired before command list submission to ensure work from the other queues
+        // has finished. See Device::ExecuteCommandContext.
+        if (mD3D11on12Resource != nullptr) {
+            commandContext->AddToSharedTextureList(this);
+        }
+    }
+
+    void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                       std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                       wgpu::TextureUsage usage,
+                                                       const SubresourceRange& range) {
+        TransitionUsageAndGetResourceBarrier(commandContext, barrier,
+                                             D3D12TextureUsage(usage, GetFormat()), range);
+    }
+
+    void Texture::TransitionUsageAndGetResourceBarrier(
+        CommandRecordingContext* commandContext,
+        std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+        D3D12_RESOURCE_STATES newState,
+        const SubresourceRange& range) {
+        HandleTransitionSpecialCases(commandContext);
+
+        const ExecutionSerial pendingCommandSerial =
+            ToBackend(GetDevice())->GetPendingCommandSerial();
+
+        mSubresourceStateAndDecay.Update(
+            range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
+                TransitionSubresourceRange(barriers, updateRange, state, newState,
+                                           pendingCommandSerial);
+            });
+    }
+
+    void Texture::TrackUsageAndGetResourceBarrierForPass(
+        CommandRecordingContext* commandContext,
+        std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+        const TextureSubresourceUsage& textureUsages) {
+        if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+            // Track the underlying heap to ensure residency.
+            Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+            commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+        }
+
+        HandleTransitionSpecialCases(commandContext);
+
+        const ExecutionSerial pendingCommandSerial =
+            ToBackend(GetDevice())->GetPendingCommandSerial();
+
+        mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
+                                                           StateAndDecay* state,
+                                                           wgpu::TextureUsage usage) {
+            // Skip if this subresource is not used during the current pass
+            if (usage == wgpu::TextureUsage::None) {
+                return;
+            }
+
+            D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
+            TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
+        });
+    }
+
+    D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
+                                                            uint32_t baseSlice,
+                                                            uint32_t sliceCount) const {
+        D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
+        rtvDesc.Format = GetD3D12Format();
+        if (IsMultisampledTexture()) {
+            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+            ASSERT(GetNumMipLevels() == 1);
+            ASSERT(sliceCount == 1);
+            ASSERT(baseSlice == 0);
+            ASSERT(mipLevel == 0);
+            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
+            return rtvDesc;
+        }
+        switch (GetDimension()) {
+            case wgpu::TextureDimension::e2D:
+                // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
+                // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
+                // them as 1-layer 2D array textures. (Just like how we treat SRVs)
+                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
+                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
+                // _rtv
+                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
+                rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
+                rtvDesc.Texture2DArray.ArraySize = sliceCount;
+                rtvDesc.Texture2DArray.MipSlice = mipLevel;
+                rtvDesc.Texture2DArray.PlaneSlice = 0;
+                break;
+            case wgpu::TextureDimension::e3D:
+                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
+                rtvDesc.Texture3D.MipSlice = mipLevel;
+                rtvDesc.Texture3D.FirstWSlice = baseSlice;
+                rtvDesc.Texture3D.WSize = sliceCount;
+                break;
+            case wgpu::TextureDimension::e1D:
+                UNREACHABLE();
+                break;
+        }
+        return rtvDesc;
+    }
+
+    D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
+                                                            uint32_t baseArrayLayer,
+                                                            uint32_t layerCount,
+                                                            Aspect aspects,
+                                                            bool depthReadOnly,
+                                                            bool stencilReadOnly) const {
+        D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
+        dsvDesc.Format = GetD3D12Format();
+        dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
+        if (depthReadOnly && aspects & Aspect::Depth) {
+            dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
+        }
+        if (stencilReadOnly && aspects & Aspect::Stencil) {
+            dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
+        }
+
+        if (IsMultisampledTexture()) {
+            ASSERT(GetNumMipLevels() == 1);
+            ASSERT(layerCount == 1);
+            ASSERT(baseArrayLayer == 0);
+            ASSERT(mipLevel == 0);
+            dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
+        } else {
+            dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
+            dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
+            dsvDesc.Texture2DArray.ArraySize = layerCount;
+            dsvDesc.Texture2DArray.MipSlice = mipLevel;
+        }
+
+        return dsvDesc;
+    }
+
+    MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+                                     const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+        Device* device = ToBackend(GetDevice());
+
+        uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+        float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
+        if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
+            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
+
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    // Iterate the aspects individually to determine which clear flags to use.
+                    D3D12_CLEAR_FLAGS clearFlags = {};
+                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        switch (aspect) {
+                            case Aspect::Depth:
+                                clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+                                break;
+                            case Aspect::Stencil:
+                                clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+                                break;
+                            default:
+                                UNREACHABLE();
+                        }
+                    }
+
+                    if (clearFlags == 0) {
+                        continue;
+                    }
+
+                    CPUDescriptorHeapAllocation dsvHandle;
+                    DAWN_TRY_ASSIGN(
+                        dsvHandle,
+                        device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+                    const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
+                        dsvHandle.GetBaseDescriptor();
+                    D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
+                        GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
+                    device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
+                                                                     baseDescriptor);
+
+                    commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
+                                                       clearColor, 0, nullptr);
+                }
+            }
+        } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
+            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
+
+            const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
+
+            ASSERT(range.aspects == Aspect::Color);
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    CPUDescriptorHeapAllocation rtvHeap;
+                    DAWN_TRY_ASSIGN(
+                        rtvHeap,
+                        device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+                    const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
+
+                    uint32_t baseSlice = layer;
+                    uint32_t sliceCount = 1;
+                    if (GetDimension() == wgpu::TextureDimension::e3D) {
+                        baseSlice = 0;
+                        sliceCount = std::max(GetDepth() >> level, 1u);
+                    }
+                    D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
+                        GetRTVDescriptor(level, baseSlice, sliceCount);
+                    device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
+                                                                     rtvHandle);
+                    commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
+                }
+            }
+        } else {
+            // create temp buffer with clear color to copy to the texture image
+            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
+
+            for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+                Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+                uint32_t bytesPerRow =
+                    Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+                          kTextureBytesPerRowAlignment);
+                uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+                                      largestMipSize.depthOrArrayLayers;
+                DynamicUploader* uploader = device->GetDynamicUploader();
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle,
+                                uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                                   blockInfo.byteSize));
+                memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    // compute d3d12 texture copy locations for texture and buffer
+                    Extent3D copySize = GetMipLevelPhysicalSize(level);
+
+                    for (uint32_t layer = range.baseArrayLayer;
+                         layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        TextureCopy textureCopy;
+                        textureCopy.texture = this;
+                        textureCopy.origin = {0, 0, layer};
+                        textureCopy.mipLevel = level;
+                        textureCopy.aspect = aspect;
+                        RecordBufferTextureCopyWithBufferHandle(
+                            BufferTextureCopyDirection::B2T, commandList,
+                            ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+                            uploadHandle.startOffset, bytesPerRow, GetHeight(), textureCopy,
+                            copySize);
+                    }
+                }
+            }
+        }
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            GetDevice()->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    void Texture::SetLabelHelper(const char* prefix) {
+        SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
+                     GetLabel());
+    }
+
+    void Texture::SetLabelImpl() {
+        SetLabelHelper("Dawn_InternalTexture");
+    }
+
+    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                      const SubresourceRange& range) {
+        if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (!IsSubresourceContentInitialized(range)) {
+            // If subresource has not been initialized, clear it to black as it could contain
+            // dirty bits from recycled memory
+            GetDevice()->ConsumedError(
+                ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        }
+    }
+
+    bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
+        return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
+               isValidToDecay == other.isValidToDecay;
+    }
+
+    // static
+    Ref<TextureView> TextureView::Create(TextureBase* texture,
+                                         const TextureViewDescriptor* descriptor) {
+        return AcquireRef(new TextureView(texture, descriptor));
+    }
+
+    TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+        : TextureViewBase(texture, descriptor) {
+        mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
+        mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+
+        UINT planeSlice = 0;
+        const Format& textureFormat = texture->GetFormat();
+        if (textureFormat.HasDepthOrStencil()) {
+            // Configure the SRV descriptor to reinterpret the texture allocated as
+            // TYPELESS as a single-plane shader-accessible view.
+            switch (textureFormat.format) {
+                case wgpu::TextureFormat::Depth32Float:
+                case wgpu::TextureFormat::Depth24Plus:
+                    mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
+                    break;
+                case wgpu::TextureFormat::Depth16Unorm:
+                    mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
+                    break;
+                case wgpu::TextureFormat::Stencil8:
+                case wgpu::TextureFormat::Depth24UnormStencil8: {
+                    Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+                    ASSERT(aspects != Aspect::None);
+                    if (!HasZeroOrOneBits(aspects)) {
+                        // A single aspect is not selected. The texture view must not be
+                        // sampled.
+                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
+                        break;
+                    }
+                    switch (aspects) {
+                        case Aspect::Depth:
+                            planeSlice = 0;
+                            mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
+                            break;
+                        case Aspect::Stencil:
+                            planeSlice = 1;
+                            mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
+                            // Stencil is accessed using the .g component in the shader.
+                            // Map it to the zeroth component to match other APIs.
+                            mSrvDesc.Shader4ComponentMapping =
+                                D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+                                    D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+                            break;
+                        default:
+                            UNREACHABLE();
+                            break;
+                    }
+                    break;
+                }
+                case wgpu::TextureFormat::Depth24PlusStencil8:
+                case wgpu::TextureFormat::Depth32FloatStencil8: {
+                    Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+                    ASSERT(aspects != Aspect::None);
+                    if (!HasZeroOrOneBits(aspects)) {
+                        // A single aspect is not selected. The texture view must not be
+                        // sampled.
+                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
+                        break;
+                    }
+                    switch (aspects) {
+                        case Aspect::Depth:
+                            planeSlice = 0;
+                            mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
+                            break;
+                        case Aspect::Stencil:
+                            planeSlice = 1;
+                            mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
+                            // Stencil is accessed using the .g component in the shader.
+                            // Map it to the zeroth component to match other APIs.
+                            mSrvDesc.Shader4ComponentMapping =
+                                D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+                                    D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+                            break;
+                        default:
+                            UNREACHABLE();
+                            break;
+                    }
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+
+        // Per plane view formats must have the plane slice number be the index of the plane in the
+        // array of textures.
+        if (texture->GetFormat().IsMultiPlanar()) {
+            const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
+            planeSlice = GetAspectIndex(planeAspect);
+            mSrvDesc.Format =
+                D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
+        }
+
+        // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
+        // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
+        // array textures.
+        // Multisampled textures may only be one array layer, so we use
+        // D3D12_SRV_DIMENSION_TEXTURE2DMS.
+        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
+        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
+        if (GetTexture()->IsMultisampledTexture()) {
+            switch (descriptor->dimension) {
+                case wgpu::TextureViewDimension::e2DArray:
+                    ASSERT(texture->GetArrayLayers() == 1);
+                    [[fallthrough]];
+                case wgpu::TextureViewDimension::e2D:
+                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
+                    break;
+
+                default:
+                    UNREACHABLE();
+            }
+        } else {
+            switch (descriptor->dimension) {
+                case wgpu::TextureViewDimension::e1D:
+                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
+                    mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
+                    mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
+                    mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
+                    break;
+
+                case wgpu::TextureViewDimension::e2D:
+                case wgpu::TextureViewDimension::e2DArray:
+                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
+                    mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
+                    mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
+                    mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
+                    mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
+                    mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
+                    mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
+                    break;
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                    ASSERT(descriptor->arrayLayerCount % 6 == 0);
+                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
+                    mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
+                    mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
+                    mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
+                    mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
+                    mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
+                    break;
+                case wgpu::TextureViewDimension::e3D:
+                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
+                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
+                    mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
+                    mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
+                    mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
+                    break;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+    }
+
+    DXGI_FORMAT TextureView::GetD3D12Format() const {
+        return D3D12TextureFormat(GetFormat().format);
+    }
+
+    const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
+        ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
+        return mSrvDesc;
+    }
+
+    D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
+        return ToBackend(GetTexture())
+            ->GetRTVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
+    }
+
+    D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
+                                                                bool stencilReadOnly) const {
+        ASSERT(GetLevelCount() == 1);
+        return ToBackend(GetTexture())
+            ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(),
+                               GetAspects(), depthReadOnly, stencilReadOnly);
+    }
+
+    D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
+        D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
+        uavDesc.Format = GetD3D12Format();
+
+        ASSERT(!GetTexture()->IsMultisampledTexture());
+        switch (GetDimension()) {
+            case wgpu::TextureViewDimension::e1D:
+                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
+                uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
+                break;
+            case wgpu::TextureViewDimension::e2D:
+            case wgpu::TextureViewDimension::e2DArray:
+                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
+                uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
+                uavDesc.Texture2DArray.ArraySize = GetLayerCount();
+                uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
+                uavDesc.Texture2DArray.PlaneSlice = 0;
+                break;
+            case wgpu::TextureViewDimension::e3D:
+                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
+                uavDesc.Texture3D.FirstWSlice = 0;
+                uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
+                uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
+                break;
+            // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
+            // descriptor for them.
+            case wgpu::TextureViewDimension::Cube:
+            case wgpu::TextureViewDimension::CubeArray:
+            case wgpu::TextureViewDimension::Undefined:
+                UNREACHABLE();
+        }
+        return uavDesc;
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/TextureD3D12.h b/src/dawn/native/d3d12/TextureD3D12.h
new file mode 100644
index 0000000..76572ba
--- /dev/null
+++ b/src/dawn/native/d3d12/TextureD3D12.h
@@ -0,0 +1,162 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_TEXTURED3D12_H_
+#define DAWNNATIVE_D3D12_TEXTURED3D12_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    class CommandRecordingContext;
+    class Device;
+    class D3D11on12ResourceCacheEntry;
+
+    DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
+    MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+                                                const TextureDescriptor* descriptor);
+    MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
+    MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
+
+    class Texture final : public TextureBase {
+      public:
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor);
+        static ResultOrError<Ref<Texture>> CreateExternalImage(
+            Device* device,
+            const TextureDescriptor* descriptor,
+            ComPtr<ID3D12Resource> d3d12Texture,
+            Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+            bool isSwapChainTexture,
+            bool isInitialized);
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor,
+                                                  ComPtr<ID3D12Resource> d3d12Texture);
+
+        DXGI_FORMAT GetD3D12Format() const;
+        ID3D12Resource* GetD3D12Resource() const;
+        DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
+
+        D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t mipLevel,
+                                                       uint32_t baseSlice,
+                                                       uint32_t sliceCount) const;
+        D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
+                                                       uint32_t baseArrayLayer,
+                                                       uint32_t layerCount,
+                                                       Aspect aspects,
+                                                       bool depthReadOnly,
+                                                       bool stencilReadOnly) const;
+
+        void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                 const SubresourceRange& range);
+
+        MaybeError AcquireKeyedMutex();
+        void ReleaseKeyedMutex();
+
+        void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+                                                    std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                    const TextureSubresourceUsage& textureUsages);
+        void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                  std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                  wgpu::TextureUsage usage,
+                                                  const SubresourceRange& range);
+        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                        wgpu::TextureUsage usage,
+                                        const SubresourceRange& range);
+        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                        D3D12_RESOURCE_STATES newState,
+                                        const SubresourceRange& range);
+        void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                           wgpu::TextureUsage usage);
+        void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                           D3D12_RESOURCE_STATES newState);
+
+      private:
+        Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+        ~Texture() override;
+        using TextureBase::TextureBase;
+
+        MaybeError InitializeAsInternalTexture();
+        MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+                                               ComPtr<ID3D12Resource> d3d12Texture,
+                                               Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+                                               bool isSwapChainTexture);
+        MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
+
+        void SetLabelHelper(const char* prefix);
+
+        // Dawn API
+        void SetLabelImpl() override;
+        void DestroyImpl() override;
+
+        MaybeError ClearTexture(CommandRecordingContext* commandContext,
+                                const SubresourceRange& range,
+                                TextureBase::ClearValue clearValue);
+
+        // Barriers implementation details.
+        struct StateAndDecay {
+            D3D12_RESOURCE_STATES lastState;
+            ExecutionSerial lastDecaySerial;
+            bool isValidToDecay;
+
+            bool operator==(const StateAndDecay& other) const;
+        };
+        void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                  std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                  D3D12_RESOURCE_STATES newState,
+                                                  const SubresourceRange& range);
+        void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                        const SubresourceRange& range,
+                                        StateAndDecay* state,
+                                        D3D12_RESOURCE_STATES subresourceNewState,
+                                        ExecutionSerial pendingCommandSerial) const;
+        void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
+
+        SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
+
+        ResourceHeapAllocation mResourceAllocation;
+        bool mSwapChainTexture = false;
+        D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
+
+        Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
+    };
+
+    class TextureView final : public TextureViewBase {
+      public:
+        static Ref<TextureView> Create(TextureBase* texture,
+                                       const TextureViewDescriptor* descriptor);
+
+        DXGI_FORMAT GetD3D12Format() const;
+
+        const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
+        D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
+        D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly,
+                                                       bool stencilReadOnly) const;
+        D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
+
+      private:
+        TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+        D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
+    };
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_TEXTURED3D12_H_
diff --git a/src/dawn/native/d3d12/UtilsD3D12.cpp b/src/dawn/native/d3d12/UtilsD3D12.cpp
new file mode 100644
index 0000000..8d4749f
--- /dev/null
+++ b/src/dawn/native/d3d12/UtilsD3D12.cpp
@@ -0,0 +1,308 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include <stringapiset.h>
+
+namespace dawn::native::d3d12 {
+
+    ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
+        size_t len = strlen(str);
+        if (len == 0) {
+            return std::wstring();
+        }
+        int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
+        if (numChars == 0) {
+            return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+        }
+        std::wstring result;
+        result.resize(numChars);
+        int numConvertedChars =
+            MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
+        if (numConvertedChars != numChars) {
+            return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+        }
+        return std::move(result);
+    }
+
+    D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
+        switch (func) {
+            case wgpu::CompareFunction::Never:
+                return D3D12_COMPARISON_FUNC_NEVER;
+            case wgpu::CompareFunction::Less:
+                return D3D12_COMPARISON_FUNC_LESS;
+            case wgpu::CompareFunction::LessEqual:
+                return D3D12_COMPARISON_FUNC_LESS_EQUAL;
+            case wgpu::CompareFunction::Greater:
+                return D3D12_COMPARISON_FUNC_GREATER;
+            case wgpu::CompareFunction::GreaterEqual:
+                return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
+            case wgpu::CompareFunction::Equal:
+                return D3D12_COMPARISON_FUNC_EQUAL;
+            case wgpu::CompareFunction::NotEqual:
+                return D3D12_COMPARISON_FUNC_NOT_EQUAL;
+            case wgpu::CompareFunction::Always:
+                return D3D12_COMPARISON_FUNC_ALWAYS;
+
+            case wgpu::CompareFunction::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+                                                                     uint32_t level,
+                                                                     uint32_t layer,
+                                                                     Aspect aspect) {
+        D3D12_TEXTURE_COPY_LOCATION copyLocation;
+        copyLocation.pResource = texture->GetD3D12Resource();
+        copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
+        copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+
+        return copyLocation;
+    }
+
+    D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+        const Texture* texture,
+        ID3D12Resource* bufferResource,
+        const Extent3D& bufferSize,
+        const uint64_t offset,
+        const uint32_t rowPitch,
+        Aspect aspect) {
+        D3D12_TEXTURE_COPY_LOCATION bufferLocation;
+        bufferLocation.pResource = bufferResource;
+        bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
+        bufferLocation.PlacedFootprint.Offset = offset;
+        bufferLocation.PlacedFootprint.Footprint.Format =
+            texture->GetD3D12CopyableSubresourceFormat(aspect);
+        bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
+        bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
+        bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
+        bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
+        return bufferLocation;
+    }
+
+    D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
+        D3D12_BOX sourceRegion;
+        sourceRegion.left = offset.x;
+        sourceRegion.top = offset.y;
+        sourceRegion.front = offset.z;
+        sourceRegion.right = offset.x + copySize.width;
+        sourceRegion.bottom = offset.y + copySize.height;
+        sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
+        return sourceRegion;
+    }
+
+    bool IsTypeless(DXGI_FORMAT format) {
+        // List generated from <dxgiformat.h>
+        switch (format) {
+            case DXGI_FORMAT_R32G32B32A32_TYPELESS:
+            case DXGI_FORMAT_R32G32B32_TYPELESS:
+            case DXGI_FORMAT_R16G16B16A16_TYPELESS:
+            case DXGI_FORMAT_R32G32_TYPELESS:
+            case DXGI_FORMAT_R32G8X24_TYPELESS:
+            case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
+            case DXGI_FORMAT_R10G10B10A2_TYPELESS:
+            case DXGI_FORMAT_R8G8B8A8_TYPELESS:
+            case DXGI_FORMAT_R16G16_TYPELESS:
+            case DXGI_FORMAT_R32_TYPELESS:
+            case DXGI_FORMAT_R24G8_TYPELESS:
+            case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
+            case DXGI_FORMAT_R8G8_TYPELESS:
+            case DXGI_FORMAT_R16_TYPELESS:
+            case DXGI_FORMAT_R8_TYPELESS:
+            case DXGI_FORMAT_BC1_TYPELESS:
+            case DXGI_FORMAT_BC2_TYPELESS:
+            case DXGI_FORMAT_BC3_TYPELESS:
+            case DXGI_FORMAT_BC4_TYPELESS:
+            case DXGI_FORMAT_BC5_TYPELESS:
+            case DXGI_FORMAT_B8G8R8A8_TYPELESS:
+            case DXGI_FORMAT_B8G8R8X8_TYPELESS:
+            case DXGI_FORMAT_BC6H_TYPELESS:
+            case DXGI_FORMAT_BC7_TYPELESS:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
+                                           ID3D12GraphicsCommandList* commandList,
+                                           const TextureCopySubresource& baseCopySplit,
+                                           ID3D12Resource* bufferResource,
+                                           uint64_t baseOffset,
+                                           uint64_t bufferBytesPerRow,
+                                           TextureBase* textureBase,
+                                           uint32_t textureMiplevel,
+                                           uint32_t textureLayer,
+                                           Aspect aspect) {
+        Texture* texture = ToBackend(textureBase);
+        const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+            ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
+
+        for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+            const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
+
+            // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+            // members in TextureCopySubresource::CopyInfo.
+            const uint64_t offsetBytes = info.alignedOffset + baseOffset;
+            const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+                ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
+                                                          offsetBytes, bufferBytesPerRow, aspect);
+            if (direction == BufferTextureCopyDirection::B2T) {
+                const D3D12_BOX sourceRegion =
+                    ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+                commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+                                               info.textureOffset.y, info.textureOffset.z,
+                                               &bufferLocation, &sourceRegion);
+            } else {
+                ASSERT(direction == BufferTextureCopyDirection::T2B);
+                const D3D12_BOX sourceRegion =
+                    ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+                commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+                                               info.bufferOffset.y, info.bufferOffset.z,
+                                               &textureLocation, &sourceRegion);
+            }
+        }
+    }
+
+    void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
+                                            ID3D12GraphicsCommandList* commandList,
+                                            ID3D12Resource* bufferResource,
+                                            const uint64_t offset,
+                                            const uint32_t bytesPerRow,
+                                            const uint32_t rowsPerImage,
+                                            const TextureCopy& textureCopy,
+                                            const TexelBlockInfo& blockInfo,
+                                            const Extent3D& copySize) {
+        // See comments in Compute2DTextureCopySplits() for more details.
+        const TextureCopySplits copySplits = Compute2DTextureCopySplits(
+            textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+        const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+        // copySplits.copySubresources[1] is always calculated for the second copy layer with
+        // extra "bytesPerLayer" copy offset compared with the first copy layer. So
+        // here we use an array bufferOffsetsForNextLayer to record the extra offsets
+        // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
+        // the next copy layer that uses copySplits.copySubresources[0], and
+        // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
+        // that uses copySplits.copySubresources[1].
+        std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
+            bufferOffsetsForNextLayer = {{0u, 0u}};
+
+        for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+            const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
+
+            const TextureCopySubresource& copySplitPerLayerBase =
+                copySplits.copySubresources[splitIndex];
+            const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
+            const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
+
+            RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
+                                              bufferResource, bufferOffsetForNextLayer, bytesPerRow,
+                                              textureCopy.texture.Get(), textureCopy.mipLevel,
+                                              copyTextureLayer, textureCopy.aspect);
+
+            bufferOffsetsForNextLayer[splitIndex] +=
+                bytesPerLayer * copySplits.copySubresources.size();
+        }
+    }
+
+    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                                 ID3D12GraphicsCommandList* commandList,
+                                                 ID3D12Resource* bufferResource,
+                                                 const uint64_t offset,
+                                                 const uint32_t bytesPerRow,
+                                                 const uint32_t rowsPerImage,
+                                                 const TextureCopy& textureCopy,
+                                                 const Extent3D& copySize) {
+        ASSERT(HasOneBit(textureCopy.aspect));
+
+        TextureBase* texture = textureCopy.texture.Get();
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+
+        switch (texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D: {
+                // 1D textures copy splits are a subset of the single-layer 2D texture copy splits,
+                // at least while 1D textures can only have a single array layer.
+                ASSERT(texture->GetArrayLayers() == 1);
+
+                TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
+                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
+                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
+                                                  bufferResource, 0, bytesPerRow, texture,
+                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
+                break;
+            }
+
+            // Record the CopyTextureRegion commands for 2D textures, with special handling of array
+            // layers since each require their own set of copies.
+            case wgpu::TextureDimension::e2D:
+                Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
+                                                   bytesPerRow, rowsPerImage, textureCopy,
+                                                   blockInfo, copySize);
+                break;
+
+            case wgpu::TextureDimension::e3D: {
+                // See comments in Compute3DTextureCopySplits() for more details.
+                TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
+                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
+                                                  bufferResource, 0, bytesPerRow, texture,
+                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
+                break;
+            }
+        }
+    }
+
+    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                                 ID3D12GraphicsCommandList* commandList,
+                                 const BufferCopy& bufferCopy,
+                                 const TextureCopy& textureCopy,
+                                 const Extent3D& copySize) {
+        RecordBufferTextureCopyWithBufferHandle(direction, commandList,
+                                                ToBackend(bufferCopy.buffer)->GetD3D12Resource(),
+                                                bufferCopy.offset, bufferCopy.bytesPerRow,
+                                                bufferCopy.rowsPerImage, textureCopy, copySize);
+    }
+
+    void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
+        if (!object) {
+            return;
+        }
+
+        if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+            object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
+            return;
+        }
+
+        std::string objectName = prefix;
+        objectName += "_";
+        objectName += label;
+        object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+    }
+
+}  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/UtilsD3D12.h b/src/dawn/native/d3d12/UtilsD3D12.h
new file mode 100644
index 0000000..00c850f
--- /dev/null
+++ b/src/dawn/native/d3d12/UtilsD3D12.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_UTILSD3D12_H_
+#define DAWNNATIVE_D3D12_UTILSD3D12_H_
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
+
+    D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
+
+    D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+                                                                     uint32_t level,
+                                                                     uint32_t layer,
+                                                                     Aspect aspect);
+
+    D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+        const Texture* texture,
+        ID3D12Resource* bufferResource,
+        const Extent3D& bufferSize,
+        const uint64_t offset,
+        const uint32_t rowPitch,
+        Aspect aspect);
+    D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
+
+    bool IsTypeless(DXGI_FORMAT format);
+
+    enum class BufferTextureCopyDirection {
+        B2T,
+        T2B,
+    };
+
+    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                                 ID3D12GraphicsCommandList* commandList,
+                                                 ID3D12Resource* bufferResource,
+                                                 const uint64_t offset,
+                                                 const uint32_t bytesPerRow,
+                                                 const uint32_t rowsPerImage,
+                                                 const TextureCopy& textureCopy,
+                                                 const Extent3D& copySize);
+
+    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                                 ID3D12GraphicsCommandList* commandList,
+                                 const BufferCopy& bufferCopy,
+                                 const TextureCopy& textureCopy,
+                                 const Extent3D& copySize);
+
+    void SetDebugName(Device* device,
+                      ID3D12Object* object,
+                      const char* prefix,
+                      std::string label = "");
+
+}  // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_UTILSD3D12_H_
diff --git a/src/dawn/native/d3d12/d3d12_platform.h b/src/dawn/native/d3d12/d3d12_platform.h
new file mode 100644
index 0000000..f020fec
--- /dev/null
+++ b/src/dawn/native/d3d12/d3d12_platform.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+#define DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+
+// Pre-emptively include windows.h but remove its macros so that they aren't set when declaring the
+// COM interfaces. Otherwise ID3D12InfoQueue::GetMessage would be either GetMessageA or GetMessageW
+// which causes compilation errors.
+#include "dawn/common/windows_with_undefs.h"
+
+#include <d3d11_2.h>
+#include <d3d11on12.h>
+#include <d3d12.h>
+#include <dxcapi.h>
+#include <dxgi1_4.h>
+#include <wrl.h>
+
+// DXProgrammableCapture.h takes a dependency on other platform header
+// files, so it must be defined after them.
+#include <DXProgrammableCapture.h>
+#include <dxgidebug.h>
+
+using Microsoft::WRL::ComPtr;
+
+#endif  // DAWNNATIVE_D3D12_D3D12PLATFORM_H_
diff --git a/src/dawn/native/dawn_platform.h b/src/dawn/native/dawn_platform.h
new file mode 100644
index 0000000..c8863af
--- /dev/null
+++ b/src/dawn/native/dawn_platform.h
@@ -0,0 +1,62 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DAWNPLATFORM_H_
+#define DAWNNATIVE_DAWNPLATFORM_H_
+
+// Use webgpu_cpp to have the enum and bitfield definitions
+#include <dawn/webgpu_cpp.h>
+
+#include <dawn/native/dawn_platform_autogen.h>
+
+namespace dawn::native {
+
+    // kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
+    // if the enums are contiguous, making it suitable for iteration.
+    // It is defined in dawn_platform_autogen.h
+    template <typename T>
+    constexpr uint32_t kEnumCount = EnumCount<T>::value;
+
+    // Extra buffer usages
+    // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
+    // usage as storage buffer in the internal pipeline.
+    static constexpr wgpu::BufferUsage kInternalStorageBuffer =
+        static_cast<wgpu::BufferUsage>(0x40000000);
+
+    // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
+    static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
+        static_cast<wgpu::BufferUsage>(0x80000000);
+
+    // Extra texture usages
+    // Add an extra texture usage (readonly render attachment usage) for render pass resource
+    // tracking
+    static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
+        static_cast<wgpu::TextureUsage>(0x40000000);
+
+    // Internal usage to help tracking when a subresource is used as render attachment usage
+    // more than once in a render pass.
+    static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
+        static_cast<wgpu::TextureUsage>(0x80000001);
+
+    // Add an extra texture usage for textures that will be presented, for use in backends
+    // that needs to transition to present usage.
+    // This currently aliases wgpu::TextureUsage::Present, we would assign it
+    // some bit when wgpu::TextureUsage::Present is removed.
+    static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
+
+    static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
+        static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_DAWNPLATFORM_H_
diff --git a/src/dawn/native/metal/BackendMTL.h b/src/dawn/native/metal/BackendMTL.h
new file mode 100644
index 0000000..0dd7204
--- /dev/null
+++ b/src/dawn/native/metal/BackendMTL.h
@@ -0,0 +1,33 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BACKENDMTL_H_
+#define DAWNNATIVE_METAL_BACKENDMTL_H_
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native::metal {
+
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance);
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* optionsBase) override;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_BACKENDMTL_H_
diff --git a/src/dawn/native/metal/BackendMTL.mm b/src/dawn/native/metal/BackendMTL.mm
new file mode 100644
index 0000000..6f4751f
--- /dev/null
+++ b/src/dawn/native/metal/BackendMTL.mm
@@ -0,0 +1,674 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BackendMTL.h"
+
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/NSRef.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/MetalBackend.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+#if defined(DAWN_PLATFORM_MACOS)
+#    import <IOKit/IOKitLib.h>
+#    include "dawn/common/IOKitRef.h"
+#endif
+
+#include <vector>
+
+namespace dawn::native::metal {
+
+    namespace {
+
+        struct PCIIDs {
+            uint32_t vendorId;
+            uint32_t deviceId;
+        };
+
+        struct Vendor {
+            const char* trademark;
+            uint32_t vendorId;
+        };
+
+#if defined(DAWN_PLATFORM_MACOS)
+        const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
+                                   {"Radeon", gpu_info::kVendorID_AMD},
+                                   {"Intel", gpu_info::kVendorID_Intel},
+                                   {"Geforce", gpu_info::kVendorID_Nvidia},
+                                   {"Quadro", gpu_info::kVendorID_Nvidia}};
+
+        // Find vendor ID from MTLDevice name.
+        MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
+            uint32_t vendorId = 0;
+            const char* deviceName = [device.name UTF8String];
+            for (const auto& it : kVendors) {
+                if (strstr(deviceName, it.trademark) != nullptr) {
+                    vendorId = it.vendorId;
+                    break;
+                }
+            }
+
+            if (vendorId == 0) {
+                return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
+            }
+
+            // Set vendor id with 0
+            *ids = PCIIDs{vendorId, 0};
+            return {};
+        }
+
+        // Extracts an integer property from a registry entry.
+        uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
+            uint32_t value = 0;
+
+            // Recursively search registry entry and its parents for property name
+            // The data should release with CFRelease
+            CFRef<CFDataRef> data =
+                AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
+                    entry, kIOServicePlane, name, kCFAllocatorDefault,
+                    kIORegistryIterateRecursively | kIORegistryIterateParents)));
+
+            if (data == nullptr) {
+                return value;
+            }
+
+            // CFDataGetBytePtr() is guaranteed to return a read-only pointer
+            value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
+            return value;
+        }
+
+        // Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
+        // The registry entry correponding to [device registryID] doesn't contain the exact PCI ids
+        // because it corresponds to a driver. However its parent entry corresponds to the device
+        // itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
+        // MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
+        //
+        //  - PCI0@0
+        //  | - AppleACPIPCI
+        //  | | - IGPU@2 (type IOPCIDevice)
+        //  | | | - IntelAccelerator (type IOGraphicsAccelerator2)
+        //  | | - PEG0@1
+        //  | | | - IOPP
+        //  | | | | - GFX0@0 (type IOPCIDevice)
+        //  | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
+        //
+        // [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
+        // their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
+        MaybeError API_AVAILABLE(macos(10.13))
+            GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            // Get a matching dictionary for the IOGraphicsAccelerator2
+            CFRef<CFMutableDictionaryRef> matchingDict =
+                AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
+            if (matchingDict == nullptr) {
+                return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
+            }
+
+            // IOServiceGetMatchingService will consume the reference on the matching dictionary,
+            // so we don't need to release the dictionary.
+            IORef<io_registry_entry_t> acceleratorEntry = AcquireIORef(
+                IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
+            if (acceleratorEntry == IO_OBJECT_NULL) {
+                return DAWN_INTERNAL_ERROR(
+                    "Failed to get the IO registry entry for the accelerator");
+            }
+
+            // Get the parent entry that will be the IOPCIDevice
+            IORef<io_registry_entry_t> deviceEntry;
+            if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
+                                              deviceEntry.InitializeInto()) != kIOReturnSuccess) {
+                return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
+            }
+
+            ASSERT(deviceEntry != IO_OBJECT_NULL);
+
+            uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
+            uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
+
+            *ids = PCIIDs{vendorId, deviceId};
+
+            return {};
+        }
+
+        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
+            // id by vendor name on old macOS
+            if (@available(macos 10.13, *)) {
+                return GetDeviceIORegistryPCIInfo(device, ids);
+            } else {
+                return GetVendorIdFromVendors(device, ids);
+            }
+        }
+
+        bool IsMetalSupported() {
+            // Metal was first introduced in macOS 10.11
+            // WebGPU is targeted at macOS 10.12+
+            // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
+            return IsMacOSVersionAtLeast(10, 12);
+        }
+#elif defined(DAWN_PLATFORM_IOS)
+        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            DAWN_UNUSED(device);
+            *ids = PCIIDs{0, 0};
+            return {};
+        }
+
+        bool IsMetalSupported() {
+            return true;
+        }
+#else
+#    error "Unsupported Apple platform."
+#endif
+
+        DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
+            API_AVAILABLE(macos(11.0), ios(14.0)) {
+            bool isBlitBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
+            bool isDispatchBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
+            bool isDrawBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
+
+            return isBlitBoundarySupported && isDispatchBoundarySupported &&
+                   isDrawBoundarySupported;
+        }
+
+        // This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
+        // For now, it is written defensively, with many potentially unnecessary guards until
+        // we narrow down the cause of the problem.
+        DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
+                                                 MTLCommonCounterSet counterSetName,
+                                                 std::vector<MTLCommonCounter> counterNames)
+            API_AVAILABLE(macos(10.15), ios(14.0)) {
+            NSPRef<id<MTLCounterSet>> counterSet = nil;
+            if (![device respondsToSelector:@selector(counterSets)]) {
+                dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
+                return false;
+            }
+            NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
+            if (counterSets == nil) {
+                // On some systems, [device counterSets] may be null and not an empty array.
+                return false;
+            }
+            // MTLDevice’s counterSets property declares which counter sets it supports. Check
+            // whether it's available on the device before requesting a counter set.
+            // Note: Don't do for..in loop to avoid potentially crashy interaction with
+            // NSFastEnumeration.
+            for (NSUInteger i = 0; i < counterSets.count; ++i) {
+                id<MTLCounterSet> set = [counterSets objectAtIndex:i];
+                if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
+                    counterSet = set;
+                    break;
+                }
+            }
+
+            // The counter set is not supported.
+            if (counterSet == nil) {
+                return false;
+            }
+
+            if (![*counterSet respondsToSelector:@selector(counters)]) {
+                dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
+                return false;
+            }
+            NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
+            if (countersInSet == nil) {
+                // On some systems, [MTLCounterSet counters] may be null and not an empty array.
+                return false;
+            }
+
+            // A GPU might support a counter set, but only support a subset of the counters in that
+            // set, check if the counter set supports all specific counters we need. Return false
+            // if there is a counter unsupported.
+            for (MTLCommonCounter counterName : counterNames) {
+                bool found = false;
+                // Note: Don't do for..in loop to avoid potentially crashy interaction with
+                // NSFastEnumeration.
+                for (NSUInteger i = 0; i < countersInSet.count; ++i) {
+                    id<MTLCounter> counter = [countersInSet objectAtIndex:i];
+                    if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
+                        found = true;
+                        break;
+                    }
+                }
+                if (!found) {
+                    return false;
+                }
+            }
+
+            if (@available(macOS 11.0, iOS 14.0, *)) {
+                // Check whether it can read GPU counters at the specified command boundary. Apple
+                // family GPUs do not support sampling between different Metal commands, because
+                // they defer fragment processing until after the GPU processes all the primitives
+                // in the render pass.
+                if (!IsCounterSamplingBoundarySupport(device)) {
+                    return false;
+                }
+            }
+
+            return true;
+        }
+
+    }  // anonymous namespace
+
+    // The Metal backend's Adapter.
+
+    class Adapter : public AdapterBase {
+      public:
+        Adapter(InstanceBase* instance, id<MTLDevice> device)
+            : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
+            mName = std::string([[*mDevice name] UTF8String]);
+
+            PCIIDs ids;
+            if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
+                mVendorId = ids.vendorId;
+                mDeviceId = ids.deviceId;
+            }
+
+#if defined(DAWN_PLATFORM_IOS)
+            mAdapterType = wgpu::AdapterType::IntegratedGPU;
+            const char* systemName = "iOS ";
+#elif defined(DAWN_PLATFORM_MACOS)
+            if ([device isLowPower]) {
+                mAdapterType = wgpu::AdapterType::IntegratedGPU;
+            } else {
+                mAdapterType = wgpu::AdapterType::DiscreteGPU;
+            }
+            const char* systemName = "macOS ";
+#else
+#    error "Unsupported Apple platform."
+#endif
+
+            NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
+            mDriverDescription =
+                "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
+        }
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override {
+            // Via dawn::native::metal::WrapIOSurface
+            return true;
+        }
+
+      private:
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override {
+            return Device::Create(this, mDevice, descriptor);
+        }
+
+        MaybeError InitializeImpl() override {
+            return {};
+        }
+
+        MaybeError InitializeSupportedFeaturesImpl() override {
+            // Check compressed texture format with deprecated MTLFeatureSet way.
+#if defined(DAWN_PLATFORM_MACOS)
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+                mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+            }
+#endif
+#if defined(DAWN_PLATFORM_IOS)
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+                mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+            }
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+                mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+            }
+#endif
+
+            // Check compressed texture format with MTLGPUFamily
+            if (@available(macOS 10.15, iOS 13.0, *)) {
+                if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
+                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
+                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
+                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+                }
+            }
+
+            if (@available(macOS 10.15, iOS 14.0, *)) {
+                if (IsGPUCounterSupported(
+                        *mDevice, MTLCommonCounterSetStatistic,
+                        {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
+                         MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
+                         MTLCommonCounterComputeKernelInvocations})) {
+                    mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+                }
+
+                if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
+                                          {MTLCommonCounterTimestamp})) {
+                    bool enableTimestampQuery = true;
+
+#if defined(DAWN_PLATFORM_MACOS)
+                    // Disable timestamp query on < macOS 11.0 on AMD GPU because WriteTimestamp
+                    // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
+                    // has been fixed on macOS 11.0. See crbug.com/dawn/545.
+                    if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
+                        enableTimestampQuery = false;
+                    }
+#endif
+
+                    if (enableTimestampQuery) {
+                        mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+                    }
+                }
+            }
+
+            if (@available(macOS 10.11, iOS 11.0, *)) {
+                mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+            }
+
+            if (@available(macOS 10.11, iOS 9.0, *)) {
+                mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+            }
+
+            // Uses newTextureWithDescriptor::iosurface::plane which is available
+            // on ios 11.0+ and macOS 11.0+
+            if (@available(macOS 10.11, iOS 11.0, *)) {
+                mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+            }
+
+#if defined(DAWN_PLATFORM_MACOS)
+            // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
+            if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
+                mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+            }
+#endif
+
+            return {};
+        }
+
+        enum class MTLGPUFamily {
+            Apple1,
+            Apple2,
+            Apple3,
+            Apple4,
+            Apple5,
+            Apple6,
+            Apple7,
+            Mac1,
+            Mac2,
+        };
+
+        ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
+            // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+
+            if (@available(macOS 10.15, iOS 10.13, *)) {
+                if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
+                    return MTLGPUFamily::Mac2;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
+                    return MTLGPUFamily::Mac1;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
+                    return MTLGPUFamily::Apple7;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
+                    return MTLGPUFamily::Apple6;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
+                    return MTLGPUFamily::Apple5;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
+                    return MTLGPUFamily::Apple4;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
+                    return MTLGPUFamily::Apple3;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
+                    return MTLGPUFamily::Apple2;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
+                    return MTLGPUFamily::Apple1;
+                }
+            }
+
+#if TARGET_OS_OSX
+            if (@available(macOS 10.14, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
+                    return MTLGPUFamily::Mac2;
+                }
+            }
+            if (@available(macOS 10.11, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+                    return MTLGPUFamily::Mac1;
+                }
+            }
+#elif TARGET_OS_IOS
+            if (@available(iOS 10.11, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
+                    return MTLGPUFamily::Apple4;
+                }
+            }
+            if (@available(iOS 9.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
+                    return MTLGPUFamily::Apple3;
+                }
+            }
+            if (@available(iOS 8.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+                    return MTLGPUFamily::Apple2;
+                }
+            }
+            if (@available(iOS 8.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+                    return MTLGPUFamily::Apple1;
+                }
+            }
+#endif
+            return DAWN_INTERNAL_ERROR("Unsupported Metal device");
+        }
+
+        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+            struct MTLDeviceLimits {
+                uint32_t maxVertexAttribsPerDescriptor;
+                uint32_t maxBufferArgumentEntriesPerFunc;
+                uint32_t maxTextureArgumentEntriesPerFunc;
+                uint32_t maxSamplerStateArgumentEntriesPerFunc;
+                uint32_t maxThreadsPerThreadgroup;
+                uint32_t maxTotalThreadgroupMemory;
+                uint32_t maxFragmentInputComponents;
+                uint32_t max1DTextureSize;
+                uint32_t max2DTextureSize;
+                uint32_t max3DTextureSize;
+                uint32_t maxTextureArrayLayers;
+                uint32_t minBufferOffsetAlignment;
+            };
+
+            struct LimitsForFamily {
+                uint32_t MTLDeviceLimits::*limit;
+                ityp::array<MTLGPUFamily, uint32_t, 9> values;
+            };
+
+            // clang-format off
+            // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+            //                                                               Apple                                                      Mac
+            //                                                                   1,      2,      3,      4,      5,      6,      7,       1,      2
+            constexpr LimitsForFamily kMTLLimits[12] = {
+                {&MTLDeviceLimits::maxVertexAttribsPerDescriptor,         {    31u,    31u,    31u,    31u,    31u,    31u,    31u,     31u,    31u }},
+                {&MTLDeviceLimits::maxBufferArgumentEntriesPerFunc,       {    31u,    31u,    31u,    31u,    31u,    31u,    31u,     31u,    31u }},
+                {&MTLDeviceLimits::maxTextureArgumentEntriesPerFunc,      {    31u,    31u,    31u,    96u,    96u,   128u,   128u,    128u,   128u }},
+                {&MTLDeviceLimits::maxSamplerStateArgumentEntriesPerFunc, {    16u,    16u,    16u,    16u,    16u,    16u,    16u,     16u,    16u }},
+                {&MTLDeviceLimits::maxThreadsPerThreadgroup,              {   512u,   512u,   512u,  1024u,  1024u,  1024u,  1024u,   1024u,  1024u }},
+                {&MTLDeviceLimits::maxTotalThreadgroupMemory,             { 16352u, 16352u, 16384u, 32768u, 32768u, 32768u, 32768u,  32768u, 32768u }},
+                {&MTLDeviceLimits::maxFragmentInputComponents,            {    60u,    60u,    60u,   124u,   124u,   124u,   124u,    124u,   124u }},
+                {&MTLDeviceLimits::max1DTextureSize,                      {  8192u,  8192u, 16384u, 16384u, 16384u, 16384u, 16384u,  16384u, 16384u }},
+                {&MTLDeviceLimits::max2DTextureSize,                      {  8192u,  8192u, 16384u, 16384u, 16384u, 16384u, 16384u,  16384u, 16384u }},
+                {&MTLDeviceLimits::max3DTextureSize,                      {  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,   2048u,  2048u }},
+                {&MTLDeviceLimits::maxTextureArrayLayers,                 {  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,   2048u,  2048u }},
+                {&MTLDeviceLimits::minBufferOffsetAlignment,              {     4u,     4u,     4u,     4u,     4u,     4u,     4u,    256u,   256u }},
+            };
+            // clang-format on
+
+            MTLGPUFamily mtlGPUFamily;
+            DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
+
+            MTLDeviceLimits mtlLimits;
+            for (const auto& limitsForFamily : kMTLLimits) {
+                mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
+            }
+
+            GetDefaultLimits(&limits->v1);
+
+            limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
+            limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
+            limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
+            limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
+
+            uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
+            maxBuffersPerStage -= 1;  // One slot is reserved to store buffer lengths.
+
+            uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
+                                              limits->v1.maxUniformBuffersPerShaderStage +
+                                              limits->v1.maxVertexBuffers;
+
+            ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
+            {
+                uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
+                limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
+                limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
+                limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
+            }
+
+            uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
+                                               limits->v1.maxStorageTexturesPerShaderStage;
+
+            ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
+            {
+                uint32_t additional =
+                    mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
+                limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
+                limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
+            }
+
+            limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
+
+            // Metal limits are per-function, so the layout limits are the same as the stage
+            // limits. Note: this should likely change if the implementation uses Metal argument
+            // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
+            // buffers may be set directly.
+            //   Mac GPU families with tier 1 argument buffers support 64
+            //   buffers, 128 textures, and 16 samplers. Mac GPU families
+            //   with tier 2 argument buffers support 500000 buffers and
+            //   textures, and 1024 unique samplers
+            limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
+                limits->v1.maxUniformBuffersPerShaderStage;
+            limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
+                limits->v1.maxStorageBuffersPerShaderStage;
+
+            // The WebGPU limit is the limit across all vertex buffers, combined.
+            limits->v1.maxVertexAttributes =
+                limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
+
+            limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
+
+            limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
+            limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
+            limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
+            limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
+            limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
+
+            limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+            limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+
+            uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
+
+            // Metal has no documented limit on the size of a binding. Use the maximum
+            // buffer size.
+            limits->v1.maxUniformBufferBindingSize = maxBufferSize;
+            limits->v1.maxStorageBufferBindingSize = maxBufferSize;
+
+            // TODO(crbug.com/dawn/685):
+            // LIMITS NOT SET:
+            // - maxBindGroups
+            // - maxVertexBufferArrayStride
+
+            return {};
+        }
+
+        NSPRef<id<MTLDevice>> mDevice;
+    };
+
+    // Implementation of the Metal backend's BackendConnection
+
+    Backend::Backend(InstanceBase* instance)
+        : BackendConnection(instance, wgpu::BackendType::Metal) {
+        if (GetInstance()->IsBackendValidationEnabled()) {
+            setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
+        }
+    }
+
+    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+        AdapterDiscoveryOptions options;
+        auto result = DiscoverAdapters(&options);
+        if (result.IsError()) {
+            GetInstance()->ConsumedError(result.AcquireError());
+            return {};
+        }
+        return result.AcquireSuccess();
+    }
+
+    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) {
+        ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
+
+        std::vector<Ref<AdapterBase>> adapters;
+        BOOL supportedVersion = NO;
+#if defined(DAWN_PLATFORM_MACOS)
+        if (@available(macOS 10.11, *)) {
+            supportedVersion = YES;
+
+            NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
+
+            for (id<MTLDevice> device in devices.Get()) {
+                Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
+                if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+                    adapters.push_back(std::move(adapter));
+                }
+            }
+        }
+#endif
+
+#if defined(DAWN_PLATFORM_IOS)
+        if (@available(iOS 8.0, *)) {
+            supportedVersion = YES;
+            // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
+            Ref<Adapter> adapter =
+                AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+            if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+                adapters.push_back(std::move(adapter));
+            }
+        }
+#endif
+        if (!supportedVersion) {
+            UNREACHABLE();
+        }
+        return adapters;
+    }
+
+    BackendConnection* Connect(InstanceBase* instance) {
+        if (!IsMetalSupported()) {
+            return nullptr;
+        }
+        return new Backend(instance);
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BindGroupLayoutMTL.h b/src/dawn/native/metal/BindGroupLayoutMTL.h
new file mode 100644
index 0000000..bf4c3e9
--- /dev/null
+++ b/src/dawn/native/metal/BindGroupLayoutMTL.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/native/BindGroupLayout.h"
+
+namespace dawn::native::metal {
+
+    class BindGroup;
+    class Device;
+
+    class BindGroupLayout final : public BindGroupLayoutBase {
+      public:
+        static Ref<BindGroupLayout> Create(DeviceBase* device,
+                                           const BindGroupLayoutDescriptor* descriptor,
+                                           PipelineCompatibilityToken pipelineCompatibilityToken);
+
+        Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+        void DeallocateBindGroup(BindGroup* bindGroup);
+
+      private:
+        BindGroupLayout(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+        ~BindGroupLayout() override = default;
+
+        SlabAllocator<BindGroup> mBindGroupAllocator;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
diff --git a/src/dawn/native/metal/BindGroupLayoutMTL.mm b/src/dawn/native/metal/BindGroupLayoutMTL.mm
new file mode 100644
index 0000000..e413bdd
--- /dev/null
+++ b/src/dawn/native/metal/BindGroupLayoutMTL.mm
@@ -0,0 +1,45 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+
+#include "dawn/native/metal/BindGroupMTL.h"
+
+namespace dawn::native::metal {
+
+    // static
+    Ref<BindGroupLayout> BindGroupLayout::Create(
+        DeviceBase* device,
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+    }
+
+    BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                     const BindGroupLayoutDescriptor* descriptor,
+                                     PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+    }
+
+    Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+                                                      const BindGroupDescriptor* descriptor) {
+        return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+    }
+
+    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+        mBindGroupAllocator.Deallocate(bindGroup);
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BindGroupMTL.h b/src/dawn/native/metal/BindGroupMTL.h
new file mode 100644
index 0000000..238635c
--- /dev/null
+++ b/src/dawn/native/metal/BindGroupMTL.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPMTL_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/native/BindGroup.h"
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class BindGroup final : public BindGroupBase, public PlacementAllocated {
+      public:
+        static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+
+        BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+
+      private:
+        ~BindGroup() override;
+
+        void DestroyImpl() override;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_BINDGROUPMTL_H_
diff --git a/src/dawn/native/metal/BindGroupMTL.mm b/src/dawn/native/metal/BindGroupMTL.mm
new file mode 100644
index 0000000..a8e02a8
--- /dev/null
+++ b/src/dawn/native/metal/BindGroupMTL.mm
@@ -0,0 +1,37 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BindGroupMTL.h"
+
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+namespace dawn::native::metal {
+
+    BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+        : BindGroupBase(this, device, descriptor) {
+    }
+
+    BindGroup::~BindGroup() = default;
+
+    void BindGroup::DestroyImpl() {
+        BindGroupBase::DestroyImpl();
+        ToBackend(GetLayout())->DeallocateBindGroup(this);
+    }
+
+    // static
+    Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BufferMTL.h b/src/dawn/native/metal/BufferMTL.h
new file mode 100644
index 0000000..8eb9a36
--- /dev/null
+++ b/src/dawn/native/metal/BufferMTL.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BUFFERMTL_H_
+#define DAWNNATIVE_METAL_BUFFERMTL_H_
+
+#include "dawn/common/NSRef.h"
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Buffer.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class CommandRecordingContext;
+    class Device;
+
+    class Buffer final : public BufferBase {
+      public:
+        static ResultOrError<Ref<Buffer>> Create(Device* device,
+                                                 const BufferDescriptor* descriptor);
+        id<MTLBuffer> GetMTLBuffer() const;
+
+        bool EnsureDataInitialized(CommandRecordingContext* commandContext);
+        bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                uint64_t offset,
+                                                uint64_t size);
+        bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                const CopyTextureToBufferCmd* copy);
+
+        static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
+
+      private:
+        using BufferBase::BufferBase;
+        MaybeError Initialize(bool mappedAtCreation);
+
+        ~Buffer() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+        void UnmapImpl() override;
+        void DestroyImpl() override;
+        void* GetMappedPointerImpl() override;
+        bool IsCPUWritableAtCreation() const override;
+        MaybeError MapAtCreationImpl() override;
+
+        void InitializeToZero(CommandRecordingContext* commandContext);
+        void ClearBuffer(CommandRecordingContext* commandContext,
+                         uint8_t clearValue,
+                         uint64_t offset = 0,
+                         uint64_t size = 0);
+
+        NSPRef<id<MTLBuffer>> mMtlBuffer;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_BUFFERMTL_H_
diff --git a/src/dawn/native/metal/BufferMTL.mm b/src/dawn/native/metal/BufferMTL.mm
new file mode 100644
index 0000000..695872a
--- /dev/null
+++ b/src/dawn/native/metal/BufferMTL.mm
@@ -0,0 +1,240 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BufferMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/metal/CommandRecordingContext.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+#include <limits>
+
+namespace dawn::native::metal {
+    // The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
+    // largest alignment of supported data types
+    static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
+
+    // static
+    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+        return std::move(buffer);
+    }
+
+    // static
+    uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
+        if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
+            return [mtlDevice maxBufferLength];
+        }
+
+        // Earlier versions of Metal had maximums defined in the Metal feature set tables
+        // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
+#if defined(DAWN_PLATFORM_MACOS)
+        // 10.12 and 10.13 have a 1Gb limit.
+        if (@available(macOS 10.12, *)) {
+            // |maxBufferLength| isn't always available on older systems. If available, use
+            // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
+            // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
+            return 1024 * 1024 * 1024;
+        }
+        // 10.11 has a 256Mb limit
+        if (@available(maxOS 10.11, *)) {
+            return 256 * 1024 * 1024;
+        }
+#else
+        // macOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+        return 256 * 1024 * 1024;
+#endif
+    }
+
+    MaybeError Buffer::Initialize(bool mappedAtCreation) {
+        MTLResourceOptions storageMode;
+        if (GetUsage() & kMappableBufferUsages) {
+            storageMode = MTLResourceStorageModeShared;
+        } else {
+            storageMode = MTLResourceStorageModePrivate;
+        }
+
+        uint32_t alignment = 1;
+#ifdef DAWN_PLATFORM_MACOS
+        // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
+        alignment = 4;
+#endif
+
+        // Metal validation layer requires the size of uniform buffer and storage buffer to be no
+        // less than the size of the buffer block defined in shader, and the overall size of the
+        // buffer must be aligned to the largest alignment of its members.
+        if (GetUsage() &
+            (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+            ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
+            alignment = kMinUniformOrStorageBufferAlignment;
+        }
+
+        // The vertex pulling transform requires at least 4 bytes in the buffer.
+        // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
+        // after the end.
+        NSUInteger extraBytes = 0u;
+        if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
+            extraBytes = 4u;
+        }
+
+        if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+        NSUInteger currentSize =
+            std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
+
+        if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
+            // Alignment would overlow.
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+        currentSize = Align(currentSize, alignment);
+
+        uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
+        if (currentSize > maxBufferSize) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+
+        mAllocatedSize = currentSize;
+        mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice()
+            newBufferWithLength:currentSize
+                        options:storageMode]);
+        if (mMtlBuffer == nullptr) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
+        }
+
+        // The buffers with mappedAtCreation == true will be initialized in
+        // BufferBase::MapAtCreation().
+        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+            !mappedAtCreation) {
+            CommandRecordingContext* commandContext =
+                ToBackend(GetDevice())->GetPendingCommandContext();
+            ClearBuffer(commandContext, uint8_t(1u));
+        }
+
+        // Initialize the padding bytes to zero.
+        if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
+            !mappedAtCreation) {
+            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+            if (paddingBytes > 0) {
+                uint32_t clearSize = Align(paddingBytes, 4);
+                uint64_t clearOffset = GetAllocatedSize() - clearSize;
+
+                CommandRecordingContext* commandContext =
+                    ToBackend(GetDevice())->GetPendingCommandContext();
+                ClearBuffer(commandContext, 0, clearOffset, clearSize);
+            }
+        }
+        return {};
+    }
+
+    Buffer::~Buffer() = default;
+
+    id<MTLBuffer> Buffer::GetMTLBuffer() const {
+        return mMtlBuffer.Get();
+    }
+
+    bool Buffer::IsCPUWritableAtCreation() const {
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return GetUsage() & kMappableBufferUsages;
+    }
+
+    MaybeError Buffer::MapAtCreationImpl() {
+        return {};
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        CommandRecordingContext* commandContext =
+            ToBackend(GetDevice())->GetPendingCommandContext();
+        EnsureDataInitialized(commandContext);
+
+        return {};
+    }
+
+    void* Buffer::GetMappedPointerImpl() {
+        return [*mMtlBuffer contents];
+    }
+
+    void Buffer::UnmapImpl() {
+        // Nothing to do, Metal StorageModeShared buffers are always mapped.
+    }
+
+    void Buffer::DestroyImpl() {
+        BufferBase::DestroyImpl();
+        mMtlBuffer = nullptr;
+    }
+
+    bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        InitializeToZero(commandContext);
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                    uint64_t offset,
+                                                    uint64_t size) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferRange(offset, size)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero(commandContext);
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                    const CopyTextureToBufferCmd* copy) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero(commandContext);
+        return true;
+    }
+
+    void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+        ASSERT(NeedsInitialization());
+
+        ClearBuffer(commandContext, uint8_t(0u));
+
+        SetIsDataInitialized();
+        GetDevice()->IncrementLazyClearCountForTesting();
+    }
+
+    void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+                             uint8_t clearValue,
+                             uint64_t offset,
+                             uint64_t size) {
+        ASSERT(commandContext != nullptr);
+        size = size > 0 ? size : GetAllocatedSize();
+        ASSERT(size > 0);
+        [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
+                                           range:NSMakeRange(offset, size)
+                                           value:clearValue];
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/CommandBufferMTL.h b/src/dawn/native/metal/CommandBufferMTL.h
new file mode 100644
index 0000000..29db870
--- /dev/null
+++ b/src/dawn/native/metal/CommandBufferMTL.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
+#define DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native {
+    class CommandEncoder;
+}
+
+namespace dawn::native::metal {
+
+    class CommandRecordingContext;
+    class Device;
+    class Texture;
+
+    void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+                                   id<MTLBuffer> mtlBuffer,
+                                   uint64_t bufferSize,
+                                   uint64_t offset,
+                                   uint32_t bytesPerRow,
+                                   uint32_t rowsPerImage,
+                                   Texture* texture,
+                                   uint32_t mipLevel,
+                                   const Origin3D& origin,
+                                   Aspect aspect,
+                                   const Extent3D& copySize);
+
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor);
+
+        MaybeError FillCommands(CommandRecordingContext* commandContext);
+
+      private:
+        using CommandBufferBase::CommandBufferBase;
+
+        MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
+        MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
+                                    MTLRenderPassDescriptor* mtlRenderPass,
+                                    uint32_t width,
+                                    uint32_t height);
+
+        MaybeError EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+                                            MTLRenderPassDescriptor* mtlRenderPass,
+                                            uint32_t width,
+                                            uint32_t height);
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
diff --git a/src/dawn/native/metal/CommandBufferMTL.mm b/src/dawn/native/metal/CommandBufferMTL.mm
new file mode 100644
index 0000000..86b88a8
--- /dev/null
+++ b/src/dawn/native/metal/CommandBufferMTL.mm
@@ -0,0 +1,1594 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/CommandBufferMTL.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/metal/BindGroupMTL.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/ComputePipelineMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/QuerySetMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/native/metal/SamplerMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native::metal {
+
+    namespace {
+
+        // Allows this file to use MTLStoreActionStoreAndMultismapleResolve because the logic is
+        // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
+        // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+        constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
+            MTLStoreActionStoreAndMultisampleResolve;
+#pragma clang diagnostic pop
+
+        MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
+            switch (format) {
+                case wgpu::IndexFormat::Uint16:
+                    return MTLIndexTypeUInt16;
+                case wgpu::IndexFormat::Uint32:
+                    return MTLIndexTypeUInt32;
+                case wgpu::IndexFormat::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(
+            BeginRenderPassCmd* renderPass) {
+            // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+            NSRef<MTLRenderPassDescriptor> descriptorRef =
+                [MTLRenderPassDescriptor renderPassDescriptor];
+            MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+            for (ColorAttachmentIndex attachment :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                uint8_t i = static_cast<uint8_t>(attachment);
+                auto& attachmentInfo = renderPass->colorAttachments[attachment];
+
+                switch (attachmentInfo.loadOp) {
+                    case wgpu::LoadOp::Clear:
+                        descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
+                        descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
+                            attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
+                            attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
+                        break;
+
+                    case wgpu::LoadOp::Load:
+                        descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+                        break;
+
+                    case wgpu::LoadOp::Undefined:
+                        UNREACHABLE();
+                        break;
+                }
+
+                descriptor.colorAttachments[i].texture =
+                    ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
+                descriptor.colorAttachments[i].level = attachmentInfo.view->GetBaseMipLevel();
+                descriptor.colorAttachments[i].slice = attachmentInfo.view->GetBaseArrayLayer();
+
+                bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+                if (hasResolveTarget) {
+                    descriptor.colorAttachments[i].resolveTexture =
+                        ToBackend(attachmentInfo.resolveTarget->GetTexture())->GetMTLTexture();
+                    descriptor.colorAttachments[i].resolveLevel =
+                        attachmentInfo.resolveTarget->GetBaseMipLevel();
+                    descriptor.colorAttachments[i].resolveSlice =
+                        attachmentInfo.resolveTarget->GetBaseArrayLayer();
+
+                    switch (attachmentInfo.storeOp) {
+                        case wgpu::StoreOp::Store:
+                            descriptor.colorAttachments[i].storeAction =
+                                kMTLStoreActionStoreAndMultisampleResolve;
+                            break;
+                        case wgpu::StoreOp::Discard:
+                            descriptor.colorAttachments[i].storeAction =
+                                MTLStoreActionMultisampleResolve;
+                            break;
+                        case wgpu::StoreOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+                } else {
+                    switch (attachmentInfo.storeOp) {
+                        case wgpu::StoreOp::Store:
+                            descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
+                            break;
+                        case wgpu::StoreOp::Discard:
+                            descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
+                            break;
+                        case wgpu::StoreOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+                }
+            }
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+                id<MTLTexture> texture =
+                    ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
+                const Format& format = attachmentInfo.view->GetTexture()->GetFormat();
+
+                if (format.HasDepth()) {
+                    descriptor.depthAttachment.texture = texture;
+                    descriptor.depthAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+                    descriptor.depthAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
+
+                    switch (attachmentInfo.depthStoreOp) {
+                        case wgpu::StoreOp::Store:
+                            descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+                            break;
+
+                        case wgpu::StoreOp::Discard:
+                            descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
+                            break;
+
+                        case wgpu::StoreOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+
+                    switch (attachmentInfo.depthLoadOp) {
+                        case wgpu::LoadOp::Clear:
+                            descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+                            descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
+                            break;
+
+                        case wgpu::LoadOp::Load:
+                            descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+                            break;
+
+                        case wgpu::LoadOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+                }
+
+                if (format.HasStencil()) {
+                    descriptor.stencilAttachment.texture = texture;
+                    descriptor.stencilAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+                    descriptor.stencilAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
+
+                    switch (attachmentInfo.stencilStoreOp) {
+                        case wgpu::StoreOp::Store:
+                            descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+                            break;
+
+                        case wgpu::StoreOp::Discard:
+                            descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
+                            break;
+
+                        case wgpu::StoreOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+
+                    switch (attachmentInfo.stencilLoadOp) {
+                        case wgpu::LoadOp::Clear:
+                            descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+                            descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
+                            break;
+
+                        case wgpu::LoadOp::Load:
+                            descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+                            break;
+
+                        case wgpu::LoadOp::Undefined:
+                            UNREACHABLE();
+                            break;
+                    }
+                }
+            }
+
+            if (renderPass->occlusionQuerySet.Get() != nullptr) {
+                descriptor.visibilityResultBuffer =
+                    ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
+            }
+
+            return descriptorRef;
+        }
+
+        // Helper function for Toggle EmulateStoreAndMSAAResolve
+        void ResolveInAnotherRenderPass(
+            CommandRecordingContext* commandContext,
+            const MTLRenderPassDescriptor* mtlRenderPass,
+            const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
+            // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+            NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
+                [MTLRenderPassDescriptor renderPassDescriptor];
+            MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
+
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (resolveTextures[i] == nullptr) {
+                    continue;
+                }
+
+                mtlRenderPassForResolve.colorAttachments[i].texture =
+                    mtlRenderPass.colorAttachments[i].texture;
+                mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
+                mtlRenderPassForResolve.colorAttachments[i].storeAction =
+                    MTLStoreActionMultisampleResolve;
+                mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
+                mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
+                    mtlRenderPass.colorAttachments[i].resolveLevel;
+                mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
+                    mtlRenderPass.colorAttachments[i].resolveSlice;
+            }
+
+            commandContext->BeginRender(mtlRenderPassForResolve);
+            commandContext->EndRender();
+        }
+
+        // Helper functions for Toggle AlwaysResolveIntoZeroLevelAndLayer
+        ResultOrError<NSPRef<id<MTLTexture>>> CreateResolveTextureForWorkaround(
+            Device* device,
+            MTLPixelFormat mtlFormat,
+            uint32_t width,
+            uint32_t height) {
+            NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+            MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+            mtlDesc.textureType = MTLTextureType2D;
+            mtlDesc.usage = MTLTextureUsageRenderTarget;
+            mtlDesc.pixelFormat = mtlFormat;
+            mtlDesc.width = width;
+            mtlDesc.height = height;
+            mtlDesc.depth = 1;
+            mtlDesc.mipmapLevelCount = 1;
+            mtlDesc.arrayLength = 1;
+            mtlDesc.storageMode = MTLStorageModePrivate;
+            mtlDesc.sampleCount = 1;
+
+            id<MTLTexture> texture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc];
+            if (texture == nil) {
+                return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
+            }
+
+            return AcquireNSPRef(texture);
+        }
+
+        void CopyIntoTrueResolveTarget(CommandRecordingContext* commandContext,
+                                       id<MTLTexture> mtlTrueResolveTexture,
+                                       uint32_t trueResolveLevel,
+                                       uint32_t trueResolveSlice,
+                                       id<MTLTexture> temporaryResolveTexture,
+                                       uint32_t width,
+                                       uint32_t height) {
+            [commandContext->EnsureBlit() copyFromTexture:temporaryResolveTexture
+                                              sourceSlice:0
+                                              sourceLevel:0
+                                             sourceOrigin:MTLOriginMake(0, 0, 0)
+                                               sourceSize:MTLSizeMake(width, height, 1)
+                                                toTexture:mtlTrueResolveTexture
+                                         destinationSlice:trueResolveSlice
+                                         destinationLevel:trueResolveLevel
+                                        destinationOrigin:MTLOriginMake(0, 0, 0)];
+        }
+
+        // Metal uses a physical addressing mode which means buffers in the shading language are
+        // just pointers to the virtual address of their start. This means there is no way to know
+        // the length of a buffer to compute the length() of unsized arrays at the end of storage
+        // buffers. Tint implements the length() of unsized arrays by requiring an extra
+        // buffer that contains the length of other buffers. This structure that keeps track of the
+        // length of storage buffers and can apply them to the reserved "buffer length buffer" when
+        // needed for a draw or a dispatch.
+        struct StorageBufferLengthTracker {
+            wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
+
+            // The lengths of buffers are stored as 32bit integers because that is the width the
+            // MSL code generated by Tint expects.
+            // UBOs require we align the max buffer count to 4 elements (16 bytes).
+            static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
+            PerStage<std::array<uint32_t, MaxBufferCount>> data;
+
+            void Apply(id<MTLRenderCommandEncoder> render,
+                       RenderPipeline* pipeline,
+                       bool enableVertexPulling) {
+                wgpu::ShaderStage stagesToApply =
+                    dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
+
+                if (stagesToApply == wgpu::ShaderStage::None) {
+                    return;
+                }
+
+                if (stagesToApply & wgpu::ShaderStage::Vertex) {
+                    uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+                                               ->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+                    if (enableVertexPulling) {
+                        bufferCount += pipeline->GetVertexBufferCount();
+                    }
+
+                    bufferCount = Align(bufferCount, 4);
+                    ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
+
+                    [render setVertexBytes:data[SingleShaderStage::Vertex].data()
+                                    length:sizeof(uint32_t) * bufferCount
+                                   atIndex:kBufferLengthBufferSlot];
+                }
+
+                if (stagesToApply & wgpu::ShaderStage::Fragment) {
+                    uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+                                               ->GetBufferBindingCount(SingleShaderStage::Fragment);
+                    bufferCount = Align(bufferCount, 4);
+                    ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
+
+                    [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
+                                      length:sizeof(uint32_t) * bufferCount
+                                     atIndex:kBufferLengthBufferSlot];
+                }
+
+                // Only mark clean stages that were actually applied.
+                dirtyStages ^= stagesToApply;
+            }
+
+            void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
+                if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
+                    return;
+                }
+
+                if (!pipeline->RequiresStorageBufferLength()) {
+                    return;
+                }
+
+                uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+                                           ->GetBufferBindingCount(SingleShaderStage::Compute);
+                bufferCount = Align(bufferCount, 4);
+                ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
+
+                [compute setBytes:data[SingleShaderStage::Compute].data()
+                           length:sizeof(uint32_t) * bufferCount
+                          atIndex:kBufferLengthBufferSlot];
+
+                dirtyStages ^= wgpu::ShaderStage::Compute;
+            }
+        };
+
+        // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+        // pipeline state.
+        // Bind groups may be inherited because bind groups are packed in the buffer /
+        // texture tables in contiguous order.
+        class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
+          public:
+            explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
+                : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
+            }
+
+            template <typename Encoder>
+            void Apply(Encoder encoder) {
+                BeforeApply();
+                for (BindGroupIndex index :
+                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+                    ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
+                                   mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
+                                   ToBackend(mPipelineLayout));
+                }
+                AfterApply();
+            }
+
+          private:
+            // Handles a call to SetBindGroup, directing the commands to the correct encoder.
+            // There is a single function that takes both encoders to factor code. Other approaches
+            // like templates wouldn't work because the name of methods are different between the
+            // two encoder types.
+            void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
+                                    id<MTLComputeCommandEncoder> compute,
+                                    BindGroupIndex index,
+                                    BindGroup* group,
+                                    uint32_t dynamicOffsetCount,
+                                    uint64_t* dynamicOffsets,
+                                    PipelineLayout* pipelineLayout) {
+                uint32_t currentDynamicBufferIndex = 0;
+
+                // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
+                // so that we only have to do one setVertexBuffers and one setFragmentBuffers
+                // call here.
+                for (BindingIndex bindingIndex{0};
+                     bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+                    const BindingInfo& bindingInfo =
+                        group->GetLayout()->GetBindingInfo(bindingIndex);
+
+                    bool hasVertStage =
+                        bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
+                    bool hasFragStage =
+                        bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
+                    bool hasComputeStage =
+                        bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
+
+                    uint32_t vertIndex = 0;
+                    uint32_t fragIndex = 0;
+                    uint32_t computeIndex = 0;
+
+                    if (hasVertStage) {
+                        vertIndex = pipelineLayout->GetBindingIndexInfo(
+                            SingleShaderStage::Vertex)[index][bindingIndex];
+                    }
+                    if (hasFragStage) {
+                        fragIndex = pipelineLayout->GetBindingIndexInfo(
+                            SingleShaderStage::Fragment)[index][bindingIndex];
+                    }
+                    if (hasComputeStage) {
+                        computeIndex = pipelineLayout->GetBindingIndexInfo(
+                            SingleShaderStage::Compute)[index][bindingIndex];
+                    }
+
+                    switch (bindingInfo.bindingType) {
+                        case BindingInfoType::Buffer: {
+                            const BufferBinding& binding =
+                                group->GetBindingAsBufferBinding(bindingIndex);
+                            const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
+                            NSUInteger offset = binding.offset;
+
+                            // TODO(crbug.com/dawn/854): Record bound buffer status to use
+                            // setBufferOffset to achieve better performance.
+                            if (bindingInfo.buffer.hasDynamicOffset) {
+                                offset += dynamicOffsets[currentDynamicBufferIndex];
+                                currentDynamicBufferIndex++;
+                            }
+
+                            if (hasVertStage) {
+                                mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
+                                    binding.size;
+                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+                                [render setVertexBuffers:&buffer
+                                                 offsets:&offset
+                                               withRange:NSMakeRange(vertIndex, 1)];
+                            }
+                            if (hasFragStage) {
+                                mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
+                                    binding.size;
+                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
+                                [render setFragmentBuffers:&buffer
+                                                   offsets:&offset
+                                                 withRange:NSMakeRange(fragIndex, 1)];
+                            }
+                            if (hasComputeStage) {
+                                mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
+                                    binding.size;
+                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
+                                [compute setBuffers:&buffer
+                                            offsets:&offset
+                                          withRange:NSMakeRange(computeIndex, 1)];
+                            }
+
+                            break;
+                        }
+
+                        case BindingInfoType::Sampler: {
+                            auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+                            if (hasVertStage) {
+                                [render setVertexSamplerState:sampler->GetMTLSamplerState()
+                                                      atIndex:vertIndex];
+                            }
+                            if (hasFragStage) {
+                                [render setFragmentSamplerState:sampler->GetMTLSamplerState()
+                                                        atIndex:fragIndex];
+                            }
+                            if (hasComputeStage) {
+                                [compute setSamplerState:sampler->GetMTLSamplerState()
+                                                 atIndex:computeIndex];
+                            }
+                            break;
+                        }
+
+                        case BindingInfoType::Texture:
+                        case BindingInfoType::StorageTexture: {
+                            auto textureView =
+                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                            if (hasVertStage) {
+                                [render setVertexTexture:textureView->GetMTLTexture()
+                                                 atIndex:vertIndex];
+                            }
+                            if (hasFragStage) {
+                                [render setFragmentTexture:textureView->GetMTLTexture()
+                                                   atIndex:fragIndex];
+                            }
+                            if (hasComputeStage) {
+                                [compute setTexture:textureView->GetMTLTexture()
+                                            atIndex:computeIndex];
+                            }
+                            break;
+                        }
+
+                        case BindingInfoType::ExternalTexture:
+                            UNREACHABLE();
+                    }
+                }
+            }
+
+            template <typename... Args>
+            void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
+                ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
+            }
+
+            template <typename... Args>
+            void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
+                ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
+            }
+
+            StorageBufferLengthTracker* mLengthTracker;
+        };
+
+        // Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
+        // all the relevant state.
+        class VertexBufferTracker {
+          public:
+            explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
+                : mLengthTracker(lengthTracker) {
+            }
+
+            void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
+                mVertexBuffers[slot] = buffer->GetMTLBuffer();
+                mVertexBufferOffsets[slot] = offset;
+
+                ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
+                mVertexBufferBindingSizes[slot] =
+                    static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
+                mDirtyVertexBuffers.set(slot);
+            }
+
+            void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
+                // When a new pipeline is bound we must set all the vertex buffers again because
+                // they might have been offset by the pipeline layout, and they might be packed
+                // differently from the previous pipeline.
+                mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+            }
+
+            void Apply(id<MTLRenderCommandEncoder> encoder,
+                       RenderPipeline* pipeline,
+                       bool enableVertexPulling) {
+                const auto& vertexBuffersToApply =
+                    mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
+
+                for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
+                    uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
+
+                    if (enableVertexPulling) {
+                        // Insert lengths for vertex buffers bound as storage buffers
+                        mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
+                            mVertexBufferBindingSizes[slot];
+                        mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+                    }
+
+                    [encoder setVertexBuffers:&mVertexBuffers[slot]
+                                      offsets:&mVertexBufferOffsets[slot]
+                                    withRange:NSMakeRange(metalIndex, 1)];
+                }
+
+                mDirtyVertexBuffers.reset();
+            }
+
+          private:
+            // All the indices in these arrays are Dawn vertex buffer indices
+            ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+            ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
+            ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
+            ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
+
+            StorageBufferLengthTracker* mLengthTracker;
+        };
+
+    }  // anonymous namespace
+
+    void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+                                   id<MTLBuffer> mtlBuffer,
+                                   uint64_t bufferSize,
+                                   uint64_t offset,
+                                   uint32_t bytesPerRow,
+                                   uint32_t rowsPerImage,
+                                   Texture* texture,
+                                   uint32_t mipLevel,
+                                   const Origin3D& origin,
+                                   Aspect aspect,
+                                   const Extent3D& copySize) {
+        TextureBufferCopySplit splitCopies =
+            ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
+                                          bytesPerRow, rowsPerImage, aspect);
+
+        MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
+
+        for (const auto& copyInfo : splitCopies) {
+            uint64_t bufferOffset = copyInfo.bufferOffset;
+            switch (texture->GetDimension()) {
+                case wgpu::TextureDimension::e1D: {
+                    [commandContext->EnsureBlit()
+                             copyFromBuffer:mtlBuffer
+                               sourceOffset:bufferOffset
+                          sourceBytesPerRow:copyInfo.bytesPerRow
+                        sourceBytesPerImage:copyInfo.bytesPerImage
+                                 sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
+                                  toTexture:texture->GetMTLTexture()
+                           destinationSlice:0
+                           destinationLevel:mipLevel
+                          destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
+                                    options:blitOption];
+                    break;
+                }
+                case wgpu::TextureDimension::e2D: {
+                    const MTLOrigin textureOrigin =
+                        MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+                    const MTLSize copyExtent =
+                        MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+                    for (uint32_t z = copyInfo.textureOrigin.z;
+                         z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
+                         ++z) {
+                        [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
+                                                        sourceOffset:bufferOffset
+                                                   sourceBytesPerRow:copyInfo.bytesPerRow
+                                                 sourceBytesPerImage:copyInfo.bytesPerImage
+                                                          sourceSize:copyExtent
+                                                           toTexture:texture->GetMTLTexture()
+                                                    destinationSlice:z
+                                                    destinationLevel:mipLevel
+                                                   destinationOrigin:textureOrigin
+                                                             options:blitOption];
+                        bufferOffset += copyInfo.bytesPerImage;
+                    }
+                    break;
+                }
+                case wgpu::TextureDimension::e3D: {
+                    [commandContext->EnsureBlit()
+                             copyFromBuffer:mtlBuffer
+                               sourceOffset:bufferOffset
+                          sourceBytesPerRow:copyInfo.bytesPerRow
+                        sourceBytesPerImage:copyInfo.bytesPerImage
+                                 sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+                                                        copyInfo.copyExtent.height,
+                                                        copyInfo.copyExtent.depthOrArrayLayers)
+                                  toTexture:texture->GetMTLTexture()
+                           destinationSlice:0
+                           destinationLevel:mipLevel
+                          destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+                                                          copyInfo.textureOrigin.y,
+                                                          copyInfo.textureOrigin.z)
+                                    options:blitOption];
+                    break;
+                }
+            }
+        }
+    }
+
+    // static
+    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                             const CommandBufferDescriptor* descriptor) {
+        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    }
+
+    MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+        size_t nextComputePassNumber = 0;
+        size_t nextRenderPassNumber = 0;
+
+        auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
+                                     CommandRecordingContext* commandContext) {
+            for (size_t i = 0; i < scope.textures.size(); ++i) {
+                Texture* texture = ToBackend(scope.textures[i]);
+
+                // Clear subresources that are not render attachments. Render attachments will be
+                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+                // subresource has not been initialized before the render pass.
+                scope.textureUsages[i].Iterate(
+                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                            texture->EnsureSubresourceContentInitialized(commandContext, range);
+                        }
+                    });
+            }
+            for (BufferBase* bufferBase : scope.buffers) {
+                ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::BeginComputePass: {
+                    mCommands.NextCommand<BeginComputePassCmd>();
+
+                    for (const SyncScopeResourceUsage& scope :
+                         GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+                        LazyClearSyncScope(scope, commandContext);
+                    }
+                    commandContext->EndBlit();
+
+                    DAWN_TRY(EncodeComputePass(commandContext));
+
+                    nextComputePassNumber++;
+                    break;
+                }
+
+                case Command::BeginRenderPass: {
+                    BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+                                       commandContext);
+                    commandContext->EndBlit();
+
+                    LazyClearRenderPassAttachments(cmd);
+                    NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
+                    DAWN_TRY(EncodeRenderPass(commandContext, descriptor.Get(), cmd->width,
+                                              cmd->height));
+
+                    nextRenderPassNumber++;
+                    break;
+                }
+
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                    if (copy->size == 0) {
+                        // Skip no-op copies.
+                        break;
+                    }
+
+                    ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+                    ToBackend(copy->destination)
+                        ->EnsureDataInitializedAsDestination(commandContext,
+                                                             copy->destinationOffset, copy->size);
+
+                    [commandContext->EnsureBlit()
+                           copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
+                             sourceOffset:copy->sourceOffset
+                                 toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
+                        destinationOffset:copy->destinationOffset
+                                     size:copy->size];
+                    break;
+                }
+
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+                    auto& copySize = copy->copySize;
+                    Buffer* buffer = ToBackend(src.buffer.Get());
+                    Texture* texture = ToBackend(dst.texture.Get());
+
+                    buffer->EnsureDataInitialized(commandContext);
+                    EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+
+                    RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
+                                              buffer->GetSize(), src.offset, src.bytesPerRow,
+                                              src.rowsPerImage, texture, dst.mipLevel, dst.origin,
+                                              dst.aspect, copySize);
+                    break;
+                }
+
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+                    auto& copySize = copy->copySize;
+                    Texture* texture = ToBackend(src.texture.Get());
+                    Buffer* buffer = ToBackend(dst.buffer.Get());
+
+                    buffer->EnsureDataInitializedAsDestination(commandContext, copy);
+
+                    texture->EnsureSubresourceContentInitialized(
+                        commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+
+                    TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+                        texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
+                        dst.bytesPerRow, dst.rowsPerImage, src.aspect);
+
+                    for (const auto& copyInfo : splitCopies) {
+                        MTLBlitOption blitOption =
+                            ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
+                        uint64_t bufferOffset = copyInfo.bufferOffset;
+
+                        switch (texture->GetDimension()) {
+                            case wgpu::TextureDimension::e1D: {
+                                [commandContext->EnsureBlit()
+                                             copyFromTexture:texture->GetMTLTexture()
+                                                 sourceSlice:0
+                                                 sourceLevel:src.mipLevel
+                                                sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+                                                                           0, 0)
+                                                  sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+                                                                         1, 1)
+                                                    toBuffer:buffer->GetMTLBuffer()
+                                           destinationOffset:bufferOffset
+                                      destinationBytesPerRow:copyInfo.bytesPerRow
+                                    destinationBytesPerImage:copyInfo.bytesPerImage
+                                                     options:blitOption];
+                                break;
+                            }
+
+                            case wgpu::TextureDimension::e2D: {
+                                const MTLOrigin textureOrigin = MTLOriginMake(
+                                    copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+                                const MTLSize copyExtent = MTLSizeMake(
+                                    copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+                                for (uint32_t z = copyInfo.textureOrigin.z;
+                                     z < copyInfo.textureOrigin.z +
+                                             copyInfo.copyExtent.depthOrArrayLayers;
+                                     ++z) {
+                                    [commandContext->EnsureBlit()
+                                                 copyFromTexture:texture->GetMTLTexture()
+                                                     sourceSlice:z
+                                                     sourceLevel:src.mipLevel
+                                                    sourceOrigin:textureOrigin
+                                                      sourceSize:copyExtent
+                                                        toBuffer:buffer->GetMTLBuffer()
+                                               destinationOffset:bufferOffset
+                                          destinationBytesPerRow:copyInfo.bytesPerRow
+                                        destinationBytesPerImage:copyInfo.bytesPerImage
+                                                         options:blitOption];
+                                    bufferOffset += copyInfo.bytesPerImage;
+                                }
+                                break;
+                            }
+                            case wgpu::TextureDimension::e3D: {
+                                [commandContext->EnsureBlit()
+                                             copyFromTexture:texture->GetMTLTexture()
+                                                 sourceSlice:0
+                                                 sourceLevel:src.mipLevel
+                                                sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+                                                                           copyInfo.textureOrigin.y,
+                                                                           copyInfo.textureOrigin.z)
+                                                  sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+                                                                         copyInfo.copyExtent.height,
+                                                                         copyInfo.copyExtent
+                                                                             .depthOrArrayLayers)
+                                                    toBuffer:buffer->GetMTLBuffer()
+                                           destinationOffset:bufferOffset
+                                      destinationBytesPerRow:copyInfo.bytesPerRow
+                                    destinationBytesPerImage:copyInfo.bytesPerImage
+                                                     options:blitOption];
+                                break;
+                            }
+                        }
+                    }
+                    break;
+                }
+
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        mCommands.NextCommand<CopyTextureToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    Texture* srcTexture = ToBackend(copy->source.texture.Get());
+                    Texture* dstTexture = ToBackend(copy->destination.texture.Get());
+
+                    srcTexture->EnsureSubresourceContentInitialized(
+                        commandContext,
+                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
+                    EnsureDestinationTextureInitialized(commandContext, dstTexture,
+                                                        copy->destination, copy->copySize);
+
+                    const MTLSize sizeOneSlice =
+                        MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
+
+                    uint32_t sourceLayer = 0;
+                    uint32_t sourceOriginZ = 0;
+
+                    uint32_t destinationLayer = 0;
+                    uint32_t destinationOriginZ = 0;
+
+                    uint32_t* sourceZPtr;
+                    if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+                        sourceZPtr = &sourceLayer;
+                    } else {
+                        sourceZPtr = &sourceOriginZ;
+                    }
+
+                    uint32_t* destinationZPtr;
+                    if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+                        destinationZPtr = &destinationLayer;
+                    } else {
+                        destinationZPtr = &destinationOriginZ;
+                    }
+
+                    // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
+                    for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+                        *sourceZPtr = copy->source.origin.z + z;
+                        *destinationZPtr = copy->destination.origin.z + z;
+
+                        // Hold the ref until out of scope
+                        NSPRef<id<MTLTexture>> dstTextureView =
+                            dstTexture->CreateFormatView(srcTexture->GetFormat().format);
+
+                        [commandContext->EnsureBlit()
+                              copyFromTexture:srcTexture->GetMTLTexture()
+                                  sourceSlice:sourceLayer
+                                  sourceLevel:copy->source.mipLevel
+                                 sourceOrigin:MTLOriginMake(copy->source.origin.x,
+                                                            copy->source.origin.y, sourceOriginZ)
+                                   sourceSize:sizeOneSlice
+                                    toTexture:dstTextureView.Get()
+                             destinationSlice:destinationLayer
+                             destinationLevel:copy->destination.mipLevel
+                            destinationOrigin:MTLOriginMake(copy->destination.origin.x,
+                                                            copy->destination.origin.y,
+                                                            destinationOriginZ)];
+                    }
+                    break;
+                }
+
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                    if (cmd->size == 0) {
+                        // Skip no-op copies.
+                        break;
+                    }
+                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+
+                    bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+                        commandContext, cmd->offset, cmd->size);
+
+                    if (!clearedToZero) {
+                        [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
+                                                           range:NSMakeRange(cmd->offset, cmd->size)
+                                                           value:0u];
+                    }
+
+                    break;
+                }
+
+                case Command::ResolveQuerySet: {
+                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                    Buffer* destination = ToBackend(cmd->destination.Get());
+
+                    destination->EnsureDataInitializedAsDestination(
+                        commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
+
+                    if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
+                        [commandContext->EnsureBlit()
+                               copyFromBuffer:querySet->GetVisibilityBuffer()
+                                 sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
+                                     toBuffer:destination->GetMTLBuffer()
+                            destinationOffset:NSUInteger(cmd->destinationOffset)
+                                         size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
+                    } else {
+                        if (@available(macos 10.15, iOS 14.0, *)) {
+                            [commandContext->EnsureBlit()
+                                  resolveCounters:querySet->GetCounterSampleBuffer()
+                                          inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
+                                destinationBuffer:destination->GetMTLBuffer()
+                                destinationOffset:NSUInteger(cmd->destinationOffset)];
+                        } else {
+                            UNREACHABLE();
+                        }
+                    }
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                    if (@available(macos 10.15, iOS 14.0, *)) {
+                        [commandContext->EnsureBlit()
+                            sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                     atSampleIndex:NSUInteger(cmd->queryIndex)
+                                       withBarrier:YES];
+                    } else {
+                        UNREACHABLE();
+                    }
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    // MTLCommandBuffer does not implement insertDebugSignpost
+                    SkipCommand(&mCommands, type);
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+
+                    if (@available(macos 10.13, *)) {
+                        [commandContext->GetCommands() popDebugGroup];
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                    if (@available(macos 10.13, *)) {
+                        NSRef<NSString> mtlLabel =
+                            AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                        [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
+                    }
+
+                    break;
+                }
+
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                    const uint64_t offset = write->offset;
+                    const uint64_t size = write->size;
+                    if (size == 0) {
+                        continue;
+                    }
+
+                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                    uint8_t* data = mCommands.NextData<uint8_t>(size);
+                    Device* device = ToBackend(GetDevice());
+
+                    UploadHandle uploadHandle;
+                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                      size, device->GetPendingCommandSerial(),
+                                                      kCopyBufferToBufferOffsetAlignment));
+                    ASSERT(uploadHandle.mappedBuffer != nullptr);
+                    memcpy(uploadHandle.mappedBuffer, data, size);
+
+                    dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+
+                    [commandContext->EnsureBlit()
+                           copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
+                             sourceOffset:uploadHandle.startOffset
+                                 toBuffer:dstBuffer->GetMTLBuffer()
+                        destinationOffset:offset
+                                     size:size];
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        commandContext->EndBlit();
+        return {};
+    }
+
+    MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
+        ComputePipeline* lastPipeline = nullptr;
+        StorageBufferLengthTracker storageBufferLengths = {};
+        BindGroupTracker bindGroups(&storageBufferLengths);
+
+        id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndComputePass: {
+                    mCommands.NextCommand<EndComputePassCmd>();
+                    commandContext->EndCompute();
+                    return {};
+                }
+
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                    // Skip noop dispatches, it can causes issues on some systems.
+                    if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+                        break;
+                    }
+
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline);
+
+                    [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
+                            threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+                    break;
+                }
+
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline);
+
+                    Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
+                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                    [encoder dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
+                                               indirectBufferOffset:dispatch->indirectOffset
+                                              threadsPerThreadgroup:lastPipeline
+                                                                        ->GetLocalWorkGroupSize()];
+                    break;
+                }
+
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                    lastPipeline = ToBackend(cmd->pipeline).Get();
+
+                    bindGroups.OnSetPipeline(lastPipeline);
+
+                    lastPipeline->Encode(encoder);
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+                                              cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                    char* label = mCommands.NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder insertDebugSignpost:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+
+                    [encoder popDebugGroup];
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    char* label = mCommands.NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder pushDebugGroup:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                    if (@available(macos 10.15, iOS 14.0, *)) {
+                        [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                          atSampleIndex:NSUInteger(cmd->queryIndex)
+                                            withBarrier:YES];
+                    } else {
+                        UNREACHABLE();
+                    }
+                    break;
+                }
+
+                default: {
+                    UNREACHABLE();
+                    break;
+                }
+            }
+        }
+
+        // EndComputePass should have been called
+        UNREACHABLE();
+    }
+
+    MaybeError CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
+                                               MTLRenderPassDescriptor* mtlRenderPass,
+                                               uint32_t width,
+                                               uint32_t height) {
+        ASSERT(mtlRenderPass);
+
+        Device* device = ToBackend(GetDevice());
+
+        // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
+        // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
+        // the resolve texture is removed when applying the store + MSAA resolve workaround.
+        if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
+            std::array<id<MTLTexture>, kMaxColorAttachments> trueResolveTextures = {};
+            std::array<uint32_t, kMaxColorAttachments> trueResolveLevels = {};
+            std::array<uint32_t, kMaxColorAttachments> trueResolveSlices = {};
+
+            // Use temporary resolve texture on the resolve targets with non-zero resolveLevel or
+            // resolveSlice.
+            bool useTemporaryResolveTexture = false;
+            std::array<NSPRef<id<MTLTexture>>, kMaxColorAttachments> temporaryResolveTextures = {};
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
+                    continue;
+                }
+
+                if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
+                    mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
+                    continue;
+                }
+
+                trueResolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+                trueResolveLevels[i] = mtlRenderPass.colorAttachments[i].resolveLevel;
+                trueResolveSlices[i] = mtlRenderPass.colorAttachments[i].resolveSlice;
+
+                const MTLPixelFormat mtlFormat = trueResolveTextures[i].pixelFormat;
+                DAWN_TRY_ASSIGN(temporaryResolveTextures[i], CreateResolveTextureForWorkaround(
+                                                                 device, mtlFormat, width, height));
+
+                mtlRenderPass.colorAttachments[i].resolveTexture =
+                    temporaryResolveTextures[i].Get();
+                mtlRenderPass.colorAttachments[i].resolveLevel = 0;
+                mtlRenderPass.colorAttachments[i].resolveSlice = 0;
+                useTemporaryResolveTexture = true;
+            }
+
+            // If we need to use a temporary resolve texture we need to copy the result of MSAA
+            // resolve back to the true resolve targets.
+            if (useTemporaryResolveTexture) {
+                DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
+                for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                    if (trueResolveTextures[i] == nullptr) {
+                        continue;
+                    }
+
+                    ASSERT(temporaryResolveTextures[i] != nullptr);
+                    CopyIntoTrueResolveTarget(commandContext, trueResolveTextures[i],
+                                              trueResolveLevels[i], trueResolveSlices[i],
+                                              temporaryResolveTextures[i].Get(), width, height);
+                }
+                return {};
+            }
+        }
+
+        // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
+        if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
+            bool hasStoreAndMSAAResolve = false;
+
+            // Remove any store + MSAA resolve and remember them.
+            std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (mtlRenderPass.colorAttachments[i].storeAction ==
+                    kMTLStoreActionStoreAndMultisampleResolve) {
+                    hasStoreAndMSAAResolve = true;
+                    resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+
+                    mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
+                    mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
+                }
+            }
+
+            // If we found a store + MSAA resolve we need to resolve in a different render pass.
+            if (hasStoreAndMSAAResolve) {
+                DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
+                ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
+                return {};
+            }
+        }
+
+        DAWN_TRY(EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height));
+        return {};
+    }
+
+    MaybeError CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+                                                       MTLRenderPassDescriptor* mtlRenderPass,
+                                                       uint32_t width,
+                                                       uint32_t height) {
+        bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
+        RenderPipeline* lastPipeline = nullptr;
+        id<MTLBuffer> indexBuffer = nullptr;
+        uint32_t indexBufferBaseOffset = 0;
+        MTLIndexType indexBufferType;
+        uint64_t indexFormatSize = 0;
+
+        StorageBufferLengthTracker storageBufferLengths = {};
+        VertexBufferTracker vertexBuffers(&storageBufferLengths);
+        BindGroupTracker bindGroups(&storageBufferLengths);
+
+        id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
+
+        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    // The instance count must be non-zero, otherwise no-op
+                    if (draw->instanceCount != 0) {
+                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
+                        if (draw->firstInstance == 0) {
+                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                        vertexStart:draw->firstVertex
+                                        vertexCount:draw->vertexCount
+                                      instanceCount:draw->instanceCount];
+                        } else {
+                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                        vertexStart:draw->firstVertex
+                                        vertexCount:draw->vertexCount
+                                      instanceCount:draw->instanceCount
+                                       baseInstance:draw->firstInstance];
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    // The index and instance count must be non-zero, otherwise no-op
+                    if (draw->indexCount != 0 && draw->instanceCount != 0) {
+                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
+                        // baseVertex.
+                        if (draw->baseVertex == 0 && draw->firstInstance == 0) {
+                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                                indexCount:draw->indexCount
+                                                 indexType:indexBufferType
+                                               indexBuffer:indexBuffer
+                                         indexBufferOffset:indexBufferBaseOffset +
+                                                           draw->firstIndex * indexFormatSize
+                                             instanceCount:draw->instanceCount];
+                        } else {
+                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                                indexCount:draw->indexCount
+                                                 indexType:indexBufferType
+                                               indexBuffer:indexBuffer
+                                         indexBufferOffset:indexBufferBaseOffset +
+                                                           draw->firstIndex * indexFormatSize
+                                             instanceCount:draw->instanceCount
+                                                baseVertex:draw->baseVertex
+                                              baseInstance:draw->firstInstance];
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                    [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                              indirectBuffer:indirectBuffer
+                        indirectBufferOffset:draw->indirectOffset];
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(buffer != nullptr);
+
+                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                    [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                         indexType:indexBufferType
+                                       indexBuffer:indexBuffer
+                                 indexBufferOffset:indexBufferBaseOffset
+                                    indirectBuffer:indirectBuffer
+                              indirectBufferOffset:draw->indirectOffset];
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                    char* label = iter->NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder insertDebugSignpost:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    iter->NextCommand<PopDebugGroupCmd>();
+
+                    [encoder popDebugGroup];
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                    char* label = iter->NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder pushDebugGroup:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
+
+                    vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
+                    bindGroups.OnSetPipeline(newPipeline);
+
+                    [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
+                    [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
+                    [encoder setCullMode:newPipeline->GetMTLCullMode()];
+                    [encoder setDepthBias:newPipeline->GetDepthBias()
+                               slopeScale:newPipeline->GetDepthBiasSlopeScale()
+                                    clamp:newPipeline->GetDepthBiasClamp()];
+                    if (@available(macOS 10.11, iOS 11.0, *)) {
+                        MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
+                                                        ? MTLDepthClipModeClamp
+                                                        : MTLDepthClipModeClip;
+                        [encoder setDepthClipMode:clipMode];
+                    }
+                    newPipeline->Encode(encoder);
+
+                    lastPipeline = newPipeline;
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+                                              cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+                    auto b = ToBackend(cmd->buffer.Get());
+                    indexBuffer = b->GetMTLBuffer();
+                    indexBufferBaseOffset = cmd->offset;
+                    indexBufferType = MTLIndexFormat(cmd->format);
+                    indexFormatSize = IndexFormatSize(cmd->format);
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                    vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                    cmd->offset);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+                    commandContext->EndRender();
+                    return {};
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                    [encoder setStencilReferenceValue:cmd->reference];
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    MTLViewport viewport;
+                    viewport.originX = cmd->x;
+                    viewport.originY = cmd->y;
+                    viewport.width = cmd->width;
+                    viewport.height = cmd->height;
+                    viewport.znear = cmd->minDepth;
+                    viewport.zfar = cmd->maxDepth;
+
+                    [encoder setViewport:viewport];
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    MTLScissorRect rect;
+                    rect.x = cmd->x;
+                    rect.y = cmd->y;
+                    rect.width = cmd->width;
+                    rect.height = cmd->height;
+
+                    [encoder setScissorRect:rect];
+                    break;
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    [encoder setBlendColorRed:cmd->color.r
+                                        green:cmd->color.g
+                                         blue:cmd->color.b
+                                        alpha:cmd->color.a];
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            EncodeRenderBundleCommand(iter, type);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+
+                    [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
+                                              offset:cmd->queryIndex * sizeof(uint64_t)];
+                    break;
+                }
+
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+
+                    [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
+                                              offset:cmd->queryIndex * sizeof(uint64_t)];
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                    if (@available(macos 10.15, iOS 14.0, *)) {
+                        [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                          atSampleIndex:NSUInteger(cmd->queryIndex)
+                                            withBarrier:YES];
+                    } else {
+                        UNREACHABLE();
+                    }
+                    break;
+                }
+
+                default: {
+                    EncodeRenderBundleCommand(&mCommands, type);
+                    break;
+                }
+            }
+        }
+
+        // EndRenderPass should have been called
+        UNREACHABLE();
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/CommandRecordingContext.h b/src/dawn/native/metal/CommandRecordingContext.h
new file mode 100644
index 0000000..fb06aa8
--- /dev/null
+++ b/src/dawn/native/metal/CommandRecordingContext.h
@@ -0,0 +1,59 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/common/NSRef.h"
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
+    // Only one encoder may be open at a time.
+    class CommandRecordingContext : NonMovable {
+      public:
+        CommandRecordingContext();
+        ~CommandRecordingContext();
+
+        id<MTLCommandBuffer> GetCommands();
+        void MarkUsed();
+        bool WasUsed() const;
+
+        MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
+        NSPRef<id<MTLCommandBuffer>> AcquireCommands();
+
+        id<MTLBlitCommandEncoder> EnsureBlit();
+        void EndBlit();
+
+        id<MTLComputeCommandEncoder> BeginCompute();
+        void EndCompute();
+
+        id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
+        void EndRender();
+
+      private:
+        NSPRef<id<MTLCommandBuffer>> mCommands;
+        NSPRef<id<MTLBlitCommandEncoder>> mBlit;
+        NSPRef<id<MTLComputeCommandEncoder>> mCompute;
+        NSPRef<id<MTLRenderCommandEncoder>> mRender;
+        bool mInEncoder = false;
+        bool mUsed = false;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
diff --git a/src/dawn/native/metal/CommandRecordingContext.mm b/src/dawn/native/metal/CommandRecordingContext.mm
new file mode 100644
index 0000000..cced9a7
--- /dev/null
+++ b/src/dawn/native/metal/CommandRecordingContext.mm
@@ -0,0 +1,132 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/CommandRecordingContext.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native::metal {
+
+    CommandRecordingContext::CommandRecordingContext() = default;
+
+    CommandRecordingContext::~CommandRecordingContext() {
+        // Commands must be acquired.
+        ASSERT(mCommands == nullptr);
+    }
+
+    id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
+        return mCommands.Get();
+    }
+
+    void CommandRecordingContext::MarkUsed() {
+        mUsed = true;
+    }
+    bool CommandRecordingContext::WasUsed() const {
+        return mUsed;
+    }
+
+    MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
+        ASSERT(mCommands == nil);
+        ASSERT(!mUsed);
+
+        // The MTLCommandBuffer will be autoreleased by default.
+        // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
+        // alive.
+        mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
+        if (mCommands == nil) {
+            return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
+        }
+
+        return {};
+    }
+
+    NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
+        // A blit encoder can be left open from WriteBuffer, make sure we close it.
+        if (mCommands != nullptr) {
+            EndBlit();
+        }
+
+        ASSERT(!mInEncoder);
+        mUsed = false;
+        return std::move(mCommands);
+    }
+
+    id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
+        ASSERT(mCommands != nullptr);
+
+        if (mBlit == nullptr) {
+            ASSERT(!mInEncoder);
+            mInEncoder = true;
+
+            // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+            // draining from under us.
+            mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
+        }
+        return mBlit.Get();
+    }
+
+    void CommandRecordingContext::EndBlit() {
+        ASSERT(mCommands != nullptr);
+
+        if (mBlit != nullptr) {
+            [*mBlit endEncoding];
+            mBlit = nullptr;
+            mInEncoder = false;
+        }
+    }
+
+    id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mCompute == nullptr);
+        ASSERT(!mInEncoder);
+
+        mInEncoder = true;
+        // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+        // draining from under us.
+        mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
+        return mCompute.Get();
+    }
+
+    void CommandRecordingContext::EndCompute() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mCompute != nullptr);
+
+        [*mCompute endEncoding];
+        mCompute = nullptr;
+        mInEncoder = false;
+    }
+
+    id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
+        MTLRenderPassDescriptor* descriptor) {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mRender == nullptr);
+        ASSERT(!mInEncoder);
+
+        mInEncoder = true;
+        // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+        // draining from under us.
+        mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
+        return mRender.Get();
+    }
+
+    void CommandRecordingContext::EndRender() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mRender != nullptr);
+
+        [*mRender endEncoding];
+        mRender = nullptr;
+        mInEncoder = false;
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/ComputePipelineMTL.h b/src/dawn/native/metal/ComputePipelineMTL.h
new file mode 100644
index 0000000..d61db22
--- /dev/null
+++ b/src/dawn/native/metal/ComputePipelineMTL.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
+#define DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class ComputePipeline final : public ComputePipelineBase {
+      public:
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                    void* userdata);
+
+        void Encode(id<MTLComputeCommandEncoder> encoder);
+        MTLSize GetLocalWorkGroupSize() const;
+        bool RequiresStorageBufferLength() const;
+
+      private:
+        using ComputePipelineBase::ComputePipelineBase;
+        MaybeError Initialize() override;
+
+        NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
+        MTLSize mLocalWorkgroupSize;
+        bool mRequiresStorageBufferLength;
+        std::vector<uint32_t> mWorkgroupAllocations;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
diff --git a/src/dawn/native/metal/ComputePipelineMTL.mm b/src/dawn/native/metal/ComputePipelineMTL.mm
new file mode 100644
index 0000000..71d5a01
--- /dev/null
+++ b/src/dawn/native/metal/ComputePipelineMTL.mm
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/ComputePipelineMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+    // static
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(device, descriptor));
+    }
+
+    MaybeError ComputePipeline::Initialize() {
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+        ShaderModule::MetalFunctionData computeData;
+
+        DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
+                                   &computeData));
+
+        NSError* error = nullptr;
+        mMtlComputePipelineState.Acquire([mtlDevice
+            newComputePipelineStateWithFunction:computeData.function.Get()
+                                          error:&error]);
+        if (error != nullptr) {
+            return DAWN_INTERNAL_ERROR("Error creating pipeline state " +
+                                       std::string([error.localizedDescription UTF8String]));
+        }
+        ASSERT(mMtlComputePipelineState != nil);
+
+        // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
+        Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
+        mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
+
+        mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+        mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
+        return {};
+    }
+
+    void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
+        [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
+        for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
+            if (mWorkgroupAllocations[i] == 0) {
+                continue;
+            }
+            // Size must be a multiple of 16 bytes.
+            uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
+            [encoder setThreadgroupMemoryLength:rounded atIndex:i];
+        }
+    }
+
+    MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
+        return mLocalWorkgroupSize;
+    }
+
+    bool ComputePipeline::RequiresStorageBufferLength() const {
+        return mRequiresStorageBufferLength;
+    }
+
+    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                          WGPUCreateComputePipelineAsyncCallback callback,
+                                          void* userdata) {
+        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                             userdata);
+        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/DeviceMTL.h b/src/dawn/native/metal/DeviceMTL.h
new file mode 100644
index 0000000..a6b6592
--- /dev/null
+++ b/src/dawn/native/metal/DeviceMTL.h
@@ -0,0 +1,153 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_DEVICEMTL_H_
+#define DAWNNATIVE_METAL_DEVICEMTL_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/metal/CommandRecordingContext.h"
+#include "dawn/native/metal/Forward.h"
+
+#import <IOSurface/IOSurfaceRef.h>
+#import <Metal/Metal.h>
+#import <QuartzCore/QuartzCore.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+
+namespace dawn::native::metal {
+
+    namespace {
+        struct KalmanInfo;
+    }
+
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+                                                 NSPRef<id<MTLDevice>> mtlDevice,
+                                                 const DeviceDescriptor* descriptor);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        MaybeError TickImpl() override;
+
+        id<MTLDevice> GetMTLDevice();
+        id<MTLCommandQueue> GetMTLQueue();
+
+        CommandRecordingContext* GetPendingCommandContext();
+        MaybeError SubmitPendingCommandBuffer();
+
+        Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+                                                    IOSurfaceRef ioSurface);
+        void WaitForCommandsToBeScheduled();
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& dataLayout,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+      private:
+        Device(AdapterBase* adapter,
+               NSPRef<id<MTLDevice>> mtlDevice,
+               const DeviceDescriptor* descriptor);
+
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) override;
+        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) override;
+
+        void InitTogglesFromDriver();
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+        NSPRef<id<MTLDevice>> mMtlDevice;
+        NSPRef<id<MTLCommandQueue>> mCommandQueue;
+
+        CommandRecordingContext mCommandContext;
+
+        // The completed serial is updated in a Metal completion handler that can be fired on a
+        // different thread, so it needs to be atomic.
+        std::atomic<uint64_t> mCompletedSerial;
+
+        // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
+        // a different thread so we guard access to it with a mutex.
+        std::mutex mLastSubmittedCommandsMutex;
+        NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
+
+        // The current estimation of timestamp period
+        float mTimestampPeriod = 1.0f;
+        // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
+        // and CPU timestamps.
+        MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+        MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+        // The parameters for kalman filter
+        std::unique_ptr<KalmanInfo> mKalmanInfo;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_DEVICEMTL_H_
diff --git a/src/dawn/native/metal/DeviceMTL.mm b/src/dawn/native/metal/DeviceMTL.mm
new file mode 100644
index 0000000..e2e784e
--- /dev/null
+++ b/src/dawn/native/metal/DeviceMTL.mm
@@ -0,0 +1,504 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/DeviceMTL.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+#include "dawn/native/metal/BindGroupMTL.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/CommandBufferMTL.h"
+#include "dawn/native/metal/ComputePipelineMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/QuerySetMTL.h"
+#include "dawn/native/metal/QueueMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/native/metal/SamplerMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/SwapChainMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <type_traits>
+
+namespace dawn::native::metal {
+
+    namespace {
+
+        // The time interval for each round of kalman filter
+        static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
+
+        struct KalmanInfo {
+            float filterValue;  // The estimation value
+            float kalmanGain;   // The kalman gain
+            float R;            // The covariance of the observation noise
+            float P;            // The a posteriori estimate covariance
+        };
+
+        // A simplified kalman filter for estimating timestamp period based on measured values
+        float KalmanFilter(KalmanInfo* info, float measuredValue) {
+            // Optimize kalman gain
+            info->kalmanGain = info->P / (info->P + info->R);
+
+            // Correct filter value
+            info->filterValue =
+                info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
+            // Update estimate covariance
+            info->P = (1.0f - info->kalmanGain) * info->P;
+            return info->filterValue;
+        }
+
+        void API_AVAILABLE(macos(10.15), ios(14))
+            UpdateTimestampPeriod(id<MTLDevice> device,
+                                  KalmanInfo* info,
+                                  MTLTimestamp* cpuTimestampStart,
+                                  MTLTimestamp* gpuTimestampStart,
+                                  float* timestampPeriod) {
+            // The filter value is converged to an optimal value when the kalman gain is less than
+            // 0.01. At this time, the weight of the measured value is too small to change the next
+            // filter value, the sampling and calculations do not need to continue anymore.
+            if (info->kalmanGain < 0.01f) {
+                return;
+            }
+
+            MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
+            [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
+
+            // Update the timestamp start values when timestamp reset happens
+            if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
+                *cpuTimestampStart = cpuTimestampEnd;
+                *gpuTimestampStart = gpuTimestampEnd;
+                return;
+            }
+
+            if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
+                // The measured timestamp period
+                float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
+                                    static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
+
+                // Measurement update
+                *timestampPeriod = KalmanFilter(info, measurement);
+
+                *cpuTimestampStart = cpuTimestampEnd;
+                *gpuTimestampStart = gpuTimestampEnd;
+            }
+        }
+
+    }  // namespace
+
+    // static
+    ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+                                              NSPRef<id<MTLDevice>> mtlDevice,
+                                              const DeviceDescriptor* descriptor) {
+        Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    Device::Device(AdapterBase* adapter,
+                   NSPRef<id<MTLDevice>> mtlDevice,
+                   const DeviceDescriptor* descriptor)
+        : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {
+    }
+
+    Device::~Device() {
+        Destroy();
+    }
+
+    MaybeError Device::Initialize() {
+        InitTogglesFromDriver();
+
+        mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
+        if (mCommandQueue == nil) {
+            return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
+        }
+
+        DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
+
+        if (IsFeatureEnabled(Feature::TimestampQuery) &&
+            !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+            // Make a best guess of timestamp period based on device vendor info, and converge it to
+            // an accurate value by the following calculations.
+            mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
+
+            // Initialize kalman filter parameters
+            mKalmanInfo = std::make_unique<KalmanInfo>();
+            mKalmanInfo->filterValue = 0.0f;
+            mKalmanInfo->kalmanGain = 0.5f;
+            mKalmanInfo->R =
+                0.0001f;  // The smaller this value is, the smaller the error of measured value is,
+                          // the more we can trust the measured value.
+            mKalmanInfo->P = 1.0f;
+
+            if (@available(macos 10.15, iOS 14.0, *)) {
+                // Sample CPU timestamp and GPU timestamp for first time at device creation
+                [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
+            }
+        }
+
+        return DeviceBase::Initialize(new Queue(this));
+    }
+
+    void Device::InitTogglesFromDriver() {
+        {
+            bool haveStoreAndMSAAResolve = false;
+#if defined(DAWN_PLATFORM_MACOS)
+            if (@available(macOS 10.12, *)) {
+                haveStoreAndMSAAResolve =
+                    [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+            }
+#elif defined(DAWN_PLATFORM_IOS)
+            haveStoreAndMSAAResolve =
+                [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
+#endif
+            // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
+            SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+
+            bool haveSamplerCompare = true;
+#if defined(DAWN_PLATFORM_IOS)
+            haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+            // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
+            SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
+
+            bool haveBaseVertexBaseInstance = true;
+#if defined(DAWN_PLATFORM_IOS)
+            haveBaseVertexBaseInstance =
+                [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+            // TODO(crbug.com/dawn/343): Investigate emulation.
+            SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
+            SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
+        }
+
+        // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
+        // that code path if it isn't explicitly disabled.
+        if (IsRobustnessEnabled()) {
+            SetToggle(Toggle::MetalEnableVertexPulling, true);
+        }
+
+        // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
+        SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
+
+        uint32_t deviceId = GetAdapter()->GetDeviceId();
+        uint32_t vendorId = GetAdapter()->GetVendorId();
+
+        // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
+        // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
+        // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
+        if (@available(macOS 10.15, iOS 14.0, *)) {
+            bool useSharedMode = gpu_info::IsIntel(vendorId);
+            SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
+        }
+
+        // TODO(crbug.com/dawn/1071): r8unorm and rg8unorm textures with multiple mip levels don't
+        // clear properly on Intel Macs.
+        if (gpu_info::IsIntel(vendorId)) {
+            SetToggle(Toggle::DisableR8RG8Mipmaps, true);
+        }
+
+        // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
+        // shader provided. Create a dummy fragment shader module to work around this issue.
+        if (gpu_info::IsIntel(vendorId)) {
+            bool useDummyFragmentShader = true;
+            if (gpu_info::IsSkylake(deviceId)) {
+                useDummyFragmentShader = false;
+            }
+            SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
+        }
+    }
+
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        return BindGroup::Create(this, descriptor);
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        return Buffer::Create(this, descriptor);
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return CommandBuffer::Create(encoder, descriptor);
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return ComputePipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return PipelineLayout::Create(this, descriptor);
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return QuerySet::Create(this, descriptor);
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return RenderPipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return Sampler::Create(this, descriptor);
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        return ShaderModule::Create(this, descriptor, parseResult);
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return OldSwapChain::Create(this, descriptor);
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return Texture::Create(this, descriptor);
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return TextureView::Create(texture, descriptor);
+    }
+    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata) {
+        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+    }
+    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata) {
+        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+    }
+
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
+        if (frontendCompletedSerial > mCompletedSerial) {
+            // sometimes we increase the serials, in which case the completed serial in
+            // the device base will surpass the completed serial we have in the metal backend, so we
+            // must update ours when we see that the completed serial from device base has
+            // increased.
+            mCompletedSerial = frontendCompletedSerial;
+        }
+        return ExecutionSerial(mCompletedSerial.load());
+    }
+
+    MaybeError Device::TickImpl() {
+        DAWN_TRY(SubmitPendingCommandBuffer());
+
+        // Just run timestamp period calculation when timestamp feature is enabled.
+        if (IsFeatureEnabled(Feature::TimestampQuery)) {
+            if (@available(macos 10.15, iOS 14.0, *)) {
+                UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
+                                      &mGpuTimestamp, &mTimestampPeriod);
+            }
+        }
+
+        return {};
+    }
+
+    id<MTLDevice> Device::GetMTLDevice() {
+        return mMtlDevice.Get();
+    }
+
+    id<MTLCommandQueue> Device::GetMTLQueue() {
+        return mCommandQueue.Get();
+    }
+
+    CommandRecordingContext* Device::GetPendingCommandContext() {
+        mCommandContext.MarkUsed();
+        return &mCommandContext;
+    }
+
+    MaybeError Device::SubmitPendingCommandBuffer() {
+        if (!mCommandContext.WasUsed()) {
+            return {};
+        }
+
+        IncrementLastSubmittedCommandSerial();
+
+        // Acquire the pending command buffer, which is retained. It must be released later.
+        NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
+
+        // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
+        // schedule handler and this code.
+        {
+            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+            mLastSubmittedCommands = pendingCommands;
+        }
+
+        // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
+        // handle types with copy / move constructors being referenced in the block..
+        id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
+        [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
+            // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
+            // is a local value (and not the member itself).
+            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+            if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
+                this->mLastSubmittedCommands = nullptr;
+            }
+        }];
+
+        // Update the completed serial once the completed handler is fired. Make a local copy of
+        // mLastSubmittedSerial so it is captured by value.
+        ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
+        // this ObjC block runs on a different thread
+        [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
+            TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+                                   uint64_t(pendingSerial));
+            ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
+            this->mCompletedSerial = uint64_t(pendingSerial);
+        }];
+
+        TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+                                 uint64_t(pendingSerial));
+        [*pendingCommands commit];
+
+        return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
+    }
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer =
+            std::make_unique<StagingBuffer>(size, this);
+        DAWN_TRY(stagingBuffer->Initialize());
+        return std::move(stagingBuffer);
+    }
+
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        // Metal validation layers forbid  0-sized copies, assert it is skipped prior to calling
+        // this function.
+        ASSERT(size != 0);
+
+        ToBackend(destination)
+            ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
+                                                 size);
+
+        id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
+        id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
+        [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
+                                                    sourceOffset:sourceOffset
+                                                        toBuffer:buffer
+                                               destinationOffset:destinationOffset
+                                                            size:size];
+        return {};
+    }
+
+    // In Metal we don't write from the CPU to the texture directly which can be done using the
+    // replaceRegion function, because the function requires a non-private storage mode and Dawn
+    // sets the private storage mode by default for all textures except IOSurfaces on macOS.
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& dataLayout,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        Texture* texture = ToBackend(dst->texture.Get());
+        EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
+                                            copySizePixels);
+
+        RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
+                                  source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
+                                  dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
+                                  dst->aspect, copySizePixels);
+        return {};
+    }
+
+    Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+                                                        IOSurfaceRef ioSurface) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+            return nullptr;
+        }
+        if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface))) {
+            return nullptr;
+        }
+
+        Ref<Texture> result;
+        if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface), &result)) {
+            return nullptr;
+        }
+        return result;
+    }
+
+    void Device::WaitForCommandsToBeScheduled() {
+        if (ConsumedError(SubmitPendingCommandBuffer())) {
+            return;
+        }
+
+        // Only lock the object while we take a reference to it, otherwise we could block further
+        // progress if the driver calls the scheduled handler (which also acquires the lock) before
+        // finishing the waitUntilScheduled.
+        NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
+        {
+            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+            lastSubmittedCommands = mLastSubmittedCommands;
+        }
+        [*lastSubmittedCommands waitUntilScheduled];
+    }
+
+    MaybeError Device::WaitForIdleForDestruction() {
+        // Forget all pending commands.
+        mCommandContext.AcquireCommands();
+        DAWN_TRY(CheckPassedSerials());
+
+        // Wait for all commands to be finished so we can free resources
+        while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
+            usleep(100);
+            DAWN_TRY(CheckPassedSerials());
+        }
+
+        return {};
+    }
+
+    void Device::DestroyImpl() {
+        ASSERT(GetState() == State::Disconnected);
+
+        // Forget all pending commands.
+        mCommandContext.AcquireCommands();
+
+        mCommandQueue = nullptr;
+        mMtlDevice = nullptr;
+    }
+
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
+    float Device::GetTimestampPeriodInNS() const {
+        return mTimestampPeriod;
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/Forward.h b/src/dawn/native/metal/Forward.h
new file mode 100644
index 0000000..bdfc31d
--- /dev/null
+++ b/src/dawn/native/metal/Forward.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_FORWARD_H_
+#define DAWNNATIVE_METAL_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::metal {
+
+    class Adapter;
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class ComputePipeline;
+    class Device;
+    class Framebuffer;
+    class PipelineLayout;
+    class QuerySet;
+    class Queue;
+    class RenderPipeline;
+    class Sampler;
+    class ShaderModule;
+    class StagingBuffer;
+    class SwapChain;
+    class Texture;
+    class TextureView;
+
+    struct MetalBackendTraits {
+        using AdapterType = Adapter;
+        using BindGroupType = BindGroup;
+        using BindGroupLayoutType = BindGroupLayout;
+        using BufferType = Buffer;
+        using CommandBufferType = CommandBuffer;
+        using ComputePipelineType = ComputePipeline;
+        using DeviceType = Device;
+        using PipelineLayoutType = PipelineLayout;
+        using QuerySetType = QuerySet;
+        using QueueType = Queue;
+        using RenderPipelineType = RenderPipeline;
+        using SamplerType = Sampler;
+        using ShaderModuleType = ShaderModule;
+        using StagingBufferType = StagingBuffer;
+        using SwapChainType = SwapChain;
+        using TextureType = Texture;
+        using TextureViewType = TextureView;
+    };
+
+    template <typename T>
+    auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
+        return ToBackendBase<MetalBackendTraits>(common);
+    }
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_FORWARD_H_
diff --git a/src/dawn/native/metal/MetalBackend.mm b/src/dawn/native/metal/MetalBackend.mm
new file mode 100644
index 0000000..c0214e5
--- /dev/null
+++ b/src/dawn/native/metal/MetalBackend.mm
@@ -0,0 +1,49 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// MetalBackend.cpp: contains the definition of symbols exported by MetalBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/MetalBackend.h"
+
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+namespace dawn::native::metal {
+
+    id<MTLDevice> GetMetalDevice(WGPUDevice device) {
+        return ToBackend(FromAPI(device))->GetMTLDevice();
+    }
+
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {
+    }
+
+    ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
+        : ExternalImageDescriptor(ExternalImageType::IOSurface) {
+    }
+
+    WGPUTexture WrapIOSurface(WGPUDevice device,
+                              const ExternalImageDescriptorIOSurface* cDescriptor) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        Ref<TextureBase> texture =
+            backendDevice->CreateTextureWrappingIOSurface(cDescriptor, cDescriptor->ioSurface);
+        return ToAPI(texture.Detach());
+    }
+
+    void WaitForCommandsToBeScheduled(WGPUDevice device) {
+        ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/PipelineLayoutMTL.h b/src/dawn/native/metal/PipelineLayoutMTL.h
new file mode 100644
index 0000000..efd3f51
--- /dev/null
+++ b/src/dawn/native/metal/PipelineLayoutMTL.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
+#define DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
+
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/native/PerStage.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    // The number of Metal buffers usable by applications in general
+    static constexpr size_t kMetalBufferTableSize = 31;
+    // The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
+    static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
+    // The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
+    static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
+
+    static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
+
+    class PipelineLayout final : public PipelineLayoutBase {
+      public:
+        static Ref<PipelineLayout> Create(Device* device,
+                                          const PipelineLayoutDescriptor* descriptor);
+
+        using BindingIndexInfo =
+            ityp::array<BindGroupIndex,
+                        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
+                        kMaxBindGroups>;
+        const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
+
+        // The number of Metal vertex stage buffers used for the whole pipeline layout.
+        uint32_t GetBufferBindingCount(SingleShaderStage stage);
+
+      private:
+        PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+        ~PipelineLayout() override = default;
+        PerStage<BindingIndexInfo> mIndexInfo;
+        PerStage<uint32_t> mBufferBindingCount;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
diff --git a/src/dawn/native/metal/PipelineLayoutMTL.mm b/src/dawn/native/metal/PipelineLayoutMTL.mm
new file mode 100644
index 0000000..5f789ea
--- /dev/null
+++ b/src/dawn/native/metal/PipelineLayoutMTL.mm
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+    // static
+    Ref<PipelineLayout> PipelineLayout::Create(Device* device,
+                                               const PipelineLayoutDescriptor* descriptor) {
+        return AcquireRef(new PipelineLayout(device, descriptor));
+    }
+
+    PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+        : PipelineLayoutBase(device, descriptor) {
+        // Each stage has its own numbering namespace in CompilerMSL.
+        for (auto stage : IterateStages(kAllStages)) {
+            uint32_t bufferIndex = 0;
+            uint32_t samplerIndex = 0;
+            uint32_t textureIndex = 0;
+
+            for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+                mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
+
+                for (BindingIndex bindingIndex{0};
+                     bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
+                    const BindingInfo& bindingInfo =
+                        GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+                    if (!(bindingInfo.visibility & StageBit(stage))) {
+                        continue;
+                    }
+
+                    switch (bindingInfo.bindingType) {
+                        case BindingInfoType::Buffer:
+                            mIndexInfo[stage][group][bindingIndex] = bufferIndex;
+                            bufferIndex++;
+                            break;
+
+                        case BindingInfoType::Sampler:
+                            mIndexInfo[stage][group][bindingIndex] = samplerIndex;
+                            samplerIndex++;
+                            break;
+
+                        case BindingInfoType::Texture:
+                        case BindingInfoType::StorageTexture:
+                        case BindingInfoType::ExternalTexture:
+                            mIndexInfo[stage][group][bindingIndex] = textureIndex;
+                            textureIndex++;
+                            break;
+                    }
+                }
+            }
+
+            mBufferBindingCount[stage] = bufferIndex;
+        }
+    }
+
+    const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
+        SingleShaderStage stage) const {
+        return mIndexInfo[stage];
+    }
+
+    uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
+        return mBufferBindingCount[stage];
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/QuerySetMTL.h b/src/dawn/native/metal/QuerySetMTL.h
new file mode 100644
index 0000000..23d6c44
--- /dev/null
+++ b/src/dawn/native/metal/QuerySetMTL.h
@@ -0,0 +1,54 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_QUERYSETMTL_H_
+#define DAWNNATIVE_METAL_QUERYSETMTL_H_
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class QuerySet final : public QuerySetBase {
+      public:
+        static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                                   const QuerySetDescriptor* descriptor);
+
+        id<MTLBuffer> GetVisibilityBuffer() const;
+        id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
+            API_AVAILABLE(macos(10.15), ios(14.0));
+
+      private:
+        ~QuerySet() override;
+        using QuerySetBase::QuerySetBase;
+        MaybeError Initialize();
+
+        // Dawn API
+        void DestroyImpl() override;
+
+        NSPRef<id<MTLBuffer>> mVisibilityBuffer;
+        // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
+        // propagate nicely through templates.
+        id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
+                                                                      ios(14.0)) = nullptr;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_QUERYSETMTL_H_
diff --git a/src/dawn/native/metal/QuerySetMTL.mm b/src/dawn/native/metal/QuerySetMTL.mm
new file mode 100644
index 0000000..4882fee
--- /dev/null
+++ b/src/dawn/native/metal/QuerySetMTL.mm
@@ -0,0 +1,139 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/QuerySetMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+    namespace {
+
+        ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(
+            Device* device,
+            MTLCommonCounterSet counterSet,
+            uint32_t count) API_AVAILABLE(macos(10.15), ios(14.0)) {
+            NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
+                AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
+            MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
+
+            // To determine which counters are available from a device, we need to iterate through
+            // the counterSets property of a MTLDevice. Then configure which counters will be
+            // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
+            // property to the matched one of the available set.
+            for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
+                if ([set.name isEqualToString:counterSet]) {
+                    descriptor.counterSet = set;
+                    break;
+                }
+            }
+            ASSERT(descriptor.counterSet != nullptr);
+
+            descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
+            descriptor.storageMode = MTLStorageModePrivate;
+            if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
+                descriptor.storageMode = MTLStorageModeShared;
+            }
+
+            NSError* error = nullptr;
+            id<MTLCounterSampleBuffer> counterSampleBuffer =
+                [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor
+                                                                       error:&error];
+            if (error != nullptr) {
+                return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
+                                                [error.localizedDescription UTF8String]);
+            }
+
+            return counterSampleBuffer;
+        }
+    }
+
+    // static
+    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                                  const QuerySetDescriptor* descriptor) {
+        Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+        DAWN_TRY(queryset->Initialize());
+        return queryset;
+    }
+
+    MaybeError QuerySet::Initialize() {
+        Device* device = ToBackend(GetDevice());
+
+        switch (GetQueryType()) {
+            case wgpu::QueryType::Occlusion: {
+                // Create buffer for writing 64-bit results.
+                NSUInteger bufferSize = static_cast<NSUInteger>(
+                    std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
+                mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
+                    newBufferWithLength:bufferSize
+                                options:MTLResourceStorageModePrivate]);
+
+                if (mVisibilityBuffer == nil) {
+                    return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
+                }
+                break;
+            }
+            case wgpu::QueryType::PipelineStatistics:
+                if (@available(macOS 10.15, iOS 14.0, *)) {
+                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
+                                                              GetQueryCount()));
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            case wgpu::QueryType::Timestamp:
+                if (@available(macOS 10.15, iOS 14.0, *)) {
+                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
+                                                              GetQueryCount()));
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        return {};
+    }
+
+    id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
+        return mVisibilityBuffer.Get();
+    }
+
+    id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
+        API_AVAILABLE(macos(10.15), ios(14.0)) {
+        return mCounterSampleBuffer;
+    }
+
+    QuerySet::~QuerySet() = default;
+
+    void QuerySet::DestroyImpl() {
+        QuerySetBase::DestroyImpl();
+
+        mVisibilityBuffer = nullptr;
+
+        // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work will with
+        // templates.
+        if (@available(macOS 10.15, iOS 14.0, *)) {
+            [mCounterSampleBuffer release];
+            mCounterSampleBuffer = nullptr;
+        }
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/QueueMTL.h b/src/dawn/native/metal/QueueMTL.h
new file mode 100644
index 0000000..fd94e07
--- /dev/null
+++ b/src/dawn/native/metal/QueueMTL.h
@@ -0,0 +1,34 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_QUEUEMTL_H_
+#define DAWNNATIVE_METAL_QUEUEMTL_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class Queue final : public QueueBase {
+      public:
+        Queue(Device* device);
+
+      private:
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_QUEUEMTL_H_
diff --git a/src/dawn/native/metal/QueueMTL.mm b/src/dawn/native/metal/QueueMTL.mm
new file mode 100644
index 0000000..d489295
--- /dev/null
+++ b/src/dawn/native/metal/QueueMTL.mm
@@ -0,0 +1,48 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/QueueMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/metal/CommandBufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::metal {
+
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+        Device* device = ToBackend(GetDevice());
+
+        DAWN_TRY(device->Tick());
+
+        CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+
+        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
+        }
+        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+
+        return device->SubmitPendingCommandBuffer();
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/RenderPipelineMTL.h b/src/dawn/native/metal/RenderPipelineMTL.h
new file mode 100644
index 0000000..a4c6296
--- /dev/null
+++ b/src/dawn/native/metal/RenderPipelineMTL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
+#define DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        static Ref<RenderPipelineBase> CreateUninitialized(
+            Device* device,
+            const RenderPipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+
+        MTLPrimitiveType GetMTLPrimitiveTopology() const;
+        MTLWinding GetMTLFrontFace() const;
+        MTLCullMode GetMTLCullMode() const;
+
+        void Encode(id<MTLRenderCommandEncoder> encoder);
+
+        id<MTLDepthStencilState> GetMTLDepthStencilState();
+
+        // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
+        // vertex buffer table.
+        uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
+
+        wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
+
+        MaybeError Initialize() override;
+
+      private:
+        using RenderPipelineBase::RenderPipelineBase;
+
+        NSRef<MTLVertexDescriptor> MakeVertexDesc();
+
+        MTLPrimitiveType mMtlPrimitiveTopology;
+        MTLWinding mMtlFrontFace;
+        MTLCullMode mMtlCullMode;
+        NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
+        NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
+        ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
+
+        wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
diff --git a/src/dawn/native/metal/RenderPipelineMTL.mm b/src/dawn/native/metal/RenderPipelineMTL.mm
new file mode 100644
index 0000000..18adb69
--- /dev/null
+++ b/src/dawn/native/metal/RenderPipelineMTL.mm
@@ -0,0 +1,506 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/RenderPipelineMTL.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+    namespace {
+        MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return MTLVertexFormatUChar2;
+                case wgpu::VertexFormat::Uint8x4:
+                    return MTLVertexFormatUChar4;
+                case wgpu::VertexFormat::Sint8x2:
+                    return MTLVertexFormatChar2;
+                case wgpu::VertexFormat::Sint8x4:
+                    return MTLVertexFormatChar4;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return MTLVertexFormatUChar2Normalized;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return MTLVertexFormatUChar4Normalized;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return MTLVertexFormatChar2Normalized;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return MTLVertexFormatChar4Normalized;
+                case wgpu::VertexFormat::Uint16x2:
+                    return MTLVertexFormatUShort2;
+                case wgpu::VertexFormat::Uint16x4:
+                    return MTLVertexFormatUShort4;
+                case wgpu::VertexFormat::Sint16x2:
+                    return MTLVertexFormatShort2;
+                case wgpu::VertexFormat::Sint16x4:
+                    return MTLVertexFormatShort4;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return MTLVertexFormatUShort2Normalized;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return MTLVertexFormatUShort4Normalized;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return MTLVertexFormatShort2Normalized;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return MTLVertexFormatShort4Normalized;
+                case wgpu::VertexFormat::Float16x2:
+                    return MTLVertexFormatHalf2;
+                case wgpu::VertexFormat::Float16x4:
+                    return MTLVertexFormatHalf4;
+                case wgpu::VertexFormat::Float32:
+                    return MTLVertexFormatFloat;
+                case wgpu::VertexFormat::Float32x2:
+                    return MTLVertexFormatFloat2;
+                case wgpu::VertexFormat::Float32x3:
+                    return MTLVertexFormatFloat3;
+                case wgpu::VertexFormat::Float32x4:
+                    return MTLVertexFormatFloat4;
+                case wgpu::VertexFormat::Uint32:
+                    return MTLVertexFormatUInt;
+                case wgpu::VertexFormat::Uint32x2:
+                    return MTLVertexFormatUInt2;
+                case wgpu::VertexFormat::Uint32x3:
+                    return MTLVertexFormatUInt3;
+                case wgpu::VertexFormat::Uint32x4:
+                    return MTLVertexFormatUInt4;
+                case wgpu::VertexFormat::Sint32:
+                    return MTLVertexFormatInt;
+                case wgpu::VertexFormat::Sint32x2:
+                    return MTLVertexFormatInt2;
+                case wgpu::VertexFormat::Sint32x3:
+                    return MTLVertexFormatInt3;
+                case wgpu::VertexFormat::Sint32x4:
+                    return MTLVertexFormatInt4;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
+            switch (mode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return MTLVertexStepFunctionPerVertex;
+                case wgpu::VertexStepMode::Instance:
+                    return MTLVertexStepFunctionPerInstance;
+            }
+        }
+
+        MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return MTLPrimitiveTypePoint;
+                case wgpu::PrimitiveTopology::LineList:
+                    return MTLPrimitiveTypeLine;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return MTLPrimitiveTypeLineStrip;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return MTLPrimitiveTypeTriangle;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return MTLPrimitiveTypeTriangleStrip;
+            }
+        }
+
+        MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
+            wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return MTLPrimitiveTopologyClassPoint;
+                case wgpu::PrimitiveTopology::LineList:
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return MTLPrimitiveTopologyClassLine;
+                case wgpu::PrimitiveTopology::TriangleList:
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return MTLPrimitiveTopologyClassTriangle;
+            }
+        }
+
+        MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return MTLBlendFactorZero;
+                case wgpu::BlendFactor::One:
+                    return MTLBlendFactorOne;
+                case wgpu::BlendFactor::Src:
+                    return MTLBlendFactorSourceColor;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return MTLBlendFactorOneMinusSourceColor;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return MTLBlendFactorSourceAlpha;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return MTLBlendFactorOneMinusSourceAlpha;
+                case wgpu::BlendFactor::Dst:
+                    return MTLBlendFactorDestinationColor;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return MTLBlendFactorOneMinusDestinationColor;
+                case wgpu::BlendFactor::DstAlpha:
+                    return MTLBlendFactorDestinationAlpha;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return MTLBlendFactorOneMinusDestinationAlpha;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return MTLBlendFactorSourceAlphaSaturated;
+                case wgpu::BlendFactor::Constant:
+                    return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return alpha ? MTLBlendFactorOneMinusBlendAlpha
+                                 : MTLBlendFactorOneMinusBlendColor;
+            }
+        }
+
+        MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return MTLBlendOperationAdd;
+                case wgpu::BlendOperation::Subtract:
+                    return MTLBlendOperationSubtract;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return MTLBlendOperationReverseSubtract;
+                case wgpu::BlendOperation::Min:
+                    return MTLBlendOperationMin;
+                case wgpu::BlendOperation::Max:
+                    return MTLBlendOperationMax;
+            }
+        }
+
+        MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
+                                              bool isDeclaredInFragmentShader) {
+            if (!isDeclaredInFragmentShader) {
+                return MTLColorWriteMaskNone;
+            }
+
+            MTLColorWriteMask mask = MTLColorWriteMaskNone;
+
+            if (writeMask & wgpu::ColorWriteMask::Red) {
+                mask |= MTLColorWriteMaskRed;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Green) {
+                mask |= MTLColorWriteMaskGreen;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Blue) {
+                mask |= MTLColorWriteMaskBlue;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Alpha) {
+                mask |= MTLColorWriteMaskAlpha;
+            }
+
+            return mask;
+        }
+
+        void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
+                              const ColorTargetState* state,
+                              bool isDeclaredInFragmentShader) {
+            attachment.blendingEnabled = state->blend != nullptr;
+            if (attachment.blendingEnabled) {
+                attachment.sourceRGBBlendFactor =
+                    MetalBlendFactor(state->blend->color.srcFactor, false);
+                attachment.destinationRGBBlendFactor =
+                    MetalBlendFactor(state->blend->color.dstFactor, false);
+                attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
+                attachment.sourceAlphaBlendFactor =
+                    MetalBlendFactor(state->blend->alpha.srcFactor, true);
+                attachment.destinationAlphaBlendFactor =
+                    MetalBlendFactor(state->blend->alpha.dstFactor, true);
+                attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
+            }
+            attachment.writeMask =
+                MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+        }
+
+        MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
+            switch (stencilOperation) {
+                case wgpu::StencilOperation::Keep:
+                    return MTLStencilOperationKeep;
+                case wgpu::StencilOperation::Zero:
+                    return MTLStencilOperationZero;
+                case wgpu::StencilOperation::Replace:
+                    return MTLStencilOperationReplace;
+                case wgpu::StencilOperation::Invert:
+                    return MTLStencilOperationInvert;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return MTLStencilOperationIncrementClamp;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return MTLStencilOperationDecrementClamp;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return MTLStencilOperationIncrementWrap;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return MTLStencilOperationDecrementWrap;
+            }
+        }
+
+        NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
+            NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
+                AcquireNSRef([MTLDepthStencilDescriptor new]);
+            MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
+
+            mtlDepthStencilDescriptor.depthCompareFunction =
+                ToMetalCompareFunction(descriptor->depthCompare);
+            mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
+
+            if (StencilTestEnabled(descriptor)) {
+                NSRef<MTLStencilDescriptor> backFaceStencilRef =
+                    AcquireNSRef([MTLStencilDescriptor new]);
+                MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
+                NSRef<MTLStencilDescriptor> frontFaceStencilRef =
+                    AcquireNSRef([MTLStencilDescriptor new]);
+                MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
+
+                backFaceStencil.stencilCompareFunction =
+                    ToMetalCompareFunction(descriptor->stencilBack.compare);
+                backFaceStencil.stencilFailureOperation =
+                    MetalStencilOperation(descriptor->stencilBack.failOp);
+                backFaceStencil.depthFailureOperation =
+                    MetalStencilOperation(descriptor->stencilBack.depthFailOp);
+                backFaceStencil.depthStencilPassOperation =
+                    MetalStencilOperation(descriptor->stencilBack.passOp);
+                backFaceStencil.readMask = descriptor->stencilReadMask;
+                backFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+                frontFaceStencil.stencilCompareFunction =
+                    ToMetalCompareFunction(descriptor->stencilFront.compare);
+                frontFaceStencil.stencilFailureOperation =
+                    MetalStencilOperation(descriptor->stencilFront.failOp);
+                frontFaceStencil.depthFailureOperation =
+                    MetalStencilOperation(descriptor->stencilFront.depthFailOp);
+                frontFaceStencil.depthStencilPassOperation =
+                    MetalStencilOperation(descriptor->stencilFront.passOp);
+                frontFaceStencil.readMask = descriptor->stencilReadMask;
+                frontFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+                mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
+                mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
+            }
+
+            return mtlDepthStencilDescRef;
+        }
+
+        MTLWinding MTLFrontFace(wgpu::FrontFace face) {
+            switch (face) {
+                case wgpu::FrontFace::CW:
+                    return MTLWindingClockwise;
+                case wgpu::FrontFace::CCW:
+                    return MTLWindingCounterClockwise;
+            }
+        }
+
+        MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
+            switch (mode) {
+                case wgpu::CullMode::None:
+                    return MTLCullModeNone;
+                case wgpu::CullMode::Front:
+                    return MTLCullModeFront;
+                case wgpu::CullMode::Back:
+                    return MTLCullModeBack;
+            }
+        }
+
+    }  // anonymous namespace
+
+    // static
+    Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    MaybeError RenderPipeline::Initialize() {
+        mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
+        mMtlFrontFace = MTLFrontFace(GetFrontFace());
+        mMtlCullMode = ToMTLCullMode(GetCullMode());
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+        NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
+            AcquireNSRef([MTLRenderPipelineDescriptor new]);
+        MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
+
+        // TODO: MakeVertexDesc should be const in the future, so we don't need to call it here when
+        // vertex pulling is enabled
+        NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
+
+        // Calling MakeVertexDesc first is important since it sets indices for packed bindings
+        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+            vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
+        }
+        descriptorMTL.vertexDescriptor = vertexDesc.Get();
+
+        const PerStage<ProgrammableStage>& allStages = GetAllStages();
+        const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
+        ShaderModule::MetalFunctionData vertexData;
+        DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
+                                   &vertexData, 0xFFFFFFFF, this));
+
+        descriptorMTL.vertexFunction = vertexData.function.Get();
+        if (vertexData.needsStorageBufferLength) {
+            mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+        }
+
+        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+            const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
+            ShaderModule::MetalFunctionData fragmentData;
+            DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
+                                       ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
+
+            descriptorMTL.fragmentFunction = fragmentData.function.Get();
+            if (fragmentData.needsStorageBufferLength) {
+                mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
+            }
+
+            const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
+                    MetalPixelFormat(GetColorAttachmentFormat(i));
+                const ColorTargetState* descriptor = GetColorTargetState(i);
+                ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
+                                 descriptor, fragmentOutputsWritten[i]);
+            }
+        }
+
+        if (HasDepthStencilAttachment()) {
+            wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
+            const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
+            MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
+
+            if (internalFormat.HasDepth()) {
+                descriptorMTL.depthAttachmentPixelFormat = metalFormat;
+            }
+            if (internalFormat.HasStencil()) {
+                descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
+            }
+        }
+
+        descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
+        descriptorMTL.sampleCount = GetSampleCount();
+        descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
+
+        NSError* error = nullptr;
+        mMtlRenderPipelineState =
+            AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
+                                                                    error:&error]);
+        if (error != nullptr) {
+            return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state ") +
+                                       [error.localizedDescription UTF8String]);
+        }
+        ASSERT(mMtlRenderPipelineState != nil);
+
+        // Create depth stencil state and cache it, fetch the cached depth stencil state when we
+        // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
+        // to improve performance.
+        NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
+            MakeDepthStencilDesc(GetDepthStencilState());
+        mMtlDepthStencilState =
+            AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
+
+        return {};
+    }
+
+    MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
+        return mMtlPrimitiveTopology;
+    }
+
+    MTLWinding RenderPipeline::GetMTLFrontFace() const {
+        return mMtlFrontFace;
+    }
+
+    MTLCullMode RenderPipeline::GetMTLCullMode() const {
+        return mMtlCullMode;
+    }
+
+    void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
+        [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
+    }
+
+    id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
+        return mMtlDepthStencilState.Get();
+    }
+
+    uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
+        ASSERT(slot < kMaxVertexBuffersTyped);
+        return mMtlVertexBufferIndices[slot];
+    }
+
+    wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
+        return mStagesRequiringStorageBufferLength;
+    }
+
+    NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
+        MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
+
+        // Vertex buffers are packed after all the buffers for the bind groups.
+        uint32_t mtlVertexBufferIndex =
+            ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& info = GetVertexBuffer(slot);
+
+            MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
+            if (info.arrayStride == 0) {
+                // For MTLVertexStepFunctionConstant, the stepRate must be 0,
+                // but the arrayStride must NOT be 0, so we made up it with
+                // max(attrib.offset + sizeof(attrib) for each attrib)
+                size_t maxArrayStride = 0;
+                for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+                    const VertexAttributeInfo& attrib = GetAttribute(loc);
+                    // Only use the attributes that use the current input
+                    if (attrib.vertexBufferSlot != slot) {
+                        continue;
+                    }
+                    maxArrayStride =
+                        std::max(maxArrayStride, GetVertexFormatInfo(attrib.format).byteSize +
+                                                     size_t(attrib.offset));
+                }
+                layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
+                layoutDesc.stepRate = 0;
+                // Metal requires the stride must be a multiple of 4 bytes, align it with next
+                // multiple of 4 if it's not.
+                layoutDesc.stride = Align(maxArrayStride, 4);
+            } else {
+                layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
+                layoutDesc.stepRate = 1;
+                layoutDesc.stride = info.arrayStride;
+            }
+
+            mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
+            [layoutDesc release];
+
+            mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
+            mtlVertexBufferIndex++;
+        }
+
+        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& info = GetAttribute(loc);
+
+            auto attribDesc = [MTLVertexAttributeDescriptor new];
+            attribDesc.format = VertexFormatType(info.format);
+            attribDesc.offset = info.offset;
+            attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
+            mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
+            [attribDesc release];
+        }
+
+        return AcquireNSRef(mtlVertexDescriptor);
+    }
+
+    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata) {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                            userdata);
+        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/SamplerMTL.h b/src/dawn/native/metal/SamplerMTL.h
new file mode 100644
index 0000000..166fbe4
--- /dev/null
+++ b/src/dawn/native/metal/SamplerMTL.h
@@ -0,0 +1,44 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SAMPLERMTL_H_
+#define DAWNNATIVE_METAL_SAMPLERMTL_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class Sampler final : public SamplerBase {
+      public:
+        static ResultOrError<Ref<Sampler>> Create(Device* device,
+                                                  const SamplerDescriptor* descriptor);
+
+        id<MTLSamplerState> GetMTLSamplerState();
+
+      private:
+        using SamplerBase::SamplerBase;
+        MaybeError Initialize(const SamplerDescriptor* descriptor);
+
+        NSPRef<id<MTLSamplerState>> mMtlSamplerState;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_SAMPLERMTL_H_
diff --git a/src/dawn/native/metal/SamplerMTL.mm b/src/dawn/native/metal/SamplerMTL.mm
new file mode 100644
index 0000000..235b2f8
--- /dev/null
+++ b/src/dawn/native/metal/SamplerMTL.mm
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SamplerMTL.h"
+
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+    namespace {
+        MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
+            switch (mode) {
+                case wgpu::FilterMode::Nearest:
+                    return MTLSamplerMinMagFilterNearest;
+                case wgpu::FilterMode::Linear:
+                    return MTLSamplerMinMagFilterLinear;
+            }
+        }
+
+        MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
+            switch (mode) {
+                case wgpu::FilterMode::Nearest:
+                    return MTLSamplerMipFilterNearest;
+                case wgpu::FilterMode::Linear:
+                    return MTLSamplerMipFilterLinear;
+            }
+        }
+
+        MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return MTLSamplerAddressModeRepeat;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return MTLSamplerAddressModeMirrorRepeat;
+                case wgpu::AddressMode::ClampToEdge:
+                    return MTLSamplerAddressModeClampToEdge;
+            }
+        }
+    }
+
+    // static
+    ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+                                                const SamplerDescriptor* descriptor) {
+        DAWN_INVALID_IF(
+            descriptor->compare != wgpu::CompareFunction::Undefined &&
+                device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
+            "Sampler compare function (%s) not supported. Compare functions are disabled with the "
+            "Metal backend.",
+            descriptor->compare);
+
+        Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+        DAWN_TRY(sampler->Initialize(descriptor));
+        return sampler;
+    }
+
+    MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+        NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
+        MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
+
+        mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
+        mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
+        mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
+
+        mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
+        mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
+        mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
+
+        mtlDesc.lodMinClamp = descriptor->lodMinClamp;
+        mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
+        // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
+        mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            // Sampler compare is unsupported before A9, which we validate in
+            // Sampler::Create.
+            mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+            // The value is default-initialized in the else-case, and we don't set it or the
+            // Metal debug device errors.
+        }
+
+        mMtlSamplerState = AcquireNSPRef(
+            [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
+
+        if (mMtlSamplerState == nil) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
+        }
+        return {};
+    }
+
+    id<MTLSamplerState> Sampler::GetMTLSamplerState() {
+        return mMtlSamplerState.Get();
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/ShaderModuleMTL.h b/src/dawn/native/metal/ShaderModuleMTL.h
new file mode 100644
index 0000000..0188bb1
--- /dev/null
+++ b/src/dawn/native/metal/ShaderModuleMTL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SHADERMODULEMTL_H_
+#define DAWNNATIVE_METAL_SHADERMODULEMTL_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/NSRef.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+    class PipelineLayout;
+    class RenderPipeline;
+
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        struct MetalFunctionData {
+            NSPRef<id<MTLFunction>> function;
+            bool needsStorageBufferLength;
+            std::vector<uint32_t> workgroupAllocations;
+        };
+
+        // MTLFunctionConstantValues needs @available tag to compile
+        // Use id (like void*) in function signature as workaround and do static cast inside
+        MaybeError CreateFunction(const char* entryPointName,
+                                  SingleShaderStage stage,
+                                  const PipelineLayout* layout,
+                                  MetalFunctionData* out,
+                                  id constantValues = nil,
+                                  uint32_t sampleMask = 0xFFFFFFFF,
+                                  const RenderPipeline* renderPipeline = nullptr);
+
+      private:
+        ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
+                                                  SingleShaderStage stage,
+                                                  const PipelineLayout* layout,
+                                                  uint32_t sampleMask,
+                                                  const RenderPipeline* renderPipeline,
+                                                  std::string* remappedEntryPointName,
+                                                  bool* needsStorageBufferLength,
+                                                  bool* hasInvariantAttribute,
+                                                  std::vector<uint32_t>* workgroupAllocations);
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override = default;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_SHADERMODULEMTL_H_
diff --git a/src/dawn/native/metal/ShaderModuleMTL.mm b/src/dawn/native/metal/ShaderModuleMTL.mm
new file mode 100644
index 0000000..e182898
--- /dev/null
+++ b/src/dawn/native/metal/ShaderModuleMTL.mm
@@ -0,0 +1,278 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/ShaderModuleMTL.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native::metal {
+
+    // static
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor) {
+    }
+
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+        return InitializeBase(parseResult);
+    }
+
+    ResultOrError<std::string> ShaderModule::TranslateToMSL(
+        const char* entryPointName,
+        SingleShaderStage stage,
+        const PipelineLayout* layout,
+        uint32_t sampleMask,
+        const RenderPipeline* renderPipeline,
+        std::string* remappedEntryPointName,
+        bool* needsStorageBufferLength,
+        bool* hasInvariantAttribute,
+        std::vector<uint32_t>* workgroupAllocations) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        std::ostringstream errorStream;
+        errorStream << "Tint MSL failure:" << std::endl;
+
+        // Remap BindingNumber to BindingIndex in WGSL shader
+        using BindingRemapper = tint::transform::BindingRemapper;
+        using BindingPoint = tint::transform::BindingPoint;
+        BindingRemapper::BindingPoints bindingPoints;
+        BindingRemapper::AccessControls accessControls;
+
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase::BindingMap& bindingMap =
+                layout->GetBindGroupLayout(group)->GetBindingMap();
+            for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+                const BindingInfo& bindingInfo =
+                    layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+
+                if (!(bindingInfo.visibility & StageBit(stage))) {
+                    continue;
+                }
+
+                uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
+
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(bindingNumber)};
+                BindingPoint dstBindingPoint{0, shaderIndex};
+                if (srcBindingPoint != dstBindingPoint) {
+                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                }
+            }
+        }
+
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        // We only remap bindings for the target entry point, so we need to strip all other entry
+        // points to avoid generating invalid bindings for them.
+        transformManager.Add<tint::transform::SingleEntryPoint>();
+        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
+        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+        if (stage == SingleShaderStage::Vertex &&
+            GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+            transformManager.Add<tint::transform::VertexPulling>();
+            AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
+                                            kPullingBufferBindingSet, &transformInputs);
+
+            for (VertexBufferSlot slot :
+                 IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+                uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
+
+                // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
+                                             static_cast<uint8_t>(slot)};
+                BindingPoint dstBindingPoint{0, metalIndex};
+                if (srcBindingPoint != dstBindingPoint) {
+                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                }
+            }
+        }
+        if (GetDevice()->IsRobustnessEnabled()) {
+            transformManager.Add<tint::transform::Robustness>();
+        }
+        transformManager.Add<tint::transform::BindingRemapper>();
+        transformManager.Add<tint::transform::Renamer>();
+
+        if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
+            // We still need to rename MSL reserved keywords
+            transformInputs.Add<tint::transform::Renamer::Config>(
+                tint::transform::Renamer::Target::kMslKeywords);
+        }
+
+        transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+                                                         std::move(accessControls),
+                                                         /* mayCollide */ true);
+
+        tint::Program program;
+        tint::transform::DataMap transformOutputs;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+            DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
+                                                   transformInputs, &transformOutputs, nullptr));
+        }
+
+        if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+            auto it = data->remappings.find(entryPointName);
+            if (it != data->remappings.end()) {
+                *remappedEntryPointName = it->second;
+            } else {
+                DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
+                                "Could not find remapped name for entry point.");
+
+                *remappedEntryPointName = entryPointName;
+            }
+        } else {
+            return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+        }
+
+        tint::writer::msl::Options options;
+        options.buffer_size_ubo_index = kBufferLengthBufferSlot;
+        options.fixed_sample_mask = sampleMask;
+        options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+        options.emit_vertex_point_size =
+            stage == SingleShaderStage::Vertex &&
+            renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
+        auto result = tint::writer::msl::Generate(&program, options);
+        DAWN_INVALID_IF(!result.success, "An error occured while generating MSL: %s.",
+                        result.error);
+
+        *needsStorageBufferLength = result.needs_storage_buffer_sizes;
+        *hasInvariantAttribute = result.has_invariant_attribute;
+        *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
+
+        return std::move(result.msl);
+    }
+
+    MaybeError ShaderModule::CreateFunction(const char* entryPointName,
+                                            SingleShaderStage stage,
+                                            const PipelineLayout* layout,
+                                            ShaderModule::MetalFunctionData* out,
+                                            id constantValuesPointer,
+                                            uint32_t sampleMask,
+                                            const RenderPipeline* renderPipeline) {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
+
+        ASSERT(!IsError());
+        ASSERT(out);
+
+        // Vertex stages must specify a renderPipeline
+        if (stage == SingleShaderStage::Vertex) {
+            ASSERT(renderPipeline != nullptr);
+        }
+
+        std::string remappedEntryPointName;
+        std::string msl;
+        bool hasInvariantAttribute = false;
+        DAWN_TRY_ASSIGN(msl,
+                        TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
+                                       &remappedEntryPointName, &out->needsStorageBufferLength,
+                                       &hasInvariantAttribute, &out->workgroupAllocations));
+
+        // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
+        // category. -Wunused-variable in particular comes up a lot in generated code, and some
+        // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
+        // of a warning.
+        msl = R"(
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wall"
+#endif
+)" + msl;
+
+        if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
+            GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+
+        NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
+
+        NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
+        if (hasInvariantAttribute) {
+            if (@available(macOS 11.0, iOS 13.0, *)) {
+                (*compileOptions).preserveInvariance = true;
+            }
+        }
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+        NSError* error = nullptr;
+
+        NSPRef<id<MTLLibrary>> library;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
+            library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
+                                                            options:compileOptions.Get()
+                                                              error:&error]);
+        }
+
+        if (error != nullptr) {
+            DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
+                            "Unable to create library object: %s.",
+                            [error.localizedDescription UTF8String]);
+        }
+        ASSERT(library != nil);
+
+        NSRef<NSString> name =
+            AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
+
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
+            if (constantValuesPointer != nil) {
+                if (@available(macOS 10.12, *)) {
+                    MTLFunctionConstantValues* constantValues = constantValuesPointer;
+                    out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
+                                                                 constantValues:constantValues
+                                                                          error:&error]);
+                    if (error != nullptr) {
+                        if (error.code != MTLLibraryErrorCompileWarning) {
+                            return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
+                                                         [error.localizedDescription UTF8String]);
+                        }
+                    }
+                    ASSERT(out->function != nil);
+                } else {
+                    UNREACHABLE();
+                }
+            } else {
+                out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
+            }
+        }
+
+        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+            GetEntryPoint(entryPointName).usedVertexInputs.any()) {
+            out->needsStorageBufferLength = true;
+        }
+
+        return {};
+    }
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/StagingBufferMTL.h b/src/dawn/native/metal/StagingBufferMTL.h
new file mode 100644
index 0000000..4400598
--- /dev/null
+++ b/src/dawn/native/metal/StagingBufferMTL.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERMETAL_H_
+#define DAWNNATIVE_STAGINGBUFFERMETAL_H_
+
+#include "dawn/native/StagingBuffer.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+
+        id<MTLBuffer> GetBufferHandle() const;
+
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        NSPRef<id<MTLBuffer>> mBuffer;
+    };
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_STAGINGBUFFERMETAL_H_
diff --git a/src/dawn/native/metal/StagingBufferMTL.mm b/src/dawn/native/metal/StagingBufferMTL.mm
new file mode 100644
index 0000000..a3fd91f
--- /dev/null
+++ b/src/dawn/native/metal/StagingBufferMTL.mm
@@ -0,0 +1,46 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    MaybeError StagingBuffer::Initialize() {
+        const size_t bufferSize = GetSize();
+        mBuffer = AcquireNSPRef([mDevice->GetMTLDevice()
+            newBufferWithLength:bufferSize
+                        options:MTLResourceStorageModeShared]);
+
+        if (mBuffer == nullptr) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+        }
+
+        mMappedPointer = [*mBuffer contents];
+        if (mMappedPointer == nullptr) {
+            return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
+        }
+
+        return {};
+    }
+
+    id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
+        return mBuffer.Get();
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/SwapChainMTL.h b/src/dawn/native/metal/SwapChainMTL.h
new file mode 100644
index 0000000..9cae564
--- /dev/null
+++ b/src/dawn/native/metal/SwapChainMTL.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SWAPCHAINMTL_H_
+#define DAWNNATIVE_METAL_SWAPCHAINMTL_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/NSRef.h"
+
+@class CAMetalLayer;
+@protocol CAMetalDrawable;
+
+namespace dawn::native::metal {
+
+    class Device;
+    class Texture;
+
+    class OldSwapChain final : public OldSwapChainBase {
+      public:
+        static Ref<OldSwapChain> Create(Device* deivce, const SwapChainDescriptor* descriptor);
+
+      protected:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        ~OldSwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* view) override;
+    };
+
+    class SwapChain final : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+        ~SwapChain() override;
+
+      private:
+        void DestroyImpl() override;
+
+        using NewSwapChainBase::NewSwapChainBase;
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+        NSRef<CAMetalLayer> mLayer;
+
+        NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
+        Ref<Texture> mTexture;
+
+        MaybeError PresentImpl() override;
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_SWAPCHAINMTL_H_
diff --git a/src/dawn/native/metal/SwapChainMTL.mm b/src/dawn/native/metal/SwapChainMTL.mm
new file mode 100644
index 0000000..04e66fb
--- /dev/null
+++ b/src/dawn/native/metal/SwapChainMTL.mm
@@ -0,0 +1,154 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SwapChainMTL.h"
+
+#include "dawn/native/Surface.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+#include <dawn/dawn_wsi.h>
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace dawn::native::metal {
+
+    // OldSwapChain
+
+    // static
+    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new OldSwapChain(device, descriptor));
+    }
+
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        DawnWSIContextMetal wsiContext = {};
+        wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
+        wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
+        im.Init(im.userData, &wsiContext);
+    }
+
+    OldSwapChain::~OldSwapChain() {
+    }
+
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+        if (error) {
+            GetDevice()->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+
+        id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
+
+        return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+    }
+
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+        return {};
+    }
+
+    // SwapChain
+
+    // static
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    SwapChain::~SwapChain() = default;
+
+    void SwapChain::DestroyImpl() {
+        SwapChainBase::DestroyImpl();
+        DetachFromSurface();
+    }
+
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
+
+        if (previousSwapChain != nullptr) {
+            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+            // multiple backends one after the other. It probably needs to block until the backend
+            // and GPU are completely finished with the previous swapchain.
+            DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
+                            "Metal SwapChain cannot switch backend types from %s to %s.",
+                            previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
+
+            previousSwapChain->DetachFromSurface();
+        }
+
+        mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
+        ASSERT(mLayer != nullptr);
+
+        CGSize size = {};
+        size.width = GetWidth();
+        size.height = GetHeight();
+        [*mLayer setDrawableSize:size];
+
+        [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
+        [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
+        [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
+
+#if defined(DAWN_PLATFORM_MACOS)
+        if (@available(macos 10.13, *)) {
+            [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
+        }
+#endif  // defined(DAWN_PLATFORM_MACOS)
+
+        // There is no way to control Fifo vs. Mailbox in Metal.
+
+        return {};
+    }
+
+    MaybeError SwapChain::PresentImpl() {
+        ASSERT(mCurrentDrawable != nullptr);
+        [*mCurrentDrawable present];
+
+        mTexture->APIDestroy();
+        mTexture = nullptr;
+
+        mCurrentDrawable = nullptr;
+
+        return {};
+    }
+
+    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+        ASSERT(mCurrentDrawable == nullptr);
+        mCurrentDrawable = [*mLayer nextDrawable];
+
+        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+
+        mTexture = Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc,
+                                           [*mCurrentDrawable texture]);
+        return mTexture->CreateView();
+    }
+
+    void SwapChain::DetachFromSurfaceImpl() {
+        ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
+
+        if (mTexture != nullptr) {
+            mTexture->APIDestroy();
+            mTexture = nullptr;
+
+            mCurrentDrawable = nullptr;
+        }
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/TextureMTL.h b/src/dawn/native/metal/TextureMTL.h
new file mode 100644
index 0000000..ba7f97b
--- /dev/null
+++ b/src/dawn/native/metal/TextureMTL.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_TEXTUREMTL_H_
+#define DAWNNATIVE_METAL_TEXTUREMTL_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/common/NSRef.h"
+#include "dawn/native/DawnNative.h"
+
+#include <IOSurface/IOSurfaceRef.h>
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class CommandRecordingContext;
+    class Device;
+
+    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
+    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
+                                             const TextureDescriptor* descriptor,
+                                             IOSurfaceRef ioSurface);
+
+    class Texture final : public TextureBase {
+      public:
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor);
+        static ResultOrError<Ref<Texture>> CreateFromIOSurface(
+            Device* device,
+            const ExternalImageDescriptor* descriptor,
+            IOSurfaceRef ioSurface);
+        static Ref<Texture> CreateWrapping(Device* device,
+                                           const TextureDescriptor* descriptor,
+                                           NSPRef<id<MTLTexture>> wrapped);
+
+        id<MTLTexture> GetMTLTexture();
+        IOSurfaceRef GetIOSurface();
+        NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
+
+        void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                 const SubresourceRange& range);
+
+      private:
+        using TextureBase::TextureBase;
+        ~Texture() override;
+
+        NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
+
+        MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
+        MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                           const TextureDescriptor* textureDescriptor,
+                                           IOSurfaceRef ioSurface);
+        void InitializeAsWrapping(const TextureDescriptor* descriptor,
+                                  NSPRef<id<MTLTexture>> wrapped);
+
+        void DestroyImpl() override;
+
+        MaybeError ClearTexture(CommandRecordingContext* commandContext,
+                                const SubresourceRange& range,
+                                TextureBase::ClearValue clearValue);
+
+        NSPRef<id<MTLTexture>> mMtlTexture;
+
+        MTLTextureUsage mMtlUsage;
+        CFRef<IOSurfaceRef> mIOSurface = nullptr;
+    };
+
+    class TextureView final : public TextureViewBase {
+      public:
+        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                      const TextureViewDescriptor* descriptor);
+
+        id<MTLTexture> GetMTLTexture();
+
+      private:
+        using TextureViewBase::TextureViewBase;
+        MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+        NSPRef<id<MTLTexture>> mMtlTextureView;
+    };
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_TEXTUREMTL_H_
diff --git a/src/dawn/native/metal/TextureMTL.mm b/src/dawn/native/metal/TextureMTL.mm
new file mode 100644
index 0000000..c6fd75e
--- /dev/null
+++ b/src/dawn/native/metal/TextureMTL.mm
@@ -0,0 +1,1119 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/TextureMTL.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+#include <CoreVideo/CVPixelBuffer.h>
+
+namespace dawn::native::metal {
+
+    namespace {
+        bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+            constexpr wgpu::TextureUsage kUsageNeedsTextureView =
+                wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+            return usage & kUsageNeedsTextureView;
+        }
+
+        MTLTextureUsage MetalTextureUsage(const Format& format,
+                                          wgpu::TextureUsage usage,
+                                          uint32_t sampleCount) {
+            MTLTextureUsage result = MTLTextureUsageUnknown;  // This is 0
+
+            if (usage & (wgpu::TextureUsage::StorageBinding)) {
+                result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
+            }
+
+            if (usage & (wgpu::TextureUsage::TextureBinding)) {
+                result |= MTLTextureUsageShaderRead;
+
+                // For sampling stencil aspect of combined depth/stencil. See TextureView
+                // constructor.
+                if (@available(macOS 10.12, iOS 10.0, *)) {
+                    if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+                        result |= MTLTextureUsagePixelFormatView;
+                    }
+                }
+            }
+
+            // MTLTextureUsageRenderTarget is needed to clear multisample textures.
+            if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
+                result |= MTLTextureUsageRenderTarget;
+            }
+
+            return result;
+        }
+
+        MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
+                                            unsigned int sampleCount) {
+            switch (dimension) {
+                case wgpu::TextureViewDimension::e1D:
+                    return MTLTextureType1D;
+                case wgpu::TextureViewDimension::e2D:
+                    return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
+                case wgpu::TextureViewDimension::e2DArray:
+                    return MTLTextureType2DArray;
+                case wgpu::TextureViewDimension::Cube:
+                    return MTLTextureTypeCube;
+                case wgpu::TextureViewDimension::CubeArray:
+                    return MTLTextureTypeCubeArray;
+                case wgpu::TextureViewDimension::e3D:
+                    return MTLTextureType3D;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        bool RequiresCreatingNewTextureView(const TextureBase* texture,
+                                            const TextureViewDescriptor* textureViewDescriptor) {
+            if (texture->GetFormat().format != textureViewDescriptor->format &&
+                !texture->GetFormat().HasDepthOrStencil()) {
+                // Color format reinterpretation required. Note: Depth/stencil formats don't support
+                // reinterpretation.
+                return true;
+            }
+
+            if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+                (texture->GetArrayLayers() == 1 &&
+                 texture->GetDimension() == wgpu::TextureDimension::e2D &&
+                 textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+                // If the view has a different number of array layers, we need a new view.
+                // And, if the original texture is a 2D texture with one array layer, we need a new
+                // view to view it as a 2D array texture.
+                return true;
+            }
+
+            if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+                return true;
+            }
+
+            if (IsSubset(Aspect::Depth | Aspect::Stencil, texture->GetFormat().aspects) &&
+                textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+                return true;
+            }
+
+            switch (textureViewDescriptor->dimension) {
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    return true;
+                default:
+                    break;
+            }
+
+            return false;
+        }
+
+        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+        // between linear space and sRGB without setting MTLTextureUsagePixelFormatView flag. For
+        // example, creating bgra8Unorm texture view on rgba8Unorm texture or creating
+        // rgba8Unorm_srgb texture view on rgab8Unorm texture.
+        bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
+                                                    MTLPixelFormat reinterpretation) {
+            switch (origin) {
+                case MTLPixelFormatRGBA8Unorm:
+                    return reinterpretation == MTLPixelFormatBGRA8Unorm ||
+                           reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
+                case MTLPixelFormatBGRA8Unorm:
+                    return reinterpretation == MTLPixelFormatRGBA8Unorm ||
+                           reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
+                case MTLPixelFormatRGBA8Unorm_sRGB:
+                    return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
+                           reinterpretation == MTLPixelFormatRGBA8Unorm;
+                case MTLPixelFormatBGRA8Unorm_sRGB:
+                    return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
+                           reinterpretation == MTLPixelFormatBGRA8Unorm;
+#if defined(DAWN_PLATFORM_MACOS)
+                case MTLPixelFormatBC1_RGBA:
+                    return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
+                case MTLPixelFormatBC1_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC1_RGBA;
+                case MTLPixelFormatBC2_RGBA:
+                    return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
+                case MTLPixelFormatBC2_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC2_RGBA;
+                case MTLPixelFormatBC3_RGBA:
+                    return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
+                case MTLPixelFormatBC3_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC3_RGBA;
+                case MTLPixelFormatBC7_RGBAUnorm:
+                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
+                case MTLPixelFormatBC7_RGBAUnorm_sRGB:
+                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
+#endif
+
+                default:
+                    return false;
+            }
+        }
+
+        ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+            switch (format) {
+                case kCVPixelFormatType_64RGBAHalf:
+                    return wgpu::TextureFormat::RGBA16Float;
+                case kCVPixelFormatType_TwoComponent16Half:
+                    return wgpu::TextureFormat::RG16Float;
+                case kCVPixelFormatType_OneComponent16Half:
+                    return wgpu::TextureFormat::R16Float;
+                case kCVPixelFormatType_ARGB2101010LEPacked:
+                    return wgpu::TextureFormat::RGB10A2Unorm;
+                case kCVPixelFormatType_32RGBA:
+                    return wgpu::TextureFormat::RGBA8Unorm;
+                case kCVPixelFormatType_32BGRA:
+                    return wgpu::TextureFormat::BGRA8Unorm;
+                case kCVPixelFormatType_TwoComponent8:
+                    return wgpu::TextureFormat::RG8Unorm;
+                case kCVPixelFormatType_OneComponent8:
+                    return wgpu::TextureFormat::R8Unorm;
+                case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
+                    return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
+                default:
+                    return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).",
+                                                        format);
+            }
+        }
+
+        uint32_t GetIOSurfacePlane(wgpu::TextureAspect aspect) {
+            switch (aspect) {
+                case wgpu::TextureAspect::Plane0Only:
+                    return 0;
+                case wgpu::TextureAspect::Plane1Only:
+                    return 1;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+#if defined(DAWN_PLATFORM_MACOS)
+        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
+#elif defined(DAWN_PLATFORM_IOS)
+        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
+#else
+#    error "Unsupported Apple platform."
+#endif
+    }
+
+    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8Unorm:
+                return MTLPixelFormatR8Unorm;
+            case wgpu::TextureFormat::R8Snorm:
+                return MTLPixelFormatR8Snorm;
+            case wgpu::TextureFormat::R8Uint:
+                return MTLPixelFormatR8Uint;
+            case wgpu::TextureFormat::R8Sint:
+                return MTLPixelFormatR8Sint;
+
+            case wgpu::TextureFormat::R16Uint:
+                return MTLPixelFormatR16Uint;
+            case wgpu::TextureFormat::R16Sint:
+                return MTLPixelFormatR16Sint;
+            case wgpu::TextureFormat::R16Float:
+                return MTLPixelFormatR16Float;
+            case wgpu::TextureFormat::RG8Unorm:
+                return MTLPixelFormatRG8Unorm;
+            case wgpu::TextureFormat::RG8Snorm:
+                return MTLPixelFormatRG8Snorm;
+            case wgpu::TextureFormat::RG8Uint:
+                return MTLPixelFormatRG8Uint;
+            case wgpu::TextureFormat::RG8Sint:
+                return MTLPixelFormatRG8Sint;
+
+            case wgpu::TextureFormat::R32Uint:
+                return MTLPixelFormatR32Uint;
+            case wgpu::TextureFormat::R32Sint:
+                return MTLPixelFormatR32Sint;
+            case wgpu::TextureFormat::R32Float:
+                return MTLPixelFormatR32Float;
+            case wgpu::TextureFormat::RG16Uint:
+                return MTLPixelFormatRG16Uint;
+            case wgpu::TextureFormat::RG16Sint:
+                return MTLPixelFormatRG16Sint;
+            case wgpu::TextureFormat::RG16Float:
+                return MTLPixelFormatRG16Float;
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return MTLPixelFormatRGBA8Unorm;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return MTLPixelFormatRGBA8Unorm_sRGB;
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return MTLPixelFormatRGBA8Snorm;
+            case wgpu::TextureFormat::RGBA8Uint:
+                return MTLPixelFormatRGBA8Uint;
+            case wgpu::TextureFormat::RGBA8Sint:
+                return MTLPixelFormatRGBA8Sint;
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return MTLPixelFormatBGRA8Unorm;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return MTLPixelFormatBGRA8Unorm_sRGB;
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return MTLPixelFormatRGB10A2Unorm;
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return MTLPixelFormatRG11B10Float;
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+                return MTLPixelFormatRGB9E5Float;
+
+            case wgpu::TextureFormat::RG32Uint:
+                return MTLPixelFormatRG32Uint;
+            case wgpu::TextureFormat::RG32Sint:
+                return MTLPixelFormatRG32Sint;
+            case wgpu::TextureFormat::RG32Float:
+                return MTLPixelFormatRG32Float;
+            case wgpu::TextureFormat::RGBA16Uint:
+                return MTLPixelFormatRGBA16Uint;
+            case wgpu::TextureFormat::RGBA16Sint:
+                return MTLPixelFormatRGBA16Sint;
+            case wgpu::TextureFormat::RGBA16Float:
+                return MTLPixelFormatRGBA16Float;
+
+            case wgpu::TextureFormat::RGBA32Uint:
+                return MTLPixelFormatRGBA32Uint;
+            case wgpu::TextureFormat::RGBA32Sint:
+                return MTLPixelFormatRGBA32Sint;
+            case wgpu::TextureFormat::RGBA32Float:
+                return MTLPixelFormatRGBA32Float;
+
+            case wgpu::TextureFormat::Depth32Float:
+                return MTLPixelFormatDepth32Float;
+            case wgpu::TextureFormat::Depth24Plus:
+                return MTLPixelFormatDepth32Float;
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                return MTLPixelFormatDepth32Float_Stencil8;
+            case wgpu::TextureFormat::Depth16Unorm:
+                if (@available(macOS 10.12, iOS 13.0, *)) {
+                    return MTLPixelFormatDepth16Unorm;
+                } else {
+                    // TODO (dawn:1181): Allow non-conformant implementation on macOS 10.11
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::Stencil8:
+                return MTLPixelFormatStencil8;
+
+#if defined(DAWN_PLATFORM_MACOS)
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                return MTLPixelFormatDepth24Unorm_Stencil8;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+                return MTLPixelFormatBC1_RGBA;
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return MTLPixelFormatBC1_RGBA_sRGB;
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+                return MTLPixelFormatBC2_RGBA;
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return MTLPixelFormatBC2_RGBA_sRGB;
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+                return MTLPixelFormatBC3_RGBA;
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return MTLPixelFormatBC3_RGBA_sRGB;
+            case wgpu::TextureFormat::BC4RSnorm:
+                return MTLPixelFormatBC4_RSnorm;
+            case wgpu::TextureFormat::BC4RUnorm:
+                return MTLPixelFormatBC4_RUnorm;
+            case wgpu::TextureFormat::BC5RGSnorm:
+                return MTLPixelFormatBC5_RGSnorm;
+            case wgpu::TextureFormat::BC5RGUnorm:
+                return MTLPixelFormatBC5_RGUnorm;
+            case wgpu::TextureFormat::BC6HRGBFloat:
+                return MTLPixelFormatBC6H_RGBFloat;
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return MTLPixelFormatBC6H_RGBUfloat;
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return MTLPixelFormatBC7_RGBAUnorm;
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+#else
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+#endif
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatETC2_RGB8;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatETC2_RGB8_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatETC2_RGB8A1;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatETC2_RGB8A1_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_RGBA8;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_RGBA8_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::EACR11Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_R11Unorm;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::EACR11Snorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_R11Snorm;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::EACRG11Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_RG11Unorm;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::EACRG11Snorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatEAC_RG11Snorm;
+                } else {
+                    UNREACHABLE();
+                }
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_4x4_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_4x4_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_5x4_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_5x4_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_5x5_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_5x5_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_6x5_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_6x5_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_6x6_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_6x6_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x5_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x5_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x6_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x6_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x8_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_8x8_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x5_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x5_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x6_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x6_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x8_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x8_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x10_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_10x10_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_12x10_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_12x10_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_12x12_LDR;
+                } else {
+                    UNREACHABLE();
+                }
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                if (@available(macOS 11.0, iOS 8.0, *)) {
+                    return MTLPixelFormatASTC_12x12_sRGB;
+                } else {
+                    UNREACHABLE();
+                }
+
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            case wgpu::TextureFormat::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
+                                             const TextureDescriptor* descriptor,
+                                             IOSurfaceRef ioSurface) {
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "Texture dimension (%s) is not %s.", descriptor->dimension,
+                        wgpu::TextureDimension::e2D);
+
+        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                        descriptor->sampleCount);
+
+        uint32_t surfaceWidth = IOSurfaceGetWidth(ioSurface);
+        uint32_t surfaceHeight = IOSurfaceGetHeight(ioSurface);
+
+        DAWN_INVALID_IF(
+            descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
+                descriptor->size.depthOrArrayLayers != 1,
+            "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
+            surfaceWidth, surfaceHeight, &descriptor->size);
+
+        wgpu::TextureFormat ioSurfaceFormat;
+        DAWN_TRY_ASSIGN(ioSurfaceFormat,
+                        GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
+        DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
+                        "IOSurface format (%s) doesn't match the descriptor format (%s).",
+                        ioSurfaceFormat, descriptor->format);
+
+        return {};
+    }
+
+    NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
+        NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+        MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+        mtlDesc.width = GetWidth();
+        mtlDesc.sampleCount = GetSampleCount();
+        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+        // between linear space and sRGB. For example, creating bgra8Unorm texture view on
+        // rgba8Unorm texture or creating rgba8Unorm_srgb texture view on rgab8Unorm texture.
+        // TODO: add MTLTextureUsagePixelFormatView when needed when we support other format
+        // reinterpretation.
+        mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
+        mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
+        mtlDesc.mipmapLevelCount = GetNumMipLevels();
+        mtlDesc.storageMode = MTLStorageModePrivate;
+
+        // Choose the correct MTLTextureType and paper over differences in how the array layer count
+        // is specified.
+        switch (GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                mtlDesc.arrayLength = 1;
+                mtlDesc.depth = 1;
+                ASSERT(mtlDesc.sampleCount == 1);
+                mtlDesc.textureType = MTLTextureType1D;
+                break;
+
+            case wgpu::TextureDimension::e2D:
+                mtlDesc.height = GetHeight();
+                mtlDesc.arrayLength = GetArrayLayers();
+                mtlDesc.depth = 1;
+                if (mtlDesc.arrayLength > 1) {
+                    ASSERT(mtlDesc.sampleCount == 1);
+                    mtlDesc.textureType = MTLTextureType2DArray;
+                } else if (mtlDesc.sampleCount > 1) {
+                    mtlDesc.textureType = MTLTextureType2DMultisample;
+                } else {
+                    mtlDesc.textureType = MTLTextureType2D;
+                }
+                break;
+            case wgpu::TextureDimension::e3D:
+                mtlDesc.height = GetHeight();
+                mtlDesc.depth = GetDepth();
+                mtlDesc.arrayLength = 1;
+                ASSERT(mtlDesc.sampleCount == 1);
+                mtlDesc.textureType = MTLTextureType3D;
+                break;
+        }
+
+        return mtlDescRef;
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                                const TextureDescriptor* descriptor) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+        DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
+        return texture;
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(
+        Device* device,
+        const ExternalImageDescriptor* descriptor,
+        IOSurfaceRef ioSurface) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
+        DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface));
+        return texture;
+    }
+
+    // static
+    Ref<Texture> Texture::CreateWrapping(Device* device,
+                                         const TextureDescriptor* descriptor,
+                                         NSPRef<id<MTLTexture>> wrapped) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+        texture->InitializeAsWrapping(descriptor, std::move(wrapped));
+        return texture;
+    }
+
+    MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
+        Device* device = ToBackend(GetDevice());
+
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        mMtlUsage = [*mtlDesc usage];
+        mMtlTexture =
+            AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
+
+        if (mMtlTexture == nil) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
+        }
+
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
+                                  TextureBase::ClearValue::NonZero));
+        }
+
+        return {};
+    }
+
+    void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
+                                       NSPRef<id<MTLTexture>> wrapped) {
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        mMtlUsage = [*mtlDesc usage];
+        mMtlTexture = std::move(wrapped);
+    }
+
+    MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                                const TextureDescriptor* textureDescriptor,
+                                                IOSurfaceRef ioSurface) {
+        mIOSurface = ioSurface;
+
+        // Uses WGPUTexture which wraps multiplanar ioSurface needs to create
+        // texture view explicitly. Wrap the ioSurface and delay to extract
+        // MTLTexture from the plane of it when creating texture view.
+        // WGPUTexture which wraps non-multplanar ioSurface needs to support
+        // ops that doesn't require creating texture view(e.g. copy). Extract
+        // MTLTexture from such ioSurface to support this.
+        if (!GetFormat().IsMultiPlanar()) {
+            Device* device = ToBackend(GetDevice());
+
+            NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+            [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
+
+            mMtlUsage = [*mtlDesc usage];
+            mMtlTexture =
+                AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
+                                                                     iosurface:ioSurface
+                                                                         plane:0]);
+        }
+        SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
+        return {};
+    }
+
+    Texture::~Texture() {
+    }
+
+    void Texture::DestroyImpl() {
+        TextureBase::DestroyImpl();
+        mMtlTexture = nullptr;
+        mIOSurface = nullptr;
+    }
+
+    id<MTLTexture> Texture::GetMTLTexture() {
+        return mMtlTexture.Get();
+    }
+
+    IOSurfaceRef Texture::GetIOSurface() {
+        return mIOSurface.Get();
+    }
+
+    NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
+        if (GetFormat().format == format) {
+            return mMtlTexture;
+        }
+
+        ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
+                                                      MetalPixelFormat(format)));
+        return AcquireNSPRef(
+            [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
+    }
+
+    MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+                                     const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        Device* device = ToBackend(GetDevice());
+
+        const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+        const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
+
+        if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
+            ASSERT(GetFormat().isRenderable);
+
+            // End the blit encoder if it is open.
+            commandContext->EndBlit();
+
+            if (GetFormat().HasDepthOrStencil()) {
+                // Create a render pass to clear each subresource.
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                level, arrayLayer, range.aspects))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        // Note that this creates a descriptor that's autoreleased so we don't use
+                        // AcquireNSRef
+                        NSRef<MTLRenderPassDescriptor> descriptorRef =
+                            [MTLRenderPassDescriptor renderPassDescriptor];
+                        MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+                        // At least one aspect needs clearing. Iterate the aspects individually to
+                        // determine which to clear.
+                        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                            if (clearValue == TextureBase::ClearValue::Zero &&
+                                IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                    level, arrayLayer, aspect))) {
+                                // Skip lazy clears if already initialized.
+                                continue;
+                            }
+
+                            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+                            switch (aspect) {
+                                case Aspect::Depth:
+                                    descriptor.depthAttachment.texture = GetMTLTexture();
+                                    descriptor.depthAttachment.level = level;
+                                    descriptor.depthAttachment.slice = arrayLayer;
+                                    descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+                                    descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+                                    descriptor.depthAttachment.clearDepth = dClearColor;
+                                    break;
+                                case Aspect::Stencil:
+                                    descriptor.stencilAttachment.texture = GetMTLTexture();
+                                    descriptor.stencilAttachment.level = level;
+                                    descriptor.stencilAttachment.slice = arrayLayer;
+                                    descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+                                    descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+                                    descriptor.stencilAttachment.clearStencil =
+                                        static_cast<uint32_t>(clearColor);
+                                    break;
+                                default:
+                                    UNREACHABLE();
+                            }
+                        }
+
+                        commandContext->BeginRender(descriptor);
+                        commandContext->EndRender();
+                    }
+                }
+            } else {
+                ASSERT(GetFormat().IsColor());
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    // Create multiple render passes with each subresource as a color attachment to
+                    // clear them all. Only do this for array layers to ensure all attachments have
+                    // the same size.
+                    NSRef<MTLRenderPassDescriptor> descriptor;
+                    uint32_t attachment = 0;
+
+                    uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
+
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                level, arrayLayer, Aspect::Color))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        for (uint32_t z = 0; z < numZSlices; ++z) {
+                            if (descriptor == nullptr) {
+                                // Note that this creates a descriptor that's autoreleased so we
+                                // don't use AcquireNSRef
+                                descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
+                            }
+
+                            [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
+                            [*descriptor colorAttachments][attachment].loadAction =
+                                MTLLoadActionClear;
+                            [*descriptor colorAttachments][attachment].storeAction =
+                                MTLStoreActionStore;
+                            [*descriptor colorAttachments][attachment].clearColor =
+                                MTLClearColorMake(dClearColor, dClearColor, dClearColor,
+                                                  dClearColor);
+                            [*descriptor colorAttachments][attachment].level = level;
+                            [*descriptor colorAttachments][attachment].slice = arrayLayer;
+                            [*descriptor colorAttachments][attachment].depthPlane = z;
+
+                            attachment++;
+
+                            if (attachment == kMaxColorAttachments) {
+                                attachment = 0;
+                                commandContext->BeginRender(descriptor.Get());
+                                commandContext->EndRender();
+                                descriptor = nullptr;
+                            }
+                        }
+                    }
+
+                    if (descriptor != nullptr) {
+                        commandContext->BeginRender(descriptor.Get());
+                        commandContext->EndRender();
+                    }
+                }
+            }
+        } else {
+            Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
+
+            // Encode a buffer to texture copy to clear each subresource.
+            for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                // Compute the buffer size big enough to fill the largest mip.
+                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+                // Metal validation layers: sourceBytesPerRow must be at least 64.
+                uint32_t largestMipBytesPerRow =
+                    std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
+
+                // Metal validation layers: sourceBytesPerImage must be at least 512.
+                uint64_t largestMipBytesPerImage =
+                    std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
+                                 (largestMipSize.height / blockInfo.height),
+                             512llu);
+
+                uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
+
+                if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
+                    return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+                }
+
+                DynamicUploader* uploader = device->GetDynamicUploader();
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle,
+                                uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                                   blockInfo.byteSize));
+                memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+                id<MTLBuffer> uploadBuffer =
+                    ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    Extent3D virtualSize = GetMipLevelVirtualSize(level);
+
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
+                        [commandContext->EnsureBlit()
+                                 copyFromBuffer:uploadBuffer
+                                   sourceOffset:uploadHandle.startOffset
+                              sourceBytesPerRow:largestMipBytesPerRow
+                            sourceBytesPerImage:largestMipBytesPerImage
+                                     sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
+                                                            virtualSize.depthOrArrayLayers)
+                                      toTexture:GetMTLTexture()
+                               destinationSlice:arrayLayer
+                               destinationLevel:level
+                              destinationOrigin:MTLOriginMake(0, 0, 0)
+                                        options:blitOption];
+                    }
+                }
+            }
+        }
+
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            device->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                      const SubresourceRange& range) {
+        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (!IsSubresourceContentInitialized(range)) {
+            // If subresource has not been initialized, clear it to black as it could
+            // contain dirty bits from recycled memory
+            GetDevice()->ConsumedError(
+                ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        }
+    }
+
+    // static
+    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                        const TextureViewDescriptor* descriptor) {
+        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+        DAWN_TRY(view->Initialize(descriptor));
+        return view;
+    }
+
+    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+        Texture* texture = ToBackend(GetTexture());
+
+        // Texture could be destroyed by the time we make a view.
+        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+            return {};
+        }
+
+        id<MTLTexture> mtlTexture = texture->GetMTLTexture();
+
+        if (!UsageNeedsTextureView(texture->GetInternalUsage())) {
+            mMtlTextureView = nullptr;
+        } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+            mMtlTextureView = mtlTexture;
+        } else if (texture->GetFormat().IsMultiPlanar()) {
+            NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+            MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+            mtlDesc.sampleCount = texture->GetSampleCount();
+            mtlDesc.usage = MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
+                                              texture->GetSampleCount());
+            mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
+            mtlDesc.mipmapLevelCount = texture->GetNumMipLevels();
+            mtlDesc.storageMode = kIOSurfaceStorageMode;
+
+            uint32_t plane = GetIOSurfacePlane(descriptor->aspect);
+            mtlDesc.width = IOSurfaceGetWidthOfPlane(texture->GetIOSurface(), plane);
+            mtlDesc.height = IOSurfaceGetHeightOfPlane(texture->GetIOSurface(), plane);
+
+            // Multiplanar texture is validated to only have single layer, single mipLevel
+            // and 2d textures (depth == 1)
+            ASSERT(texture->GetArrayLayers() == 1 &&
+                   texture->GetDimension() == wgpu::TextureDimension::e2D &&
+                   texture->GetNumMipLevels() == 1);
+            mtlDesc.arrayLength = 1;
+            mtlDesc.depth = 1;
+
+            mMtlTextureView = AcquireNSPRef([ToBackend(GetDevice())->GetMTLDevice()
+                newTextureWithDescriptor:mtlDesc
+                               iosurface:texture->GetIOSurface()
+                                   plane:plane]);
+            if (mMtlTextureView == nil) {
+                return DAWN_INTERNAL_ERROR(
+                    "Failed to create MTLTexture view for external texture.");
+            }
+        } else {
+            MTLPixelFormat viewFormat = MetalPixelFormat(descriptor->format);
+            MTLPixelFormat textureFormat = MetalPixelFormat(GetTexture()->GetFormat().format);
+            if (descriptor->aspect == wgpu::TextureAspect::StencilOnly &&
+                textureFormat != MTLPixelFormatStencil8) {
+                if (@available(macOS 10.12, iOS 10.0, *)) {
+                    if (textureFormat == MTLPixelFormatDepth32Float_Stencil8) {
+                        viewFormat = MTLPixelFormatX32_Stencil8;
+                    }
+#if defined(DAWN_PLATFORM_MACOS)
+                    else if (textureFormat == MTLPixelFormatDepth24Unorm_Stencil8) {
+                        viewFormat = MTLPixelFormatX24_Stencil8;
+                    }
+#endif
+                    else {
+                        UNREACHABLE();
+                    }
+                } else {
+                    // TODO(enga): Add a workaround to back combined depth/stencil textures
+                    // with Sampled usage using two separate textures.
+                    // Or, consider always using the workaround for D32S8.
+                    GetDevice()->ConsumedError(
+                        DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
+                                               "combined depth/stencil format."));
+                }
+            } else if (GetTexture()->GetFormat().HasDepth() &&
+                       GetTexture()->GetFormat().HasStencil()) {
+                // Depth-only views for depth/stencil textures in Metal simply use the original
+                // texture's format.
+                viewFormat = textureFormat;
+            }
+
+            MTLTextureType textureViewType =
+                MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
+            auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
+            auto arrayLayerRange =
+                NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+
+            mMtlTextureView =
+                AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:viewFormat
+                                                            textureType:textureViewType
+                                                                 levels:mipLevelRange
+                                                                 slices:arrayLayerRange]);
+            if (mMtlTextureView == nil) {
+                return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+            }
+        }
+
+        return {};
+    }
+
+    id<MTLTexture> TextureView::GetMTLTexture() {
+        ASSERT(mMtlTextureView != nullptr);
+        return mMtlTextureView.Get();
+    }
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/UtilsMetal.h b/src/dawn/native/metal/UtilsMetal.h
new file mode 100644
index 0000000..5c4ae9c
--- /dev/null
+++ b/src/dawn/native/metal/UtilsMetal.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_UTILSMETAL_H_
+#define DAWNNATIVE_METAL_UTILSMETAL_H_
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native {
+    struct ProgrammableStage;
+    struct EntryPointMetadata;
+    enum class SingleShaderStage;
+}
+
+namespace dawn::native::metal {
+
+    MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
+
+    struct TextureBufferCopySplit {
+        static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
+
+        struct CopyInfo {
+            NSUInteger bufferOffset;
+            NSUInteger bytesPerRow;
+            NSUInteger bytesPerImage;
+            Origin3D textureOrigin;
+            Extent3D copyExtent;
+        };
+
+        uint32_t count = 0;
+        std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
+
+        auto begin() const {
+            return copies.begin();
+        }
+
+        auto end() const {
+            return copies.begin() + count;
+        }
+    };
+
+    TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+                                                         uint32_t mipLevel,
+                                                         Origin3D origin,
+                                                         Extent3D copyExtent,
+                                                         uint64_t bufferSize,
+                                                         uint64_t bufferOffset,
+                                                         uint32_t bytesPerRow,
+                                                         uint32_t rowsPerImage,
+                                                         Aspect aspect);
+
+    void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                             Texture* texture,
+                                             const TextureCopy& dst,
+                                             const Extent3D& size);
+
+    MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+
+    // Helper function to create function with constant values wrapped in
+    // if available branch
+    MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+                                 SingleShaderStage singleShaderStage,
+                                 PipelineLayout* pipelineLayout,
+                                 ShaderModule::MetalFunctionData* functionData,
+                                 uint32_t sampleMask = 0xFFFFFFFF,
+                                 const RenderPipeline* renderPipeline = nullptr);
+
+}  // namespace dawn::native::metal
+
+#endif  // DAWNNATIVE_METAL_UTILSMETAL_H_
diff --git a/src/dawn/native/metal/UtilsMetal.mm b/src/dawn/native/metal/UtilsMetal.mm
new file mode 100644
index 0000000..e2e0ba3
--- /dev/null
+++ b/src/dawn/native/metal/UtilsMetal.mm
@@ -0,0 +1,288 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/UtilsMetal.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native::metal {
+
+    MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
+        switch (compareFunction) {
+            case wgpu::CompareFunction::Never:
+                return MTLCompareFunctionNever;
+            case wgpu::CompareFunction::Less:
+                return MTLCompareFunctionLess;
+            case wgpu::CompareFunction::LessEqual:
+                return MTLCompareFunctionLessEqual;
+            case wgpu::CompareFunction::Greater:
+                return MTLCompareFunctionGreater;
+            case wgpu::CompareFunction::GreaterEqual:
+                return MTLCompareFunctionGreaterEqual;
+            case wgpu::CompareFunction::NotEqual:
+                return MTLCompareFunctionNotEqual;
+            case wgpu::CompareFunction::Equal:
+                return MTLCompareFunctionEqual;
+            case wgpu::CompareFunction::Always:
+                return MTLCompareFunctionAlways;
+
+            case wgpu::CompareFunction::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+                                                         uint32_t mipLevel,
+                                                         Origin3D origin,
+                                                         Extent3D copyExtent,
+                                                         uint64_t bufferSize,
+                                                         uint64_t bufferOffset,
+                                                         uint32_t bytesPerRow,
+                                                         uint32_t rowsPerImage,
+                                                         Aspect aspect) {
+        TextureBufferCopySplit copy;
+        const Format textureFormat = texture->GetFormat();
+        const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
+
+        // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
+        // compute the correct range when checking if the buffer is big enough to contain the
+        // data for the whole copy. Instead of looking at the position of the last texel in the
+        // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+        // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
+        // buffer below where in memory, each row data (D) of the texture is followed by some
+        // padding data (P):
+        //     |DDDDDDD|PP|
+        //     |DDDDDDD|PP|
+        //     |DDDDDDD|PP|
+        //     |DDDDDDD|PP|
+        //     |DDDDDDA|PP|
+        // The last pixel read will be A, but the driver will think it is the whole last padding
+        // row, causing it to generate an error when the pixel buffer is just big enough.
+
+        // We work around this limitation by detecting when Metal would complain and copy the
+        // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
+        uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
+
+        // Metal validation layer requires that if the texture's pixel format is a compressed
+        // format, the sourceSize must be a multiple of the pixel format's block size or be
+        // clamped to the edge of the texture if the block extends outside the bounds of a
+        // texture.
+        const Extent3D clampedCopyExtent =
+            texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
+
+        // Check whether buffer size is big enough.
+        bool needWorkaround =
+            bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
+        if (!needWorkaround) {
+            copy.count = 1;
+            copy.copies[0].bufferOffset = bufferOffset;
+            copy.copies[0].bytesPerRow = bytesPerRow;
+            copy.copies[0].bytesPerImage = bytesPerImage;
+            copy.copies[0].textureOrigin = origin;
+            copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+                                         copyExtent.depthOrArrayLayers};
+            return copy;
+        }
+
+        uint64_t currentOffset = bufferOffset;
+
+        // Doing all the copy except the last image.
+        if (copyExtent.depthOrArrayLayers > 1) {
+            copy.copies[copy.count].bufferOffset = currentOffset;
+            copy.copies[copy.count].bytesPerRow = bytesPerRow;
+            copy.copies[copy.count].bytesPerImage = bytesPerImage;
+            copy.copies[copy.count].textureOrigin = origin;
+            copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+                                                  copyExtent.depthOrArrayLayers - 1};
+
+            ++copy.count;
+
+            // Update offset to copy to the last image.
+            currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
+        }
+
+        // Doing all the copy in last image except the last row.
+        uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
+        if (copyBlockRowCount > 1) {
+            copy.copies[copy.count].bufferOffset = currentOffset;
+            copy.copies[copy.count].bytesPerRow = bytesPerRow;
+            copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
+            copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
+                                                     origin.z + copyExtent.depthOrArrayLayers - 1};
+
+            ASSERT(copyExtent.height - blockInfo.height <
+                   texture->GetMipLevelVirtualSize(mipLevel).height);
+            copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
+                                                  copyExtent.height - blockInfo.height, 1};
+
+            ++copy.count;
+
+            // Update offset to copy to the last row.
+            currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
+        }
+
+        // Doing the last row copy with the exact number of bytes in last row.
+        // Workaround this issue in a way just like the copy to a 1D texture.
+        uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
+        uint32_t lastRowCopyExtentHeight =
+            blockInfo.height + clampedCopyExtent.height - copyExtent.height;
+        ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
+
+        copy.copies[copy.count].bufferOffset = currentOffset;
+        copy.copies[copy.count].bytesPerRow = lastRowDataSize;
+        copy.copies[copy.count].bytesPerImage = lastRowDataSize;
+        copy.copies[copy.count].textureOrigin = {origin.x,
+                                                 origin.y + copyExtent.height - blockInfo.height,
+                                                 origin.z + copyExtent.depthOrArrayLayers - 1};
+        copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+        ++copy.count;
+
+        return copy;
+    }
+
+    void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                             Texture* texture,
+                                             const TextureCopy& dst,
+                                             const Extent3D& size) {
+        ASSERT(texture == dst.texture.Get());
+        SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
+        if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
+            texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            texture->EnsureSubresourceContentInitialized(commandContext, range);
+        }
+    }
+
+    MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
+        ASSERT(HasOneBit(aspect));
+        ASSERT(format.aspects & aspect);
+
+        if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+            // We only provide a blit option if the format has both depth and stencil.
+            // It is invalid to provide a blit option otherwise.
+            switch (aspect) {
+                case Aspect::Depth:
+                    return MTLBlitOptionDepthFromDepthStencil;
+                case Aspect::Stencil:
+                    return MTLBlitOptionStencilFromDepthStencil;
+                default:
+                    UNREACHABLE();
+            }
+        }
+        return MTLBlitOptionNone;
+    }
+
+    MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+                                 SingleShaderStage singleShaderStage,
+                                 PipelineLayout* pipelineLayout,
+                                 ShaderModule::MetalFunctionData* functionData,
+                                 uint32_t sampleMask,
+                                 const RenderPipeline* renderPipeline) {
+        ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
+        const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
+        const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
+        if (entryPointMetadata.overridableConstants.size() == 0) {
+            DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage,
+                                                  pipelineLayout, functionData, nil, sampleMask,
+                                                  renderPipeline));
+            return {};
+        }
+
+        if (@available(macOS 10.12, *)) {
+            // MTLFunctionConstantValues can only be created within the if available branch
+            NSRef<MTLFunctionConstantValues> constantValues =
+                AcquireNSRef([MTLFunctionConstantValues new]);
+
+            std::unordered_set<std::string> overriddenConstants;
+
+            auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
+                                  MTLDataType* type, OverridableConstantScalar* entry,
+                                  double value = 0) {
+                switch (dawnType) {
+                    case EntryPointMetadata::OverridableConstant::Type::Boolean:
+                        *type = MTLDataTypeBool;
+                        if (entry) {
+                            entry->b = static_cast<int32_t>(value);
+                        }
+                        break;
+                    case EntryPointMetadata::OverridableConstant::Type::Float32:
+                        *type = MTLDataTypeFloat;
+                        if (entry) {
+                            entry->f32 = static_cast<float>(value);
+                        }
+                        break;
+                    case EntryPointMetadata::OverridableConstant::Type::Int32:
+                        *type = MTLDataTypeInt;
+                        if (entry) {
+                            entry->i32 = static_cast<int32_t>(value);
+                        }
+                        break;
+                    case EntryPointMetadata::OverridableConstant::Type::Uint32:
+                        *type = MTLDataTypeUInt;
+                        if (entry) {
+                            entry->u32 = static_cast<uint32_t>(value);
+                        }
+                        break;
+                    default:
+                        UNREACHABLE();
+                }
+            };
+
+            for (const auto& [name, value] : programmableStage.constants) {
+                overriddenConstants.insert(name);
+
+                // This is already validated so `name` must exist
+                const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+
+                MTLDataType type;
+                OverridableConstantScalar entry{};
+
+                switchType(moduleConstant.type, &type, &entry, value);
+
+                [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
+            }
+
+            // Set shader initialized default values because MSL function_constant
+            // has no default value
+            for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
+                if (overriddenConstants.count(name) != 0) {
+                    // This constant already has overridden value
+                    continue;
+                }
+
+                // Must exist because it is validated
+                const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+                ASSERT(moduleConstant.isInitialized);
+                MTLDataType type;
+
+                switchType(moduleConstant.type, &type, nullptr);
+
+                [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
+                                                  type:type
+                                               atIndex:moduleConstant.id];
+            }
+
+            DAWN_TRY(shaderModule->CreateFunction(
+                shaderEntryPoint, singleShaderStage, pipelineLayout, functionData,
+                constantValues.Get(), sampleMask, renderPipeline));
+        } else {
+            UNREACHABLE();
+        }
+        return {};
+    }
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/null/DeviceNull.cpp b/src/dawn/native/null/DeviceNull.cpp
new file mode 100644
index 0000000..bb1d2f2
--- /dev/null
+++ b/src/dawn/native/null/DeviceNull.cpp
@@ -0,0 +1,518 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/null/DeviceNull.h"
+
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Surface.h"
+
+namespace dawn::native::null {
+
+    // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
+
+    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
+        mVendorId = 0;
+        mDeviceId = 0;
+        mName = "Null backend";
+        mAdapterType = wgpu::AdapterType::CPU;
+        MaybeError err = Initialize();
+        ASSERT(err.IsSuccess());
+    }
+
+    Adapter::~Adapter() = default;
+
+    bool Adapter::SupportsExternalImages() const {
+        return false;
+    }
+
+    // Used for the tests that intend to use an adapter without all features enabled.
+    void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
+        mSupportedFeatures = {};
+        for (wgpu::FeatureName f : requiredFeatures) {
+            mSupportedFeatures.EnableFeature(f);
+        }
+    }
+
+    MaybeError Adapter::InitializeImpl() {
+        return {};
+    }
+
+    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+        // Enable all features by default for the convenience of tests.
+        mSupportedFeatures.featuresBitSet.set();
+        return {};
+    }
+
+    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+        GetDefaultLimits(&limits->v1);
+        return {};
+    }
+
+    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+        return Device::Create(this, descriptor);
+    }
+
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
+        }
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
+            // There is always a single Null adapter because it is purely CPU based and doesn't
+            // depend on the system.
+            std::vector<Ref<AdapterBase>> adapters;
+            Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
+            adapters.push_back(std::move(adapter));
+            return adapters;
+        }
+    };
+
+    BackendConnection* Connect(InstanceBase* instance) {
+        return new Backend(instance);
+    }
+
+    struct CopyFromStagingToBufferOperation : PendingOperation {
+        virtual void Execute() {
+            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
+        }
+
+        StagingBufferBase* staging;
+        Ref<Buffer> destination;
+        uint64_t sourceOffset;
+        uint64_t destinationOffset;
+        uint64_t size;
+    };
+
+    // Device
+
+    // static
+    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+                                              const DeviceDescriptor* descriptor) {
+        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    Device::~Device() {
+        Destroy();
+    }
+
+    MaybeError Device::Initialize() {
+        return DeviceBase::Initialize(new Queue(this));
+    }
+
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        return AcquireRef(new BindGroup(this, descriptor));
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
+        return AcquireRef(new Buffer(this, descriptor));
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(this, descriptor));
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return AcquireRef(new PipelineLayout(this, descriptor));
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return AcquireRef(new QuerySet(this, descriptor));
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(this, descriptor));
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return AcquireRef(new Sampler(this, descriptor));
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new OldSwapChain(this, descriptor));
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return AcquireRef(new TextureView(texture, descriptor));
+    }
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer =
+            std::make_unique<StagingBuffer>(size, this);
+        DAWN_TRY(stagingBuffer->Initialize());
+        return std::move(stagingBuffer);
+    }
+
+    void Device::DestroyImpl() {
+        ASSERT(GetState() == State::Disconnected);
+
+        // Clear pending operations before checking mMemoryUsage because some operations keep a
+        // reference to Buffers.
+        mPendingOperations.clear();
+        ASSERT(mMemoryUsage == 0);
+    }
+
+    MaybeError Device::WaitForIdleForDestruction() {
+        mPendingOperations.clear();
+        return {};
+    }
+
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            destination->SetIsDataInitialized();
+        }
+
+        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
+        operation->staging = source;
+        operation->destination = ToBackend(destination);
+        operation->sourceOffset = sourceOffset;
+        operation->destinationOffset = destinationOffset;
+        operation->size = size;
+
+        AddPendingOperation(std::move(operation));
+
+        return {};
+    }
+
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        return {};
+    }
+
+    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
+        static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
+        if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
+        }
+        mMemoryUsage += bytes;
+        return {};
+    }
+
+    void Device::DecrementMemoryUsage(uint64_t bytes) {
+        ASSERT(mMemoryUsage >= bytes);
+        mMemoryUsage -= bytes;
+    }
+
+    MaybeError Device::TickImpl() {
+        return SubmitPendingOperations();
+    }
+
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        return GetLastSubmittedCommandSerial();
+    }
+
+    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
+        mPendingOperations.emplace_back(std::move(operation));
+    }
+
+    MaybeError Device::SubmitPendingOperations() {
+        for (auto& operation : mPendingOperations) {
+            operation->Execute();
+        }
+        mPendingOperations.clear();
+
+        DAWN_TRY(CheckPassedSerials());
+        IncrementLastSubmittedCommandSerial();
+
+        return {};
+    }
+
+    // BindGroupDataHolder
+
+    BindGroupDataHolder::BindGroupDataHolder(size_t size)
+        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
+                                                // pointer aligned enough for the allocation
+    {
+    }
+
+    BindGroupDataHolder::~BindGroupDataHolder() {
+        free(mBindingDataAllocation);
+    }
+
+    // BindGroup
+
+    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
+        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
+          BindGroupBase(device, descriptor, mBindingDataAllocation) {
+    }
+
+    // BindGroupLayout
+
+    BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                     const BindGroupLayoutDescriptor* descriptor,
+                                     PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
+    }
+
+    // Buffer
+
+    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+        : BufferBase(device, descriptor) {
+        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
+        mAllocatedSize = GetSize();
+    }
+
+    bool Buffer::IsCPUWritableAtCreation() const {
+        // Only return true for mappable buffers so we can test cases that need / don't need a
+        // staging buffer.
+        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+    }
+
+    MaybeError Buffer::MapAtCreationImpl() {
+        return {};
+    }
+
+    void Buffer::CopyFromStaging(StagingBufferBase* staging,
+                                 uint64_t sourceOffset,
+                                 uint64_t destinationOffset,
+                                 uint64_t size) {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
+        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
+    }
+
+    void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+        ASSERT(bufferOffset + size <= GetSize());
+        ASSERT(mBackingData);
+        memcpy(mBackingData.get() + bufferOffset, data, size);
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        return {};
+    }
+
+    void* Buffer::GetMappedPointerImpl() {
+        return mBackingData.get();
+    }
+
+    void Buffer::UnmapImpl() {
+    }
+
+    void Buffer::DestroyImpl() {
+        BufferBase::DestroyImpl();
+        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
+    }
+
+    // CommandBuffer
+
+    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+        : CommandBufferBase(encoder, descriptor) {
+    }
+
+    // QuerySet
+
+    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+        : QuerySetBase(device, descriptor) {
+    }
+
+    // Queue
+
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    Queue::~Queue() {
+    }
+
+    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
+        Device* device = ToBackend(GetDevice());
+
+        // The Vulkan, D3D12 and Metal implementation all tick the device here,
+        // for testing purposes we should also tick in the null implementation.
+        DAWN_TRY(device->Tick());
+
+        return device->SubmitPendingOperations();
+    }
+
+    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+                                      uint64_t bufferOffset,
+                                      const void* data,
+                                      size_t size) {
+        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
+        return {};
+    }
+
+    // ComputePipeline
+    MaybeError ComputePipeline::Initialize() {
+        return {};
+    }
+
+    // RenderPipeline
+    MaybeError RenderPipeline::Initialize() {
+        return {};
+    }
+
+    // SwapChain
+
+    // static
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        if (previousSwapChain != nullptr) {
+            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+            // multiple backends one after the other. It probably needs to block until the backend
+            // and GPU are completely finished with the previous swapchain.
+            if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
+                return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
+            }
+        }
+
+        return {};
+    }
+
+    SwapChain::~SwapChain() = default;
+
+    MaybeError SwapChain::PresentImpl() {
+        mTexture->APIDestroy();
+        mTexture = nullptr;
+        return {};
+    }
+
+    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+        mTexture = AcquireRef(
+            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
+        return mTexture->CreateView();
+    }
+
+    void SwapChain::DetachFromSurfaceImpl() {
+        if (mTexture != nullptr) {
+            mTexture->APIDestroy();
+            mTexture = nullptr;
+        }
+    }
+
+    // ShaderModule
+
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        return InitializeBase(parseResult);
+    }
+
+    // OldSwapChain
+
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        im.Init(im.userData, nullptr);
+    }
+
+    OldSwapChain::~OldSwapChain() {
+    }
+
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        return GetDevice()->APICreateTexture(descriptor);
+    }
+
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+        return {};
+    }
+
+    // NativeSwapChainImpl
+
+    void NativeSwapChainImpl::Init(WSIContext* context) {
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                      WGPUTextureUsage,
+                                                      uint32_t width,
+                                                      uint32_t height) {
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Present() {
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+        return wgpu::TextureFormat::RGBA8Unorm;
+    }
+
+    // StagingBuffer
+
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    StagingBuffer::~StagingBuffer() {
+        if (mBuffer) {
+            mDevice->DecrementMemoryUsage(GetSize());
+        }
+    }
+
+    MaybeError StagingBuffer::Initialize() {
+        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
+        mBuffer = std::make_unique<uint8_t[]>(GetSize());
+        mMappedPointer = mBuffer.get();
+        return {};
+    }
+
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
+    float Device::GetTimestampPeriodInNS() const {
+        return 1.0f;
+    }
+
+}  // namespace dawn::native::null
diff --git a/src/dawn/native/null/DeviceNull.h b/src/dawn/native/null/DeviceNull.h
new file mode 100644
index 0000000..d810c06
--- /dev/null
+++ b/src/dawn/native/null/DeviceNull.h
@@ -0,0 +1,340 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_NULL_DEVICENULL_H_
+#define DAWNNATIVE_NULL_DEVICENULL_H_
+
+#include "dawn/native/Adapter.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/SwapChain.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ToBackend.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::null {
+
+    class Adapter;
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class ComputePipeline;
+    class Device;
+    using PipelineLayout = PipelineLayoutBase;
+    class QuerySet;
+    class Queue;
+    class RenderPipeline;
+    using Sampler = SamplerBase;
+    class ShaderModule;
+    class SwapChain;
+    using Texture = TextureBase;
+    using TextureView = TextureViewBase;
+
+    struct NullBackendTraits {
+        using AdapterType = Adapter;
+        using BindGroupType = BindGroup;
+        using BindGroupLayoutType = BindGroupLayout;
+        using BufferType = Buffer;
+        using CommandBufferType = CommandBuffer;
+        using ComputePipelineType = ComputePipeline;
+        using DeviceType = Device;
+        using PipelineLayoutType = PipelineLayout;
+        using QuerySetType = QuerySet;
+        using QueueType = Queue;
+        using RenderPipelineType = RenderPipeline;
+        using SamplerType = Sampler;
+        using ShaderModuleType = ShaderModule;
+        using SwapChainType = SwapChain;
+        using TextureType = Texture;
+        using TextureViewType = TextureView;
+    };
+
+    template <typename T>
+    auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
+        return ToBackendBase<NullBackendTraits>(common);
+    }
+
+    struct PendingOperation {
+        virtual ~PendingOperation() = default;
+        virtual void Execute() = 0;
+    };
+
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+                                                 const DeviceDescriptor* descriptor);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+
+        MaybeError TickImpl() override;
+
+        void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
+        MaybeError SubmitPendingOperations();
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        MaybeError IncrementMemoryUsage(uint64_t bytes);
+        void DecrementMemoryUsage(uint64_t bytes);
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+      private:
+        using DeviceBase::DeviceBase;
+
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+
+        std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
+
+        static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
+        size_t mMemoryUsage = 0;
+    };
+
+    class Adapter : public AdapterBase {
+      public:
+        Adapter(InstanceBase* instance);
+        ~Adapter() override;
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override;
+
+        // Used for the tests that intend to use an adapter without all features enabled.
+        void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
+
+      private:
+        MaybeError InitializeImpl() override;
+        MaybeError InitializeSupportedFeaturesImpl() override;
+        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override;
+    };
+
+    // Helper class so |BindGroup| can allocate memory for its binding data,
+    // before calling the BindGroupBase base class constructor.
+    class BindGroupDataHolder {
+      protected:
+        explicit BindGroupDataHolder(size_t size);
+        ~BindGroupDataHolder();
+
+        void* mBindingDataAllocation;
+    };
+
+    // We don't have the complexity of placement-allocation of bind group data in
+    // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
+    class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
+      public:
+        BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
+
+      private:
+        ~BindGroup() override = default;
+    };
+
+    class BindGroupLayout final : public BindGroupLayoutBase {
+      public:
+        BindGroupLayout(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+
+      private:
+        ~BindGroupLayout() override = default;
+    };
+
+    class Buffer final : public BufferBase {
+      public:
+        Buffer(Device* device, const BufferDescriptor* descriptor);
+
+        void CopyFromStaging(StagingBufferBase* staging,
+                             uint64_t sourceOffset,
+                             uint64_t destinationOffset,
+                             uint64_t size);
+
+        void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
+
+      private:
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+        void UnmapImpl() override;
+        void DestroyImpl() override;
+        bool IsCPUWritableAtCreation() const override;
+        MaybeError MapAtCreationImpl() override;
+        void* GetMappedPointerImpl() override;
+
+        std::unique_ptr<uint8_t[]> mBackingData;
+    };
+
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+    };
+
+    class QuerySet final : public QuerySetBase {
+      public:
+        QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+    };
+
+    class Queue final : public QueueBase {
+      public:
+        Queue(Device* device);
+
+      private:
+        ~Queue() override;
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+        MaybeError WriteBufferImpl(BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   const void* data,
+                                   size_t size) override;
+    };
+
+    class ComputePipeline final : public ComputePipelineBase {
+      public:
+        using ComputePipelineBase::ComputePipelineBase;
+
+        MaybeError Initialize() override;
+    };
+
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        using RenderPipelineBase::RenderPipelineBase;
+
+        MaybeError Initialize() override;
+    };
+
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        using ShaderModuleBase::ShaderModuleBase;
+
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+    class SwapChain final : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+        ~SwapChain() override;
+
+      private:
+        using NewSwapChainBase::NewSwapChainBase;
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+        Ref<Texture> mTexture;
+
+        MaybeError PresentImpl() override;
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+    };
+
+    class OldSwapChain final : public OldSwapChainBase {
+      public:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        ~OldSwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase*) override;
+    };
+
+    class NativeSwapChainImpl {
+      public:
+        using WSIContext = struct {};
+        void Init(WSIContext* context);
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage,
+                                     uint32_t width,
+                                     uint32_t height);
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+        DawnSwapChainError Present();
+        wgpu::TextureFormat GetPreferredFormat() const;
+    };
+
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+        ~StagingBuffer() override;
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        std::unique_ptr<uint8_t[]> mBuffer;
+    };
+
+}  // namespace dawn::native::null
+
+#endif  // DAWNNATIVE_NULL_DEVICENULL_H_
diff --git a/src/dawn/native/null/NullBackend.cpp b/src/dawn/native/null/NullBackend.cpp
new file mode 100644
index 0000000..43637cd
--- /dev/null
+++ b/src/dawn/native/null/NullBackend.cpp
@@ -0,0 +1,32 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NullBackend.cpp: contains the definition of symbols exported by NullBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/NullBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/null/DeviceNull.h"
+
+namespace dawn::native::null {
+
+    DawnSwapChainImplementation CreateNativeSwapChainImpl() {
+        DawnSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
+        impl.textureUsage = WGPUTextureUsage_Present;
+        return impl;
+    }
+
+}  // namespace dawn::native::null
diff --git a/src/dawn/native/opengl/BackendGL.cpp b/src/dawn/native/opengl/BackendGL.cpp
new file mode 100644
index 0000000..aac0c14
--- /dev/null
+++ b/src/dawn/native/opengl/BackendGL.cpp
@@ -0,0 +1,306 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BackendGL.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+#include <cstring>
+
+namespace dawn::native::opengl {
+
+    namespace {
+
+        struct Vendor {
+            const char* vendorName;
+            uint32_t vendorId;
+        };
+
+        const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
+                                   {"ARM", gpu_info::kVendorID_ARM},
+                                   {"Imagination", gpu_info::kVendorID_ImgTec},
+                                   {"Intel", gpu_info::kVendorID_Intel},
+                                   {"NVIDIA", gpu_info::kVendorID_Nvidia},
+                                   {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
+
+        uint32_t GetVendorIdFromVendors(const char* vendor) {
+            uint32_t vendorId = 0;
+            for (const auto& it : kVendors) {
+                // Matching vendor name with vendor string
+                if (strstr(vendor, it.vendorName) != nullptr) {
+                    vendorId = it.vendorId;
+                    break;
+                }
+            }
+            return vendorId;
+        }
+
+        void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
+                                               GLenum type,
+                                               GLuint id,
+                                               GLenum severity,
+                                               GLsizei length,
+                                               const GLchar* message,
+                                               const void* userParam) {
+            const char* sourceText;
+            switch (source) {
+                case GL_DEBUG_SOURCE_API:
+                    sourceText = "OpenGL";
+                    break;
+                case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
+                    sourceText = "Window System";
+                    break;
+                case GL_DEBUG_SOURCE_SHADER_COMPILER:
+                    sourceText = "Shader Compiler";
+                    break;
+                case GL_DEBUG_SOURCE_THIRD_PARTY:
+                    sourceText = "Third Party";
+                    break;
+                case GL_DEBUG_SOURCE_APPLICATION:
+                    sourceText = "Application";
+                    break;
+                case GL_DEBUG_SOURCE_OTHER:
+                    sourceText = "Other";
+                    break;
+                default:
+                    sourceText = "UNKNOWN";
+                    break;
+            }
+
+            const char* severityText;
+            switch (severity) {
+                case GL_DEBUG_SEVERITY_HIGH:
+                    severityText = "High";
+                    break;
+                case GL_DEBUG_SEVERITY_MEDIUM:
+                    severityText = "Medium";
+                    break;
+                case GL_DEBUG_SEVERITY_LOW:
+                    severityText = "Low";
+                    break;
+                case GL_DEBUG_SEVERITY_NOTIFICATION:
+                    severityText = "Notification";
+                    break;
+                default:
+                    severityText = "UNKNOWN";
+                    break;
+            }
+
+            if (type == GL_DEBUG_TYPE_ERROR) {
+                dawn::WarningLog() << "OpenGL error:"
+                                   << "\n    Source: " << sourceText      //
+                                   << "\n    ID: " << id                  //
+                                   << "\n    Severity: " << severityText  //
+                                   << "\n    Message: " << message;
+
+                // Abort on an error when in Debug mode.
+                UNREACHABLE();
+            }
+        }
+
+    }  // anonymous namespace
+
+    // The OpenGL backend's Adapter.
+
+    class Adapter : public AdapterBase {
+      public:
+        Adapter(InstanceBase* instance, wgpu::BackendType backendType)
+            : AdapterBase(instance, backendType) {
+        }
+
+        MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
+            // Use getProc to populate the dispatch table
+            return mFunctions.Initialize(getProc);
+        }
+
+        ~Adapter() override = default;
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override {
+            // Via dawn::native::opengl::WrapExternalEGLImage
+            return GetBackendType() == wgpu::BackendType::OpenGLES;
+        }
+
+      private:
+        MaybeError InitializeImpl() override {
+            if (mFunctions.GetVersion().IsES()) {
+                ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
+            } else {
+                ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
+            }
+
+            // Use the debug output functionality to get notified about GL errors
+            // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
+            // extensions
+            bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
+
+            if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
+                mFunctions.Enable(GL_DEBUG_OUTPUT);
+                mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
+
+                // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
+                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH,
+                                               0, nullptr, GL_TRUE);
+
+                // Severe performance warnings; GLSL or other shader compiler and linker warnings;
+                // use of currently deprecated behavior
+                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM,
+                                               0, nullptr, GL_TRUE);
+
+                // Performance warnings from redundant state changes; trivial undefined behavior
+                // This is disabled because we do an incredible amount of redundant state changes.
+                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
+                                               nullptr, GL_FALSE);
+
+                // Any message which is not an error or performance concern
+                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
+                                               GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr,
+                                               GL_FALSE);
+                mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
+            }
+
+            // Set state that never changes between devices.
+            mFunctions.Enable(GL_DEPTH_TEST);
+            mFunctions.Enable(GL_SCISSOR_TEST);
+            mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
+            if (mFunctions.GetVersion().IsDesktop()) {
+                // These are not necessary on GLES. The functionality is enabled by default, and
+                // works by specifying sample counts and SRGB textures, respectively.
+                mFunctions.Enable(GL_MULTISAMPLE);
+                mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
+            }
+            mFunctions.Enable(GL_SAMPLE_MASK);
+
+            mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
+
+            // Workaroud to find vendor id from vendor name
+            const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
+            mVendorId = GetVendorIdFromVendors(vendor);
+
+            mDriverDescription = std::string("OpenGL version ") +
+                                 reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
+
+            if (mName.find("SwiftShader") != std::string::npos) {
+                mAdapterType = wgpu::AdapterType::CPU;
+            }
+
+            return {};
+        }
+
+        MaybeError InitializeSupportedFeaturesImpl() override {
+            // TextureCompressionBC
+            {
+                // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
+                bool supportsS3TC =
+                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
+                    (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
+                     mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
+                     mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
+
+                // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
+                // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
+                // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
+                // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
+                bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
+
+                // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
+                // NVidia GLES drivers don't support this extension, but they do support
+                // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
+                // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
+                // SRGB support even if S3TC is supported; see
+                // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
+                bool supportsS3TCSRGB =
+                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
+                    mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
+
+                // BC4 and BC5
+                bool supportsRGTC =
+                    mFunctions.IsAtLeastGL(3, 0) ||
+                    mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
+                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
+
+                // BC6 and BC7
+                bool supportsBPTC =
+                    mFunctions.IsAtLeastGL(4, 2) ||
+                    mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
+                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
+
+                if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
+                    supportsBPTC) {
+                    mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
+                }
+                mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+            }
+
+            return {};
+        }
+
+        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+            GetDefaultLimits(&limits->v1);
+            return {};
+        }
+
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override {
+            // There is no limit on the number of devices created from this adapter because they can
+            // all share the same backing OpenGL context.
+            return Device::Create(this, descriptor, mFunctions);
+        }
+
+        OpenGLFunctions mFunctions;
+    };
+
+    // Implementation of the OpenGL backend's BackendConnection
+
+    Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
+        : BackendConnection(instance, backendType) {
+    }
+
+    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+        // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
+        return {};
+    }
+
+    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) {
+        // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
+        // know how to handle MakeCurrent.
+        DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
+
+        ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
+        const AdapterDiscoveryOptions* options =
+            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+        DAWN_INVALID_IF(options->getProc == nullptr,
+                        "AdapterDiscoveryOptions::getProc must be set");
+
+        Ref<Adapter> adapter = AcquireRef(
+            new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
+        DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
+        DAWN_TRY(adapter->Initialize());
+
+        mCreatedAdapter = true;
+        std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
+        return std::move(adapters);
+    }
+
+    BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
+        return new Backend(instance, backendType);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BackendGL.h b/src/dawn/native/opengl/BackendGL.h
new file mode 100644
index 0000000..12e7b47
--- /dev/null
+++ b/src/dawn/native/opengl/BackendGL.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BACKENDGL_H_
+#define DAWNNATIVE_OPENGL_BACKENDGL_H_
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native::opengl {
+
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance, wgpu::BackendType backendType);
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* options) override;
+
+      private:
+        bool mCreatedAdapter = false;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_BACKENDGL_H_
diff --git a/src/dawn/native/opengl/BindGroupGL.cpp b/src/dawn/native/opengl/BindGroupGL.cpp
new file mode 100644
index 0000000..6573a92
--- /dev/null
+++ b/src/dawn/native/opengl/BindGroupGL.cpp
@@ -0,0 +1,65 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BindGroupGL.h"
+
+#include "dawn/native/Texture.h"
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
+        const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+            const BindGroupEntry& entry = descriptor->entries[i];
+
+            const auto& it = bindingMap.find(BindingNumber(entry.binding));
+            BindingIndex bindingIndex = it->second;
+            ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+            const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+            if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
+                ASSERT(entry.textureView != nullptr);
+                const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
+                DAWN_INVALID_IF(
+                    textureViewLayerCount != 1 &&
+                        textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
+                    "%s binds %u layers. Currently the OpenGL backend only supports either binding "
+                    "1 layer or the all layers (%u) for storage texture.",
+                    entry.textureView, textureViewLayerCount,
+                    entry.textureView->GetTexture()->GetArrayLayers());
+            }
+        }
+
+        return {};
+    }
+
+    BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+        : BindGroupBase(this, device, descriptor) {
+    }
+
+    BindGroup::~BindGroup() = default;
+
+    void BindGroup::DestroyImpl() {
+        BindGroupBase::DestroyImpl();
+        ToBackend(GetLayout())->DeallocateBindGroup(this);
+    }
+
+    // static
+    Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BindGroupGL.h b/src/dawn/native/opengl/BindGroupGL.h
new file mode 100644
index 0000000..9d6ccec
--- /dev/null
+++ b/src/dawn/native/opengl/BindGroupGL.h
@@ -0,0 +1,41 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/native/BindGroup.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
+
+    class BindGroup final : public BindGroupBase, public PlacementAllocated {
+      public:
+        static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+
+        BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+
+      private:
+        ~BindGroup() override;
+
+        void DestroyImpl() override;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_BINDGROUPGL_H_
diff --git a/src/dawn/native/opengl/BindGroupLayoutGL.cpp b/src/dawn/native/opengl/BindGroupLayoutGL.cpp
new file mode 100644
index 0000000..1cc1474
--- /dev/null
+++ b/src/dawn/native/opengl/BindGroupLayoutGL.cpp
@@ -0,0 +1,37 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+
+#include "dawn/native/opengl/BindGroupGL.h"
+
+namespace dawn::native::opengl {
+
+    BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                     const BindGroupLayoutDescriptor* descriptor,
+                                     PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+    }
+
+    Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+                                                      const BindGroupDescriptor* descriptor) {
+        return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+    }
+
+    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+        mBindGroupAllocator.Deallocate(bindGroup);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BindGroupLayoutGL.h b/src/dawn/native/opengl/BindGroupLayoutGL.h
new file mode 100644
index 0000000..1cb3cc2
--- /dev/null
+++ b/src/dawn/native/opengl/BindGroupLayoutGL.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/native/BindGroupLayout.h"
+
+namespace dawn::native::opengl {
+
+    class BindGroup;
+    class Device;
+
+    class BindGroupLayout final : public BindGroupLayoutBase {
+      public:
+        BindGroupLayout(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+
+        Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+        void DeallocateBindGroup(BindGroup* bindGroup);
+
+      private:
+        ~BindGroupLayout() override = default;
+        SlabAllocator<BindGroup> mBindGroupAllocator;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
diff --git a/src/dawn/native/opengl/BufferGL.cpp b/src/dawn/native/opengl/BufferGL.cpp
new file mode 100644
index 0000000..fde83bc
--- /dev/null
+++ b/src/dawn/native/opengl/BufferGL.cpp
@@ -0,0 +1,184 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BufferGL.h"
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    // Buffer
+
+    // static
+    ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
+                                                            const BufferDescriptor* descriptor,
+                                                            bool shouldLazyClear) {
+        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
+        if (descriptor->mappedAtCreation) {
+            DAWN_TRY(buffer->MapAtCreationInternal());
+        }
+
+        return std::move(buffer);
+    }
+
+    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+        : BufferBase(device, descriptor) {
+        // Allocate at least 4 bytes so clamped accesses are always in bounds.
+        mAllocatedSize = std::max(GetSize(), uint64_t(4u));
+
+        device->gl.GenBuffers(1, &mBuffer);
+        device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+
+        // The buffers with mappedAtCreation == true will be initialized in
+        // BufferBase::MapAtCreation().
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+            !descriptor->mappedAtCreation) {
+            std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
+            device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(),
+                                  GL_STATIC_DRAW);
+        } else {
+            // Buffers start zeroed if you pass nullptr to glBufferData.
+            device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
+        }
+    }
+
+    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
+        : Buffer(device, descriptor) {
+        if (!shouldLazyClear) {
+            SetIsDataInitialized();
+        }
+    }
+
+    Buffer::~Buffer() = default;
+
+    GLuint Buffer::GetHandle() const {
+        return mBuffer;
+    }
+
+    bool Buffer::EnsureDataInitialized() {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        InitializeToZero();
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferRange(offset, size)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero();
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero();
+        return true;
+    }
+
+    void Buffer::InitializeToZero() {
+        ASSERT(NeedsInitialization());
+
+        const uint64_t size = GetAllocatedSize();
+        Device* device = ToBackend(GetDevice());
+
+        const std::vector<uint8_t> clearValues(size, 0u);
+        device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+        device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
+        device->IncrementLazyClearCountForTesting();
+
+        SetIsDataInitialized();
+    }
+
+    bool Buffer::IsCPUWritableAtCreation() const {
+        // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
+        // driver to migrate it to shared memory.
+        return true;
+    }
+
+    MaybeError Buffer::MapAtCreationImpl() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+        mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
+        return {};
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
+        // so we extend the range to be 4 bytes.
+        if (size == 0) {
+            if (offset != 0) {
+                offset -= 4;
+            }
+            size = 4;
+        }
+
+        EnsureDataInitialized();
+
+        // This does GPU->CPU synchronization, we could require a high
+        // version of OpenGL that would let us map the buffer unsynchronized.
+        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+        void* mappedData = nullptr;
+        if (mode & wgpu::MapMode::Read) {
+            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
+        }
+
+        // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
+        // the resource but OpenGL gives us the pointer at offset. Remove the offset.
+        mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+        return {};
+    }
+
+    void* Buffer::GetMappedPointerImpl() {
+        // The mapping offset has already been removed.
+        return mMappedData;
+    }
+
+    void Buffer::UnmapImpl() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+        gl.UnmapBuffer(GL_ARRAY_BUFFER);
+        mMappedData = nullptr;
+    }
+
+    void Buffer::DestroyImpl() {
+        BufferBase::DestroyImpl();
+        ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
+        mBuffer = 0;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BufferGL.h b/src/dawn/native/opengl/BufferGL.h
new file mode 100644
index 0000000..2cd1ae6
--- /dev/null
+++ b/src/dawn/native/opengl/BufferGL.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BUFFERGL_H_
+#define DAWNNATIVE_OPENGL_BUFFERGL_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class Buffer final : public BufferBase {
+      public:
+        static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
+                                                               const BufferDescriptor* descriptor,
+                                                               bool shouldLazyClear);
+
+        Buffer(Device* device, const BufferDescriptor* descriptor);
+
+        GLuint GetHandle() const;
+
+        bool EnsureDataInitialized();
+        bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
+        bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
+
+      private:
+        Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
+        ~Buffer() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+        void UnmapImpl() override;
+        void DestroyImpl() override;
+        bool IsCPUWritableAtCreation() const override;
+        MaybeError MapAtCreationImpl() override;
+        void* GetMappedPointerImpl() override;
+
+        void InitializeToZero();
+
+        GLuint mBuffer = 0;
+        void* mMappedData = nullptr;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_BUFFERGL_H_
diff --git a/src/dawn/native/opengl/CommandBufferGL.cpp b/src/dawn/native/opengl/CommandBufferGL.cpp
new file mode 100644
index 0000000..8971979
--- /dev/null
+++ b/src/dawn/native/opengl/CommandBufferGL.cpp
@@ -0,0 +1,1348 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/CommandBufferGL.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/ComputePipelineGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/RenderPipelineGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+#include <cstring>
+
+namespace dawn::native::opengl {
+
+    namespace {
+
+        GLenum IndexFormatType(wgpu::IndexFormat format) {
+            switch (format) {
+                case wgpu::IndexFormat::Uint16:
+                    return GL_UNSIGNED_SHORT;
+                case wgpu::IndexFormat::Uint32:
+                    return GL_UNSIGNED_INT;
+                case wgpu::IndexFormat::Undefined:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        GLenum VertexFormatType(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                case wgpu::VertexFormat::Uint8x4:
+                case wgpu::VertexFormat::Unorm8x2:
+                case wgpu::VertexFormat::Unorm8x4:
+                    return GL_UNSIGNED_BYTE;
+                case wgpu::VertexFormat::Sint8x2:
+                case wgpu::VertexFormat::Sint8x4:
+                case wgpu::VertexFormat::Snorm8x2:
+                case wgpu::VertexFormat::Snorm8x4:
+                    return GL_BYTE;
+                case wgpu::VertexFormat::Uint16x2:
+                case wgpu::VertexFormat::Uint16x4:
+                case wgpu::VertexFormat::Unorm16x2:
+                case wgpu::VertexFormat::Unorm16x4:
+                    return GL_UNSIGNED_SHORT;
+                case wgpu::VertexFormat::Sint16x2:
+                case wgpu::VertexFormat::Sint16x4:
+                case wgpu::VertexFormat::Snorm16x2:
+                case wgpu::VertexFormat::Snorm16x4:
+                    return GL_SHORT;
+                case wgpu::VertexFormat::Float16x2:
+                case wgpu::VertexFormat::Float16x4:
+                    return GL_HALF_FLOAT;
+                case wgpu::VertexFormat::Float32:
+                case wgpu::VertexFormat::Float32x2:
+                case wgpu::VertexFormat::Float32x3:
+                case wgpu::VertexFormat::Float32x4:
+                    return GL_FLOAT;
+                case wgpu::VertexFormat::Uint32:
+                case wgpu::VertexFormat::Uint32x2:
+                case wgpu::VertexFormat::Uint32x3:
+                case wgpu::VertexFormat::Uint32x4:
+                    return GL_UNSIGNED_INT;
+                case wgpu::VertexFormat::Sint32:
+                case wgpu::VertexFormat::Sint32x2:
+                case wgpu::VertexFormat::Sint32x3:
+                case wgpu::VertexFormat::Sint32x4:
+                    return GL_INT;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Unorm8x2:
+                case wgpu::VertexFormat::Unorm8x4:
+                case wgpu::VertexFormat::Snorm8x2:
+                case wgpu::VertexFormat::Snorm8x4:
+                case wgpu::VertexFormat::Unorm16x2:
+                case wgpu::VertexFormat::Unorm16x4:
+                case wgpu::VertexFormat::Snorm16x2:
+                case wgpu::VertexFormat::Snorm16x4:
+                    return GL_TRUE;
+                default:
+                    return GL_FALSE;
+            }
+        }
+
+        bool VertexFormatIsInt(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                case wgpu::VertexFormat::Uint8x4:
+                case wgpu::VertexFormat::Sint8x2:
+                case wgpu::VertexFormat::Sint8x4:
+                case wgpu::VertexFormat::Uint16x2:
+                case wgpu::VertexFormat::Uint16x4:
+                case wgpu::VertexFormat::Sint16x2:
+                case wgpu::VertexFormat::Sint16x4:
+                case wgpu::VertexFormat::Uint32:
+                case wgpu::VertexFormat::Uint32x2:
+                case wgpu::VertexFormat::Uint32x3:
+                case wgpu::VertexFormat::Uint32x4:
+                case wgpu::VertexFormat::Sint32:
+                case wgpu::VertexFormat::Sint32x2:
+                case wgpu::VertexFormat::Sint32x3:
+                case wgpu::VertexFormat::Sint32x4:
+                    return true;
+                default:
+                    return false;
+            }
+        }
+
+        // Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
+        // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
+        // This means that we have to re-apply these buffers on a VertexState change.
+        class VertexStateBufferBindingTracker {
+          public:
+            void OnSetIndexBuffer(BufferBase* buffer) {
+                mIndexBufferDirty = true;
+                mIndexBuffer = ToBackend(buffer);
+            }
+
+            void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
+                mVertexBuffers[slot] = ToBackend(buffer);
+                mVertexBufferOffsets[slot] = offset;
+                mDirtyVertexBuffers.set(slot);
+            }
+
+            void OnSetPipeline(RenderPipelineBase* pipeline) {
+                if (mLastPipeline == pipeline) {
+                    return;
+                }
+
+                mIndexBufferDirty = true;
+                mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+
+                mLastPipeline = pipeline;
+            }
+
+            void Apply(const OpenGLFunctions& gl) {
+                if (mIndexBufferDirty && mIndexBuffer != nullptr) {
+                    gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
+                    mIndexBufferDirty = false;
+                }
+
+                for (VertexBufferSlot slot : IterateBitSet(
+                         mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
+                    for (VertexAttributeLocation location : IterateBitSet(
+                             ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
+                        const VertexAttributeInfo& attribute =
+                            mLastPipeline->GetAttribute(location);
+
+                        GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
+                        GLuint buffer = mVertexBuffers[slot]->GetHandle();
+                        uint64_t offset = mVertexBufferOffsets[slot];
+
+                        const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
+                        uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
+                        GLenum formatType = VertexFormatType(attribute.format);
+
+                        GLboolean normalized = VertexFormatIsNormalized(attribute.format);
+                        gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
+                        if (VertexFormatIsInt(attribute.format)) {
+                            gl.VertexAttribIPointer(
+                                attribIndex, components, formatType, vertexBuffer.arrayStride,
+                                reinterpret_cast<void*>(
+                                    static_cast<intptr_t>(offset + attribute.offset)));
+                        } else {
+                            gl.VertexAttribPointer(attribIndex, components, formatType, normalized,
+                                                   vertexBuffer.arrayStride,
+                                                   reinterpret_cast<void*>(static_cast<intptr_t>(
+                                                       offset + attribute.offset)));
+                        }
+                    }
+                }
+
+                mDirtyVertexBuffers.reset();
+            }
+
+          private:
+            bool mIndexBufferDirty = false;
+            Buffer* mIndexBuffer = nullptr;
+
+            ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+            ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
+            ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
+
+            RenderPipelineBase* mLastPipeline = nullptr;
+        };
+
+        class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
+          public:
+            void OnSetPipeline(RenderPipeline* pipeline) {
+                BindGroupTrackerBase::OnSetPipeline(pipeline);
+                mPipeline = pipeline;
+            }
+
+            void OnSetPipeline(ComputePipeline* pipeline) {
+                BindGroupTrackerBase::OnSetPipeline(pipeline);
+                mPipeline = pipeline;
+            }
+
+            void Apply(const OpenGLFunctions& gl) {
+                BeforeApply();
+                for (BindGroupIndex index :
+                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+                    ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
+                                   mDynamicOffsets[index].data());
+                }
+                AfterApply();
+            }
+
+          private:
+            void ApplyBindGroup(const OpenGLFunctions& gl,
+                                BindGroupIndex index,
+                                BindGroupBase* group,
+                                uint32_t dynamicOffsetCount,
+                                uint64_t* dynamicOffsets) {
+                const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
+                uint32_t currentDynamicOffsetIndex = 0;
+
+                for (BindingIndex bindingIndex{0};
+                     bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+                    const BindingInfo& bindingInfo =
+                        group->GetLayout()->GetBindingInfo(bindingIndex);
+
+                    switch (bindingInfo.bindingType) {
+                        case BindingInfoType::Buffer: {
+                            BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+                            GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+                            GLuint index = indices[bindingIndex];
+                            GLuint offset = binding.offset;
+
+                            if (bindingInfo.buffer.hasDynamicOffset) {
+                                offset += dynamicOffsets[currentDynamicOffsetIndex];
+                                ++currentDynamicOffsetIndex;
+                            }
+
+                            GLenum target;
+                            switch (bindingInfo.buffer.type) {
+                                case wgpu::BufferBindingType::Uniform:
+                                    target = GL_UNIFORM_BUFFER;
+                                    break;
+                                case wgpu::BufferBindingType::Storage:
+                                case kInternalStorageBufferBinding:
+                                case wgpu::BufferBindingType::ReadOnlyStorage:
+                                    target = GL_SHADER_STORAGE_BUFFER;
+                                    break;
+                                case wgpu::BufferBindingType::Undefined:
+                                    UNREACHABLE();
+                            }
+
+                            gl.BindBufferRange(target, index, buffer, offset, binding.size);
+                            break;
+                        }
+
+                        case BindingInfoType::Sampler: {
+                            Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+                            GLuint samplerIndex = indices[bindingIndex];
+
+                            for (PipelineGL::SamplerUnit unit :
+                                 mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
+                                // Only use filtering for certain texture units, because int
+                                // and uint texture are only complete without filtering
+                                if (unit.shouldUseFiltering) {
+                                    gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
+                                } else {
+                                    gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
+                                }
+                            }
+                            break;
+                        }
+
+                        case BindingInfoType::Texture: {
+                            TextureView* view =
+                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                            GLuint handle = view->GetHandle();
+                            GLenum target = view->GetGLTarget();
+                            GLuint viewIndex = indices[bindingIndex];
+
+                            for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
+                                gl.ActiveTexture(GL_TEXTURE0 + unit);
+                                gl.BindTexture(target, handle);
+                                if (ToBackend(view->GetTexture())->GetGLFormat().format ==
+                                    GL_DEPTH_STENCIL) {
+                                    Aspect aspect = view->GetAspects();
+                                    ASSERT(HasOneBit(aspect));
+                                    switch (aspect) {
+                                        case Aspect::None:
+                                        case Aspect::Color:
+                                        case Aspect::CombinedDepthStencil:
+                                        case Aspect::Plane0:
+                                        case Aspect::Plane1:
+                                            UNREACHABLE();
+                                        case Aspect::Depth:
+                                            gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+                                                             GL_DEPTH_COMPONENT);
+                                            break;
+                                        case Aspect::Stencil:
+                                            gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+                                                             GL_STENCIL_INDEX);
+                                            break;
+                                    }
+                                }
+                            }
+                            break;
+                        }
+
+                        case BindingInfoType::StorageTexture: {
+                            TextureView* view =
+                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                            Texture* texture = ToBackend(view->GetTexture());
+                            GLuint handle = texture->GetHandle();
+                            GLuint imageIndex = indices[bindingIndex];
+
+                            GLenum access;
+                            switch (bindingInfo.storageTexture.access) {
+                                case wgpu::StorageTextureAccess::WriteOnly:
+                                    access = GL_WRITE_ONLY;
+                                    break;
+                                case wgpu::StorageTextureAccess::Undefined:
+                                    UNREACHABLE();
+                            }
+
+                            // OpenGL ES only supports either binding a layer or the entire
+                            // texture in glBindImageTexture().
+                            GLboolean isLayered;
+                            if (view->GetLayerCount() == 1) {
+                                isLayered = GL_FALSE;
+                            } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
+                                isLayered = GL_TRUE;
+                            } else {
+                                UNREACHABLE();
+                            }
+
+                            gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
+                                                isLayered, view->GetBaseArrayLayer(), access,
+                                                texture->GetGLFormat().internalFormat);
+                            break;
+                        }
+
+                        case BindingInfoType::ExternalTexture:
+                            UNREACHABLE();
+                            break;
+                    }
+                }
+            }
+
+            PipelineGL* mPipeline = nullptr;
+        };
+
+        void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
+                                              const BeginRenderPassCmd* renderPass) {
+            ASSERT(renderPass != nullptr);
+
+            GLuint readFbo = 0;
+            GLuint writeFbo = 0;
+
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+                    if (readFbo == 0) {
+                        ASSERT(writeFbo == 0);
+                        gl.GenFramebuffers(1, &readFbo);
+                        gl.GenFramebuffers(1, &writeFbo);
+                    }
+
+                    TextureView* colorView = ToBackend(renderPass->colorAttachments[i].view.Get());
+
+                    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
+                    colorView->BindToFramebuffer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+
+                    TextureView* resolveView =
+                        ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
+                    resolveView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+                    gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0,
+                                       renderPass->width, renderPass->height, GL_COLOR_BUFFER_BIT,
+                                       GL_NEAREST);
+                }
+            }
+
+            gl.DeleteFramebuffers(1, &readFbo);
+            gl.DeleteFramebuffers(1, &writeFbo);
+        }
+
+        // OpenGL SPEC requires the source/destination region must be a region that is contained
+        // within srcImage/dstImage. Here the size of the image refers to the virtual size, while
+        // Dawn validates texture copy extent with the physical size, so we need to re-calculate the
+        // texture copy extent to ensure it should fit in the virtual size of the subresource.
+        Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy,
+                                          const Extent3D& copySize) {
+            Extent3D validTextureCopyExtent = copySize;
+            const TextureBase* texture = textureCopy.texture.Get();
+            Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+            ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+            ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+            if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+                ASSERT(texture->GetFormat().isCompressed);
+                validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+            }
+            if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+                ASSERT(texture->GetFormat().isCompressed);
+                validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+            }
+
+            return validTextureCopyExtent;
+        }
+
+        bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
+            return format == wgpu::TextureFormat::RGBA8Snorm ||
+                   format == wgpu::TextureFormat::RG8Snorm ||
+                   format == wgpu::TextureFormat::R8Snorm;
+        }
+    }  // namespace
+
+    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+        : CommandBufferBase(encoder, descriptor) {
+    }
+
+    MaybeError CommandBuffer::Execute() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+            for (size_t i = 0; i < scope.textures.size(); i++) {
+                Texture* texture = ToBackend(scope.textures[i]);
+
+                // Clear subresources that are not render attachments. Render attachments will be
+                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+                // subresource has not been initialized before the render pass.
+                scope.textureUsages[i].Iterate(
+                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                            texture->EnsureSubresourceContentInitialized(range);
+                        }
+                    });
+            }
+
+            for (BufferBase* bufferBase : scope.buffers) {
+                ToBackend(bufferBase)->EnsureDataInitialized();
+            }
+        };
+
+        size_t nextComputePassNumber = 0;
+        size_t nextRenderPassNumber = 0;
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::BeginComputePass: {
+                    mCommands.NextCommand<BeginComputePassCmd>();
+                    for (const SyncScopeResourceUsage& scope :
+                         GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+                        LazyClearSyncScope(scope);
+                    }
+                    DAWN_TRY(ExecuteComputePass());
+
+                    nextComputePassNumber++;
+                    break;
+                }
+
+                case Command::BeginRenderPass: {
+                    auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                    LazyClearRenderPassAttachments(cmd);
+                    DAWN_TRY(ExecuteRenderPass(cmd));
+
+                    nextRenderPassNumber++;
+                    break;
+                }
+
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                    if (copy->size == 0) {
+                        // Skip no-op copies.
+                        break;
+                    }
+
+                    ToBackend(copy->source)->EnsureDataInitialized();
+                    ToBackend(copy->destination)
+                        ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+
+                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
+                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
+                                  ToBackend(copy->destination)->GetHandle());
+                    gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
+                                         copy->sourceOffset, copy->destinationOffset, copy->size);
+
+                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+                    break;
+                }
+
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+                    Buffer* buffer = ToBackend(src.buffer.Get());
+
+                    DAWN_INVALID_IF(
+                        dst.aspect == Aspect::Stencil,
+                        "Copies to stencil textures are unsupported on the OpenGL backend.");
+
+                    ASSERT(dst.aspect == Aspect::Color);
+
+                    buffer->EnsureDataInitialized();
+                    SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                      dst.mipLevel)) {
+                        dst.texture->SetIsSubresourceContentInitialized(true, range);
+                    } else {
+                        ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+                    }
+
+                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
+
+                    TextureDataLayout dataLayout;
+                    dataLayout.offset = 0;
+                    dataLayout.bytesPerRow = src.bytesPerRow;
+                    dataLayout.rowsPerImage = src.rowsPerImage;
+
+                    DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
+                                  copy->copySize);
+                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+                    break;
+                }
+
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+                    auto& copySize = copy->copySize;
+                    Texture* texture = ToBackend(src.texture.Get());
+                    Buffer* buffer = ToBackend(dst.buffer.Get());
+                    const Format& formatInfo = texture->GetFormat();
+                    const GLFormat& format = texture->GetGLFormat();
+                    GLenum target = texture->GetGLTarget();
+
+                    // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
+                    // avoid this codepath. OpenGL does not support readback from non-renderable
+                    // texture formats.
+                    if (formatInfo.isCompressed ||
+                        (TextureFormatIsSnorm(formatInfo.format) &&
+                         GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
+                        UNREACHABLE();
+                    }
+
+                    buffer->EnsureDataInitializedAsDestination(copy);
+
+                    ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+                    SubresourceRange subresources =
+                        GetSubresourcesAffectedByCopy(src, copy->copySize);
+                    texture->EnsureSubresourceContentInitialized(subresources);
+                    // The only way to move data from a texture to a buffer in GL is via
+                    // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
+                    gl.BindTexture(target, texture->GetHandle());
+
+                    GLuint readFBO = 0;
+                    gl.GenFramebuffers(1, &readFBO);
+                    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+
+                    const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
+
+                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+                    gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
+
+                    GLenum glAttachment;
+                    GLenum glFormat;
+                    GLenum glType;
+                    switch (src.aspect) {
+                        case Aspect::Color:
+                            glAttachment = GL_COLOR_ATTACHMENT0;
+                            glFormat = format.format;
+                            glType = format.type;
+                            break;
+                        case Aspect::Depth:
+                            glAttachment = GL_DEPTH_ATTACHMENT;
+                            glFormat = GL_DEPTH_COMPONENT;
+                            glType = GL_FLOAT;
+                            break;
+                        case Aspect::Stencil:
+                            glAttachment = GL_STENCIL_ATTACHMENT;
+                            glFormat = GL_STENCIL_INDEX;
+                            glType = GL_UNSIGNED_BYTE;
+                            break;
+
+                        case Aspect::CombinedDepthStencil:
+                        case Aspect::None:
+                        case Aspect::Plane0:
+                        case Aspect::Plane1:
+                            UNREACHABLE();
+                    }
+
+                    uint8_t* offset =
+                        reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
+                    switch (texture->GetDimension()) {
+                        case wgpu::TextureDimension::e2D: {
+                            if (texture->GetArrayLayers() == 1) {
+                                gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
+                                                        texture->GetHandle(), src.mipLevel);
+                                gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+                                              copySize.height, glFormat, glType, offset);
+                                break;
+                            }
+                            // Implementation for 2D array is the same as 3D.
+                            [[fallthrough]];
+                        }
+
+                        case wgpu::TextureDimension::e3D: {
+                            const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
+                            for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
+                                gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+                                                           texture->GetHandle(), src.mipLevel,
+                                                           src.origin.z + z);
+                                gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+                                              copySize.height, glFormat, glType, offset);
+
+                                offset += bytesPerImage;
+                            }
+                            break;
+                        }
+
+                        case wgpu::TextureDimension::e1D:
+                            UNREACHABLE();
+                    }
+
+                    gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
+
+                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                    gl.DeleteFramebuffers(1, &readFBO);
+                    break;
+                }
+
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        mCommands.NextCommand<CopyTextureToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+
+                    // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
+                    // is not equal to imageExtentDst. For example when copySize fits in the virtual
+                    // size of the source image but does not fit in the one of the destination
+                    // image.
+                    Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
+                    Texture* srcTexture = ToBackend(src.texture.Get());
+                    Texture* dstTexture = ToBackend(dst.texture.Get());
+
+                    SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                    SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+                    srcTexture->EnsureSubresourceContentInitialized(srcRange);
+                    if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
+                        dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
+                    } else {
+                        dstTexture->EnsureSubresourceContentInitialized(dstRange);
+                    }
+                    CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(),
+                                     srcTexture->GetGLTarget(), src.mipLevel, src.origin,
+                                     dstTexture->GetHandle(), dstTexture->GetGLTarget(),
+                                     dst.mipLevel, dst.origin, copySize);
+                    break;
+                }
+
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                    if (cmd->size == 0) {
+                        // Skip no-op fills.
+                        break;
+                    }
+                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+
+                    bool clearedToZero =
+                        dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
+
+                    if (!clearedToZero) {
+                        const std::vector<uint8_t> clearValues(cmd->size, 0u);
+                        gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+                        gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size,
+                                         clearValues.data());
+                    }
+
+                    break;
+                }
+
+                case Command::ResolveQuerySet: {
+                    // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
+                    SkipCommand(&mCommands, type);
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+                }
+
+                case Command::InsertDebugMarker:
+                case Command::PopDebugGroup:
+                case Command::PushDebugGroup: {
+                    // Due to lack of linux driver support for GL_EXT_debug_marker
+                    // extension these functions are skipped.
+                    SkipCommand(&mCommands, type);
+                    break;
+                }
+
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                    uint64_t offset = write->offset;
+                    uint64_t size = write->size;
+                    if (size == 0) {
+                        continue;
+                    }
+
+                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                    uint8_t* data = mCommands.NextData<uint8_t>(size);
+                    dstBuffer->EnsureDataInitializedAsDestination(offset, size);
+
+                    gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+                    gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBuffer::ExecuteComputePass() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        ComputePipeline* lastPipeline = nullptr;
+        BindGroupTracker bindGroupTracker = {};
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndComputePass: {
+                    mCommands.NextCommand<EndComputePassCmd>();
+                    return {};
+                }
+
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+                    bindGroupTracker.Apply(gl);
+
+                    gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
+                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                    break;
+                }
+
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                    bindGroupTracker.Apply(gl);
+
+                    uint64_t indirectBufferOffset = dispatch->indirectOffset;
+                    Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
+
+                    gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
+                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                    break;
+                }
+
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                    lastPipeline = ToBackend(cmd->pipeline).Get();
+                    lastPipeline->ApplyNow();
+
+                    bindGroupTracker.OnSetPipeline(lastPipeline);
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                    cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::InsertDebugMarker:
+                case Command::PopDebugGroup:
+                case Command::PushDebugGroup: {
+                    // Due to lack of linux driver support for GL_EXT_debug_marker
+                    // extension these functions are skipped.
+                    SkipCommand(&mCommands, type);
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // EndComputePass should have been called
+        UNREACHABLE();
+    }
+
+    MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        GLuint fbo = 0;
+
+        // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
+        {
+            // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
+            // Windows/Intel. It should break any feedback loop before the clears, even if there
+            // shouldn't be any negative effects from this. Investigate whether it's actually
+            // needed.
+            gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+            // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
+            // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
+            // be created and destroyed at draw time.
+            gl.GenFramebuffers(1, &fbo);
+            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
+
+            // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
+            // (GL_NONE).
+            ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
+
+            // Construct GL framebuffer
+
+            ColorAttachmentIndex attachmentCount(uint8_t(0));
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                TextureView* textureView = ToBackend(renderPass->colorAttachments[i].view.Get());
+                GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
+
+                // Attach color buffers.
+                textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+                drawBuffers[i] = glAttachment;
+                attachmentCount = i;
+                attachmentCount++;
+            }
+            gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                TextureView* textureView = ToBackend(renderPass->depthStencilAttachment.view.Get());
+                const Format& format = textureView->GetTexture()->GetFormat();
+
+                // Attach depth/stencil buffer.
+                GLenum glAttachment = 0;
+                if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
+                    glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+                } else if (format.aspects == Aspect::Depth) {
+                    glAttachment = GL_DEPTH_ATTACHMENT;
+                } else if (format.aspects == Aspect::Stencil) {
+                    glAttachment = GL_STENCIL_ATTACHMENT;
+                } else {
+                    UNREACHABLE();
+                }
+
+                textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+            }
+        }
+
+        ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+
+        // Set defaults for dynamic state before executing clears and commands.
+        PersistentPipelineState persistentPipelineState;
+        persistentPipelineState.SetDefaultState(gl);
+        gl.BlendColor(0, 0, 0, 0);
+        gl.Viewport(0, 0, renderPass->width, renderPass->height);
+        gl.DepthRangef(0.0, 1.0);
+        gl.Scissor(0, 0, renderPass->width, renderPass->height);
+
+        // Clear framebuffer attachments as needed
+        {
+            for (ColorAttachmentIndex index :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                uint8_t i = static_cast<uint8_t>(index);
+                auto* attachmentInfo = &renderPass->colorAttachments[index];
+
+                // Load op - color
+                if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
+                    gl.ColorMask(true, true, true, true);
+
+                    wgpu::TextureComponentType baseType =
+                        attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
+                    switch (baseType) {
+                        case wgpu::TextureComponentType::Float: {
+                            const std::array<float, 4> appliedClearColor =
+                                ConvertToFloatColor(attachmentInfo->clearColor);
+                            gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Uint: {
+                            const std::array<uint32_t, 4> appliedClearColor =
+                                ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
+                            gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Sint: {
+                            const std::array<int32_t, 4> appliedClearColor =
+                                ConvertToSignedIntegerColor(attachmentInfo->clearColor);
+                            gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+
+                        case wgpu::TextureComponentType::DepthComparison:
+                            UNREACHABLE();
+                    }
+                }
+
+                if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
+                    // TODO(natlee@microsoft.com): call glDiscard to do optimization
+                }
+            }
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                auto* attachmentInfo = &renderPass->depthStencilAttachment;
+                const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
+
+                // Load op - depth/stencil
+                bool doDepthClear = attachmentFormat.HasDepth() &&
+                                    (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
+                bool doStencilClear = attachmentFormat.HasStencil() &&
+                                      (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
+
+                if (doDepthClear) {
+                    gl.DepthMask(GL_TRUE);
+                }
+                if (doStencilClear) {
+                    gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
+                }
+
+                if (doDepthClear && doStencilClear) {
+                    gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
+                                     attachmentInfo->clearStencil);
+                } else if (doDepthClear) {
+                    gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
+                } else if (doStencilClear) {
+                    const GLint clearStencil = attachmentInfo->clearStencil;
+                    gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
+                }
+            }
+        }
+
+        RenderPipeline* lastPipeline = nullptr;
+        uint64_t indexBufferBaseOffset = 0;
+        GLenum indexBufferFormat;
+        uint32_t indexFormatSize;
+
+        VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
+        BindGroupTracker bindGroupTracker = {};
+
+        auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    if (draw->firstInstance > 0) {
+                        gl.DrawArraysInstancedBaseInstance(
+                            lastPipeline->GetGLPrimitiveTopology(), draw->firstVertex,
+                            draw->vertexCount, draw->instanceCount, draw->firstInstance);
+                    } else {
+                        // This branch is only needed on OpenGL < 4.2
+                        gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
+                                               draw->firstVertex, draw->vertexCount,
+                                               draw->instanceCount);
+                    }
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    if (draw->firstInstance > 0) {
+                        gl.DrawElementsInstancedBaseVertexBaseInstance(
+                            lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                            indexBufferFormat,
+                            reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                    indexBufferBaseOffset),
+                            draw->instanceCount, draw->baseVertex, draw->firstInstance);
+                    } else {
+                        // This branch is only needed on OpenGL < 4.2; ES < 3.2
+                        if (draw->baseVertex != 0) {
+                            gl.DrawElementsInstancedBaseVertex(
+                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                                indexBufferFormat,
+                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                        indexBufferBaseOffset),
+                                draw->instanceCount, draw->baseVertex);
+                        } else {
+                            // This branch is only needed on OpenGL < 3.2; ES < 3.2
+                            gl.DrawElementsInstanced(
+                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                                indexBufferFormat,
+                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                        indexBufferBaseOffset),
+                                draw->instanceCount);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    uint64_t indirectBufferOffset = draw->indirectOffset;
+                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+
+                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DrawArraysIndirect(
+                        lastPipeline->GetGLPrimitiveTopology(),
+                        reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(indirectBuffer != nullptr);
+
+                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DrawElementsIndirect(
+                        lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
+                        reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
+                    break;
+                }
+
+                case Command::InsertDebugMarker:
+                case Command::PopDebugGroup:
+                case Command::PushDebugGroup: {
+                    // Due to lack of linux driver support for GL_EXT_debug_marker
+                    // extension these functions are skipped.
+                    SkipCommand(iter, type);
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    lastPipeline = ToBackend(cmd->pipeline).Get();
+                    lastPipeline->ApplyNow(persistentPipelineState);
+
+                    vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
+                    bindGroupTracker.OnSetPipeline(lastPipeline);
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                    cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                    indexBufferBaseOffset = cmd->offset;
+                    indexBufferFormat = IndexFormatType(cmd->format);
+                    indexFormatSize = IndexFormatSize(cmd->format);
+                    vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+                    vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
+                                                                      cmd->offset);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+
+                    if (renderPass->attachmentState->GetSampleCount() > 1) {
+                        ResolveMultisampledRenderTargets(gl, renderPass);
+                    }
+                    gl.DeleteFramebuffers(1, &fbo);
+                    return {};
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                    persistentPipelineState.SetStencilReference(gl, cmd->reference);
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    if (gl.IsAtLeastGL(4, 1)) {
+                        gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
+                    } else {
+                        // Floating-point viewport coords are unsupported on OpenGL ES, but
+                        // truncation is ok because other APIs do not guarantee subpixel precision
+                        // either.
+                        gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
+                                    static_cast<int>(cmd->width), static_cast<int>(cmd->height));
+                    }
+                    gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
+                    break;
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
+                    gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            DoRenderBundleCommand(iter, type);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
+                }
+
+                case Command::EndOcclusionQuery: {
+                    return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
+                }
+
+                case Command::WriteTimestamp:
+                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+
+                default: {
+                    DoRenderBundleCommand(&mCommands, type);
+                    break;
+                }
+            }
+        }
+
+        // EndRenderPass should have been called
+        UNREACHABLE();
+    }
+
+    void DoTexSubImage(const OpenGLFunctions& gl,
+                       const TextureCopy& destination,
+                       const void* data,
+                       const TextureDataLayout& dataLayout,
+                       const Extent3D& copySize) {
+        Texture* texture = ToBackend(destination.texture.Get());
+        ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+        const GLFormat& format = texture->GetGLFormat();
+        GLenum target = texture->GetGLTarget();
+        data = static_cast<const uint8_t*>(data) + dataLayout.offset;
+        gl.ActiveTexture(GL_TEXTURE0);
+        gl.BindTexture(target, texture->GetHandle());
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(destination.aspect).block;
+
+        uint32_t x = destination.origin.x;
+        uint32_t y = destination.origin.y;
+        uint32_t z = destination.origin.z;
+        if (texture->GetFormat().isCompressed) {
+            size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
+            Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
+            uint32_t width = std::min(copySize.width, virtSize.width - x);
+
+            // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
+            // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
+            // this limitation by copying the compressed texture data once per row.
+            // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
+            // Buffer Objects" for more details. For Desktop GL, we use row-by-row
+            // copies only for uploads where bytesPerRow is not a multiple of byteSize.
+            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
+                size_t imageSize =
+                    rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
+
+                uint32_t height = std::min(copySize.height, virtSize.height - y);
+
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
+
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                               format.internalFormat, imageSize, data);
+                } else {
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+                                   dataLayout.rowsPerImage * blockInfo.height);
+                    gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                               copySize.depthOrArrayLayers, format.internalFormat,
+                                               imageSize, data);
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+                }
+
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+            } else {
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    const uint8_t* d = static_cast<const uint8_t*>(data);
+
+                    for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+                        uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                        gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
+                                                   height, format.internalFormat, rowSize, d);
+                        d += dataLayout.bytesPerRow;
+                    }
+                } else {
+                    const uint8_t* slice = static_cast<const uint8_t*>(data);
+
+                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                        const uint8_t* d = slice;
+
+                        for (y = destination.origin.y; y < destination.origin.y + copySize.height;
+                             y += blockInfo.height) {
+                            uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                            gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
+                                                       height, 1, format.internalFormat, rowSize,
+                                                       d);
+                            d += dataLayout.bytesPerRow;
+                        }
+
+                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+                    }
+                }
+            }
+        } else {
+            uint32_t width = copySize.width;
+            uint32_t height = copySize.height;
+            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                     format.format, format.type, data);
+                } else {
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+                                   dataLayout.rowsPerImage * blockInfo.height);
+                    gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                     copySize.depthOrArrayLayers, format.format, format.type, data);
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+                }
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+            } else {
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    const uint8_t* d = static_cast<const uint8_t*>(data);
+                    for (; y < destination.origin.y + height; ++y) {
+                        gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
+                                         format.format, format.type, d);
+                        d += dataLayout.bytesPerRow;
+                    }
+                } else {
+                    const uint8_t* slice = static_cast<const uint8_t*>(data);
+                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                        const uint8_t* d = slice;
+                        for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
+                            gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
+                                             format.format, format.type, d);
+                            d += dataLayout.bytesPerRow;
+                        }
+                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+                    }
+                }
+            }
+        }
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/CommandBufferGL.h b/src/dawn/native/opengl/CommandBufferGL.h
new file mode 100644
index 0000000..e0f3193
--- /dev/null
+++ b/src/dawn/native/opengl/CommandBufferGL.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
+#define DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
+
+#include "dawn/native/CommandBuffer.h"
+
+namespace dawn::native {
+    struct BeginRenderPassCmd;
+}  // namespace dawn::native
+
+namespace dawn::native::opengl {
+
+    class Device;
+    struct OpenGLFunctions;
+
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        MaybeError Execute();
+
+      private:
+        MaybeError ExecuteComputePass();
+        MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
+    };
+
+    // Like glTexSubImage*, the "data" argument is either a pointer to image data or
+    // an offset if a PBO is bound.
+    void DoTexSubImage(const OpenGLFunctions& gl,
+                       const TextureCopy& destination,
+                       const void* data,
+                       const TextureDataLayout& dataLayout,
+                       const Extent3D& copySize);
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
diff --git a/src/dawn/native/opengl/ComputePipelineGL.cpp b/src/dawn/native/opengl/ComputePipelineGL.cpp
new file mode 100644
index 0000000..b535411
--- /dev/null
+++ b/src/dawn/native/opengl/ComputePipelineGL.cpp
@@ -0,0 +1,45 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/ComputePipelineGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    // static
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(device, descriptor));
+    }
+
+    ComputePipeline::~ComputePipeline() = default;
+
+    void ComputePipeline::DestroyImpl() {
+        ComputePipelineBase::DestroyImpl();
+        DeleteProgram(ToBackend(GetDevice())->gl);
+    }
+
+    MaybeError ComputePipeline::Initialize() {
+        DAWN_TRY(
+            InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+        return {};
+    }
+
+    void ComputePipeline::ApplyNow() {
+        PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/ComputePipelineGL.h b/src/dawn/native/opengl/ComputePipelineGL.h
new file mode 100644
index 0000000..23be225
--- /dev/null
+++ b/src/dawn/native/opengl/ComputePipelineGL.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/opengl/PipelineGL.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
+      public:
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+
+        void ApplyNow();
+
+        MaybeError Initialize() override;
+
+      private:
+        using ComputePipelineBase::ComputePipelineBase;
+        ~ComputePipeline() override;
+        void DestroyImpl() override;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
diff --git a/src/dawn/native/opengl/DeviceGL.cpp b/src/dawn/native/opengl/DeviceGL.cpp
new file mode 100644
index 0000000..00222d1
--- /dev/null
+++ b/src/dawn/native/opengl/DeviceGL.cpp
@@ -0,0 +1,315 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/opengl/BindGroupGL.h"
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/ComputePipelineGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/QuerySetGL.h"
+#include "dawn/native/opengl/QueueGL.h"
+#include "dawn/native/opengl/RenderPipelineGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/ShaderModuleGL.h"
+#include "dawn/native/opengl/SwapChainGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+
+namespace dawn::native::opengl {
+
+    // static
+    ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+                                              const DeviceDescriptor* descriptor,
+                                              const OpenGLFunctions& functions) {
+        Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    Device::Device(AdapterBase* adapter,
+                   const DeviceDescriptor* descriptor,
+                   const OpenGLFunctions& functions)
+        : DeviceBase(adapter, descriptor), gl(functions) {
+    }
+
+    Device::~Device() {
+        Destroy();
+    }
+
+    MaybeError Device::Initialize() {
+        InitTogglesFromDriver();
+        mFormatTable = BuildGLFormatTable();
+
+        return DeviceBase::Initialize(new Queue(this));
+    }
+
+    void Device::InitTogglesFromDriver() {
+        bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
+
+        bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
+
+        // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
+        bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
+
+        bool supportsSnormRead =
+            gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
+
+        bool supportsDepthStencilRead =
+            gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
+
+        bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
+                                       gl.IsGLExtensionSupported("GL_OES_sample_variables");
+
+        // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
+        // procs without the extension suffix.
+        // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
+
+        // supportsBaseVertex |=
+        //     (gl.IsAtLeastGLES(2, 0) &&
+        //      (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
+        //       gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
+        //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
+
+        // supportsBaseInstance |=
+        //     (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
+        //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
+
+        // TODO(crbug.com/dawn/343): Investigate emulation.
+        SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
+        SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
+        SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
+        SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
+        SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
+        SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
+        SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
+        // For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline.
+        SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
+    }
+
+    const GLFormat& Device::GetGLFormat(const Format& format) {
+        ASSERT(format.isSupported);
+        ASSERT(format.GetIndex() < mFormatTable.size());
+
+        const GLFormat& result = mFormatTable[format.GetIndex()];
+        ASSERT(result.isSupportedOnBackend);
+        return result;
+    }
+
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
+        return BindGroup::Create(this, descriptor);
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        return AcquireRef(new Buffer(this, descriptor));
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return ComputePipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return AcquireRef(new PipelineLayout(this, descriptor));
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return AcquireRef(new QuerySet(this, descriptor));
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return RenderPipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return AcquireRef(new Sampler(this, descriptor));
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        return ShaderModule::Create(this, descriptor, parseResult);
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new SwapChain(this, descriptor));
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return AcquireRef(new Texture(this, descriptor));
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return AcquireRef(new TextureView(texture, descriptor));
+    }
+
+    void Device::SubmitFenceSync() {
+        GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+        IncrementLastSubmittedCommandSerial();
+        mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
+    }
+
+    MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+                                                    ::EGLImage image) {
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "Texture dimension (%s) is not %s.", descriptor->dimension,
+                        wgpu::TextureDimension::e2D);
+
+        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                        descriptor->sampleCount);
+
+        DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
+                                             wgpu::TextureUsage::StorageBinding),
+                        "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
+                        wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
+
+        return {};
+    }
+    TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+                                                       ::EGLImage image) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+            return nullptr;
+        }
+        if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
+            return nullptr;
+        }
+
+        GLuint tex;
+        gl.GenTextures(1, &tex);
+        gl.BindTexture(GL_TEXTURE_2D, tex);
+        gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+        GLint width, height, internalFormat;
+        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
+        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
+        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
+
+        if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
+            textureDescriptor->size.height != static_cast<uint32_t>(height) ||
+            textureDescriptor->size.depthOrArrayLayers != 1) {
+            ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
+                "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
+                width, height, &textureDescriptor->size));
+            gl.DeleteTextures(1, &tex);
+            return nullptr;
+        }
+
+        // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
+        // in the passed-in TextureDescriptor.
+        return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
+    }
+
+    MaybeError Device::TickImpl() {
+        return {};
+    }
+
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        ExecutionSerial fenceSerial{0};
+        while (!mFencesInFlight.empty()) {
+            auto [sync, tentativeSerial] = mFencesInFlight.front();
+
+            // Fence are added in order, so we can stop searching as soon
+            // as we see one that's not ready.
+
+            // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
+            if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
+                gl.Flush();
+            }
+            GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
+            if (result == GL_TIMEOUT_EXPIRED) {
+                return fenceSerial;
+            }
+            // Update fenceSerial since fence is ready.
+            fenceSerial = tentativeSerial;
+
+            gl.DeleteSync(sync);
+
+            mFencesInFlight.pop();
+
+            ASSERT(fenceSerial > GetCompletedCommandSerial());
+        }
+        return fenceSerial;
+    }
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
+    }
+
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
+    }
+
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
+    }
+
+    void Device::DestroyImpl() {
+        ASSERT(GetState() == State::Disconnected);
+    }
+
+    MaybeError Device::WaitForIdleForDestruction() {
+        gl.Finish();
+        DAWN_TRY(CheckPassedSerials());
+        ASSERT(mFencesInFlight.empty());
+
+        return {};
+    }
+
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
+    float Device::GetTimestampPeriodInNS() const {
+        return 1.0f;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/DeviceGL.h b/src/dawn/native/opengl/DeviceGL.h
new file mode 100644
index 0000000..f6c673c
--- /dev/null
+++ b/src/dawn/native/opengl/DeviceGL.h
@@ -0,0 +1,131 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_DEVICEGL_H_
+#define DAWNNATIVE_OPENGL_DEVICEGL_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/GLFormat.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+#include <queue>
+
+// Remove windows.h macros after glad's include of windows.h
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include "dawn/common/windows_with_undefs.h"
+#endif
+
+typedef void* EGLImage;
+
+namespace dawn::native::opengl {
+
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+                                                 const DeviceDescriptor* descriptor,
+                                                 const OpenGLFunctions& functions);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
+        const OpenGLFunctions gl;
+
+        const GLFormat& GetGLFormat(const Format& format);
+
+        void SubmitFenceSync();
+
+        MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+                                                ::EGLImage image);
+        TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+                                                   ::EGLImage image);
+
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+
+        MaybeError TickImpl() override;
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+      private:
+        Device(AdapterBase* adapter,
+               const DeviceDescriptor* descriptor,
+               const OpenGLFunctions& functions);
+
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+
+        void InitTogglesFromDriver();
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+
+        std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
+
+        GLFormatTable mFormatTable;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_DEVICEGL_H_
diff --git a/src/dawn/native/opengl/Forward.h b/src/dawn/native/opengl/Forward.h
new file mode 100644
index 0000000..daf2dc3
--- /dev/null
+++ b/src/dawn/native/opengl/Forward.h
@@ -0,0 +1,66 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_FORWARD_H_
+#define DAWNNATIVE_OPENGL_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::opengl {
+
+    class Adapter;
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class ComputePipeline;
+    class Device;
+    class PersistentPipelineState;
+    class PipelineLayout;
+    class QuerySet;
+    class Queue;
+    class RenderPipeline;
+    class Sampler;
+    class ShaderModule;
+    class SwapChain;
+    class Texture;
+    class TextureView;
+
+    struct OpenGLBackendTraits {
+        using AdapterType = Adapter;
+        using BindGroupType = BindGroup;
+        using BindGroupLayoutType = BindGroupLayout;
+        using BufferType = Buffer;
+        using CommandBufferType = CommandBuffer;
+        using ComputePipelineType = ComputePipeline;
+        using DeviceType = Device;
+        using PipelineLayoutType = PipelineLayout;
+        using QuerySetType = QuerySet;
+        using QueueType = Queue;
+        using RenderPipelineType = RenderPipeline;
+        using SamplerType = Sampler;
+        using ShaderModuleType = ShaderModule;
+        using SwapChainType = SwapChain;
+        using TextureType = Texture;
+        using TextureViewType = TextureView;
+    };
+
+    template <typename T>
+    auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
+        return ToBackendBase<OpenGLBackendTraits>(common);
+    }
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_FORWARD_H_
diff --git a/src/dawn/native/opengl/GLFormat.cpp b/src/dawn/native/opengl/GLFormat.cpp
new file mode 100644
index 0000000..dac02a6
--- /dev/null
+++ b/src/dawn/native/opengl/GLFormat.cpp
@@ -0,0 +1,122 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/GLFormat.h"
+
+namespace dawn::native::opengl {
+
+    GLFormatTable BuildGLFormatTable() {
+        GLFormatTable table;
+
+        using Type = GLFormat::ComponentType;
+
+        auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
+                                  GLenum format, GLenum type, Type componentType) {
+            FormatIndex index = ComputeFormatIndex(dawnFormat);
+            ASSERT(index < table.size());
+
+            table[index].internalFormat = internalFormat;
+            table[index].format = format;
+            table[index].type = type;
+            table[index].componentType = componentType;
+            table[index].isSupportedOnBackend = true;
+        };
+
+        // It's dangerous to go alone, take this:
+        //
+        //     [ANGLE's formatutils.cpp]
+        //     [ANGLE's formatutilsgl.cpp]
+        //
+        // The format tables in these files are extremely complete and the best reference on GL
+        // format support, enums, etc.
+
+        // clang-format off
+
+        // 1 byte color formats
+        AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::R8Snorm, GL_R8_SNORM, GL_RED, GL_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::R8Uint, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+        AddFormat(wgpu::TextureFormat::R8Sint, GL_R8I, GL_RED_INTEGER, GL_BYTE, Type::Int);
+
+        // 2 bytes color formats
+        AddFormat(wgpu::TextureFormat::R16Uint, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::R16Sint, GL_R16I, GL_RED_INTEGER, GL_SHORT, Type::Int);
+        AddFormat(wgpu::TextureFormat::R16Float, GL_R16F, GL_RED, GL_HALF_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::RG8Unorm, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RG8Snorm, GL_RG8_SNORM, GL_RG, GL_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RG8Uint, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RG8Sint, GL_RG8I, GL_RG_INTEGER, GL_BYTE, Type::Int);
+
+        // 4 bytes color formats
+        AddFormat(wgpu::TextureFormat::R32Uint, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::R32Sint, GL_R32I, GL_RED_INTEGER, GL_INT, Type::Int);
+        AddFormat(wgpu::TextureFormat::R32Float, GL_R32F, GL_RED, GL_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::RG16Uint, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RG16Sint, GL_RG16I, GL_RG_INTEGER, GL_SHORT, Type::Int);
+        AddFormat(wgpu::TextureFormat::RG16Float, GL_RG16F, GL_RG, GL_HALF_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGBA8Unorm, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGBA8UnormSrgb, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGBA8Snorm, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
+
+        // This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
+        AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
+        AddFormat(wgpu::TextureFormat::RG11B10Ufloat, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGB9E5Ufloat, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, Type::Float);
+
+        // 8 bytes color formats
+        AddFormat(wgpu::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RG32Sint, GL_RG32I, GL_RG_INTEGER, GL_INT, Type::Int);
+        AddFormat(wgpu::TextureFormat::RG32Float, GL_RG32F, GL_RG, GL_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::RGBA16Uint, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RGBA16Sint, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, Type::Int);
+        AddFormat(wgpu::TextureFormat::RGBA16Float, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, Type::Float);
+
+        // 16 bytes color formats
+        AddFormat(wgpu::TextureFormat::RGBA32Uint, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+        AddFormat(wgpu::TextureFormat::RGBA32Sint, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, Type::Int);
+        AddFormat(wgpu::TextureFormat::RGBA32Float, GL_RGBA32F, GL_RGBA, GL_FLOAT, Type::Float);
+
+        // Depth stencil formats
+        AddFormat(wgpu::TextureFormat::Depth32Float, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
+        AddFormat(wgpu::TextureFormat::Depth24Plus, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
+        AddFormat(wgpu::TextureFormat::Depth24UnormStencil8, GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, Type::DepthStencil);
+        AddFormat(wgpu::TextureFormat::Depth24PlusStencil8, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, Type::DepthStencil);
+        AddFormat(wgpu::TextureFormat::Depth16Unorm, GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, Type::DepthStencil);
+        AddFormat(wgpu::TextureFormat::Stencil8, GL_STENCIL_INDEX8, GL_STENCIL, GL_UNSIGNED_BYTE, Type::DepthStencil);
+
+        // Block compressed formats
+        AddFormat(wgpu::TextureFormat::BC1RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC2RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC3RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC4RSnorm, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC4RUnorm, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC5RGSnorm, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC5RGUnorm, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC6HRGBFloat, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC6HRGBUfloat, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+        AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+
+        // clang-format on
+
+        return table;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/GLFormat.h b/src/dawn/native/opengl/GLFormat.h
new file mode 100644
index 0000000..e3e3195
--- /dev/null
+++ b/src/dawn/native/opengl/GLFormat.h
@@ -0,0 +1,42 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_GLFORMAT_H_
+#define DAWNNATIVE_OPENGL_GLFORMAT_H_
+
+#include "dawn/native/Format.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    struct GLFormat {
+        GLenum internalFormat = 0;
+        GLenum format = 0;
+        GLenum type = 0;
+        bool isSupportedOnBackend = false;
+
+        // OpenGL has different functions depending on the format component type, for example
+        // glClearBufferfv is only valid on formats with the Float ComponentType
+        enum ComponentType { Float, Int, Uint, DepthStencil };
+        ComponentType componentType;
+    };
+
+    using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
+    GLFormatTable BuildGLFormatTable();
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_GLFORMAT_H_
diff --git a/src/dawn/native/opengl/NativeSwapChainImplGL.cpp b/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
new file mode 100644
index 0000000..b01e7e3
--- /dev/null
+++ b/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
@@ -0,0 +1,88 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/NativeSwapChainImplGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
+                                             PresentCallback present,
+                                             void* presentUserdata)
+        : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {
+    }
+
+    NativeSwapChainImpl::~NativeSwapChainImpl() {
+        const OpenGLFunctions& gl = mDevice->gl;
+        gl.DeleteTextures(1, &mBackTexture);
+        gl.DeleteFramebuffers(1, &mBackFBO);
+    }
+
+    void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
+        const OpenGLFunctions& gl = mDevice->gl;
+        gl.GenTextures(1, &mBackTexture);
+        gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+        gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+
+        gl.GenFramebuffers(1, &mBackFBO);
+        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+        gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+                                mBackTexture, 0);
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                      WGPUTextureUsage usage,
+                                                      uint32_t width,
+                                                      uint32_t height) {
+        if (format != WGPUTextureFormat_RGBA8Unorm) {
+            return "unsupported format";
+        }
+        ASSERT(width > 0);
+        ASSERT(height > 0);
+        mWidth = width;
+        mHeight = height;
+
+        const OpenGLFunctions& gl = mDevice->gl;
+        gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+        // Reallocate the texture
+        gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+                      nullptr);
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+        nextTexture->texture.u32 = mBackTexture;
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Present() {
+        const OpenGLFunctions& gl = mDevice->gl;
+        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+        gl.Scissor(0, 0, mWidth, mHeight);
+        gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
+                           GL_NEAREST);
+
+        mPresentCallback(mPresentUserdata);
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+        return wgpu::TextureFormat::RGBA8Unorm;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/NativeSwapChainImplGL.h b/src/dawn/native/opengl/NativeSwapChainImplGL.h
new file mode 100644
index 0000000..bd7bc9d
--- /dev/null
+++ b/src/dawn/native/opengl/NativeSwapChainImplGL.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
+#define DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
+
+#include "dawn/native/OpenGLBackend.h"
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class NativeSwapChainImpl {
+      public:
+        using WSIContext = DawnWSIContextGL;
+
+        NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
+        ~NativeSwapChainImpl();
+
+        void Init(DawnWSIContextGL* context);
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage,
+                                     uint32_t width,
+                                     uint32_t height);
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+        DawnSwapChainError Present();
+
+        wgpu::TextureFormat GetPreferredFormat() const;
+
+      private:
+        PresentCallback mPresentCallback;
+        void* mPresentUserdata;
+
+        uint32_t mWidth = 0;
+        uint32_t mHeight = 0;
+        GLuint mBackFBO = 0;
+        GLuint mBackTexture = 0;
+
+        Device* mDevice = nullptr;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
diff --git a/src/dawn/native/opengl/OWNERS b/src/dawn/native/opengl/OWNERS
new file mode 100644
index 0000000..d6d6510
--- /dev/null
+++ b/src/dawn/native/opengl/OWNERS
@@ -0,0 +1 @@
+senorblanco@chromium.org
diff --git a/src/dawn/native/opengl/OpenGLBackend.cpp b/src/dawn/native/opengl/OpenGLBackend.cpp
new file mode 100644
index 0000000..739de62
--- /dev/null
+++ b/src/dawn/native/opengl/OpenGLBackend.cpp
@@ -0,0 +1,65 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// OpenGLBackend.cpp: contains the definition of symbols exported by OpenGLBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/OpenGLBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/NativeSwapChainImplGL.h"
+
+namespace dawn::native::opengl {
+
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {
+    }
+
+    AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {
+    }
+
+    DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                          PresentCallback present,
+                                                          void* presentUserdata) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+
+        DawnSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(
+            new NativeSwapChainImpl(backendDevice, present, presentUserdata));
+        impl.textureUsage = WGPUTextureUsage_Present;
+
+        return impl;
+    }
+
+    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+        const DawnSwapChainImplementation* swapChain) {
+        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+    }
+
+    ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
+        : ExternalImageDescriptor(ExternalImageType::EGLImage) {
+    }
+
+    WGPUTexture WrapExternalEGLImage(WGPUDevice device,
+                                     const ExternalImageDescriptorEGLImage* descriptor) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        TextureBase* texture =
+            backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
+        return ToAPI(texture);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLFunctions.cpp b/src/dawn/native/opengl/OpenGLFunctions.cpp
new file mode 100644
index 0000000..45f8354
--- /dev/null
+++ b/src/dawn/native/opengl/OpenGLFunctions.cpp
@@ -0,0 +1,61 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+#include <cctype>
+
+namespace dawn::native::opengl {
+
+    MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
+        DAWN_TRY(mVersion.Initialize(getProc));
+        if (mVersion.IsES()) {
+            DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
+        } else {
+            DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
+        }
+
+        InitializeSupportedGLExtensions();
+
+        return {};
+    }
+
+    void OpenGLFunctions::InitializeSupportedGLExtensions() {
+        int32_t numExtensions;
+        GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
+
+        for (int32_t i = 0; i < numExtensions; ++i) {
+            const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
+            mSupportedGLExtensionsSet.insert(extensionName);
+        }
+    }
+
+    bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
+        ASSERT(extension != nullptr);
+        return mSupportedGLExtensionsSet.count(extension) != 0;
+    }
+
+    const OpenGLVersion& OpenGLFunctions::GetVersion() const {
+        return mVersion;
+    }
+
+    bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
+        return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
+    }
+
+    bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
+        return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLFunctions.h b/src/dawn/native/opengl/OpenGLFunctions.h
new file mode 100644
index 0000000..3da6c86
--- /dev/null
+++ b/src/dawn/native/opengl/OpenGLFunctions.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
+#define DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
+
+#include <unordered_set>
+
+#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
+#include "dawn/native/opengl/OpenGLVersion.h"
+
+namespace dawn::native::opengl {
+
+    struct OpenGLFunctions : OpenGLFunctionsBase {
+      public:
+        MaybeError Initialize(GetProcAddress getProc);
+
+        const OpenGLVersion& GetVersion() const;
+        bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
+        bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
+
+        bool IsGLExtensionSupported(const char* extension) const;
+
+      private:
+        void InitializeSupportedGLExtensions();
+
+        OpenGLVersion mVersion;
+
+        std::unordered_set<std::string> mSupportedGLExtensionsSet;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
diff --git a/src/dawn/native/opengl/OpenGLVersion.cpp b/src/dawn/native/opengl/OpenGLVersion.cpp
new file mode 100644
index 0000000..60fffff
--- /dev/null
+++ b/src/dawn/native/opengl/OpenGLVersion.cpp
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/OpenGLVersion.h"
+
+#include <cctype>
+#include <tuple>
+
+namespace dawn::native::opengl {
+
+    MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
+        PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
+        if (getString == nullptr) {
+            return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
+        }
+
+        std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
+
+        if (version.find("OpenGL ES") != std::string::npos) {
+            // ES spec states that the GL_VERSION string will be in the following format:
+            // "OpenGL ES N.M vendor-specific information"
+            mStandard = Standard::ES;
+            mMajorVersion = version[10] - '0';
+            mMinorVersion = version[12] - '0';
+
+            // The minor version shouldn't get to two digits.
+            ASSERT(version.size() <= 13 || !isdigit(version[13]));
+        } else {
+            // OpenGL spec states the GL_VERSION string will be in the following format:
+            // <version number><space><vendor-specific information>
+            // The version number is either of the form major number.minor number or major
+            // number.minor number.release number, where the numbers all have one or more
+            // digits
+            mStandard = Standard::Desktop;
+            mMajorVersion = version[0] - '0';
+            mMinorVersion = version[2] - '0';
+
+            // The minor version shouldn't get to two digits.
+            ASSERT(version.size() <= 3 || !isdigit(version[3]));
+        }
+
+        return {};
+    }
+
+    bool OpenGLVersion::IsDesktop() const {
+        return mStandard == Standard::Desktop;
+    }
+
+    bool OpenGLVersion::IsES() const {
+        return mStandard == Standard::ES;
+    }
+
+    uint32_t OpenGLVersion::GetMajor() const {
+        return mMajorVersion;
+    }
+
+    uint32_t OpenGLVersion::GetMinor() const {
+        return mMinorVersion;
+    }
+
+    bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
+        return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLVersion.h b/src/dawn/native/opengl/OpenGLVersion.h
new file mode 100644
index 0000000..6b1e91b
--- /dev/null
+++ b/src/dawn/native/opengl/OpenGLVersion.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_OPENGLVERSION_H_
+#define DAWNNATIVE_OPENGL_OPENGLVERSION_H_
+
+#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
+
+namespace dawn::native::opengl {
+
+    struct OpenGLVersion {
+      public:
+        MaybeError Initialize(GetProcAddress getProc);
+
+        bool IsDesktop() const;
+        bool IsES() const;
+        uint32_t GetMajor() const;
+        uint32_t GetMinor() const;
+        bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
+
+      private:
+        enum class Standard {
+            Desktop,
+            ES,
+        };
+        uint32_t mMajorVersion;
+        uint32_t mMinorVersion;
+        Standard mStandard;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_OPENGLVERSION_H_
diff --git a/src/dawn/native/opengl/PersistentPipelineStateGL.cpp b/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
new file mode 100644
index 0000000..446ab1a
--- /dev/null
+++ b/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+namespace dawn::native::opengl {
+
+    void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
+        CallGLStencilFunc(gl);
+    }
+
+    void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                                         GLenum stencilBackCompareFunction,
+                                                         GLenum stencilFrontCompareFunction,
+                                                         uint32_t stencilReadMask) {
+        if (mStencilBackCompareFunction == stencilBackCompareFunction &&
+            mStencilFrontCompareFunction == stencilFrontCompareFunction &&
+            mStencilReadMask == stencilReadMask) {
+            return;
+        }
+
+        mStencilBackCompareFunction = stencilBackCompareFunction;
+        mStencilFrontCompareFunction = stencilFrontCompareFunction;
+        mStencilReadMask = stencilReadMask;
+        CallGLStencilFunc(gl);
+    }
+
+    void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
+                                                      uint32_t stencilReference) {
+        if (mStencilReference == stencilReference) {
+            return;
+        }
+
+        mStencilReference = stencilReference;
+        CallGLStencilFunc(gl);
+    }
+
+    void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
+        gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
+                               mStencilReadMask);
+        gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
+                               mStencilReadMask);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PersistentPipelineStateGL.h b/src/dawn/native/opengl/PersistentPipelineStateGL.h
new file mode 100644
index 0000000..959e7f9
--- /dev/null
+++ b/src/dawn/native/opengl/PersistentPipelineStateGL.h
@@ -0,0 +1,45 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
+#define DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    struct OpenGLFunctions;
+
+    class PersistentPipelineState {
+      public:
+        void SetDefaultState(const OpenGLFunctions& gl);
+        void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                    GLenum stencilBackCompareFunction,
+                                    GLenum stencilFrontCompareFunction,
+                                    uint32_t stencilReadMask);
+        void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
+
+      private:
+        void CallGLStencilFunc(const OpenGLFunctions& gl);
+
+        GLenum mStencilBackCompareFunction = GL_ALWAYS;
+        GLenum mStencilFrontCompareFunction = GL_ALWAYS;
+        GLuint mStencilReadMask = 0xffffffff;
+        GLuint mStencilReference = 0;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
diff --git a/src/dawn/native/opengl/PipelineGL.cpp b/src/dawn/native/opengl/PipelineGL.cpp
new file mode 100644
index 0000000..8890e68
--- /dev/null
+++ b/src/dawn/native/opengl/PipelineGL.cpp
@@ -0,0 +1,218 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PipelineGL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/ShaderModuleGL.h"
+
+#include <set>
+#include <sstream>
+
+namespace dawn::native::opengl {
+
+    namespace {
+
+        GLenum GLShaderType(SingleShaderStage stage) {
+            switch (stage) {
+                case SingleShaderStage::Vertex:
+                    return GL_VERTEX_SHADER;
+                case SingleShaderStage::Fragment:
+                    return GL_FRAGMENT_SHADER;
+                case SingleShaderStage::Compute:
+                    return GL_COMPUTE_SHADER;
+            }
+            UNREACHABLE();
+        }
+
+    }  // namespace
+
+    PipelineGL::PipelineGL() : mProgram(0) {
+    }
+
+    PipelineGL::~PipelineGL() = default;
+
+    MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
+                                          const PipelineLayout* layout,
+                                          const PerStage<ProgrammableStage>& stages) {
+        auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
+                               const char* source) -> ResultOrError<GLuint> {
+            GLuint shader = gl.CreateShader(type);
+            gl.ShaderSource(shader, 1, &source, nullptr);
+            gl.CompileShader(shader);
+
+            GLint compileStatus = GL_FALSE;
+            gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
+            if (compileStatus == GL_FALSE) {
+                GLint infoLogLength = 0;
+                gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
+
+                if (infoLogLength > 1) {
+                    std::vector<char> buffer(infoLogLength);
+                    gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
+                    return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s",
+                                                        source, buffer.data());
+                }
+            }
+            return shader;
+        };
+
+        mProgram = gl.CreateProgram();
+
+        // Compute the set of active stages.
+        wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
+        for (SingleShaderStage stage : IterateStages(kAllStages)) {
+            if (stages[stage].module != nullptr) {
+                activeStages |= StageBit(stage);
+            }
+        }
+
+        // Create an OpenGL shader for each stage and gather the list of combined samplers.
+        PerStage<CombinedSamplerInfo> combinedSamplers;
+        bool needsDummySampler = false;
+        std::vector<GLuint> glShaders;
+        for (SingleShaderStage stage : IterateStages(activeStages)) {
+            const ShaderModule* module = ToBackend(stages[stage].module.Get());
+            std::string glsl;
+            DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
+                                                          &combinedSamplers[stage], layout,
+                                                          &needsDummySampler));
+            GLuint shader;
+            DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
+            gl.AttachShader(mProgram, shader);
+            glShaders.push_back(shader);
+        }
+
+        if (needsDummySampler) {
+            SamplerDescriptor desc = {};
+            ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
+            ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
+            ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
+            mDummySampler =
+                ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
+        }
+
+        // Link all the shaders together.
+        gl.LinkProgram(mProgram);
+
+        GLint linkStatus = GL_FALSE;
+        gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
+        if (linkStatus == GL_FALSE) {
+            GLint infoLogLength = 0;
+            gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
+
+            if (infoLogLength > 1) {
+                std::vector<char> buffer(infoLogLength);
+                gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
+                return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
+            }
+        }
+
+        // Compute links between stages for combined samplers, then bind them to texture units
+        gl.UseProgram(mProgram);
+        const auto& indices = layout->GetBindingIndexInfo();
+
+        std::set<CombinedSampler> combinedSamplersSet;
+        for (SingleShaderStage stage : IterateStages(activeStages)) {
+            for (const CombinedSampler& combined : combinedSamplers[stage]) {
+                combinedSamplersSet.insert(combined);
+            }
+        }
+
+        mUnitsForSamplers.resize(layout->GetNumSamplers());
+        mUnitsForTextures.resize(layout->GetNumSampledTextures());
+
+        GLuint textureUnit = layout->GetTextureUnitsUsed();
+        for (const auto& combined : combinedSamplersSet) {
+            const std::string& name = combined.GetName();
+            GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+
+            if (location == -1) {
+                continue;
+            }
+
+            gl.Uniform1i(location, textureUnit);
+
+            bool shouldUseFiltering;
+            {
+                const BindGroupLayoutBase* bgl =
+                    layout->GetBindGroupLayout(combined.textureLocation.group);
+                BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
+
+                GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+                mUnitsForTextures[textureIndex].push_back(textureUnit);
+
+                shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
+                                     wgpu::TextureSampleType::Float;
+            }
+            {
+                if (combined.useDummySampler) {
+                    mDummySamplerUnits.push_back(textureUnit);
+                } else {
+                    const BindGroupLayoutBase* bgl =
+                        layout->GetBindGroupLayout(combined.samplerLocation.group);
+                    BindingIndex bindingIndex =
+                        bgl->GetBindingIndex(combined.samplerLocation.binding);
+
+                    GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+                    mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
+                }
+            }
+
+            textureUnit++;
+        }
+
+        for (GLuint glShader : glShaders) {
+            gl.DetachShader(mProgram, glShader);
+            gl.DeleteShader(glShader);
+        }
+
+        return {};
+    }
+
+    void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
+        gl.DeleteProgram(mProgram);
+    }
+
+    const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
+        GLuint index) const {
+        ASSERT(index < mUnitsForSamplers.size());
+        return mUnitsForSamplers[index];
+    }
+
+    const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
+        ASSERT(index < mUnitsForTextures.size());
+        return mUnitsForTextures[index];
+    }
+
+    GLuint PipelineGL::GetProgramHandle() const {
+        return mProgram;
+    }
+
+    void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
+        gl.UseProgram(mProgram);
+        for (GLuint unit : mDummySamplerUnits) {
+            ASSERT(mDummySampler.Get() != nullptr);
+            gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle());
+        }
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PipelineGL.h b/src/dawn/native/opengl/PipelineGL.h
new file mode 100644
index 0000000..9bbfffa
--- /dev/null
+++ b/src/dawn/native/opengl/PipelineGL.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_PIPELINEGL_H_
+
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/PerStage.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+#include <vector>
+
+namespace dawn::native {
+    struct ProgrammableStage;
+}  // namespace dawn::native
+
+namespace dawn::native::opengl {
+
+    struct OpenGLFunctions;
+    class PipelineLayout;
+    class Sampler;
+
+    class PipelineGL {
+      public:
+        PipelineGL();
+        ~PipelineGL();
+
+        // For each unit a sampler is bound to we need to know if we should use filtering or not
+        // because int and uint texture are only complete without filtering.
+        struct SamplerUnit {
+            GLuint unit;
+            bool shouldUseFiltering;
+        };
+        const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
+        const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
+        GLuint GetProgramHandle() const;
+
+      protected:
+        void ApplyNow(const OpenGLFunctions& gl);
+        MaybeError InitializeBase(const OpenGLFunctions& gl,
+                                  const PipelineLayout* layout,
+                                  const PerStage<ProgrammableStage>& stages);
+        void DeleteProgram(const OpenGLFunctions& gl);
+
+      private:
+        GLuint mProgram;
+        std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
+        std::vector<std::vector<GLuint>> mUnitsForTextures;
+        std::vector<GLuint> mDummySamplerUnits;
+        // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
+        // destruction complex as it requires the sampler to be destroyed before the sampler cache.
+        Ref<Sampler> mDummySampler;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_PIPELINEGL_H_
diff --git a/src/dawn/native/opengl/PipelineLayoutGL.cpp b/src/dawn/native/opengl/PipelineLayoutGL.cpp
new file mode 100644
index 0000000..7dd54ab
--- /dev/null
+++ b/src/dawn/native/opengl/PipelineLayoutGL.cpp
@@ -0,0 +1,95 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+        : PipelineLayoutBase(device, descriptor) {
+        GLuint uboIndex = 0;
+        GLuint samplerIndex = 0;
+        GLuint sampledTextureIndex = 0;
+        GLuint ssboIndex = 0;
+        GLuint storageTextureIndex = 0;
+
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+            mIndexInfo[group].resize(bgl->GetBindingCount());
+
+            for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
+                 ++bindingIndex) {
+                const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+                switch (bindingInfo.bindingType) {
+                    case BindingInfoType::Buffer:
+                        switch (bindingInfo.buffer.type) {
+                            case wgpu::BufferBindingType::Uniform:
+                                mIndexInfo[group][bindingIndex] = uboIndex;
+                                uboIndex++;
+                                break;
+                            case wgpu::BufferBindingType::Storage:
+                            case kInternalStorageBufferBinding:
+                            case wgpu::BufferBindingType::ReadOnlyStorage:
+                                mIndexInfo[group][bindingIndex] = ssboIndex;
+                                ssboIndex++;
+                                break;
+                            case wgpu::BufferBindingType::Undefined:
+                                UNREACHABLE();
+                        }
+                        break;
+
+                    case BindingInfoType::Sampler:
+                        mIndexInfo[group][bindingIndex] = samplerIndex;
+                        samplerIndex++;
+                        break;
+
+                    case BindingInfoType::Texture:
+                    case BindingInfoType::ExternalTexture:
+                        mIndexInfo[group][bindingIndex] = sampledTextureIndex;
+                        sampledTextureIndex++;
+                        break;
+
+                    case BindingInfoType::StorageTexture:
+                        mIndexInfo[group][bindingIndex] = storageTextureIndex;
+                        storageTextureIndex++;
+                        break;
+                }
+            }
+        }
+
+        mNumSamplers = samplerIndex;
+        mNumSampledTextures = sampledTextureIndex;
+    }
+
+    const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
+        return mIndexInfo;
+    }
+
+    GLuint PipelineLayout::GetTextureUnitsUsed() const {
+        return 0;
+    }
+
+    size_t PipelineLayout::GetNumSamplers() const {
+        return mNumSamplers;
+    }
+
+    size_t PipelineLayout::GetNumSampledTextures() const {
+        return mNumSampledTextures;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PipelineLayoutGL.h b/src/dawn/native/opengl/PipelineLayoutGL.h
new file mode 100644
index 0000000..f743d6a
--- /dev/null
+++ b/src/dawn/native/opengl/PipelineLayoutGL.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
+#define DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class PipelineLayout final : public PipelineLayoutBase {
+      public:
+        PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+
+        using BindingIndexInfo =
+            ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
+        const BindingIndexInfo& GetBindingIndexInfo() const;
+
+        GLuint GetTextureUnitsUsed() const;
+        size_t GetNumSamplers() const;
+        size_t GetNumSampledTextures() const;
+
+      private:
+        ~PipelineLayout() override = default;
+        BindingIndexInfo mIndexInfo;
+        size_t mNumSamplers;
+        size_t mNumSampledTextures;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
diff --git a/src/dawn/native/opengl/QuerySetGL.cpp b/src/dawn/native/opengl/QuerySetGL.cpp
new file mode 100644
index 0000000..cdf9858
--- /dev/null
+++ b/src/dawn/native/opengl/QuerySetGL.cpp
@@ -0,0 +1,27 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/QuerySetGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+        : QuerySetBase(device, descriptor) {
+    }
+
+    QuerySet::~QuerySet() = default;
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/QuerySetGL.h b/src/dawn/native/opengl/QuerySetGL.h
new file mode 100644
index 0000000..1bef7c5
--- /dev/null
+++ b/src/dawn/native/opengl/QuerySetGL.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_QUERYSETGL_H_
+#define DAWNNATIVE_OPENGL_QUERYSETGL_H_
+
+#include "dawn/native/QuerySet.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class QuerySet final : public QuerySetBase {
+      public:
+        QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+
+      private:
+        ~QuerySet() override;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_QUERYSETGL_H_
diff --git a/src/dawn/native/opengl/QueueGL.cpp b/src/dawn/native/opengl/QueueGL.cpp
new file mode 100644
index 0000000..541d93b
--- /dev/null
+++ b/src/dawn/native/opengl/QueueGL.cpp
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/QueueGL.h"
+
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::opengl {
+
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+        Device* device = ToBackend(GetDevice());
+
+        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(ToBackend(commands[i])->Execute());
+        }
+        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+
+        device->SubmitFenceSync();
+        return {};
+    }
+
+    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+                                      uint64_t bufferOffset,
+                                      const void* data,
+                                      size_t size) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+
+        gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
+        gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
+        return {};
+    }
+
+    MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
+                                       const void* data,
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
+        DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
+                        "Writes to stencil textures unsupported on the OpenGL backend.");
+
+        TextureCopy textureCopy;
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect =
+            SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
+
+        SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
+        if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
+                                          destination.mipLevel)) {
+            destination.texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
+        }
+        DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
+        return {};
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/QueueGL.h b/src/dawn/native/opengl/QueueGL.h
new file mode 100644
index 0000000..f83278d
--- /dev/null
+++ b/src/dawn/native/opengl/QueueGL.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_QUEUEGL_H_
+#define DAWNNATIVE_OPENGL_QUEUEGL_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class Queue final : public QueueBase {
+      public:
+        Queue(Device* device);
+
+      private:
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+        MaybeError WriteBufferImpl(BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   const void* data,
+                                   size_t size) override;
+        MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+                                    const void* data,
+                                    const TextureDataLayout& dataLayout,
+                                    const Extent3D& writeSizePixel) override;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_QUEUEGL_H_
diff --git a/src/dawn/native/opengl/RenderPipelineGL.cpp b/src/dawn/native/opengl/RenderPipelineGL.cpp
new file mode 100644
index 0000000..5e4ddce
--- /dev/null
+++ b/src/dawn/native/opengl/RenderPipelineGL.cpp
@@ -0,0 +1,345 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/RenderPipelineGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+    namespace {
+
+        GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return GL_POINTS;
+                case wgpu::PrimitiveTopology::LineList:
+                    return GL_LINES;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return GL_LINE_STRIP;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return GL_TRIANGLES;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return GL_TRIANGLE_STRIP;
+            }
+            UNREACHABLE();
+        }
+
+        void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
+                                      wgpu::FrontFace face,
+                                      wgpu::CullMode mode) {
+            // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
+            // which is different from WebGPU and other backends (Y axis is down).
+            GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
+            gl.FrontFace(direction);
+
+            if (mode == wgpu::CullMode::None) {
+                gl.Disable(GL_CULL_FACE);
+            } else {
+                gl.Enable(GL_CULL_FACE);
+
+                GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
+                gl.CullFace(cullMode);
+            }
+        }
+
+        GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return GL_ZERO;
+                case wgpu::BlendFactor::One:
+                    return GL_ONE;
+                case wgpu::BlendFactor::Src:
+                    return GL_SRC_COLOR;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return GL_ONE_MINUS_SRC_COLOR;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return GL_SRC_ALPHA;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return GL_ONE_MINUS_SRC_ALPHA;
+                case wgpu::BlendFactor::Dst:
+                    return GL_DST_COLOR;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return GL_ONE_MINUS_DST_COLOR;
+                case wgpu::BlendFactor::DstAlpha:
+                    return GL_DST_ALPHA;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return GL_ONE_MINUS_DST_ALPHA;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return GL_SRC_ALPHA_SATURATE;
+                case wgpu::BlendFactor::Constant:
+                    return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
+            }
+            UNREACHABLE();
+        }
+
+        GLenum GLBlendMode(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return GL_FUNC_ADD;
+                case wgpu::BlendOperation::Subtract:
+                    return GL_FUNC_SUBTRACT;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return GL_FUNC_REVERSE_SUBTRACT;
+                case wgpu::BlendOperation::Min:
+                    return GL_MIN;
+                case wgpu::BlendOperation::Max:
+                    return GL_MAX;
+            }
+            UNREACHABLE();
+        }
+
+        void ApplyColorState(const OpenGLFunctions& gl,
+                             ColorAttachmentIndex attachment,
+                             const ColorTargetState* state) {
+            GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
+            if (state->blend != nullptr) {
+                gl.Enablei(GL_BLEND, colorBuffer);
+                gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
+                                          GLBlendMode(state->blend->alpha.operation));
+                gl.BlendFuncSeparatei(colorBuffer,
+                                      GLBlendFactor(state->blend->color.srcFactor, false),
+                                      GLBlendFactor(state->blend->color.dstFactor, false),
+                                      GLBlendFactor(state->blend->alpha.srcFactor, true),
+                                      GLBlendFactor(state->blend->alpha.dstFactor, true));
+            } else {
+                gl.Disablei(GL_BLEND, colorBuffer);
+            }
+            gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
+                          state->writeMask & wgpu::ColorWriteMask::Green,
+                          state->writeMask & wgpu::ColorWriteMask::Blue,
+                          state->writeMask & wgpu::ColorWriteMask::Alpha);
+        }
+
+        void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
+            if (state->blend != nullptr) {
+                gl.Enable(GL_BLEND);
+                gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
+                                         GLBlendMode(state->blend->alpha.operation));
+                gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
+                                     GLBlendFactor(state->blend->color.dstFactor, false),
+                                     GLBlendFactor(state->blend->alpha.srcFactor, true),
+                                     GLBlendFactor(state->blend->alpha.dstFactor, true));
+            } else {
+                gl.Disable(GL_BLEND);
+            }
+            gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
+                         state->writeMask & wgpu::ColorWriteMask::Green,
+                         state->writeMask & wgpu::ColorWriteMask::Blue,
+                         state->writeMask & wgpu::ColorWriteMask::Alpha);
+        }
+
+        bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
+            return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
+                   lhs.dstFactor == rhs.dstFactor;
+        }
+
+        GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
+            switch (stencilOperation) {
+                case wgpu::StencilOperation::Keep:
+                    return GL_KEEP;
+                case wgpu::StencilOperation::Zero:
+                    return GL_ZERO;
+                case wgpu::StencilOperation::Replace:
+                    return GL_REPLACE;
+                case wgpu::StencilOperation::Invert:
+                    return GL_INVERT;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return GL_INCR;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return GL_DECR;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return GL_INCR_WRAP;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return GL_DECR_WRAP;
+            }
+            UNREACHABLE();
+        }
+
+        void ApplyDepthStencilState(const OpenGLFunctions& gl,
+                                    const DepthStencilState* descriptor,
+                                    PersistentPipelineState* persistentPipelineState) {
+            // Depth writes only occur if depth is enabled
+            if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+                !descriptor->depthWriteEnabled) {
+                gl.Disable(GL_DEPTH_TEST);
+            } else {
+                gl.Enable(GL_DEPTH_TEST);
+            }
+
+            if (descriptor->depthWriteEnabled) {
+                gl.DepthMask(GL_TRUE);
+            } else {
+                gl.DepthMask(GL_FALSE);
+            }
+
+            gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
+
+            if (StencilTestEnabled(descriptor)) {
+                gl.Enable(GL_STENCIL_TEST);
+            } else {
+                gl.Disable(GL_STENCIL_TEST);
+            }
+
+            GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
+            GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
+            persistentPipelineState->SetStencilFuncsAndMask(
+                gl, backCompareFunction, frontCompareFunction, descriptor->stencilReadMask);
+
+            gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
+                                 OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
+                                 OpenGLStencilOperation(descriptor->stencilBack.passOp));
+            gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
+                                 OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
+                                 OpenGLStencilOperation(descriptor->stencilFront.passOp));
+
+            gl.StencilMask(descriptor->stencilWriteMask);
+        }
+
+    }  // anonymous namespace
+
+    // static
+    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
+        : RenderPipelineBase(device, descriptor),
+          mVertexArrayObject(0),
+          mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
+    }
+
+    MaybeError RenderPipeline::Initialize() {
+        DAWN_TRY(
+            InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+        CreateVAOForVertexState();
+        return {};
+    }
+
+    RenderPipeline::~RenderPipeline() = default;
+
+    void RenderPipeline::DestroyImpl() {
+        RenderPipelineBase::DestroyImpl();
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        gl.DeleteVertexArrays(1, &mVertexArrayObject);
+        gl.BindVertexArray(0);
+        DeleteProgram(gl);
+    }
+
+    GLenum RenderPipeline::GetGLPrimitiveTopology() const {
+        return mGlPrimitiveTopology;
+    }
+
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
+    RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
+        ASSERT(!IsError());
+        return mAttributesUsingVertexBuffer[slot];
+    }
+
+    void RenderPipeline::CreateVAOForVertexState() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        gl.GenVertexArrays(1, &mVertexArrayObject);
+        gl.BindVertexArray(mVertexArrayObject);
+
+        for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
+            const auto& attribute = GetAttribute(location);
+            GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
+            gl.EnableVertexAttribArray(glAttrib);
+
+            mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
+            const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
+
+            if (vertexBuffer.arrayStride == 0) {
+                // Emulate a stride of zero (constant vertex attribute) by
+                // setting the attribute instance divisor to a huge number.
+                gl.VertexAttribDivisor(glAttrib, 0xffffffff);
+            } else {
+                switch (vertexBuffer.stepMode) {
+                    case wgpu::VertexStepMode::Vertex:
+                        break;
+                    case wgpu::VertexStepMode::Instance:
+                        gl.VertexAttribDivisor(glAttrib, 1);
+                        break;
+                }
+            }
+        }
+    }
+
+    void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        PipelineGL::ApplyNow(gl);
+
+        ASSERT(mVertexArrayObject);
+        gl.BindVertexArray(mVertexArrayObject);
+
+        ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
+
+        ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
+
+        gl.SampleMaski(0, GetSampleMask());
+        if (IsAlphaToCoverageEnabled()) {
+            gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+        } else {
+            gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+        }
+
+        if (IsDepthBiasEnabled()) {
+            gl.Enable(GL_POLYGON_OFFSET_FILL);
+            float depthBias = GetDepthBias();
+            float slopeScale = GetDepthBiasSlopeScale();
+            if (gl.PolygonOffsetClamp != nullptr) {
+                gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
+            } else {
+                gl.PolygonOffset(slopeScale, depthBias);
+            }
+        } else {
+            gl.Disable(GL_POLYGON_OFFSET_FILL);
+        }
+
+        if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
+            for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+                ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
+            }
+        } else {
+            const ColorTargetState* prevDescriptor = nullptr;
+            for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+                const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
+                if (!prevDescriptor) {
+                    ApplyColorState(gl, descriptor);
+                    prevDescriptor = descriptor;
+                } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
+                    // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
+                    // per color target. Add validation to prevent this as it is not.
+                    ASSERT(false);
+                } else if (descriptor->blend != nullptr) {
+                    if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
+                        !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
+                        descriptor->writeMask != prevDescriptor->writeMask) {
+                        // TODO(crbug.com/dawn/582)
+                        ASSERT(false);
+                    }
+                }
+            }
+        }
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/RenderPipelineGL.h b/src/dawn/native/opengl/RenderPipelineGL.h
new file mode 100644
index 0000000..1ee3f81
--- /dev/null
+++ b/src/dawn/native/opengl/RenderPipelineGL.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/native/opengl/PipelineGL.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+#include <vector>
+
+namespace dawn::native::opengl {
+
+    class Device;
+    class PersistentPipelineState;
+
+    class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
+      public:
+        static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+
+        GLenum GetGLPrimitiveTopology() const;
+        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
+            VertexBufferSlot slot) const;
+
+        void ApplyNow(PersistentPipelineState& persistentPipelineState);
+
+        MaybeError Initialize() override;
+
+      private:
+        RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+        ~RenderPipeline() override;
+        void DestroyImpl() override;
+
+        void CreateVAOForVertexState();
+
+        // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
+        GLuint mVertexArrayObject;
+        GLenum mGlPrimitiveTopology;
+
+        ityp::array<VertexBufferSlot,
+                    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
+                    kMaxVertexBuffers>
+            mAttributesUsingVertexBuffer;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
diff --git a/src/dawn/native/opengl/SamplerGL.cpp b/src/dawn/native/opengl/SamplerGL.cpp
new file mode 100644
index 0000000..7790530
--- /dev/null
+++ b/src/dawn/native/opengl/SamplerGL.cpp
@@ -0,0 +1,130 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/SamplerGL.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+    namespace {
+        GLenum MagFilterMode(wgpu::FilterMode filter) {
+            switch (filter) {
+                case wgpu::FilterMode::Nearest:
+                    return GL_NEAREST;
+                case wgpu::FilterMode::Linear:
+                    return GL_LINEAR;
+            }
+            UNREACHABLE();
+        }
+
+        GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
+            switch (minFilter) {
+                case wgpu::FilterMode::Nearest:
+                    switch (mipMapFilter) {
+                        case wgpu::FilterMode::Nearest:
+                            return GL_NEAREST_MIPMAP_NEAREST;
+                        case wgpu::FilterMode::Linear:
+                            return GL_NEAREST_MIPMAP_LINEAR;
+                    }
+                case wgpu::FilterMode::Linear:
+                    switch (mipMapFilter) {
+                        case wgpu::FilterMode::Nearest:
+                            return GL_LINEAR_MIPMAP_NEAREST;
+                        case wgpu::FilterMode::Linear:
+                            return GL_LINEAR_MIPMAP_LINEAR;
+                    }
+            }
+            UNREACHABLE();
+        }
+
+        GLenum WrapMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return GL_REPEAT;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return GL_MIRRORED_REPEAT;
+                case wgpu::AddressMode::ClampToEdge:
+                    return GL_CLAMP_TO_EDGE;
+            }
+            UNREACHABLE();
+        }
+
+    }  // namespace
+
+    Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+        : SamplerBase(device, descriptor) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        gl.GenSamplers(1, &mFilteringHandle);
+        SetupGLSampler(mFilteringHandle, descriptor, false);
+
+        gl.GenSamplers(1, &mNonFilteringHandle);
+        SetupGLSampler(mNonFilteringHandle, descriptor, true);
+    }
+
+    Sampler::~Sampler() = default;
+
+    void Sampler::DestroyImpl() {
+        SamplerBase::DestroyImpl();
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        gl.DeleteSamplers(1, &mFilteringHandle);
+        gl.DeleteSamplers(1, &mNonFilteringHandle);
+    }
+
+    void Sampler::SetupGLSampler(GLuint sampler,
+                                 const SamplerDescriptor* descriptor,
+                                 bool forceNearest) {
+        Device* device = ToBackend(GetDevice());
+        const OpenGLFunctions& gl = device->gl;
+
+        if (forceNearest) {
+            gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+            gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+        } else {
+            gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER,
+                                 MagFilterMode(descriptor->magFilter));
+            gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
+                                 MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
+        }
+        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
+        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
+        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
+
+        gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
+        gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
+
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
+            gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
+                                 ToOpenGLCompareFunction(descriptor->compare));
+        }
+
+        if (gl.IsAtLeastGL(4, 6) ||
+            gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
+            gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
+        }
+    }
+
+    GLuint Sampler::GetFilteringHandle() const {
+        return mFilteringHandle;
+    }
+
+    GLuint Sampler::GetNonFilteringHandle() const {
+        return mNonFilteringHandle;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/SamplerGL.h b/src/dawn/native/opengl/SamplerGL.h
new file mode 100644
index 0000000..5d07ecb
--- /dev/null
+++ b/src/dawn/native/opengl/SamplerGL.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SAMPLERGL_H_
+#define DAWNNATIVE_OPENGL_SAMPLERGL_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class Sampler final : public SamplerBase {
+      public:
+        Sampler(Device* device, const SamplerDescriptor* descriptor);
+
+        GLuint GetFilteringHandle() const;
+        GLuint GetNonFilteringHandle() const;
+
+      private:
+        ~Sampler() override;
+        void DestroyImpl() override;
+
+        void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
+
+        GLuint mFilteringHandle;
+
+        // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
+        // for everything, which is important to preserve texture completeness for u/int textures.
+        GLuint mNonFilteringHandle;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_SAMPLERGL_H_
diff --git a/src/dawn/native/opengl/ShaderModuleGL.cpp b/src/dawn/native/opengl/ShaderModuleGL.cpp
new file mode 100644
index 0000000..6bda26b
--- /dev/null
+++ b/src/dawn/native/opengl/ShaderModuleGL.cpp
@@ -0,0 +1,177 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/ShaderModuleGL.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native::opengl {
+
+    std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
+        std::ostringstream o;
+        o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
+          << static_cast<uint32_t>(bindingNumber);
+        return o.str();
+    }
+
+    bool operator<(const BindingLocation& a, const BindingLocation& b) {
+        return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
+    }
+
+    bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
+        return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) <
+               std::tie(b.useDummySampler, a.samplerLocation, b.textureLocation);
+    }
+
+    std::string CombinedSampler::GetName() const {
+        std::ostringstream o;
+        o << "dawn_combined";
+        if (useDummySampler) {
+            o << "_dummy_sampler";
+        } else {
+            o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
+              << static_cast<uint32_t>(samplerLocation.binding);
+        }
+        o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
+          << static_cast<uint32_t>(textureLocation.binding);
+        return o.str();
+    }
+
+    // static
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor) {
+    }
+
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        DAWN_TRY(InitializeBase(parseResult));
+
+        return {};
+    }
+
+    ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
+                                                             SingleShaderStage stage,
+                                                             CombinedSamplerInfo* combinedSamplers,
+                                                             const PipelineLayout* layout,
+                                                             bool* needsDummySampler) const {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+        tint::Program program;
+        DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+                                               nullptr, nullptr));
+        const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
+
+        tint::writer::glsl::Options tintOptions;
+        using Version = tint::writer::glsl::Version;
+        tintOptions.version =
+            Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
+                    version.GetMajor(), version.GetMinor());
+
+        using tint::transform::BindingPoint;
+        // When textures are accessed without a sampler (e.g., textureLoad()),
+        // GetSamplerTextureUses() will return this sentinel value.
+        BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
+
+        tint::inspector::Inspector inspector(&program);
+        // Find all the sampler/texture pairs for this entry point, and create
+        // CombinedSamplers for them. CombinedSampler records the binding points
+        // of the original texture and sampler, and generates a unique name. The
+        // corresponding uniforms will be retrieved by these generated names
+        // in PipelineGL. Any texture-only references will have
+        // "useDummySampler" set to true, and only the texture binding point
+        // will be used in naming them. In addition, Dawn will bind a
+        // non-filtering sampler for them (see PipelineGL).
+        auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
+        for (const auto& use : uses) {
+            combinedSamplers->emplace_back();
+
+            CombinedSampler* info = &combinedSamplers->back();
+            if (use.sampler_binding_point == placeholderBindingPoint) {
+                info->useDummySampler = true;
+                *needsDummySampler = true;
+            } else {
+                info->useDummySampler = false;
+            }
+            info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
+            info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
+            info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
+            info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
+            tintOptions.binding_map[use] = info->GetName();
+        }
+        if (*needsDummySampler) {
+            tintOptions.placeholder_binding_point = placeholderBindingPoint;
+        }
+
+        // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
+        // mapping from the original group/binding pair to a binding-only
+        // value. This mapping will be used by Tint to remap all global
+        // variables to the 1D space.
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase::BindingMap& bindingMap =
+                layout->GetBindGroupLayout(group)->GetBindingMap();
+            for (const auto& it : bindingMap) {
+                BindingNumber bindingNumber = it.first;
+                BindingIndex bindingIndex = it.second;
+                const BindingInfo& bindingInfo =
+                    layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+                if (!(bindingInfo.visibility & StageBit(stage))) {
+                    continue;
+                }
+
+                uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(bindingNumber)};
+                BindingPoint dstBindingPoint{0, shaderIndex};
+                tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
+            }
+            tintOptions.allow_collisions = true;
+        }
+        auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
+        DAWN_INVALID_IF(!result.success, "An error occured while generating GLSL: %s.",
+                        result.error);
+        std::string glsl = std::move(result.glsl);
+
+        if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
+
+            GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+
+        return glsl;
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/ShaderModuleGL.h b/src/dawn/native/opengl/ShaderModuleGL.h
new file mode 100644
index 0000000..44cd0d4
--- /dev/null
+++ b/src/dawn/native/opengl/ShaderModuleGL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
+#define DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+    class PipelineLayout;
+
+    std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
+
+    struct BindingLocation {
+        BindGroupIndex group;
+        BindingNumber binding;
+    };
+    bool operator<(const BindingLocation& a, const BindingLocation& b);
+
+    struct CombinedSampler {
+        BindingLocation samplerLocation;
+        BindingLocation textureLocation;
+        // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
+        // one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused.
+        bool useDummySampler;
+        std::string GetName() const;
+    };
+    bool operator<(const CombinedSampler& a, const CombinedSampler& b);
+
+    using CombinedSamplerInfo = std::vector<CombinedSampler>;
+
+    using BindingInfoArrayTable =
+        std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
+
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
+                                                   SingleShaderStage stage,
+                                                   CombinedSamplerInfo* combinedSamplers,
+                                                   const PipelineLayout* layout,
+                                                   bool* needsDummySampler) const;
+
+      private:
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override = default;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
diff --git a/src/dawn/native/opengl/SwapChainGL.cpp b/src/dawn/native/opengl/SwapChainGL.cpp
new file mode 100644
index 0000000..e59bb9f
--- /dev/null
+++ b/src/dawn/native/opengl/SwapChainGL.cpp
@@ -0,0 +1,51 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/SwapChainGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/TextureGL.h"
+
+#include <dawn/dawn_wsi.h>
+
+namespace dawn::native::opengl {
+
+    SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        im.Init(im.userData, nullptr);
+    }
+
+    SwapChain::~SwapChain() {
+    }
+
+    TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+        if (error) {
+            GetDevice()->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+        GLuint nativeTexture = next.texture.u32;
+        return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
+                           TextureBase::TextureState::OwnedExternal);
+    }
+
+    MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
+        return {};
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/SwapChainGL.h b/src/dawn/native/opengl/SwapChainGL.h
new file mode 100644
index 0000000..2c6c91a
--- /dev/null
+++ b/src/dawn/native/opengl/SwapChainGL.h
@@ -0,0 +1,38 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
+#define DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+
+    class SwapChain final : public OldSwapChainBase {
+      public:
+        SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        ~SwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* view) override;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
diff --git a/src/dawn/native/opengl/TextureGL.cpp b/src/dawn/native/opengl/TextureGL.cpp
new file mode 100644
index 0000000..fc4431f
--- /dev/null
+++ b/src/dawn/native/opengl/TextureGL.cpp
@@ -0,0 +1,616 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/TextureGL.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+    namespace {
+
+        GLenum TargetForTexture(const TextureDescriptor* descriptor) {
+            switch (descriptor->dimension) {
+                case wgpu::TextureDimension::e2D:
+                    if (descriptor->size.depthOrArrayLayers > 1) {
+                        ASSERT(descriptor->sampleCount == 1);
+                        return GL_TEXTURE_2D_ARRAY;
+                    } else {
+                        if (descriptor->sampleCount > 1) {
+                            return GL_TEXTURE_2D_MULTISAMPLE;
+                        } else {
+                            return GL_TEXTURE_2D;
+                        }
+                    }
+                case wgpu::TextureDimension::e3D:
+                    ASSERT(descriptor->sampleCount == 1);
+                    return GL_TEXTURE_3D;
+
+                case wgpu::TextureDimension::e1D:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
+                                             uint32_t arrayLayerCount,
+                                             uint32_t sampleCount) {
+            switch (dimension) {
+                case wgpu::TextureViewDimension::e2D:
+                    return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+                case wgpu::TextureViewDimension::e2DArray:
+                    if (sampleCount > 1) {
+                        ASSERT(arrayLayerCount == 1);
+                        return GL_TEXTURE_2D_MULTISAMPLE;
+                    }
+                    ASSERT(sampleCount == 1);
+                    return GL_TEXTURE_2D_ARRAY;
+                case wgpu::TextureViewDimension::Cube:
+                    ASSERT(sampleCount == 1);
+                    ASSERT(arrayLayerCount == 6);
+                    return GL_TEXTURE_CUBE_MAP;
+                case wgpu::TextureViewDimension::CubeArray:
+                    ASSERT(sampleCount == 1);
+                    ASSERT(arrayLayerCount % 6 == 0);
+                    return GL_TEXTURE_CUBE_MAP_ARRAY;
+                case wgpu::TextureViewDimension::e3D:
+                    return GL_TEXTURE_3D;
+
+                case wgpu::TextureViewDimension::e1D:
+                case wgpu::TextureViewDimension::Undefined:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        GLuint GenTexture(const OpenGLFunctions& gl) {
+            GLuint handle = 0;
+            gl.GenTextures(1, &handle);
+            return handle;
+        }
+
+        bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+            constexpr wgpu::TextureUsage kUsageNeedingTextureView =
+                wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+            return usage & kUsageNeedingTextureView;
+        }
+
+        bool RequiresCreatingNewTextureView(const TextureBase* texture,
+                                            const TextureViewDescriptor* textureViewDescriptor) {
+            if (texture->GetFormat().format != textureViewDescriptor->format &&
+                !texture->GetFormat().HasDepthOrStencil()) {
+                // Color format reinterpretation required. Note: Depth/stencil formats don't support
+                // reinterpretation.
+                return true;
+            }
+
+            if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+                (texture->GetArrayLayers() == 1 &&
+                 texture->GetDimension() == wgpu::TextureDimension::e2D &&
+                 textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+                // If the view has a different number of array layers, we need a new view.
+                // And, if the original texture is a 2D texture with one array layer, we need a new
+                // view to view it as a 2D array texture.
+                return true;
+            }
+
+            if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+                return true;
+            }
+
+            if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
+                (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
+                textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+                // We need a separate view for one of the depth or stencil planes
+                // because each glTextureView needs it's own handle to set
+                // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
+                // extra handle since it is likely sampled less often.
+                return true;
+            }
+
+            switch (textureViewDescriptor->dimension) {
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    return true;
+                default:
+                    break;
+            }
+
+            return false;
+        }
+
+        void AllocateTexture(const OpenGLFunctions& gl,
+                             GLenum target,
+                             GLsizei samples,
+                             GLuint levels,
+                             GLenum internalFormat,
+                             const Extent3D& size) {
+            // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to
+            // be GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
+            // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
+            switch (target) {
+                case GL_TEXTURE_2D_ARRAY:
+                case GL_TEXTURE_3D:
+                    gl.TexStorage3D(target, levels, internalFormat, size.width, size.height,
+                                    size.depthOrArrayLayers);
+                    break;
+                case GL_TEXTURE_2D:
+                case GL_TEXTURE_CUBE_MAP:
+                    gl.TexStorage2D(target, levels, internalFormat, size.width, size.height);
+                    break;
+                case GL_TEXTURE_2D_MULTISAMPLE:
+                    gl.TexStorage2DMultisample(target, samples, internalFormat, size.width,
+                                               size.height, true);
+                    break;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+    }  // namespace
+
+    // Texture
+
+    Texture::Texture(Device* device, const TextureDescriptor* descriptor)
+        : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        uint32_t levels = GetNumMipLevels();
+
+        const GLFormat& glFormat = GetGLFormat();
+
+        gl.BindTexture(mTarget, mHandle);
+
+        AllocateTexture(gl, mTarget, GetSampleCount(), levels, glFormat.internalFormat, GetSize());
+
+        // The texture is not complete if it uses mipmapping and not all levels up to
+        // MAX_LEVEL have been defined.
+        gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
+
+        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            GetDevice()->ConsumedError(
+                ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
+        }
+    }
+
+    Texture::Texture(Device* device,
+                     const TextureDescriptor* descriptor,
+                     GLuint handle,
+                     TextureState state)
+        : TextureBase(device, descriptor, state), mHandle(handle) {
+        mTarget = TargetForTexture(descriptor);
+    }
+
+    Texture::~Texture() {
+    }
+
+    void Texture::DestroyImpl() {
+        TextureBase::DestroyImpl();
+        if (GetTextureState() == TextureState::OwnedInternal) {
+            ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+            mHandle = 0;
+        }
+    }
+
+    GLuint Texture::GetHandle() const {
+        return mHandle;
+    }
+
+    GLenum Texture::GetGLTarget() const {
+        return mTarget;
+    }
+
+    const GLFormat& Texture::GetGLFormat() const {
+        return ToBackend(GetDevice())->GetGLFormat(GetFormat());
+    }
+
+    MaybeError Texture::ClearTexture(const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
+        if (GetFormat().isCompressed) {
+            return {};
+        }
+
+        Device* device = ToBackend(GetDevice());
+        const OpenGLFunctions& gl = device->gl;
+
+        uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+        float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
+        if (GetFormat().isRenderable) {
+            if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
+                GLfloat depth = fClearColor;
+                GLint stencil = clearColor;
+                if (range.aspects & Aspect::Depth) {
+                    gl.DepthMask(GL_TRUE);
+                }
+                if (range.aspects & Aspect::Stencil) {
+                    gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
+                }
+
+                auto DoClear = [&](Aspect aspects) {
+                    if (aspects == (Aspect::Depth | Aspect::Stencil)) {
+                        gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+                    } else if (aspects == Aspect::Depth) {
+                        gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+                    } else if (aspects == Aspect::Stencil) {
+                        gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+                    } else {
+                        UNREACHABLE();
+                    }
+                };
+
+                GLuint framebuffer = 0;
+                gl.GenFramebuffers(1, &framebuffer);
+                gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+                gl.Disable(GL_SCISSOR_TEST);
+
+                GLenum attachment;
+                if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
+                    attachment = GL_DEPTH_STENCIL_ATTACHMENT;
+                } else if (range.aspects == Aspect::Depth) {
+                    attachment = GL_DEPTH_ATTACHMENT;
+                } else if (range.aspects == Aspect::Stencil) {
+                    attachment = GL_STENCIL_ATTACHMENT;
+                } else {
+                    UNREACHABLE();
+                }
+
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    switch (GetDimension()) {
+                        case wgpu::TextureDimension::e2D:
+                            if (GetArrayLayers() == 1) {
+                                Aspect aspectsToClear = Aspect::None;
+                                for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                                    if (clearValue == TextureBase::ClearValue::Zero &&
+                                        IsSubresourceContentInitialized(
+                                            SubresourceRange::SingleMipAndLayer(level, 0,
+                                                                                aspect))) {
+                                        // Skip lazy clears if already initialized.
+                                        continue;
+                                    }
+                                    aspectsToClear |= aspect;
+                                }
+
+                                if (aspectsToClear == Aspect::None) {
+                                    continue;
+                                }
+
+                                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+                                                        GetGLTarget(), GetHandle(),
+                                                        static_cast<GLint>(level));
+                                DoClear(aspectsToClear);
+                            } else {
+                                for (uint32_t layer = range.baseArrayLayer;
+                                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                                    Aspect aspectsToClear = Aspect::None;
+                                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                                        if (clearValue == TextureBase::ClearValue::Zero &&
+                                            IsSubresourceContentInitialized(
+                                                SubresourceRange::SingleMipAndLayer(level, layer,
+                                                                                    aspect))) {
+                                            // Skip lazy clears if already initialized.
+                                            continue;
+                                        }
+                                        aspectsToClear |= aspect;
+                                    }
+
+                                    if (aspectsToClear == Aspect::None) {
+                                        continue;
+                                    }
+
+                                    gl.FramebufferTextureLayer(
+                                        GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+                                        static_cast<GLint>(level), static_cast<GLint>(layer));
+                                    DoClear(aspectsToClear);
+                                }
+                            }
+                            break;
+
+                        case wgpu::TextureDimension::e1D:
+                        case wgpu::TextureDimension::e3D:
+                            UNREACHABLE();
+                    }
+                }
+
+                gl.Enable(GL_SCISSOR_TEST);
+                gl.DeleteFramebuffers(1, &framebuffer);
+            } else {
+                ASSERT(range.aspects == Aspect::Color);
+
+                // For gl.ClearBufferiv/uiv calls
+                constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
+                constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
+                std::array<GLuint, 4> clearColorData;
+                clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
+
+                // For gl.ClearBufferfv calls
+                constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
+                constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
+                std::array<GLfloat, 4> fClearColorData;
+                fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
+
+                static constexpr uint32_t MAX_TEXEL_SIZE = 16;
+                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+                ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
+
+                // For gl.ClearTexSubImage calls
+                constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+                constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
+                    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+                wgpu::TextureComponentType baseType =
+                    GetFormat().GetAspectInfo(Aspect::Color).baseType;
+
+                const GLFormat& glFormat = GetGLFormat();
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    Extent3D mipSize = GetMipLevelPhysicalSize(level);
+                    for (uint32_t layer = range.baseArrayLayer;
+                         layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+                        if (gl.IsAtLeastGL(4, 4)) {
+                            gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
+                                                static_cast<GLint>(layer), mipSize.width,
+                                                mipSize.height, mipSize.depthOrArrayLayers,
+                                                glFormat.format, glFormat.type,
+                                                clearValue == TextureBase::ClearValue::Zero
+                                                    ? kClearColorDataBytes0.data()
+                                                    : kClearColorDataBytes255.data());
+                            continue;
+                        }
+
+                        GLuint framebuffer = 0;
+                        gl.GenFramebuffers(1, &framebuffer);
+                        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+
+                        GLenum attachment = GL_COLOR_ATTACHMENT0;
+                        gl.DrawBuffers(1, &attachment);
+
+                        gl.Disable(GL_SCISSOR_TEST);
+                        gl.ColorMask(true, true, true, true);
+
+                        auto DoClear = [&]() {
+                            switch (baseType) {
+                                case wgpu::TextureComponentType::Float: {
+                                    gl.ClearBufferfv(GL_COLOR, 0,
+                                                     clearValue == TextureBase::ClearValue::Zero
+                                                         ? kClearColorDataFloat0.data()
+                                                         : kClearColorDataFloat1.data());
+                                    break;
+                                }
+                                case wgpu::TextureComponentType::Uint: {
+                                    gl.ClearBufferuiv(GL_COLOR, 0,
+                                                      clearValue == TextureBase::ClearValue::Zero
+                                                          ? kClearColorDataUint0.data()
+                                                          : kClearColorDataUint1.data());
+                                    break;
+                                }
+                                case wgpu::TextureComponentType::Sint: {
+                                    gl.ClearBufferiv(GL_COLOR, 0,
+                                                     reinterpret_cast<const GLint*>(
+                                                         clearValue == TextureBase::ClearValue::Zero
+                                                             ? kClearColorDataUint0.data()
+                                                             : kClearColorDataUint1.data()));
+                                    break;
+                                }
+
+                                case wgpu::TextureComponentType::DepthComparison:
+                                    UNREACHABLE();
+                            }
+                        };
+
+                        if (GetArrayLayers() == 1) {
+                            switch (GetDimension()) {
+                                case wgpu::TextureDimension::e1D:
+                                    UNREACHABLE();
+                                case wgpu::TextureDimension::e2D:
+                                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+                                                            GetGLTarget(), GetHandle(), level);
+                                    DoClear();
+                                    break;
+                                case wgpu::TextureDimension::e3D:
+                                    uint32_t depth =
+                                        GetMipLevelVirtualSize(level).depthOrArrayLayers;
+                                    for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
+                                        gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+                                                                   GetHandle(), level, z);
+                                        DoClear();
+                                    }
+                                    break;
+                            }
+
+                        } else {
+                            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+                            gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+                                                       level, layer);
+                            DoClear();
+                        }
+
+                        gl.Enable(GL_SCISSOR_TEST);
+                        gl.DeleteFramebuffers(1, &framebuffer);
+                        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+                    }
+                }
+            }
+        } else {
+            ASSERT(range.aspects == Aspect::Color);
+
+            // create temp buffer with clear color to copy to the texture image
+            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+            ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
+
+            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+            uint32_t bytesPerRow =
+                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
+
+            // Make sure that we are not rounding
+            ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+            ASSERT(largestMipSize.height % blockInfo.height == 0);
+
+            uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
+                                    (largestMipSize.height / blockInfo.height) *
+                                    largestMipSize.depthOrArrayLayers;
+            if (bufferSize64 > std::numeric_limits<size_t>::max()) {
+                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+            }
+            size_t bufferSize = static_cast<size_t>(bufferSize64);
+
+            dawn::native::BufferDescriptor descriptor = {};
+            descriptor.mappedAtCreation = true;
+            descriptor.usage = wgpu::BufferUsage::CopySrc;
+            descriptor.size = bufferSize;
+
+            // We don't count the lazy clear of srcBuffer because it is an internal buffer.
+            // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
+            Ref<Buffer> srcBuffer;
+            DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
+
+            // Fill the buffer with clear color
+            memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
+            srcBuffer->Unmap();
+
+            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                TextureCopy textureCopy;
+                textureCopy.texture = this;
+                textureCopy.mipLevel = level;
+                textureCopy.origin = {};
+                textureCopy.aspect = Aspect::Color;
+
+                TextureDataLayout dataLayout;
+                dataLayout.offset = 0;
+                dataLayout.bytesPerRow = bytesPerRow;
+                dataLayout.rowsPerImage = largestMipSize.height;
+
+                Extent3D mipSize = GetMipLevelPhysicalSize(level);
+
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    textureCopy.origin.z = layer;
+                    DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
+                }
+            }
+            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+        }
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            device->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (!IsSubresourceContentInitialized(range)) {
+            GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+        }
+    }
+
+    // TextureView
+
+    TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+        : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
+        mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
+                                                texture->GetSampleCount());
+
+        // Texture could be destroyed by the time we make a view.
+        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+            return;
+        }
+
+        if (!UsageNeedsTextureView(texture->GetUsage())) {
+            mHandle = 0;
+        } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+            mHandle = ToBackend(texture)->GetHandle();
+        } else {
+            // glTextureView() is supported on OpenGL version >= 4.3
+            // TODO(crbug.com/dawn/593): support texture view on OpenGL version <= 4.2 and ES
+            const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+            mHandle = GenTexture(gl);
+            const Texture* textureGL = ToBackend(texture);
+
+            const Format& textureFormat = GetTexture()->GetFormat();
+            // Depth/stencil don't support reinterpretation, and the aspect is specified at
+            // bind time. In that case, we use the base texture format.
+            const GLFormat& glFormat = textureFormat.HasDepthOrStencil()
+                                           ? ToBackend(GetDevice())->GetGLFormat(textureFormat)
+                                           : ToBackend(GetDevice())->GetGLFormat(GetFormat());
+
+            gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), glFormat.internalFormat,
+                           descriptor->baseMipLevel, descriptor->mipLevelCount,
+                           descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+            mOwnsHandle = true;
+        }
+    }
+
+    TextureView::~TextureView() {
+    }
+
+    void TextureView::DestroyImpl() {
+        TextureViewBase::DestroyImpl();
+        if (mOwnsHandle) {
+            ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+        }
+    }
+
+    GLuint TextureView::GetHandle() const {
+        ASSERT(mHandle != 0);
+        return mHandle;
+    }
+
+    GLenum TextureView::GetGLTarget() const {
+        return mTarget;
+    }
+
+    void TextureView::BindToFramebuffer(GLenum target, GLenum attachment) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        // Use the texture's handle and target, and the view's base mip level and base array layer
+        GLuint handle = ToBackend(GetTexture())->GetHandle();
+        GLuint textarget = ToBackend(GetTexture())->GetGLTarget();
+        GLuint mipLevel = GetBaseMipLevel();
+
+        if (textarget == GL_TEXTURE_2D_ARRAY || textarget == GL_TEXTURE_3D) {
+            gl.FramebufferTextureLayer(target, attachment, handle, mipLevel, GetBaseArrayLayer());
+        } else {
+            gl.FramebufferTexture2D(target, attachment, textarget, handle, mipLevel);
+        }
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/TextureGL.h b/src/dawn/native/opengl/TextureGL.h
new file mode 100644
index 0000000..897022c
--- /dev/null
+++ b/src/dawn/native/opengl/TextureGL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_TEXTUREGL_H_
+#define DAWNNATIVE_OPENGL_TEXTUREGL_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+    class Device;
+    struct GLFormat;
+
+    class Texture final : public TextureBase {
+      public:
+        Texture(Device* device, const TextureDescriptor* descriptor);
+        Texture(Device* device,
+                const TextureDescriptor* descriptor,
+                GLuint handle,
+                TextureState state);
+
+        GLuint GetHandle() const;
+        GLenum GetGLTarget() const;
+        const GLFormat& GetGLFormat() const;
+
+        void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+
+      private:
+        ~Texture() override;
+
+        void DestroyImpl() override;
+        MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
+
+        GLuint mHandle;
+        GLenum mTarget;
+    };
+
+    class TextureView final : public TextureViewBase {
+      public:
+        TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+        GLuint GetHandle() const;
+        GLenum GetGLTarget() const;
+        void BindToFramebuffer(GLenum target, GLenum attachment);
+
+      private:
+        ~TextureView() override;
+        void DestroyImpl() override;
+
+        GLuint mHandle;
+        GLenum mTarget;
+        bool mOwnsHandle;
+    };
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_TEXTUREGL_H_
diff --git a/src/dawn/native/opengl/UtilsGL.cpp b/src/dawn/native/opengl/UtilsGL.cpp
new file mode 100644
index 0000000..746f93b
--- /dev/null
+++ b/src/dawn/native/opengl/UtilsGL.cpp
@@ -0,0 +1,153 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/UtilsGL.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+namespace dawn::native::opengl {
+
+    GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
+        switch (compareFunction) {
+            case wgpu::CompareFunction::Never:
+                return GL_NEVER;
+            case wgpu::CompareFunction::Less:
+                return GL_LESS;
+            case wgpu::CompareFunction::LessEqual:
+                return GL_LEQUAL;
+            case wgpu::CompareFunction::Greater:
+                return GL_GREATER;
+            case wgpu::CompareFunction::GreaterEqual:
+                return GL_GEQUAL;
+            case wgpu::CompareFunction::NotEqual:
+                return GL_NOTEQUAL;
+            case wgpu::CompareFunction::Equal:
+                return GL_EQUAL;
+            case wgpu::CompareFunction::Always:
+                return GL_ALWAYS;
+
+            case wgpu::CompareFunction::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
+        switch (depthStencilFormat) {
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+            case wgpu::TextureFormat::Stencil8:
+                return 0xFF;
+
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    void CopyImageSubData(const OpenGLFunctions& gl,
+                          Aspect srcAspects,
+                          GLuint srcHandle,
+                          GLenum srcTarget,
+                          GLint srcLevel,
+                          const Origin3D& src,
+                          GLuint dstHandle,
+                          GLenum dstTarget,
+                          GLint dstLevel,
+                          const Origin3D& dst,
+                          const Extent3D& size) {
+        if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
+            gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
+                                dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
+                                size.depthOrArrayLayers);
+            return;
+        }
+
+        GLint prevReadFBO = 0, prevDrawFBO = 0;
+        gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
+        gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);
+
+        // Generate temporary framebuffers for the blits.
+        GLuint readFBO = 0, drawFBO = 0;
+        gl.GenFramebuffers(1, &readFBO);
+        gl.GenFramebuffers(1, &drawFBO);
+        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
+
+        // Reset state that may affect glBlitFramebuffer().
+        gl.Disable(GL_SCISSOR_TEST);
+        GLenum blitMask = 0;
+        if (srcAspects & Aspect::Color) {
+            blitMask |= GL_COLOR_BUFFER_BIT;
+        }
+        if (srcAspects & Aspect::Depth) {
+            blitMask |= GL_DEPTH_BUFFER_BIT;
+        }
+        if (srcAspects & Aspect::Stencil) {
+            blitMask |= GL_STENCIL_BUFFER_BIT;
+        }
+
+        // Iterate over all layers, doing a single blit for each.
+        for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
+            // Set attachments for all aspects.
+            for (Aspect aspect : IterateEnumMask(srcAspects)) {
+                GLenum glAttachment;
+                switch (aspect) {
+                    case Aspect::Color:
+                        glAttachment = GL_COLOR_ATTACHMENT0;
+                        break;
+                    case Aspect::Depth:
+                        glAttachment = GL_DEPTH_ATTACHMENT;
+                        break;
+                    case Aspect::Stencil:
+                        glAttachment = GL_STENCIL_ATTACHMENT;
+                        break;
+                    case Aspect::CombinedDepthStencil:
+                    case Aspect::None:
+                    case Aspect::Plane0:
+                    case Aspect::Plane1:
+                        UNREACHABLE();
+                }
+                if (srcTarget == GL_TEXTURE_2D) {
+                    gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
+                                            srcLevel);
+                } else {
+                    gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle,
+                                               srcLevel, src.z + layer);
+                }
+                if (dstTarget == GL_TEXTURE_2D) {
+                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
+                                            dstLevel);
+                } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
+                    GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
+                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
+                                            dstLevel);
+                } else {
+                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle,
+                                               dstLevel, dst.z + layer);
+                }
+            }
+            gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
+                               dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
+        }
+        gl.Enable(GL_SCISSOR_TEST);
+        gl.DeleteFramebuffers(1, &readFBO);
+        gl.DeleteFramebuffers(1, &drawFBO);
+        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
+        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
+    }
+
+}  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/UtilsGL.h b/src/dawn/native/opengl/UtilsGL.h
new file mode 100644
index 0000000..0abaff7
--- /dev/null
+++ b/src/dawn/native/opengl/UtilsGL.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_UTILSGL_H_
+#define DAWNNATIVE_OPENGL_UTILSGL_H_
+
+#include "dawn/native/Format.h"
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+    struct OpenGLFunctions;
+
+    GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
+    GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
+    void CopyImageSubData(const OpenGLFunctions& gl,
+                          Aspect srcAspects,
+                          GLuint srcHandle,
+                          GLenum srcTarget,
+                          GLint srcLevel,
+                          const Origin3D& src,
+                          GLuint dstHandle,
+                          GLenum dstTarget,
+                          GLint dstLevel,
+                          const Origin3D& dst,
+                          const Extent3D& size);
+
+}  // namespace dawn::native::opengl
+
+#endif  // DAWNNATIVE_OPENGL_UTILSGL_H_
diff --git a/src/dawn/native/opengl/opengl_platform.h b/src/dawn/native/opengl/opengl_platform.h
new file mode 100644
index 0000000..04d9126
--- /dev/null
+++ b/src/dawn/native/opengl/opengl_platform.h
@@ -0,0 +1,15 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/opengl_platform_autogen.h"
diff --git a/src/dawn/native/opengl/supported_extensions.json b/src/dawn/native/opengl/supported_extensions.json
new file mode 100644
index 0000000..8e00633
--- /dev/null
+++ b/src/dawn/native/opengl/supported_extensions.json
@@ -0,0 +1,23 @@
+{
+    "_comment": [
+        "Copyright 2019 The Dawn Authors",
+        "",
+        "Licensed under the Apache License, Version 2.0 (the \"License\");",
+        "you may not use this file except in compliance with the License.",
+        "You may obtain a copy of the License at",
+        "",
+        "    http://www.apache.org/licenses/LICENSE-2.0",
+        "",
+        "Unless required by applicable law or agreed to in writing, software",
+        "distributed under the License is distributed on an \"AS IS\" BASIS,",
+        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+        "See the License for the specific language governing permissions and",
+        "limitations under the License."
+    ],
+
+    "supported_extensions": [
+        "GL_EXT_texture_compression_s3tc",
+        "GL_EXT_texture_compression_s3tc_srgb",
+        "GL_OES_EGL_image"
+    ]
+}
diff --git a/src/dawn/native/utils/WGPUHelpers.cpp b/src/dawn/native/utils/WGPUHelpers.cpp
new file mode 100644
index 0000000..a7ab910
--- /dev/null
+++ b/src/dawn/native/utils/WGPUHelpers.cpp
@@ -0,0 +1,192 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/ShaderModule.h"
+
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <mutex>
+#include <sstream>
+
+namespace dawn::native::utils {
+
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device,
+                                                            const char* source) {
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = source;
+        ShaderModuleDescriptor descriptor;
+        descriptor.nextInChain = &wgslDesc;
+        return device->CreateShaderModule(&descriptor);
+    }
+
+    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                        wgpu::BufferUsage usage,
+                                                        const void* data,
+                                                        uint64_t size) {
+        BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+        descriptor.mappedAtCreation = true;
+        Ref<BufferBase> buffer;
+        DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
+        memcpy(buffer->GetMappedRange(0, size), data, size);
+        buffer->Unmap();
+        return buffer;
+    }
+
+    ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+        DeviceBase* device,
+        const Ref<BindGroupLayoutBase>& bindGroupLayout) {
+        PipelineLayoutDescriptor descriptor;
+        descriptor.bindGroupLayoutCount = 1;
+        BindGroupLayoutBase* bgl = bindGroupLayout.Get();
+        descriptor.bindGroupLayouts = &bgl;
+        return device->CreatePipelineLayout(&descriptor);
+    }
+
+    ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+        DeviceBase* device,
+        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+        bool allowInternalBinding) {
+        std::vector<BindGroupLayoutEntry> entries;
+        for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+            entries.push_back(entry);
+        }
+
+        BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = static_cast<uint32_t>(entries.size());
+        descriptor.entries = entries.data();
+        return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::BufferBindingType bufferType,
+        bool bufferHasDynamicOffset,
+        uint64_t bufferMinBindingSize) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        buffer.type = bufferType;
+        buffer.hasDynamicOffset = bufferHasDynamicOffset;
+        buffer.minBindingSize = bufferMinBindingSize;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::SamplerBindingType samplerType) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        sampler.type = samplerType;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::TextureSampleType textureSampleType,
+        wgpu::TextureViewDimension textureViewDimension,
+        bool textureMultisampled) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        texture.sampleType = textureSampleType;
+        texture.viewDimension = textureViewDimension;
+        texture.multisampled = textureMultisampled;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::StorageTextureAccess storageTextureAccess,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension textureViewDimension) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        storageTexture.access = storageTextureAccess;
+        storageTexture.format = format;
+        storageTexture.viewDimension = textureViewDimension;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        const BindGroupLayoutEntry& entry)
+        : BindGroupLayoutEntry(entry) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                             const Ref<SamplerBase>& sampler)
+        : binding(binding), sampler(sampler) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(
+        uint32_t binding,
+        const Ref<TextureViewBase>& textureView)
+        : binding(binding), textureView(textureView) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                             const Ref<BufferBase>& buffer,
+                                                             uint64_t offset,
+                                                             uint64_t size)
+        : binding(binding), buffer(buffer), offset(offset), size(size) {
+    }
+
+    BindingInitializationHelper::~BindingInitializationHelper() = default;
+
+    BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+        BindGroupEntry result;
+
+        result.binding = binding;
+        result.sampler = sampler.Get();
+        result.textureView = textureView.Get();
+        result.buffer = buffer.Get();
+        result.offset = offset;
+        result.size = size;
+
+        return result;
+    }
+
+    ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+        DeviceBase* device,
+        const Ref<BindGroupLayoutBase>& layout,
+        std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+        std::vector<BindGroupEntry> entries;
+        for (const BindingInitializationHelper& helper : entriesInitializer) {
+            entries.push_back(helper.GetAsBinding());
+        }
+
+        BindGroupDescriptor descriptor;
+        descriptor.layout = layout.Get();
+        descriptor.entryCount = entries.size();
+        descriptor.entries = entries.data();
+
+        return device->CreateBindGroup(&descriptor);
+    }
+
+    const char* GetLabelForTrace(const char* label) {
+        return (label == nullptr || strlen(label) == 0) ? "None" : label;
+    }
+
+}  // namespace dawn::native::utils
diff --git a/src/dawn/native/utils/WGPUHelpers.h b/src/dawn/native/utils/WGPUHelpers.h
new file mode 100644
index 0000000..6e1fad2
--- /dev/null
+++ b/src/dawn/native/utils/WGPUHelpers.h
@@ -0,0 +1,123 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_UTILS_WGPUHELPERS_H_
+#define DAWNNATIVE_UTILS_WGPUHELPERS_H_
+
+#include <dawn/native/dawn_platform.h>
+
+#include <array>
+#include <initializer_list>
+#include <vector>
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::utils {
+
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
+
+    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                        wgpu::BufferUsage usage,
+                                                        const void* data,
+                                                        uint64_t size);
+
+    template <typename T>
+    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                        wgpu::BufferUsage usage,
+                                                        std::initializer_list<T> data) {
+        return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
+    }
+
+    ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+        DeviceBase* device,
+        const Ref<BindGroupLayoutBase>& bindGroupLayout);
+
+    // Helpers to make creating bind group layouts look nicer:
+    //
+    //   utils::MakeBindGroupLayout(device, {
+    //       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+    //       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+    //       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+    //   });
+
+    struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
+        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                               wgpu::ShaderStage entryVisibility,
+                                               wgpu::BufferBindingType bufferType,
+                                               bool bufferHasDynamicOffset = false,
+                                               uint64_t bufferMinBindingSize = 0);
+        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                               wgpu::ShaderStage entryVisibility,
+                                               wgpu::SamplerBindingType samplerType);
+        BindingLayoutEntryInitializationHelper(
+            uint32_t entryBinding,
+            wgpu::ShaderStage entryVisibility,
+            wgpu::TextureSampleType textureSampleType,
+            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+            bool textureMultisampled = false);
+        BindingLayoutEntryInitializationHelper(
+            uint32_t entryBinding,
+            wgpu::ShaderStage entryVisibility,
+            wgpu::StorageTextureAccess storageTextureAccess,
+            wgpu::TextureFormat format,
+            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+
+        BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
+    };
+
+    ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+        DeviceBase* device,
+        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+        bool allowInternalBinding = false);
+
+    // Helpers to make creating bind groups look nicer:
+    //
+    //   utils::MakeBindGroup(device, layout, {
+    //       {0, mySampler},
+    //       {1, myBuffer, offset, size},
+    //       {3, myTextureView}
+    //   });
+
+    // Structure with one constructor per-type of bindings, so that the initializer_list accepts
+    // bindings with the right type and no extra information.
+    struct BindingInitializationHelper {
+        BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
+        BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
+        BindingInitializationHelper(uint32_t binding,
+                                    const Ref<BufferBase>& buffer,
+                                    uint64_t offset = 0,
+                                    uint64_t size = wgpu::kWholeSize);
+        ~BindingInitializationHelper();
+
+        BindGroupEntry GetAsBinding() const;
+
+        uint32_t binding;
+        Ref<SamplerBase> sampler;
+        Ref<TextureViewBase> textureView;
+        Ref<BufferBase> buffer;
+        uint64_t offset = 0;
+        uint64_t size = 0;
+    };
+
+    ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+        DeviceBase* device,
+        const Ref<BindGroupLayoutBase>& layout,
+        std::initializer_list<BindingInitializationHelper> entriesInitializer);
+
+    const char* GetLabelForTrace(const char* label);
+
+}  // namespace dawn::native::utils
+
+#endif  // DAWNNATIVE_UTILS_WGPUHELPERS_H_
diff --git a/src/dawn/native/vulkan/AdapterVk.cpp b/src/dawn/native/vulkan/AdapterVk.cpp
new file mode 100644
index 0000000..5862bf8
--- /dev/null
+++ b/src/dawn/native/vulkan/AdapterVk.cpp
@@ -0,0 +1,353 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+
+#include "dawn/native/Limits.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+
+#include "dawn/common/GPUInfo.h"
+
+namespace dawn::native::vulkan {
+
+    Adapter::Adapter(InstanceBase* instance,
+                     VulkanInstance* vulkanInstance,
+                     VkPhysicalDevice physicalDevice)
+        : AdapterBase(instance, wgpu::BackendType::Vulkan),
+          mPhysicalDevice(physicalDevice),
+          mVulkanInstance(vulkanInstance) {
+    }
+
+    const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
+        return mDeviceInfo;
+    }
+
+    VkPhysicalDevice Adapter::GetPhysicalDevice() const {
+        return mPhysicalDevice;
+    }
+
+    VulkanInstance* Adapter::GetVulkanInstance() const {
+        return mVulkanInstance.Get();
+    }
+
+    bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
+        ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
+               format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_S8_UINT);
+
+        VkFormatProperties properties;
+        mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
+                                                                          &properties);
+        return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+    }
+
+    MaybeError Adapter::InitializeImpl() {
+        DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+        if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
+            mDriverDescription = mDeviceInfo.driverProperties.driverName;
+            if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
+                mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
+            }
+        } else {
+            mDriverDescription =
+                "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
+        }
+
+        mDeviceId = mDeviceInfo.properties.deviceID;
+        mVendorId = mDeviceInfo.properties.vendorID;
+        mName = mDeviceInfo.properties.deviceName;
+
+        switch (mDeviceInfo.properties.deviceType) {
+            case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+                mAdapterType = wgpu::AdapterType::IntegratedGPU;
+                break;
+            case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+                mAdapterType = wgpu::AdapterType::DiscreteGPU;
+                break;
+            case VK_PHYSICAL_DEVICE_TYPE_CPU:
+                mAdapterType = wgpu::AdapterType::CPU;
+                break;
+            default:
+                mAdapterType = wgpu::AdapterType::Unknown;
+                break;
+        }
+
+        return {};
+    }
+
+    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+        // Needed for viewport Y-flip.
+        if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
+            return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
+        }
+
+        // Needed for security
+        if (!mDeviceInfo.features.robustBufferAccess) {
+            return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
+        }
+
+        if (!mDeviceInfo.features.textureCompressionBC &&
+            !(mDeviceInfo.features.textureCompressionETC2 &&
+              mDeviceInfo.features.textureCompressionASTC_LDR)) {
+            return DAWN_INTERNAL_ERROR(
+                "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
+                "textureCompressionASTC required.");
+        }
+
+        // Needed for the respective WebGPU features.
+        if (!mDeviceInfo.features.depthBiasClamp) {
+            return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
+        }
+        if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
+            return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
+        }
+        if (!mDeviceInfo.features.fullDrawIndexUint32) {
+            return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
+        }
+        if (!mDeviceInfo.features.imageCubeArray) {
+            return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
+        }
+        if (!mDeviceInfo.features.independentBlend) {
+            return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
+        }
+        if (!mDeviceInfo.features.sampleRateShading) {
+            return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
+        }
+
+        // Initialize supported extensions
+        if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+        }
+
+        if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+        }
+
+        if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+        }
+
+        if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+        }
+
+        if (mDeviceInfo.features.depthClamp == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+        }
+
+        if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
+            mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+        }
+
+        if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
+            mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+        }
+
+        if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
+            mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+        }
+
+#if defined(DAWN_USE_SYNC_FDS)
+        // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
+        // features.
+        mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+#endif
+
+        return {};
+    }
+
+    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+        GetDefaultLimits(&limits->v1);
+        CombinedLimits baseLimits = *limits;
+
+        const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
+
+#define CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, compareOp, msgSegment)   \
+    do {                                                                             \
+        if (vkLimits.vulkanName compareOp baseLimits.v1.webgpuName) {                \
+            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for " #webgpuName \
+                                       "."                                           \
+                                       " VkPhysicalDeviceLimits::" #vulkanName       \
+                                       " must be at " msgSegment " " +               \
+                                       std::to_string(baseLimits.v1.webgpuName));    \
+        }                                                                            \
+        limits->v1.webgpuName = vkLimits.vulkanName;                                 \
+    } while (false)
+
+#define CHECK_AND_SET_V1_MAX_LIMIT(vulkanName, webgpuName) \
+    CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, <, "least")
+#define CHECK_AND_SET_V1_MIN_LIMIT(vulkanName, webgpuName) \
+    CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, >, "most")
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
+        CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
+        limits->v1.maxTextureDimension2D = std::min({
+            static_cast<uint32_t>(vkLimits.maxImageDimension2D),
+            static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
+            static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
+            static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
+            static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
+            static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
+            static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
+        });
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
+                                   maxDynamicUniformBuffersPerPipelineLayout);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
+                                   maxDynamicStorageBuffersPerPipelineLayout);
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
+                                   maxSampledTexturesPerShaderStage);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
+                                   maxStorageBuffersPerShaderStage);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
+                                   maxStorageTexturesPerShaderStage);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
+                                   maxUniformBuffersPerShaderStage);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
+
+        CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment,
+                                   minUniformBufferOffsetAlignment);
+        CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment,
+                                   minStorageBufferOffsetAlignment);
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
+
+        if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
+            vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
+            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
+        }
+        limits->v1.maxVertexBufferArrayStride = std::min(
+            vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
+
+        if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
+            vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
+            return DAWN_INTERNAL_ERROR(
+                "Insufficient Vulkan limits for maxInterStageShaderComponents");
+        }
+        limits->v1.maxInterStageShaderComponents =
+            std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations,
+                                   maxComputeInvocationsPerWorkgroup);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
+
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
+        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
+        limits->v1.maxComputeWorkgroupsPerDimension = std::min({
+            vkLimits.maxComputeWorkGroupCount[0],
+            vkLimits.maxComputeWorkGroupCount[1],
+            vkLimits.maxComputeWorkGroupCount[2],
+        });
+
+        if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
+            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
+        }
+        if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+                      vkLimits.framebufferColorSampleCounts)) {
+            return DAWN_INTERNAL_ERROR(
+                "Insufficient Vulkan limits for framebufferColorSampleCounts");
+        }
+        if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+                      vkLimits.framebufferDepthSampleCounts)) {
+            return DAWN_INTERNAL_ERROR(
+                "Insufficient Vulkan limits for framebufferDepthSampleCounts");
+        }
+
+        // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPUs drivers seem
+        // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
+        // storage buffers. Mesa llvmpipe driver also puts 8 here.
+        uint32_t vendorId = mDeviceInfo.properties.vendorID;
+        if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
+            !gpu_info::IsMesa(vendorId) && !gpu_info::IsNvidia(vendorId)) {
+            if (vkLimits.maxFragmentCombinedOutputResources <
+                kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
+                    baseLimits.v1.maxStorageBuffersPerShaderStage) {
+                return DAWN_INTERNAL_ERROR(
+                    "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
+            }
+
+            uint32_t maxFragmentCombinedOutputResources =
+                kMaxColorAttachments + limits->v1.maxStorageTexturesPerShaderStage +
+                limits->v1.maxStorageBuffersPerShaderStage;
+
+            if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
+                // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
+                // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
+                // to fit within the Vulkan limit.
+                uint32_t countOverLimit = maxFragmentCombinedOutputResources -
+                                          vkLimits.maxFragmentCombinedOutputResources;
+
+                uint32_t maxStorageTexturesOverBase =
+                    limits->v1.maxStorageTexturesPerShaderStage -
+                    baseLimits.v1.maxStorageTexturesPerShaderStage;
+                uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
+                                                     baseLimits.v1.maxStorageBuffersPerShaderStage;
+
+                // Reduce the number of resources by half the overage count, but clamp to
+                // to ensure we don't go below the base limits.
+                uint32_t numFewerStorageTextures =
+                    std::min(countOverLimit / 2, maxStorageTexturesOverBase);
+                uint32_t numFewerStorageBuffers =
+                    std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
+
+                if (numFewerStorageTextures == maxStorageTexturesOverBase) {
+                    // If |numFewerStorageTextures| was clamped, subtract the remaining
+                    // from the storage buffers.
+                    numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
+                    ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
+                } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
+                    // If |numFewerStorageBuffers| was clamped, subtract the remaining
+                    // from the storage textures.
+                    numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
+                    ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
+                }
+                limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
+                limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
+            }
+        }
+
+        return {};
+    }
+
+    bool Adapter::SupportsExternalImages() const {
+        // Via dawn::native::vulkan::WrapVulkanImage
+        return external_memory::Service::CheckSupport(mDeviceInfo) &&
+               external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
+                                                         mVulkanInstance->GetFunctions());
+    }
+
+    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+        return Device::Create(this, descriptor);
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/AdapterVk.h b/src/dawn/native/vulkan/AdapterVk.h
new file mode 100644
index 0000000..2f3948e
--- /dev/null
+++ b/src/dawn/native/vulkan/AdapterVk.h
@@ -0,0 +1,59 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_ADAPTERVK_H_
+#define DAWNNATIVE_VULKAN_ADAPTERVK_H_
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+    class VulkanInstance;
+
+    class Adapter : public AdapterBase {
+      public:
+        Adapter(InstanceBase* instance,
+                VulkanInstance* vulkanInstance,
+                VkPhysicalDevice physicalDevice);
+        ~Adapter() override = default;
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override;
+
+        const VulkanDeviceInfo& GetDeviceInfo() const;
+        VkPhysicalDevice GetPhysicalDevice() const;
+        VulkanInstance* GetVulkanInstance() const;
+
+        bool IsDepthStencilFormatSupported(VkFormat format);
+
+      private:
+        MaybeError InitializeImpl() override;
+        MaybeError InitializeSupportedFeaturesImpl() override;
+        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override;
+
+        VkPhysicalDevice mPhysicalDevice;
+        Ref<VulkanInstance> mVulkanInstance;
+        VulkanDeviceInfo mDeviceInfo = {};
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_ADAPTERVK_H_
diff --git a/src/dawn/native/vulkan/BackendVk.cpp b/src/dawn/native/vulkan/BackendVk.cpp
new file mode 100644
index 0000000..b8307a5
--- /dev/null
+++ b/src/dawn/native/vulkan/BackendVk.cpp
@@ -0,0 +1,447 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BackendVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+// TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+#    if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
+#    elif defined(DAWN_PLATFORM_WINDOWS)
+constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
+#    elif defined(DAWN_PLATFORM_MACOS)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
+#    else
+#        error "Unimplemented Swiftshader Vulkan backend platform"
+#    endif
+#endif
+
+#if defined(DAWN_PLATFORM_LINUX)
+#    if defined(DAWN_PLATFORM_ANDROID)
+constexpr char kVulkanLibName[] = "libvulkan.so";
+#    else
+constexpr char kVulkanLibName[] = "libvulkan.so.1";
+#    endif
+#elif defined(DAWN_PLATFORM_WINDOWS)
+constexpr char kVulkanLibName[] = "vulkan-1.dll";
+#elif defined(DAWN_PLATFORM_MACOS)
+constexpr char kVulkanLibName[] = "libvulkan.dylib";
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+constexpr char kVulkanLibName[] = "libvulkan.so";
+#else
+#    error "Unimplemented Vulkan backend platform"
+#endif
+
+struct SkippedMessage {
+    const char* messageId;
+    const char* messageContents;
+};
+
+// Array of Validation error/warning messages that will be ignored, should include bugID
+constexpr SkippedMessage kSkippedMessages[] = {
+    // These errors are generated when simultaneously using a read-only depth/stencil attachment as
+    // a texture binding. This is valid Vulkan.
+    // The substring matching matches both
+    // VK_PIPELINE_STAGE_2_NONE and VK_PIPELINE_STAGE_2_NONE_KHR.
+    //
+    // When storeOp=NONE is not present, Dawn uses storeOp=STORE, but Vulkan validation layer
+    // considers the image read-only and produces a hazard. Dawn can't rely on storeOp=NONE and
+    // so this is not expected to be worked around.
+    // See http://crbug.com/dawn/1225 for more details.
+    {"SYNC-HAZARD-WRITE_AFTER_READ",
+     "depth aspect during store with storeOp VK_ATTACHMENT_STORE_OP_STORE. Access info (usage: "
+     "SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, prior_usage: "
+     "SYNC_FRAGMENT_SHADER_SHADER_STORAGE_READ, read_barriers: VK_PIPELINE_STAGE_2_NONE"},
+
+    {"SYNC-HAZARD-WRITE_AFTER_READ",
+     "stencil aspect during store with stencilStoreOp VK_ATTACHMENT_STORE_OP_STORE. Access info "
+     "(usage: SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, prior_usage: "
+     "SYNC_FRAGMENT_SHADER_SHADER_STORAGE_READ, read_barriers: VK_PIPELINE_STAGE_2_NONE"},
+
+    // http://crbug.com/1310052
+    {"", "VUID-vkCmdDraw-None-06538"},
+};
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        static constexpr ICD kICDs[] = {
+            ICD::None,
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+            ICD::SwiftShader,
+#endif  // defined(DAWN_ENABLE_SWIFTSHADER)
+        };
+
+        // Suppress validation errors that are known. Returns false in that case.
+        bool ShouldReportDebugMessage(const char* messageId, const char* message) {
+            for (const SkippedMessage& msg : kSkippedMessages) {
+                if (strstr(messageId, msg.messageId) != nullptr &&
+                    strstr(message, msg.messageContents) != nullptr) {
+                    return false;
+                }
+            }
+            return true;
+        }
+
+        VKAPI_ATTR VkBool32 VKAPI_CALL
+        OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+                             VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+                             const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+                             void* /* pUserData */) {
+            if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
+                dawn::WarningLog() << pCallbackData->pMessage;
+                ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
+            }
+            return VK_FALSE;
+        }
+
+        // A debug callback specifically for instance creation so that we don't fire an ASSERT when
+        // the instance fails creation in an expected manner (for example the system not having
+        // Vulkan drivers).
+        VKAPI_ATTR VkBool32 VKAPI_CALL OnInstanceCreationDebugUtilsCallback(
+            VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+            VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+            const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+            void* /* pUserData */) {
+            dawn::WarningLog() << pCallbackData->pMessage;
+            return VK_FALSE;
+        }
+
+    }  // anonymous namespace
+
+    VulkanInstance::VulkanInstance() = default;
+
+    VulkanInstance::~VulkanInstance() {
+        if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
+            mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
+            mDebugUtilsMessenger = VK_NULL_HANDLE;
+        }
+
+        // VkPhysicalDevices are destroyed when the VkInstance is destroyed
+        if (mInstance != VK_NULL_HANDLE) {
+            mFunctions.DestroyInstance(mInstance, nullptr);
+            mInstance = VK_NULL_HANDLE;
+        }
+    }
+
+    const VulkanFunctions& VulkanInstance::GetFunctions() const {
+        return mFunctions;
+    }
+
+    VkInstance VulkanInstance::GetVkInstance() const {
+        return mInstance;
+    }
+
+    const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
+        return mGlobalInfo;
+    }
+
+    const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
+        return mPhysicalDevices;
+    }
+
+    // static
+    ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance,
+                                                              ICD icd) {
+        Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
+        DAWN_TRY(vulkanInstance->Initialize(instance, icd));
+        return std::move(vulkanInstance);
+    }
+
+    MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
+        // These environment variables need only be set while loading procs and gathering device
+        // info.
+        ScopedEnvironmentVar vkICDFilenames;
+        ScopedEnvironmentVar vkLayerPath;
+
+        const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
+
+        auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
+            std::string list;
+            bool first = true;
+            for (const std::string& path : searchPaths) {
+                if (!first) {
+                    list += ", ";
+                }
+                first = false;
+                list += (path + name);
+            }
+            return list;
+        };
+
+        auto LoadVulkan = [&](const char* libName) -> MaybeError {
+            for (const std::string& path : searchPaths) {
+                std::string resolvedPath = path + libName;
+                if (mVulkanLib.Open(resolvedPath)) {
+                    return {};
+                }
+            }
+            return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
+                                              CommaSeparatedResolvedSearchPaths(libName));
+        };
+
+        switch (icd) {
+            case ICD::None: {
+                DAWN_TRY(LoadVulkan(kVulkanLibName));
+                // Succesfully loaded driver; break.
+                break;
+            }
+            case ICD::SwiftShader: {
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+                DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
+                break;
+#endif  // defined(DAWN_ENABLE_SWIFTSHADER)
+        // ICD::SwiftShader should not be passed if SwiftShader is not enabled.
+                UNREACHABLE();
+            }
+        }
+
+        if (instance->IsBackendValidationEnabled()) {
+#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
+            auto execDir = GetExecutableDirectory();
+            std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
+            if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
+                return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
+            }
+#else
+            dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
+                                  "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
+#endif
+        }
+
+        DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
+
+        DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
+
+        VulkanGlobalKnobs usedGlobalKnobs = {};
+        DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
+        *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
+
+        DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
+
+        if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
+            DAWN_TRY(RegisterDebugUtils());
+        }
+
+        DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
+
+        return {};
+    }
+
+    ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(
+        const InstanceBase* instance) {
+        VulkanGlobalKnobs usedKnobs = {};
+        std::vector<const char*> layerNames;
+        InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+
+        auto UseLayerIfAvailable = [&](VulkanLayer layer) {
+            if (mGlobalInfo.layers[layer]) {
+                layerNames.push_back(GetVulkanLayerInfo(layer).name);
+                usedKnobs.layers.set(layer, true);
+                extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
+            }
+        };
+
+        // vktrace works by instering a layer, but we hide it behind a macro because the vktrace
+        // layer crashes when used without vktrace server started. See this vktrace issue:
+        // https://github.com/LunarG/VulkanTools/issues/254
+        // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
+        // by other layers.
+#if defined(DAWN_USE_VKTRACE)
+        UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
+#endif
+        // RenderDoc installs a layer at the system level for its capture but we don't want to use
+        // it unless we are debugging in RenderDoc so we hide it behind a macro.
+#if defined(DAWN_USE_RENDERDOC)
+        UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
+#endif
+
+        if (instance->IsBackendValidationEnabled()) {
+            UseLayerIfAvailable(VulkanLayer::Validation);
+        }
+
+        // Always use the Fuchsia swapchain layer if available.
+        UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
+
+        // Available and known instance extensions default to being requested, but some special
+        // cases are removed.
+        usedKnobs.extensions = extensionsToRequest;
+
+        std::vector<const char*> extensionNames;
+        for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
+            const InstanceExtInfo& info = GetInstanceExtInfo(ext);
+
+            if (info.versionPromoted > mGlobalInfo.apiVersion) {
+                extensionNames.push_back(info.name);
+            }
+        }
+
+        VkApplicationInfo appInfo;
+        appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+        appInfo.pNext = nullptr;
+        appInfo.pApplicationName = nullptr;
+        appInfo.applicationVersion = 0;
+        appInfo.pEngineName = nullptr;
+        appInfo.engineVersion = 0;
+        // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
+        // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
+        // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
+        // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
+        // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
+        // treat 1.2 as the highest API version dawn targets.
+        if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
+            appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+        } else {
+            appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
+        }
+
+        VkInstanceCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.pApplicationInfo = &appInfo;
+        createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
+        createInfo.ppEnabledLayerNames = layerNames.data();
+        createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+        createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+        PNextChainBuilder createInfoChain(&createInfo);
+
+        // Register the debug callback for instance creation so we receive message for any errors
+        // (validation or other).
+        VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
+        if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
+            utilsMessengerCreateInfo.flags = 0;
+            utilsMessengerCreateInfo.messageSeverity =
+                VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+            utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                                                   VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+            utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
+            utilsMessengerCreateInfo.pUserData = nullptr;
+
+            createInfoChain.Add(&utilsMessengerCreateInfo,
+                                VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
+        }
+
+        // Try to turn on synchronization validation if the instance was created with backend
+        // validation enabled.
+        VkValidationFeaturesEXT validationFeatures;
+        VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
+            VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
+        if (instance->IsBackendValidationEnabled() &&
+            usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
+            validationFeatures.enabledValidationFeatureCount = 1;
+            validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
+            validationFeatures.disabledValidationFeatureCount = 0;
+            validationFeatures.pDisabledValidationFeatures = nullptr;
+
+            createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
+        }
+
+        DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
+                                "vkCreateInstance"));
+
+        return usedKnobs;
+    }
+
+    MaybeError VulkanInstance::RegisterDebugUtils() {
+        VkDebugUtilsMessengerCreateInfoEXT createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                                     VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+        createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                                 VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+        createInfo.pfnUserCallback = OnDebugUtilsCallback;
+        createInfo.pUserData = nullptr;
+
+        return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(
+                                  mInstance, &createInfo, nullptr, &*mDebugUtilsMessenger),
+                              "vkCreateDebugUtilsMessengerEXT");
+    }
+
+    Backend::Backend(InstanceBase* instance)
+        : BackendConnection(instance, wgpu::BackendType::Vulkan) {
+    }
+
+    Backend::~Backend() = default;
+
+    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+        AdapterDiscoveryOptions options;
+        auto result = DiscoverAdapters(&options);
+        if (result.IsError()) {
+            GetInstance()->ConsumedError(result.AcquireError());
+            return {};
+        }
+        return result.AcquireSuccess();
+    }
+
+    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) {
+        ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
+
+        const AdapterDiscoveryOptions* options =
+            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+        std::vector<Ref<AdapterBase>> adapters;
+
+        InstanceBase* instance = GetInstance();
+        for (ICD icd : kICDs) {
+#if defined(DAWN_PLATFORM_MACOS)
+            // On Mac, we don't expect non-Swiftshader Vulkan to be available.
+            if (icd == ICD::None) {
+                continue;
+            }
+#endif  // defined(DAWN_PLATFORM_MACOS)
+            if (options->forceSwiftShader && icd != ICD::SwiftShader) {
+                continue;
+            }
+            if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
+                    DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
+                    return {};
+                }())) {
+                // Instance failed to initialize.
+                continue;
+            }
+            const std::vector<VkPhysicalDevice>& physicalDevices =
+                mVulkanInstances[icd]->GetPhysicalDevices();
+            for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
+                Ref<Adapter> adapter = AcquireRef(
+                    new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
+                if (instance->ConsumedError(adapter->Initialize())) {
+                    continue;
+                }
+                adapters.push_back(std::move(adapter));
+            }
+        }
+        return adapters;
+    }
+
+    BackendConnection* Connect(InstanceBase* instance) {
+        return new Backend(instance);
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BackendVk.h b/src/dawn/native/vulkan/BackendVk.h
new file mode 100644
index 0000000..2902dbb
--- /dev/null
+++ b/src/dawn/native/vulkan/BackendVk.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BACKENDVK_H_
+#define DAWNNATIVE_VULKAN_BACKENDVK_H_
+
+#include "dawn/native/BackendConnection.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+    enum class ICD {
+        None,
+        SwiftShader,
+    };
+
+    // VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
+    // on that instance, Vulkan functions loaded from the library, and global information
+    // gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
+    // and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
+    // on an instance are no longer in use, the instance is deleted. This can be particuarly useful
+    // when we create multiple instances to selectively discover ICDs (like only
+    // SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
+    // can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
+    class VulkanInstance : public RefCounted {
+      public:
+        static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
+        ~VulkanInstance();
+
+        const VulkanFunctions& GetFunctions() const;
+        VkInstance GetVkInstance() const;
+        const VulkanGlobalInfo& GetGlobalInfo() const;
+        const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
+
+      private:
+        VulkanInstance();
+
+        MaybeError Initialize(const InstanceBase* instance, ICD icd);
+        ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
+
+        MaybeError RegisterDebugUtils();
+
+        DynamicLib mVulkanLib;
+        VulkanGlobalInfo mGlobalInfo = {};
+        VkInstance mInstance = VK_NULL_HANDLE;
+        VulkanFunctions mFunctions;
+
+        VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
+
+        std::vector<VkPhysicalDevice> mPhysicalDevices;
+    };
+
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance);
+        ~Backend() override;
+
+        MaybeError Initialize();
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* optionsBase) override;
+
+      private:
+        ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_BACKENDVK_H_
diff --git a/src/dawn/native/vulkan/BindGroupLayoutVk.cpp b/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
new file mode 100644
index 0000000..8ed4340
--- /dev/null
+++ b/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
@@ -0,0 +1,195 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <map>
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
+            VkShaderStageFlags flags = 0;
+
+            if (stages & wgpu::ShaderStage::Vertex) {
+                flags |= VK_SHADER_STAGE_VERTEX_BIT;
+            }
+            if (stages & wgpu::ShaderStage::Fragment) {
+                flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+            }
+            if (stages & wgpu::ShaderStage::Compute) {
+                flags |= VK_SHADER_STAGE_COMPUTE_BIT;
+            }
+
+            return flags;
+        }
+
+    }  // anonymous namespace
+
+    VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer:
+                switch (bindingInfo.buffer.type) {
+                    case wgpu::BufferBindingType::Uniform:
+                        if (bindingInfo.buffer.hasDynamicOffset) {
+                            return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+                        }
+                        return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+                    case wgpu::BufferBindingType::Storage:
+                    case kInternalStorageBufferBinding:
+                    case wgpu::BufferBindingType::ReadOnlyStorage:
+                        if (bindingInfo.buffer.hasDynamicOffset) {
+                            return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+                        }
+                        return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+                    case wgpu::BufferBindingType::Undefined:
+                        UNREACHABLE();
+                }
+            case BindingInfoType::Sampler:
+                return VK_DESCRIPTOR_TYPE_SAMPLER;
+            case BindingInfoType::Texture:
+            case BindingInfoType::ExternalTexture:
+                return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+            case BindingInfoType::StorageTexture:
+                return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+        }
+        UNREACHABLE();
+    }
+
+    // static
+    ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
+        Device* device,
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        Ref<BindGroupLayout> bgl =
+            AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+        DAWN_TRY(bgl->Initialize());
+        return bgl;
+    }
+
+    MaybeError BindGroupLayout::Initialize() {
+        // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
+        // one entry per binding set. This might be optimized by computing continuous ranges of
+        // bindings of the same type.
+        ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
+        bindings.reserve(GetBindingCount());
+
+        for (const auto& [_, bindingIndex] : GetBindingMap()) {
+            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+            VkDescriptorSetLayoutBinding vkBinding;
+            vkBinding.binding = static_cast<uint32_t>(bindingIndex);
+            vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
+            vkBinding.descriptorCount = 1;
+            vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+            vkBinding.pImmutableSamplers = nullptr;
+
+            bindings.emplace_back(vkBinding);
+        }
+
+        VkDescriptorSetLayoutCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
+        createInfo.pBindings = bindings.data();
+
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
+                                    device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+                                "CreateDescriptorSetLayout"));
+
+        // Compute the size of descriptor pools used for this layout.
+        std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
+
+        for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+            VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
+
+            // map::operator[] will return 0 if the key doesn't exist.
+            descriptorCountPerType[vulkanType]++;
+        }
+
+        // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
+        // counts.
+        mDescriptorSetAllocator =
+            DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                     const BindGroupLayoutDescriptor* descriptor,
+                                     PipelineCompatibilityToken pipelineCompatibilityToken)
+        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+    }
+
+    BindGroupLayout::~BindGroupLayout() = default;
+
+    void BindGroupLayout::DestroyImpl() {
+        BindGroupLayoutBase::DestroyImpl();
+
+        Device* device = ToBackend(GetDevice());
+
+        // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
+        // so we can destroy mHandle immediately instead of using the FencedDeleter.
+        // (Swiftshader implements this wrong b/154522740).
+        // In practice, the GPU is done with all descriptor sets because bind group deallocation
+        // refs the bind group layout so that once the bind group is finished being used, we can
+        // recycle its descriptor set.
+        if (mHandle != VK_NULL_HANDLE) {
+            device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
+            mHandle = VK_NULL_HANDLE;
+        }
+        mDescriptorSetAllocator = nullptr;
+    }
+
+    VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
+        return mHandle;
+    }
+
+    ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+        Device* device,
+        const BindGroupDescriptor* descriptor) {
+        DescriptorSetAllocation descriptorSetAllocation;
+        DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
+
+        return AcquireRef(
+            mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
+    }
+
+    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+                                              DescriptorSetAllocation* descriptorSetAllocation) {
+        mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
+        mBindGroupAllocator.Deallocate(bindGroup);
+    }
+
+    void BindGroupLayout::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_BindGroupLayout", GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BindGroupLayoutVk.h b/src/dawn/native/vulkan/BindGroupLayoutVk.h
new file mode 100644
index 0000000..558ff7f
--- /dev/null
+++ b/src/dawn/native/vulkan/BindGroupLayoutVk.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
+#define DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/vulkan_platform.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    class BindGroup;
+    struct DescriptorSetAllocation;
+    class DescriptorSetAllocator;
+    class Device;
+
+    VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
+
+    // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
+    // it's hard to have something where we can mix different types of descriptor sets because
+    // we don't know if their vector of number of descriptors will be similar.
+    //
+    // That's why that in addition to containing the VkDescriptorSetLayout to create
+    // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
+    // sets.
+    //
+    // The allocations is done with one pool per descriptor set, which is inefficient, but at least
+    // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
+    // is important because creating them can incur GPU memory allocation which is usually an
+    // expensive syscall.
+    class BindGroupLayout final : public BindGroupLayoutBase {
+      public:
+        static ResultOrError<Ref<BindGroupLayout>> Create(
+            Device* device,
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken);
+
+        BindGroupLayout(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+
+        VkDescriptorSetLayout GetHandle() const;
+
+        ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+                                                        const BindGroupDescriptor* descriptor);
+        void DeallocateBindGroup(BindGroup* bindGroup,
+                                 DescriptorSetAllocation* descriptorSetAllocation);
+
+      private:
+        ~BindGroupLayout() override;
+        MaybeError Initialize();
+        void DestroyImpl() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
+
+        SlabAllocator<BindGroup> mBindGroupAllocator;
+        Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
diff --git a/src/dawn/native/vulkan/BindGroupVk.cpp b/src/dawn/native/vulkan/BindGroupVk.cpp
new file mode 100644
index 0000000..00e70cf
--- /dev/null
+++ b/src/dawn/native/vulkan/BindGroupVk.cpp
@@ -0,0 +1,165 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BindGroupVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/SamplerVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+                                                    const BindGroupDescriptor* descriptor) {
+        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+    }
+
+    BindGroup::BindGroup(Device* device,
+                         const BindGroupDescriptor* descriptor,
+                         DescriptorSetAllocation descriptorSetAllocation)
+        : BindGroupBase(this, device, descriptor),
+          mDescriptorSetAllocation(descriptorSetAllocation) {
+        // Now do a write of a single descriptor set with all possible chained data allocated on the
+        // stack.
+        const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
+        ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
+            bindingCount);
+        ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
+            writeBufferInfo(bindingCount);
+        ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
+            writeImageInfo(bindingCount);
+
+        uint32_t numWrites = 0;
+        for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
+            const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
+
+            auto& write = writes[numWrites];
+            write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+            write.pNext = nullptr;
+            write.dstSet = GetHandle();
+            write.dstBinding = static_cast<uint32_t>(bindingIndex);
+            write.dstArrayElement = 0;
+            write.descriptorCount = 1;
+            write.descriptorType = VulkanDescriptorType(bindingInfo);
+
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+                    VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
+                    if (handle == VK_NULL_HANDLE) {
+                        // The Buffer was destroyed. Skip this descriptor write since it would be
+                        // a Vulkan Validation Layers error. This bind group won't be used as it
+                        // is an error to submit a command buffer that references destroyed
+                        // resources.
+                        continue;
+                    }
+                    writeBufferInfo[numWrites].buffer = handle;
+                    writeBufferInfo[numWrites].offset = binding.offset;
+                    writeBufferInfo[numWrites].range = binding.size;
+                    write.pBufferInfo = &writeBufferInfo[numWrites];
+                    break;
+                }
+
+                case BindingInfoType::Sampler: {
+                    Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
+                    writeImageInfo[numWrites].sampler = sampler->GetHandle();
+                    write.pImageInfo = &writeImageInfo[numWrites];
+                    break;
+                }
+
+                case BindingInfoType::Texture: {
+                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                    VkImageView handle = view->GetHandle();
+                    if (handle == VK_NULL_HANDLE) {
+                        // The Texture was destroyed before the TextureView was created.
+                        // Skip this descriptor write since it would be
+                        // a Vulkan Validation Layers error. This bind group won't be used as it
+                        // is an error to submit a command buffer that references destroyed
+                        // resources.
+                        continue;
+                    }
+                    writeImageInfo[numWrites].imageView = handle;
+
+                    // The layout may be GENERAL here because of interactions between the Sampled
+                    // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
+                    writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
+                        ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
+
+                    write.pImageInfo = &writeImageInfo[numWrites];
+                    break;
+                }
+
+                case BindingInfoType::StorageTexture: {
+                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                    VkImageView handle = view->GetHandle();
+                    if (handle == VK_NULL_HANDLE) {
+                        // The Texture was destroyed before the TextureView was created.
+                        // Skip this descriptor write since it would be
+                        // a Vulkan Validation Layers error. This bind group won't be used as it
+                        // is an error to submit a command buffer that references destroyed
+                        // resources.
+                        continue;
+                    }
+                    writeImageInfo[numWrites].imageView = handle;
+                    writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+                    write.pImageInfo = &writeImageInfo[numWrites];
+                    break;
+                }
+
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
+                    break;
+            }
+
+            numWrites++;
+        }
+
+        // TODO(crbug.com/dawn/855): Batch these updates
+        device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
+                                        nullptr);
+
+        SetLabelImpl();
+    }
+
+    BindGroup::~BindGroup() = default;
+
+    void BindGroup::DestroyImpl() {
+        BindGroupBase::DestroyImpl();
+        ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
+    }
+
+    VkDescriptorSet BindGroup::GetHandle() const {
+        return mDescriptorSetAllocation.set;
+    }
+
+    void BindGroup::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET,
+                     reinterpret_cast<uint64_t&>(mDescriptorSetAllocation.set), "Dawn_BindGroup",
+                     GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BindGroupVk.h b/src/dawn/native/vulkan/BindGroupVk.h
new file mode 100644
index 0000000..100ea85
--- /dev/null
+++ b/src/dawn/native/vulkan/BindGroupVk.h
@@ -0,0 +1,55 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BINDGROUPVK_H_
+#define DAWNNATIVE_VULKAN_BINDGROUPVK_H_
+
+#include "dawn/native/BindGroup.h"
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DescriptorSetAllocation.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class BindGroup final : public BindGroupBase, public PlacementAllocated {
+      public:
+        static ResultOrError<Ref<BindGroup>> Create(Device* device,
+                                                    const BindGroupDescriptor* descriptor);
+
+        BindGroup(Device* device,
+                  const BindGroupDescriptor* descriptor,
+                  DescriptorSetAllocation descriptorSetAllocation);
+
+        VkDescriptorSet GetHandle() const;
+
+      private:
+        ~BindGroup() override;
+
+        void DestroyImpl() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        // The descriptor set in this allocation outlives the BindGroup because it is owned by
+        // the BindGroupLayout which is referenced by the BindGroup.
+        DescriptorSetAllocation mDescriptorSetAllocation;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_BINDGROUPVK_H_
diff --git a/src/dawn/native/vulkan/BufferVk.cpp b/src/dawn/native/vulkan/BufferVk.cpp
new file mode 100644
index 0000000..c7e9fb0
--- /dev/null
+++ b/src/dawn/native/vulkan/BufferVk.cpp
@@ -0,0 +1,413 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BufferVk.h"
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <cstring>
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
+            VkBufferUsageFlags flags = 0;
+
+            if (usage & wgpu::BufferUsage::CopySrc) {
+                flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+            }
+            if (usage & wgpu::BufferUsage::CopyDst) {
+                flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Index) {
+                flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Vertex) {
+                flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Uniform) {
+                flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+            }
+            if (usage &
+                (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+                flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Indirect) {
+                flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+            }
+            if (usage & wgpu::BufferUsage::QueryResolve) {
+                flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+            }
+
+            return flags;
+        }
+
+        VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
+            VkPipelineStageFlags flags = 0;
+
+            if (usage & kMappableBufferUsages) {
+                flags |= VK_PIPELINE_STAGE_HOST_BIT;
+            }
+            if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
+                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+            }
+            if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
+                flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+            }
+            if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
+                         kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Indirect) {
+                flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+            }
+            if (usage & wgpu::BufferUsage::QueryResolve) {
+                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+            }
+
+            return flags;
+        }
+
+        VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
+            VkAccessFlags flags = 0;
+
+            if (usage & wgpu::BufferUsage::MapRead) {
+                flags |= VK_ACCESS_HOST_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::MapWrite) {
+                flags |= VK_ACCESS_HOST_WRITE_BIT;
+            }
+            if (usage & wgpu::BufferUsage::CopySrc) {
+                flags |= VK_ACCESS_TRANSFER_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::CopyDst) {
+                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Index) {
+                flags |= VK_ACCESS_INDEX_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Vertex) {
+                flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Uniform) {
+                flags |= VK_ACCESS_UNIFORM_READ_BIT;
+            }
+            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+            }
+            if (usage & kReadOnlyStorageBuffer) {
+                flags |= VK_ACCESS_SHADER_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::Indirect) {
+                flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+            }
+            if (usage & wgpu::BufferUsage::QueryResolve) {
+                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+            }
+
+            return flags;
+        }
+
+    }  // namespace
+
+    // static
+    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+        return std::move(buffer);
+    }
+
+    MaybeError Buffer::Initialize(bool mappedAtCreation) {
+        // vkCmdFillBuffer requires the size to be a multiple of 4.
+        constexpr size_t kAlignment = 4u;
+
+        uint32_t extraBytes = 0u;
+        if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
+            // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
+            // is equal to the whole buffer size. Allocate at least one more byte so it
+            // is valid to setVertex/IndexBuffer with a zero-sized range at the end
+            // of the buffer with (offset=buffer.size, size=0).
+            extraBytes = 1u;
+        }
+
+        uint64_t size = GetSize();
+        if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+
+        size += extraBytes;
+
+        // Allocate at least 4 bytes so clamped accesses are always in bounds.
+        // Also, Vulkan requires the size to be non-zero.
+        size = std::max(size, uint64_t(4u));
+
+        if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
+            // Alignment would overlow.
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+        }
+        mAllocatedSize = Align(size, kAlignment);
+
+        // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
+        // some constants to the size passed and align it, but for values close to the maximum
+        // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
+        // VkmemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
+        // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
+        // safely return an OOM error.
+        if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
+        }
+
+        VkBufferCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.size = mAllocatedSize;
+        // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+        // and robust resource initialization.
+        createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
+        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = 0;
+
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkOOMThenSuccess(
+            device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "vkCreateBuffer"));
+
+        // Gather requirements for the buffer's memory and allocate it.
+        VkMemoryRequirements requirements;
+        device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+
+        MemoryKind requestKind = MemoryKind::Linear;
+        if (GetUsage() & kMappableBufferUsages) {
+            requestKind = MemoryKind::LinearMappable;
+        }
+        DAWN_TRY_ASSIGN(mMemoryAllocation,
+                        device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
+
+        // Finally associate it with the buffer.
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
+                                        ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+                                        mMemoryAllocation.GetOffset()),
+            "vkBindBufferMemory"));
+
+        // The buffers with mappedAtCreation == true will be initialized in
+        // BufferBase::MapAtCreation().
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+            !mappedAtCreation) {
+            ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
+        }
+
+        // Initialize the padding bytes to zero.
+        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+            if (paddingBytes > 0) {
+                uint32_t clearSize = Align(paddingBytes, 4);
+                uint64_t clearOffset = GetAllocatedSize() - clearSize;
+
+                CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+                ClearBuffer(recordingContext, 0, clearOffset, clearSize);
+            }
+        }
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    Buffer::~Buffer() = default;
+
+    VkBuffer Buffer::GetHandle() const {
+        return mHandle;
+    }
+
+    void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                    wgpu::BufferUsage usage) {
+        VkBufferMemoryBarrier barrier;
+        VkPipelineStageFlags srcStages = 0;
+        VkPipelineStageFlags dstStages = 0;
+
+        if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
+            ASSERT(srcStages != 0 && dstStages != 0);
+            ToBackend(GetDevice())
+                ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                        nullptr, 1u, &barrier, 0, nullptr);
+        }
+    }
+
+    bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+                                                      VkBufferMemoryBarrier* barrier,
+                                                      VkPipelineStageFlags* srcStages,
+                                                      VkPipelineStageFlags* dstStages) {
+        bool lastIncludesTarget = IsSubset(usage, mLastUsage);
+        bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+
+        // We can skip transitions to already current read-only usages.
+        if (lastIncludesTarget && lastReadOnly) {
+            return false;
+        }
+
+        // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
+        if (mLastUsage == wgpu::BufferUsage::None) {
+            mLastUsage = usage;
+            return false;
+        }
+
+        *srcStages |= VulkanPipelineStage(mLastUsage);
+        *dstStages |= VulkanPipelineStage(usage);
+
+        barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier->pNext = nullptr;
+        barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
+        barrier->dstAccessMask = VulkanAccessFlags(usage);
+        barrier->srcQueueFamilyIndex = 0;
+        barrier->dstQueueFamilyIndex = 0;
+        barrier->buffer = mHandle;
+        barrier->offset = 0;
+        // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+        barrier->size = GetAllocatedSize();
+
+        mLastUsage = usage;
+
+        return true;
+    }
+
+    bool Buffer::IsCPUWritableAtCreation() const {
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return mMemoryAllocation.GetMappedPointer() != nullptr;
+    }
+
+    MaybeError Buffer::MapAtCreationImpl() {
+        return {};
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        Device* device = ToBackend(GetDevice());
+
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+        // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
+        EnsureDataInitialized(recordingContext);
+
+        if (mode & wgpu::MapMode::Read) {
+            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
+        }
+        return {};
+    }
+
+    void Buffer::UnmapImpl() {
+        // No need to do anything, we keep CPU-visible memory mapped at all time.
+    }
+
+    void* Buffer::GetMappedPointerImpl() {
+        uint8_t* memory = mMemoryAllocation.GetMappedPointer();
+        ASSERT(memory != nullptr);
+        return memory;
+    }
+
+    void Buffer::DestroyImpl() {
+        BufferBase::DestroyImpl();
+
+        ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        InitializeToZero(recordingContext);
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                    uint64_t offset,
+                                                    uint64_t size) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferRange(offset, size)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero(recordingContext);
+        return true;
+    }
+
+    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                    const CopyTextureToBufferCmd* copy) {
+        if (!NeedsInitialization()) {
+            return false;
+        }
+
+        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+            SetIsDataInitialized();
+            return false;
+        }
+
+        InitializeToZero(recordingContext);
+        return true;
+    }
+
+    void Buffer::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_BUFFER,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_Buffer", GetLabel());
+    }
+
+    void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
+        ASSERT(NeedsInitialization());
+
+        ClearBuffer(recordingContext, 0u);
+        GetDevice()->IncrementLazyClearCountForTesting();
+        SetIsDataInitialized();
+    }
+
+    void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
+                             uint32_t clearValue,
+                             uint64_t offset,
+                             uint64_t size) {
+        ASSERT(recordingContext != nullptr);
+        size = size > 0 ? size : GetAllocatedSize();
+        ASSERT(size > 0);
+
+        TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+        Device* device = ToBackend(GetDevice());
+        // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+        // Note: Allocated size must be a multiple of 4.
+        ASSERT(size % 4 == 0);
+        device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size,
+                                 clearValue);
+    }
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BufferVk.h b/src/dawn/native/vulkan/BufferVk.h
new file mode 100644
index 0000000..1f7ae74
--- /dev/null
+++ b/src/dawn/native/vulkan/BufferVk.h
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BUFFERVK_H_
+#define DAWNNATIVE_VULKAN_BUFFERVK_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+namespace dawn::native::vulkan {
+
+    struct CommandRecordingContext;
+    class Device;
+
+    class Buffer final : public BufferBase {
+      public:
+        static ResultOrError<Ref<Buffer>> Create(Device* device,
+                                                 const BufferDescriptor* descriptor);
+
+        VkBuffer GetHandle() const;
+
+        // Transitions the buffer to be used as `usage`, recording any necessary barrier in
+        // `commands`.
+        // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+        void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
+        bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+                                                  VkBufferMemoryBarrier* barrier,
+                                                  VkPipelineStageFlags* srcStages,
+                                                  VkPipelineStageFlags* dstStages);
+
+        // All the Ensure methods return true if the buffer was initialized to zero.
+        bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
+        bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                uint64_t offset,
+                                                uint64_t size);
+        bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                const CopyTextureToBufferCmd* copy);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~Buffer() override;
+        using BufferBase::BufferBase;
+
+        MaybeError Initialize(bool mappedAtCreation);
+        void InitializeToZero(CommandRecordingContext* recordingContext);
+        void ClearBuffer(CommandRecordingContext* recordingContext,
+                         uint32_t clearValue,
+                         uint64_t offset = 0,
+                         uint64_t size = 0);
+
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+        void UnmapImpl() override;
+        void DestroyImpl() override;
+        bool IsCPUWritableAtCreation() const override;
+        MaybeError MapAtCreationImpl() override;
+        void* GetMappedPointerImpl() override;
+
+        VkBuffer mHandle = VK_NULL_HANDLE;
+        ResourceMemoryAllocation mMemoryAllocation;
+
+        wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_BUFFERVK_H_
diff --git a/src/dawn/native/vulkan/CommandBufferVk.cpp b/src/dawn/native/vulkan/CommandBufferVk.cpp
new file mode 100644
index 0000000..2e94f6a
--- /dev/null
+++ b/src/dawn/native/vulkan/CommandBufferVk.cpp
@@ -0,0 +1,1331 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/CommandBufferVk.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/QuerySetVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <algorithm>
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
+            switch (format) {
+                case wgpu::IndexFormat::Uint16:
+                    return VK_INDEX_TYPE_UINT16;
+                case wgpu::IndexFormat::Uint32:
+                    return VK_INDEX_TYPE_UINT32;
+                case wgpu::IndexFormat::Undefined:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
+                                      const TextureCopy& dstCopy,
+                                      const Extent3D& copySize) {
+            Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
+            Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
+            return imageExtentSrc.width == imageExtentDst.width &&
+                   imageExtentSrc.height == imageExtentDst.height &&
+                   imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
+        }
+
+        VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
+                                           const TextureCopy& dstCopy,
+                                           const Extent3D& copySize,
+                                           Aspect aspect) {
+            const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
+            const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
+
+            VkImageCopy region;
+            region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
+            region.srcSubresource.mipLevel = srcCopy.mipLevel;
+            region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
+            region.dstSubresource.mipLevel = dstCopy.mipLevel;
+
+            bool has3DTextureInCopy = false;
+
+            region.srcOffset.x = srcCopy.origin.x;
+            region.srcOffset.y = srcCopy.origin.y;
+            switch (srcTexture->GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    region.srcSubresource.baseArrayLayer = 0;
+                    region.srcSubresource.layerCount = 1;
+                    region.srcOffset.z = 0;
+                    break;
+                case wgpu::TextureDimension::e2D:
+                    region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
+                    region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+                    region.srcOffset.z = 0;
+                    break;
+                case wgpu::TextureDimension::e3D:
+                    has3DTextureInCopy = true;
+                    region.srcSubresource.baseArrayLayer = 0;
+                    region.srcSubresource.layerCount = 1;
+                    region.srcOffset.z = srcCopy.origin.z;
+                    break;
+            }
+
+            region.dstOffset.x = dstCopy.origin.x;
+            region.dstOffset.y = dstCopy.origin.y;
+            switch (dstTexture->GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    region.dstSubresource.baseArrayLayer = 0;
+                    region.dstSubresource.layerCount = 1;
+                    region.dstOffset.z = 0;
+                    break;
+                case wgpu::TextureDimension::e2D:
+                    region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
+                    region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+                    region.dstOffset.z = 0;
+                    break;
+                case wgpu::TextureDimension::e3D:
+                    has3DTextureInCopy = true;
+                    region.dstSubresource.baseArrayLayer = 0;
+                    region.dstSubresource.layerCount = 1;
+                    region.dstOffset.z = dstCopy.origin.z;
+                    break;
+            }
+
+            ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
+            Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
+            region.extent.width = imageExtent.width;
+            region.extent.height = imageExtent.height;
+            region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
+
+            return region;
+        }
+
+        class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+          public:
+            DescriptorSetTracker() = default;
+
+            void Apply(Device* device,
+                       CommandRecordingContext* recordingContext,
+                       VkPipelineBindPoint bindPoint) {
+                BeforeApply();
+                for (BindGroupIndex dirtyIndex :
+                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+                    VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
+                    const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
+                                                        ? mDynamicOffsets[dirtyIndex].data()
+                                                        : nullptr;
+                    device->fn.CmdBindDescriptorSets(
+                        recordingContext->commandBuffer, bindPoint,
+                        ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
+                        1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
+                }
+                AfterApply();
+            }
+        };
+
+        // Records the necessary barriers for a synchronization scope using the resource usage
+        // data pre-computed in the frontend. Also performs lazy initialization if required.
+        void TransitionAndClearForSyncScope(Device* device,
+                                            CommandRecordingContext* recordingContext,
+                                            const SyncScopeResourceUsage& scope) {
+            std::vector<VkBufferMemoryBarrier> bufferBarriers;
+            std::vector<VkImageMemoryBarrier> imageBarriers;
+            VkPipelineStageFlags srcStages = 0;
+            VkPipelineStageFlags dstStages = 0;
+
+            for (size_t i = 0; i < scope.buffers.size(); ++i) {
+                Buffer* buffer = ToBackend(scope.buffers[i]);
+                buffer->EnsureDataInitialized(recordingContext);
+
+                VkBufferMemoryBarrier bufferBarrier;
+                if (buffer->TransitionUsageAndGetResourceBarrier(
+                        scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
+                    bufferBarriers.push_back(bufferBarrier);
+                }
+            }
+
+            for (size_t i = 0; i < scope.textures.size(); ++i) {
+                Texture* texture = ToBackend(scope.textures[i]);
+
+                // Clear subresources that are not render attachments. Render attachments will be
+                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+                // subresource has not been initialized before the render pass.
+                scope.textureUsages[i].Iterate(
+                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                            texture->EnsureSubresourceContentInitialized(recordingContext, range);
+                        }
+                    });
+                texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
+                                                &imageBarriers, &srcStages, &dstStages);
+            }
+
+            if (bufferBarriers.size() || imageBarriers.size()) {
+                device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
+                                              0, 0, nullptr, bufferBarriers.size(),
+                                              bufferBarriers.data(), imageBarriers.size(),
+                                              imageBarriers.data());
+            }
+        }
+
+        MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
+                                         Device* device,
+                                         BeginRenderPassCmd* renderPass) {
+            VkCommandBuffer commands = recordingContext->commandBuffer;
+
+            // Query a VkRenderPass from the cache
+            VkRenderPass renderPassVK = VK_NULL_HANDLE;
+            {
+                RenderPassCacheQuery query;
+
+                for (ColorAttachmentIndex i :
+                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                    const auto& attachmentInfo = renderPass->colorAttachments[i];
+
+                    bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+                    query.SetColor(i, attachmentInfo.view->GetFormat().format,
+                                   attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
+                }
+
+                if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                    const auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+                    query.SetDepthStencil(
+                        attachmentInfo.view->GetTexture()->GetFormat().format,
+                        attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+                        attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                        attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
+                }
+
+                query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
+
+                DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
+            }
+
+            // Create a framebuffer that will be used once for the render pass and gather the clear
+            // values for the attachments at the same time.
+            std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
+            VkFramebuffer framebuffer = VK_NULL_HANDLE;
+            uint32_t attachmentCount = 0;
+            {
+                // Fill in the attachment info that will be chained in the framebuffer create info.
+                std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
+
+                for (ColorAttachmentIndex i :
+                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                    auto& attachmentInfo = renderPass->colorAttachments[i];
+                    TextureView* view = ToBackend(attachmentInfo.view.Get());
+                    if (view == nullptr) {
+                        continue;
+                    }
+
+                    attachments[attachmentCount] = view->GetHandle();
+
+                    switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                        case wgpu::TextureComponentType::Float: {
+                            const std::array<float, 4> appliedClearColor =
+                                ConvertToFloatColor(attachmentInfo.clearColor);
+                            for (uint32_t i = 0; i < 4; ++i) {
+                                clearValues[attachmentCount].color.float32[i] =
+                                    appliedClearColor[i];
+                            }
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Uint: {
+                            const std::array<uint32_t, 4> appliedClearColor =
+                                ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
+                            for (uint32_t i = 0; i < 4; ++i) {
+                                clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
+                            }
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Sint: {
+                            const std::array<int32_t, 4> appliedClearColor =
+                                ConvertToSignedIntegerColor(attachmentInfo.clearColor);
+                            for (uint32_t i = 0; i < 4; ++i) {
+                                clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
+                            }
+                            break;
+                        }
+
+                        case wgpu::TextureComponentType::DepthComparison:
+                            UNREACHABLE();
+                    }
+                    attachmentCount++;
+                }
+
+                if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                    auto& attachmentInfo = renderPass->depthStencilAttachment;
+                    TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+                    attachments[attachmentCount] = view->GetHandle();
+
+                    clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
+                    clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
+
+                    attachmentCount++;
+                }
+
+                for (ColorAttachmentIndex i :
+                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                    if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+                        TextureView* view =
+                            ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+
+                        attachments[attachmentCount] = view->GetHandle();
+
+                        attachmentCount++;
+                    }
+                }
+
+                // Chain attachments and create the framebuffer
+                VkFramebufferCreateInfo createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                createInfo.renderPass = renderPassVK;
+                createInfo.attachmentCount = attachmentCount;
+                createInfo.pAttachments = AsVkArray(attachments.data());
+                createInfo.width = renderPass->width;
+                createInfo.height = renderPass->height;
+                createInfo.layers = 1;
+
+                DAWN_TRY(
+                    CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
+                                                                nullptr, &*framebuffer),
+                                   "CreateFramebuffer"));
+
+                // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
+                // commands currently being recorded are finished.
+                device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
+            }
+
+            VkRenderPassBeginInfo beginInfo;
+            beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+            beginInfo.pNext = nullptr;
+            beginInfo.renderPass = renderPassVK;
+            beginInfo.framebuffer = framebuffer;
+            beginInfo.renderArea.offset.x = 0;
+            beginInfo.renderArea.offset.y = 0;
+            beginInfo.renderArea.extent.width = renderPass->width;
+            beginInfo.renderArea.extent.height = renderPass->height;
+            beginInfo.clearValueCount = attachmentCount;
+            beginInfo.pClearValues = clearValues.data();
+
+            device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+            return {};
+        }
+
+        // Reset the query sets used on render pass because the reset command must be called outside
+        // render pass.
+        void ResetUsedQuerySetsOnRenderPass(Device* device,
+                                            VkCommandBuffer commands,
+                                            QuerySetBase* querySet,
+                                            const std::vector<bool>& availability) {
+            ASSERT(availability.size() == querySet->GetQueryAvailability().size());
+
+            auto currentIt = availability.begin();
+            auto lastIt = availability.end();
+            // Traverse the used queries which availability are true.
+            while (currentIt != lastIt) {
+                auto firstTrueIt = std::find(currentIt, lastIt, true);
+                // No used queries need to be reset
+                if (firstTrueIt == lastIt) {
+                    break;
+                }
+
+                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+                uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
+                uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
+
+                // Reset the queries between firstTrueIt and nextFalseIt (which is at most
+                // lastIt)
+                device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
+                                             queryCount);
+
+                // Set current iterator to next false
+                currentIt = nextFalseIt;
+            }
+        }
+
+        void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
+                                     Device* device,
+                                     WriteTimestampCmd* cmd) {
+            VkCommandBuffer commands = recordingContext->commandBuffer;
+            QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+            device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                         querySet->GetHandle(), cmd->queryIndex);
+        }
+
+        void RecordResolveQuerySetCmd(VkCommandBuffer commands,
+                                      Device* device,
+                                      QuerySet* querySet,
+                                      uint32_t firstQuery,
+                                      uint32_t queryCount,
+                                      Buffer* destination,
+                                      uint64_t destinationOffset) {
+            const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+            auto currentIt = availability.begin() + firstQuery;
+            auto lastIt = availability.begin() + firstQuery + queryCount;
+
+            // Traverse available queries in the range of [firstQuery, firstQuery +  queryCount - 1]
+            while (currentIt != lastIt) {
+                auto firstTrueIt = std::find(currentIt, lastIt, true);
+                // No available query found for resolving
+                if (firstTrueIt == lastIt) {
+                    break;
+                }
+                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+                // The query index of firstTrueIt where the resolving starts
+                uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+                // The queries count between firstTrueIt and nextFalseIt need to be resolved
+                uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+                // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+                uint32_t resolveDestinationOffset =
+                    destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+                // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+                device->fn.CmdCopyQueryPoolResults(
+                    commands, querySet->GetHandle(), resolveQueryIndex, resolveQueryCount,
+                    destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
+                    VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+
+                // Set current iterator to next false
+                currentIt = nextFalseIt;
+            }
+        }
+
+    }  // anonymous namespace
+
+    // static
+    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                             const CommandBufferDescriptor* descriptor) {
+        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    }
+
+    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+        : CommandBufferBase(encoder, descriptor) {
+    }
+
+    MaybeError CommandBuffer::RecordCopyImageWithTemporaryBuffer(
+        CommandRecordingContext* recordingContext,
+        const TextureCopy& srcCopy,
+        const TextureCopy& dstCopy,
+        const Extent3D& copySize) {
+        ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+        ASSERT(srcCopy.aspect == dstCopy.aspect);
+        dawn::native::Format format = srcCopy.texture->GetFormat();
+        const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+        ASSERT(copySize.width % blockInfo.width == 0);
+        uint32_t widthInBlocks = copySize.width / blockInfo.width;
+        ASSERT(copySize.height % blockInfo.height == 0);
+        uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+        // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
+        // because it isn't a hard constraint in Vulkan.
+        uint64_t tempBufferSize =
+            widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
+        BufferDescriptor tempBufferDescriptor;
+        tempBufferDescriptor.size = tempBufferSize;
+        tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+        Device* device = ToBackend(GetDevice());
+        Ref<BufferBase> tempBufferBase;
+        DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+        Buffer* tempBuffer = ToBackend(tempBufferBase.Get());
+
+        BufferCopy tempBufferCopy;
+        tempBufferCopy.buffer = tempBuffer;
+        tempBufferCopy.rowsPerImage = heightInBlocks;
+        tempBufferCopy.offset = 0;
+        tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
+
+        VkCommandBuffer commands = recordingContext->commandBuffer;
+        VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
+        VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
+
+        tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+        VkBufferImageCopy srcToTempBufferRegion =
+            ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
+
+        // The Dawn CopySrc usage is always mapped to GENERAL
+        device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                        tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
+
+        tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+        VkBufferImageCopy tempBufferToDstRegion =
+            ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
+
+        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+        // copy command.
+        device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
+                                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                        &tempBufferToDstRegion);
+
+        recordingContext->tempBuffers.emplace_back(tempBuffer);
+
+        return {};
+    }
+
+    MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
+        Device* device = ToBackend(GetDevice());
+        VkCommandBuffer commands = recordingContext->commandBuffer;
+
+        // Records the necessary barriers for the resource usage pre-computed by the frontend.
+        // And resets the used query sets which are rewritten on the render pass.
+        auto PrepareResourcesForRenderPass = [](Device* device,
+                                                CommandRecordingContext* recordingContext,
+                                                const RenderPassResourceUsage& usages) {
+            TransitionAndClearForSyncScope(device, recordingContext, usages);
+
+            // Reset all query set used on current render pass together before beginning render pass
+            // because the reset command must be called outside render pass
+            for (size_t i = 0; i < usages.querySets.size(); ++i) {
+                ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
+                                               usages.querySets[i], usages.queryAvailabilities[i]);
+            }
+        };
+
+        size_t nextComputePassNumber = 0;
+        size_t nextRenderPassNumber = 0;
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                    if (copy->size == 0) {
+                        // Skip no-op copies.
+                        break;
+                    }
+
+                    Buffer* srcBuffer = ToBackend(copy->source.Get());
+                    Buffer* dstBuffer = ToBackend(copy->destination.Get());
+
+                    srcBuffer->EnsureDataInitialized(recordingContext);
+                    dstBuffer->EnsureDataInitializedAsDestination(
+                        recordingContext, copy->destinationOffset, copy->size);
+
+                    srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                    dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                    VkBufferCopy region;
+                    region.srcOffset = copy->sourceOffset;
+                    region.dstOffset = copy->destinationOffset;
+                    region.size = copy->size;
+
+                    VkBuffer srcHandle = srcBuffer->GetHandle();
+                    VkBuffer dstHandle = dstBuffer->GetHandle();
+                    device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
+                    break;
+                }
+
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+
+                    ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
+
+                    VkBufferImageCopy region =
+                        ComputeBufferImageCopyRegion(src, dst, copy->copySize);
+                    VkImageSubresourceLayers subresource = region.imageSubresource;
+
+                    SubresourceRange range =
+                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                      subresource.mipLevel)) {
+                        // Since texture has been overwritten, it has been "initialized"
+                        dst.texture->SetIsSubresourceContentInitialized(true, range);
+                    } else {
+                        ToBackend(dst.texture)
+                            ->EnsureSubresourceContentInitialized(recordingContext, range);
+                    }
+                    ToBackend(src.buffer)
+                        ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                    ToBackend(dst.texture)
+                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+                    VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
+                    VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                    // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+                    // copy command.
+                    device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
+                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                                    &region);
+                    break;
+                }
+
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    auto& src = copy->source;
+                    auto& dst = copy->destination;
+
+                    ToBackend(dst.buffer)
+                        ->EnsureDataInitializedAsDestination(recordingContext, copy);
+
+                    VkBufferImageCopy region =
+                        ComputeBufferImageCopyRegion(dst, src, copy->copySize);
+
+                    SubresourceRange range =
+                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
+                    ToBackend(src.texture)
+                        ->EnsureSubresourceContentInitialized(recordingContext, range);
+
+                    ToBackend(src.texture)
+                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
+                    ToBackend(dst.buffer)
+                        ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                    VkImage srcImage = ToBackend(src.texture)->GetHandle();
+                    VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
+                    // The Dawn CopySrc usage is always mapped to GENERAL
+                    device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                    dstBuffer, 1, &region);
+                    break;
+                }
+
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        mCommands.NextCommand<CopyTextureToTextureCmd>();
+                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                        copy->copySize.depthOrArrayLayers == 0) {
+                        // Skip no-op copies.
+                        continue;
+                    }
+                    TextureCopy& src = copy->source;
+                    TextureCopy& dst = copy->destination;
+                    SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                    SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+                    ToBackend(src.texture)
+                        ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                      dst.mipLevel)) {
+                        // Since destination texture has been overwritten, it has been "initialized"
+                        dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
+                    } else {
+                        ToBackend(dst.texture)
+                            ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+                    }
+
+                    if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
+                        // When there are overlapped subresources, the layout of the overlapped
+                        // subresources should all be GENERAL instead of what we set now. Currently
+                        // it is not allowed to copy with overlapped subresources, but we still
+                        // add the ASSERT here as a reminder for this possible misuse.
+                        ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
+                                                  copy->copySize.depthOrArrayLayers));
+                    }
+
+                    // TODO after Yunchao's CL
+                    ToBackend(src.texture)
+                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+                                             srcRange);
+                    ToBackend(dst.texture)
+                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+                                             dstRange);
+
+                    // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
+                    // because as Vulkan SPEC always validates image copies with the virtual size of
+                    // the image subresource, when the extent that fits in the copy region of one
+                    // subresource but does not fit in the one of another subresource, we will fail
+                    // to find a valid extent to satisfy the requirements on both source and
+                    // destination image subresource. For example, when the source is the first
+                    // level of a 16x16 texture in BC format, and the destination is the third level
+                    // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
+                    // the extent of vkCmdCopyImage.
+                    // Our workaround for this issue is replacing the texture-to-texture copy with
+                    // one texture-to-buffer copy and one buffer-to-texture copy.
+                    bool copyUsingTemporaryBuffer =
+                        device->IsToggleEnabled(
+                            Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
+                        src.texture->GetFormat().isCompressed &&
+                        !HasSameTextureCopyExtent(src, dst, copy->copySize);
+
+                    if (!copyUsingTemporaryBuffer) {
+                        VkImage srcImage = ToBackend(src.texture)->GetHandle();
+                        VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                        for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
+                            ASSERT(dst.texture->GetFormat().aspects & aspect);
+                            VkImageCopy region =
+                                ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
+
+                            // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
+                            // the copy command.
+                            device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                    dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                    1, &region);
+                        }
+                    } else {
+                        DAWN_TRY(RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
+                                                                    copy->copySize));
+                    }
+
+                    break;
+                }
+
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                    if (cmd->size == 0) {
+                        // Skip no-op fills.
+                        break;
+                    }
+
+                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+                    bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+                        recordingContext, cmd->offset, cmd->size);
+
+                    if (!clearedToZero) {
+                        dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+                        device->fn.CmdFillBuffer(recordingContext->commandBuffer,
+                                                 dstBuffer->GetHandle(), cmd->offset, cmd->size,
+                                                 0u);
+                    }
+
+                    break;
+                }
+
+                case Command::BeginRenderPass: {
+                    BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                    PrepareResourcesForRenderPass(
+                        device, recordingContext,
+                        GetResourceUsages().renderPasses[nextRenderPassNumber]);
+
+                    LazyClearRenderPassAttachments(cmd);
+                    DAWN_TRY(RecordRenderPass(recordingContext, cmd));
+
+                    nextRenderPassNumber++;
+                    break;
+                }
+
+                case Command::BeginComputePass: {
+                    mCommands.NextCommand<BeginComputePassCmd>();
+
+                    DAWN_TRY(RecordComputePass(
+                        recordingContext,
+                        GetResourceUsages().computePasses[nextComputePassNumber]));
+
+                    nextComputePassNumber++;
+                    break;
+                }
+
+                case Command::ResolveQuerySet: {
+                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                    Buffer* destination = ToBackend(cmd->destination.Get());
+
+                    destination->EnsureDataInitializedAsDestination(
+                        recordingContext, cmd->destinationOffset,
+                        cmd->queryCount * sizeof(uint64_t));
+
+                    // vkCmdCopyQueryPoolResults only can retrieve available queries because
+                    // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
+                    // as 0s, we need to clear the resolving region of the destination buffer to 0s.
+                    auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
+                    auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
+                                 cmd->queryCount;
+                    bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+                    if (hasUnavailableQueries) {
+                        destination->TransitionUsageNow(recordingContext,
+                                                        wgpu::BufferUsage::CopyDst);
+                        device->fn.CmdFillBuffer(commands, destination->GetHandle(),
+                                                 cmd->destinationOffset,
+                                                 cmd->queryCount * sizeof(uint64_t), 0u);
+                    }
+
+                    destination->TransitionUsageNow(recordingContext,
+                                                    wgpu::BufferUsage::QueryResolve);
+
+                    RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
+                                             cmd->queryCount, destination, cmd->destinationOffset);
+
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    // The query must be reset between uses.
+                    device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                                 cmd->queryIndex, 1);
+
+                    RecordWriteTimestampCmd(recordingContext, device, cmd);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(&mCommands, Command::InsertDebugMarker);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        mCommands.NextCommand<PopDebugGroupCmd>();
+                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                    } else {
+                        SkipCommand(&mCommands, Command::PopDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(&mCommands, Command::PushDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                    const uint64_t offset = write->offset;
+                    const uint64_t size = write->size;
+                    if (size == 0) {
+                        continue;
+                    }
+
+                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                    uint8_t* data = mCommands.NextData<uint8_t>(size);
+                    Device* device = ToBackend(GetDevice());
+
+                    UploadHandle uploadHandle;
+                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                      size, device->GetPendingCommandSerial(),
+                                                      kCopyBufferToBufferOffsetAlignment));
+                    ASSERT(uploadHandle.mappedBuffer != nullptr);
+                    memcpy(uploadHandle.mappedBuffer, data, size);
+
+                    dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
+
+                    dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                    VkBufferCopy copy;
+                    copy.srcOffset = uploadHandle.startOffset;
+                    copy.dstOffset = offset;
+                    copy.size = size;
+
+                    device->fn.CmdCopyBuffer(
+                        commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+                        dstBuffer->GetHandle(), 1, &copy);
+                    break;
+                }
+
+                default:
+                    break;
+            }
+        }
+
+        return {};
+    }
+
+    MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
+                                                const ComputePassResourceUsage& resourceUsages) {
+        Device* device = ToBackend(GetDevice());
+        VkCommandBuffer commands = recordingContext->commandBuffer;
+
+        uint64_t currentDispatch = 0;
+        DescriptorSetTracker descriptorSets = {};
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndComputePass: {
+                    mCommands.NextCommand<EndComputePassCmd>();
+                    return {};
+                }
+
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                    TransitionAndClearForSyncScope(device, recordingContext,
+                                                   resourceUsages.dispatchUsages[currentDispatch]);
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                    device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
+                    currentDispatch++;
+                    break;
+                }
+
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                    VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
+
+                    TransitionAndClearForSyncScope(device, recordingContext,
+                                                   resourceUsages.dispatchUsages[currentDispatch]);
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                    device->fn.CmdDispatchIndirect(
+                        commands, indirectBuffer,
+                        static_cast<VkDeviceSize>(dispatch->indirectOffset));
+                    currentDispatch++;
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+
+                    BindGroup* bindGroup = ToBackend(cmd->group.Get());
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+                                                  dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                    ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                    device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
+                                               pipeline->GetHandle());
+                    descriptorSets.OnSetPipeline(pipeline);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(&mCommands, Command::InsertDebugMarker);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        mCommands.NextCommand<PopDebugGroupCmd>();
+                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                    } else {
+                        SkipCommand(&mCommands, Command::PopDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(&mCommands, Command::PushDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    // The query must be reset between uses.
+                    device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                                 cmd->queryIndex, 1);
+
+                    RecordWriteTimestampCmd(recordingContext, device, cmd);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // EndComputePass should have been called
+        UNREACHABLE();
+    }
+
+    MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
+                                               BeginRenderPassCmd* renderPassCmd) {
+        Device* device = ToBackend(GetDevice());
+        VkCommandBuffer commands = recordingContext->commandBuffer;
+
+        DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
+
+        // Set the default value for the dynamic state
+        {
+            device->fn.CmdSetLineWidth(commands, 1.0f);
+            device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
+
+            device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
+
+            float blendConstants[4] = {
+                0.0f,
+                0.0f,
+                0.0f,
+                0.0f,
+            };
+            device->fn.CmdSetBlendConstants(commands, blendConstants);
+
+            // The viewport and scissor default to cover all of the attachments
+            VkViewport viewport;
+            viewport.x = 0.0f;
+            viewport.y = static_cast<float>(renderPassCmd->height);
+            viewport.width = static_cast<float>(renderPassCmd->width);
+            viewport.height = -static_cast<float>(renderPassCmd->height);
+            viewport.minDepth = 0.0f;
+            viewport.maxDepth = 1.0f;
+            device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+
+            VkRect2D scissorRect;
+            scissorRect.offset.x = 0;
+            scissorRect.offset.y = 0;
+            scissorRect.extent.width = renderPassCmd->width;
+            scissorRect.extent.height = renderPassCmd->height;
+            device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
+        }
+
+        DescriptorSetTracker descriptorSets = {};
+        RenderPipeline* lastPipeline = nullptr;
+
+        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                    device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
+                                       draw->firstVertex, draw->firstInstance);
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                    device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
+                                              draw->firstIndex, draw->baseVertex,
+                                              draw->firstInstance);
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                    device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
+                                               static_cast<VkDeviceSize>(draw->indirectOffset), 1,
+                                               0);
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(buffer != nullptr);
+
+                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                    device->fn.CmdDrawIndexedIndirect(
+                        commands, buffer->GetHandle(),
+                        static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                        const char* label = iter->NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(iter, Command::InsertDebugMarker);
+                    }
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        iter->NextCommand<PopDebugGroupCmd>();
+                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                    } else {
+                        SkipCommand(iter, Command::PopDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                        PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                        const char* label = iter->NextData<char>(cmd->length + 1);
+                        VkDebugUtilsLabelEXT utilsLabel;
+                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                        utilsLabel.pNext = nullptr;
+                        utilsLabel.pLabelName = label;
+                        // Default color to black
+                        utilsLabel.color[0] = 0.0;
+                        utilsLabel.color[1] = 0.0;
+                        utilsLabel.color[2] = 0.0;
+                        utilsLabel.color[3] = 1.0;
+                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                    } else {
+                        SkipCommand(iter, Command::PushDebugGroup);
+                    }
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    BindGroup* bindGroup = ToBackend(cmd->group.Get());
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+                                                  dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+                    VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
+
+                    device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
+                                                  VulkanIndexType(cmd->format));
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                    device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                               pipeline->GetHandle());
+                    lastPipeline = pipeline;
+
+                    descriptorSets.OnSetPipeline(pipeline);
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+                    VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
+                    VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
+
+                    device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
+                                                    &*buffer, &offset);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+                    device->fn.CmdEndRenderPass(commands);
+                    return {};
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
+                    device->fn.CmdSetBlendConstants(commands, blendConstants.data());
+                    break;
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                    device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
+                                                      cmd->reference);
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    VkViewport viewport;
+                    viewport.x = cmd->x;
+                    viewport.y = cmd->y + cmd->height;
+                    viewport.width = cmd->width;
+                    viewport.height = -cmd->height;
+                    viewport.minDepth = cmd->minDepth;
+                    viewport.maxDepth = cmd->maxDepth;
+
+                    // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
+                    // height = 0 so use that to do an empty viewport.
+                    if (viewport.width == 0) {
+                        viewport.height = 0;
+
+                        // Set the viewport x range to a range that's always valid.
+                        viewport.x = 0;
+                        viewport.width = 1;
+                    }
+
+                    device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    VkRect2D rect;
+                    rect.offset.x = cmd->x;
+                    rect.offset.y = cmd->y;
+                    rect.extent.width = cmd->width;
+                    rect.extent.height = cmd->height;
+
+                    device->fn.CmdSetScissor(commands, 0, 1, &rect);
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            EncodeRenderBundleCommand(iter, type);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+
+                    device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+                                             cmd->queryIndex, 0);
+                    break;
+                }
+
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+
+                    device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+                                           cmd->queryIndex);
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                    RecordWriteTimestampCmd(recordingContext, device, cmd);
+                    break;
+                }
+
+                default: {
+                    EncodeRenderBundleCommand(&mCommands, type);
+                    break;
+                }
+            }
+        }
+
+        // EndRenderPass should have been called
+        UNREACHABLE();
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/CommandBufferVk.h b/src/dawn/native/vulkan/CommandBufferVk.h
new file mode 100644
index 0000000..d329a41
--- /dev/null
+++ b/src/dawn/native/vulkan/CommandBufferVk.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
+#define DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native {
+    struct BeginRenderPassCmd;
+    struct TextureCopy;
+}  // namespace dawn::native
+
+namespace dawn::native::vulkan {
+
+    struct CommandRecordingContext;
+    class Device;
+
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor);
+
+        MaybeError RecordCommands(CommandRecordingContext* recordingContext);
+
+      private:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
+                                     const ComputePassResourceUsage& resourceUsages);
+        MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
+                                    BeginRenderPassCmd* renderPass);
+        MaybeError RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+                                                      const TextureCopy& srcCopy,
+                                                      const TextureCopy& dstCopy,
+                                                      const Extent3D& copySize);
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
diff --git a/src/dawn/native/vulkan/CommandRecordingContext.h b/src/dawn/native/vulkan/CommandRecordingContext.h
new file mode 100644
index 0000000..44f1c90
--- /dev/null
+++ b/src/dawn/native/vulkan/CommandRecordingContext.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/vulkan/BufferVk.h"
+
+namespace dawn::native::vulkan {
+    // Used to track operations that are handled after recording.
+    // Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
+    struct CommandRecordingContext {
+        VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+        std::vector<VkSemaphore> waitSemaphores = {};
+        std::vector<VkSemaphore> signalSemaphores = {};
+
+        // The internal buffers used in the workaround of texture-to-texture copies with compressed
+        // formats.
+        std::vector<Ref<Buffer>> tempBuffers;
+
+        // For Device state tracking only.
+        VkCommandPool commandPool = VK_NULL_HANDLE;
+        bool used = false;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
diff --git a/src/dawn/native/vulkan/ComputePipelineVk.cpp b/src/dawn/native/vulkan/ComputePipelineVk.cpp
new file mode 100644
index 0000000..fa13e26
--- /dev/null
+++ b/src/dawn/native/vulkan/ComputePipelineVk.cpp
@@ -0,0 +1,116 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(device, descriptor));
+    }
+
+    MaybeError ComputePipeline::Initialize() {
+        VkComputePipelineCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.layout = ToBackend(GetLayout())->GetHandle();
+        createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
+        createInfo.basePipelineIndex = -1;
+
+        createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+        createInfo.stage.pNext = nullptr;
+        createInfo.stage.flags = 0;
+        createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+        // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+        DAWN_TRY_ASSIGN(createInfo.stage.module,
+                        ToBackend(computeStage.module.Get())
+                            ->GetTransformedModuleHandle(computeStage.entryPoint.c_str(),
+                                                         ToBackend(GetLayout())));
+
+        createInfo.stage.pName = computeStage.entryPoint.c_str();
+
+        std::vector<OverridableConstantScalar> specializationDataEntries;
+        std::vector<VkSpecializationMapEntry> specializationMapEntries;
+        VkSpecializationInfo specializationInfo{};
+        createInfo.stage.pSpecializationInfo =
+            GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
+                                    &specializationMapEntries);
+
+        Device* device = ToBackend(GetDevice());
+
+        PNextChainBuilder stageExtChain(&createInfo.stage);
+
+        VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
+        uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
+        if (computeSubgroupSize != 0u) {
+            ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
+            subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
+            stageExtChain.Add(
+                &subgroupSizeInfo,
+                VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
+        }
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
+                                              &createInfo, nullptr, &*mHandle),
+            "CreateComputePipeline"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    void ComputePipeline::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_ComputePipeline", GetLabel());
+    }
+
+    ComputePipeline::~ComputePipeline() = default;
+
+    void ComputePipeline::DestroyImpl() {
+        ComputePipelineBase::DestroyImpl();
+
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    VkPipeline ComputePipeline::GetHandle() const {
+        return mHandle;
+    }
+
+    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                          WGPUCreateComputePipelineAsyncCallback callback,
+                                          void* userdata) {
+        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                             userdata);
+        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ComputePipelineVk.h b/src/dawn/native/vulkan/ComputePipelineVk.h
new file mode 100644
index 0000000..ef4aeff
--- /dev/null
+++ b/src/dawn/native/vulkan/ComputePipelineVk.h
@@ -0,0 +1,53 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
+#define DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class ComputePipeline final : public ComputePipelineBase {
+      public:
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                    void* userdata);
+
+        VkPipeline GetHandle() const;
+
+        MaybeError Initialize() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~ComputePipeline() override;
+        void DestroyImpl() override;
+        using ComputePipelineBase::ComputePipelineBase;
+
+        VkPipeline mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocation.h b/src/dawn/native/vulkan/DescriptorSetAllocation.h
new file mode 100644
index 0000000..ef72e86
--- /dev/null
+++ b/src/dawn/native/vulkan/DescriptorSetAllocation.h
@@ -0,0 +1,31 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
+#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+    // Contains a descriptor set along with data necessary to track its allocation.
+    struct DescriptorSetAllocation {
+        VkDescriptorSet set = VK_NULL_HANDLE;
+        uint32_t poolIndex;
+        uint16_t setIndex;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocator.cpp b/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
new file mode 100644
index 0000000..0f89d61
--- /dev/null
+++ b/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
@@ -0,0 +1,188 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // TODO(enga): Figure out this value.
+    static constexpr uint32_t kMaxDescriptorsPerPool = 512;
+
+    // static
+    Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
+        BindGroupLayout* layout,
+        std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
+        return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+    }
+
+    DescriptorSetAllocator::DescriptorSetAllocator(
+        BindGroupLayout* layout,
+        std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
+        : ObjectBase(layout->GetDevice()), mLayout(layout) {
+        ASSERT(layout != nullptr);
+
+        // Compute the total number of descriptors for this layout.
+        uint32_t totalDescriptorCount = 0;
+        mPoolSizes.reserve(descriptorCountPerType.size());
+        for (const auto& [type, count] : descriptorCountPerType) {
+            ASSERT(count > 0);
+            totalDescriptorCount += count;
+            mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
+        }
+
+        if (totalDescriptorCount == 0) {
+            // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
+            // number of pools, each of which has non-zero descriptor counts.
+            // Since the descriptor set layout is empty, we should be able to allocate
+            // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
+            // The type of this descriptor pool doesn't matter because it is never used.
+            mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
+            mMaxSets = kMaxDescriptorsPerPool;
+        } else {
+            ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
+            static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
+
+            // Compute the total number of descriptors sets that fits given the max.
+            mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
+            ASSERT(mMaxSets > 0);
+
+            // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
+            for (auto& poolSize : mPoolSizes) {
+                poolSize.descriptorCount *= mMaxSets;
+            }
+        }
+    }
+
+    DescriptorSetAllocator::~DescriptorSetAllocator() {
+        for (auto& pool : mDescriptorPools) {
+            ASSERT(pool.freeSetIndices.size() == mMaxSets);
+            if (pool.vkPool != VK_NULL_HANDLE) {
+                Device* device = ToBackend(GetDevice());
+                device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
+            }
+        }
+    }
+
+    ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
+        if (mAvailableDescriptorPoolIndices.empty()) {
+            DAWN_TRY(AllocateDescriptorPool());
+        }
+
+        ASSERT(!mAvailableDescriptorPoolIndices.empty());
+
+        const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
+        DescriptorPool* pool = &mDescriptorPools[poolIndex];
+
+        ASSERT(!pool->freeSetIndices.empty());
+
+        SetIndex setIndex = pool->freeSetIndices.back();
+        pool->freeSetIndices.pop_back();
+
+        if (pool->freeSetIndices.empty()) {
+            mAvailableDescriptorPoolIndices.pop_back();
+        }
+
+        return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+    }
+
+    void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
+        ASSERT(allocationInfo != nullptr);
+        ASSERT(allocationInfo->set != VK_NULL_HANDLE);
+
+        // We can't reuse the descriptor set right away because the Vulkan spec says in the
+        // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
+        // host execution of the command and the end of the draw/dispatch.
+        Device* device = ToBackend(GetDevice());
+        const ExecutionSerial serial = device->GetPendingCommandSerial();
+        mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex},
+                                      serial);
+
+        if (mLastDeallocationSerial != serial) {
+            device->EnqueueDeferredDeallocation(this);
+            mLastDeallocationSerial = serial;
+        }
+
+        // Clear the content of allocation so that use after frees are more visible.
+        *allocationInfo = {};
+    }
+
+    void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
+        for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
+            ASSERT(dealloc.poolIndex < mDescriptorPools.size());
+
+            auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
+            if (freeSetIndices.empty()) {
+                mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
+            }
+            freeSetIndices.emplace_back(dealloc.setIndex);
+        }
+        mPendingDeallocations.ClearUpTo(completedSerial);
+    }
+
+    MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
+        VkDescriptorPoolCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.maxSets = mMaxSets;
+        createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
+        createInfo.pPoolSizes = mPoolSizes.data();
+
+        Device* device = ToBackend(GetDevice());
+
+        VkDescriptorPool descriptorPool;
+        DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
+                                                                nullptr, &*descriptorPool),
+                                "CreateDescriptorPool"));
+
+        std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
+
+        VkDescriptorSetAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+        allocateInfo.pNext = nullptr;
+        allocateInfo.descriptorPool = descriptorPool;
+        allocateInfo.descriptorSetCount = mMaxSets;
+        allocateInfo.pSetLayouts = AsVkArray(layouts.data());
+
+        std::vector<VkDescriptorSet> sets(mMaxSets);
+        MaybeError result =
+            CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
+                                                             AsVkArray(sets.data())),
+                           "AllocateDescriptorSets");
+        if (result.IsError()) {
+            // On an error we can destroy the pool immediately because no command references it.
+            device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
+            DAWN_TRY(std::move(result));
+        }
+
+        std::vector<SetIndex> freeSetIndices;
+        freeSetIndices.reserve(mMaxSets);
+
+        for (SetIndex i = 0; i < mMaxSets; ++i) {
+            freeSetIndices.push_back(i);
+        }
+
+        mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
+        mDescriptorPools.emplace_back(
+            DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
+
+        return {};
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocator.h b/src/dawn/native/vulkan/DescriptorSetAllocator.h
new file mode 100644
index 0000000..b6cd495
--- /dev/null
+++ b/src/dawn/native/vulkan/DescriptorSetAllocator.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
+#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/vulkan/DescriptorSetAllocation.h"
+
+#include <map>
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    class BindGroupLayout;
+
+    class DescriptorSetAllocator : public ObjectBase {
+        using PoolIndex = uint32_t;
+        using SetIndex = uint16_t;
+
+      public:
+        static Ref<DescriptorSetAllocator> Create(
+            BindGroupLayout* layout,
+            std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+
+        ResultOrError<DescriptorSetAllocation> Allocate();
+        void Deallocate(DescriptorSetAllocation* allocationInfo);
+        void FinishDeallocation(ExecutionSerial completedSerial);
+
+      private:
+        DescriptorSetAllocator(BindGroupLayout* layout,
+                               std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+        ~DescriptorSetAllocator();
+
+        MaybeError AllocateDescriptorPool();
+
+        BindGroupLayout* mLayout;
+
+        std::vector<VkDescriptorPoolSize> mPoolSizes;
+        SetIndex mMaxSets;
+
+        struct DescriptorPool {
+            VkDescriptorPool vkPool;
+            std::vector<VkDescriptorSet> sets;
+            std::vector<SetIndex> freeSetIndices;
+        };
+
+        std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
+        std::vector<DescriptorPool> mDescriptorPools;
+
+        struct Deallocation {
+            PoolIndex poolIndex;
+            SetIndex setIndex;
+        };
+        SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
+        ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
diff --git a/src/dawn/native/vulkan/DeviceVk.cpp b/src/dawn/native/vulkan/DeviceVk.cpp
new file mode 100644
index 0000000..7660f4c
--- /dev/null
+++ b/src/dawn/native/vulkan/DeviceVk.cpp
@@ -0,0 +1,1053 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandBufferVk.h"
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/QuerySetVk.h"
+#include "dawn/native/vulkan/QueueVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/SamplerVk.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/SwapChainVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+                                              const DeviceDescriptor* descriptor) {
+        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
+        : DeviceBase(adapter, descriptor) {
+        InitTogglesFromDriver();
+    }
+
+    MaybeError Device::Initialize() {
+        // Copy the adapter's device info to the device so that we can change the "knobs"
+        mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+
+        // Initialize the "instance" procs of our local function table.
+        VulkanFunctions* functions = GetMutableFunctions();
+        *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
+
+        // Two things are crucial if device initialization fails: the function pointers to destroy
+        // objects, and the fence deleter that calls these functions. Do not do anything before
+        // these two are set up, so that a failed initialization doesn't cause a crash in
+        // DestroyImpl()
+        {
+            VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
+
+            VulkanDeviceKnobs usedDeviceKnobs = {};
+            DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
+            *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
+
+            DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
+
+            // The queue can be loaded before the fenced deleter because their lifetime is tied to
+            // the device.
+            GatherQueueFromDevice();
+
+            mDeleter = std::make_unique<FencedDeleter>(this);
+        }
+
+        mRenderPassCache = std::make_unique<RenderPassCache>(this);
+        mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
+
+        mExternalMemoryService = std::make_unique<external_memory::Service>(this);
+        mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+
+        DAWN_TRY(PrepareRecordingContext());
+
+        // The environment can request to various options for depth-stencil formats that could be
+        // unavailable. Override the decision if it is not applicable.
+        ApplyDepthStencilFormatToggles();
+
+        // The environment can only request to use VK_KHR_zero_initialize_workgroup_memory when the
+        // extension is available. Override the decision if it is no applicable.
+        ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+
+        return DeviceBase::Initialize(Queue::Create(this));
+    }
+
+    Device::~Device() {
+        Destroy();
+    }
+
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        return BindGroup::Create(this, descriptor);
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        return Buffer::Create(this, descriptor);
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return CommandBuffer::Create(encoder, descriptor);
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return ComputePipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return PipelineLayout::Create(this, descriptor);
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return QuerySet::Create(this, descriptor);
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return RenderPipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return Sampler::Create(this, descriptor);
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        return ShaderModule::Create(this, descriptor, parseResult);
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return OldSwapChain::Create(this, descriptor);
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return Texture::Create(this, descriptor);
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return TextureView::Create(texture, descriptor);
+    }
+    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata) {
+        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+    }
+    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata) {
+        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+    }
+
+    MaybeError Device::TickImpl() {
+        RecycleCompletedCommands();
+
+        ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+        for (Ref<DescriptorSetAllocator>& allocator :
+             mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+            allocator->FinishDeallocation(completedSerial);
+        }
+
+        mResourceMemoryAllocator->Tick(completedSerial);
+        mDeleter->Tick(completedSerial);
+        mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+        if (mRecordingContext.used) {
+            DAWN_TRY(SubmitPendingCommands());
+        }
+
+        return {};
+    }
+
+    VkInstance Device::GetVkInstance() const {
+        return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
+    }
+    const VulkanDeviceInfo& Device::GetDeviceInfo() const {
+        return mDeviceInfo;
+    }
+
+    const VulkanGlobalInfo& Device::GetGlobalInfo() const {
+        return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
+    }
+
+    VkDevice Device::GetVkDevice() const {
+        return mVkDevice;
+    }
+
+    uint32_t Device::GetGraphicsQueueFamily() const {
+        return mQueueFamily;
+    }
+
+    VkQueue Device::GetQueue() const {
+        return mQueue;
+    }
+
+    FencedDeleter* Device::GetFencedDeleter() const {
+        return mDeleter.get();
+    }
+
+    RenderPassCache* Device::GetRenderPassCache() const {
+        return mRenderPassCache.get();
+    }
+
+    ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
+        return mResourceMemoryAllocator.get();
+    }
+
+    void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
+        mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
+    }
+
+    CommandRecordingContext* Device::GetPendingRecordingContext() {
+        ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
+        mRecordingContext.used = true;
+        return &mRecordingContext;
+    }
+
+    MaybeError Device::SubmitPendingCommands() {
+        if (!mRecordingContext.used) {
+            return {};
+        }
+
+        DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
+                                "vkEndCommandBuffer"));
+
+        std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
+                                                        VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+
+        VkSubmitInfo submitInfo;
+        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+        submitInfo.pNext = nullptr;
+        submitInfo.waitSemaphoreCount =
+            static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
+        submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
+        submitInfo.pWaitDstStageMask = dstStageMasks.data();
+        submitInfo.commandBufferCount = 1;
+        submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
+        submitInfo.signalSemaphoreCount =
+            static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
+        submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
+
+        VkFence fence = VK_NULL_HANDLE;
+        DAWN_TRY_ASSIGN(fence, GetUnusedFence());
+        DAWN_TRY_WITH_CLEANUP(
+            CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
+                // If submitting to the queue fails, move the fence back into the unused fence
+                // list, as if it were never acquired. Not doing so would leak the fence since
+                // it would be neither in the unused list nor in the in-flight list.
+                mUnusedFences.push_back(fence);
+            });
+
+        // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
+        // soon as the current submission is finished.
+        for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+            mDeleter->DeleteWhenUnused(semaphore);
+        }
+        for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+            mDeleter->DeleteWhenUnused(semaphore);
+        }
+
+        IncrementLastSubmittedCommandSerial();
+        ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
+        mFencesInFlight.emplace(fence, lastSubmittedSerial);
+
+        CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
+                                                  mRecordingContext.commandBuffer};
+        mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
+        mRecordingContext = CommandRecordingContext();
+        DAWN_TRY(PrepareRecordingContext());
+
+        return {};
+    }
+
+    ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
+        VulkanDeviceKnobs usedKnobs = {};
+
+        // Default to asking for all avilable known extensions.
+        usedKnobs.extensions = mDeviceInfo.extensions;
+
+        // However only request the extensions that haven't been promoted in the device's apiVersion
+        std::vector<const char*> extensionNames;
+        for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
+            const DeviceExtInfo& info = GetDeviceExtInfo(ext);
+
+            if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
+                extensionNames.push_back(info.name);
+            }
+        }
+
+        // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
+        // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
+        // promoted as a core API in Vulkan 1.1.
+        //
+        // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
+        // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
+        VkPhysicalDeviceFeatures2 features2 = {};
+        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+        features2.pNext = nullptr;
+        PNextChainBuilder featuresChain(&features2);
+
+        // Required for core WebGPU features.
+        usedKnobs.features.depthBiasClamp = VK_TRUE;
+        usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+        usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
+        usedKnobs.features.imageCubeArray = VK_TRUE;
+        usedKnobs.features.independentBlend = VK_TRUE;
+        usedKnobs.features.sampleRateShading = VK_TRUE;
+
+        if (IsRobustnessEnabled()) {
+            usedKnobs.features.robustBufferAccess = VK_TRUE;
+        }
+
+        if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+            ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
+
+            // Always request all the features from VK_EXT_subgroup_size_control when available.
+            usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
+            featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
+
+            mComputeSubgroupSize = FindComputeSubgroupSize();
+        }
+
+        if (mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+            ASSERT(usedKnobs.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory));
+
+            usedKnobs.zeroInitializeWorkgroupMemoryFeatures.sType =
+                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
+
+            // Always allow initializing workgroup memory with OpConstantNull when available.
+            // Note that the driver still won't initialize workgroup memory unless the workgroup
+            // variable is explicitly initialized with OpConstantNull.
+            usedKnobs.zeroInitializeWorkgroupMemoryFeatures.shaderZeroInitializeWorkgroupMemory =
+                VK_TRUE;
+            featuresChain.Add(&usedKnobs.zeroInitializeWorkgroupMemoryFeatures);
+        }
+
+        if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
+            usedKnobs.features.samplerAnisotropy = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionBC = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionETC2 = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
+                   VK_TRUE);
+            usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+            const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+            ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+                   deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+                   deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+                   deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
+                   deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
+
+            usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+            usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
+            usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
+
+            featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+            featuresChain.Add(&usedKnobs._16BitStorageFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+        }
+
+        if (IsFeatureEnabled(Feature::DepthClamping)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
+            usedKnobs.features.depthClamp = VK_TRUE;
+        }
+
+        // Find a universal queue family
+        {
+            // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
+            constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
+            int universalQueueFamily = -1;
+            for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
+                if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
+                    kUniversalFlags) {
+                    universalQueueFamily = i;
+                    break;
+                }
+            }
+
+            if (universalQueueFamily == -1) {
+                return DAWN_INTERNAL_ERROR("No universal queue family");
+            }
+            mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
+        }
+
+        // Choose to create a single universal queue
+        std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+        float zero = 0.0f;
+        {
+            VkDeviceQueueCreateInfo queueCreateInfo;
+            queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+            queueCreateInfo.pNext = nullptr;
+            queueCreateInfo.flags = 0;
+            queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
+            queueCreateInfo.queueCount = 1;
+            queueCreateInfo.pQueuePriorities = &zero;
+
+            queuesToRequest.push_back(queueCreateInfo);
+        }
+
+        VkDeviceCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
+        createInfo.pQueueCreateInfos = queuesToRequest.data();
+        createInfo.enabledLayerCount = 0;
+        createInfo.ppEnabledLayerNames = nullptr;
+        createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+        createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+        // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
+        // covered by VkPhysicalDeviceFeatures can be enabled.
+        if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
+            features2.features = usedKnobs.features;
+            createInfo.pNext = &features2;
+            createInfo.pEnabledFeatures = nullptr;
+        } else {
+            ASSERT(features2.pNext == nullptr);
+            createInfo.pEnabledFeatures = &usedKnobs.features;
+        }
+
+        DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
+                                "vkCreateDevice"));
+
+        return usedKnobs;
+    }
+
+    uint32_t Device::FindComputeSubgroupSize() const {
+        if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+            return 0;
+        }
+
+        const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
+            mDeviceInfo.subgroupSizeControlProperties;
+
+        if (ext.minSubgroupSize == ext.maxSubgroupSize) {
+            return 0;
+        }
+
+        // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
+        // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
+        // following heuristics, which may need to be adjusted in the future for other
+        // architectures, or if a specific API is added to let client code select the size..
+        //
+        // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
+        uint32_t subgroupSize = ext.minSubgroupSize * 2;
+        if (subgroupSize <= ext.maxSubgroupSize) {
+            return subgroupSize;
+        } else {
+            return ext.minSubgroupSize;
+        }
+    }
+
+    void Device::GatherQueueFromDevice() {
+        fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+    }
+
+    // Note that this function is called before mDeviceInfo is initialized.
+    void Device::InitTogglesFromDriver() {
+        // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
+        // Vulkan SPEC and drivers.
+        SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
+
+        // By default try to use D32S8 for Depth24PlusStencil8
+        SetToggle(Toggle::VulkanUseD32S8, true);
+
+        // By default try to initialize workgroup memory with OpConstantNull according to the Vulkan
+        // extension VK_KHR_zero_initialize_workgroup_memory.
+        SetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, true);
+
+        // By default try to use S8 if available.
+        SetToggle(Toggle::VulkanUseS8, true);
+    }
+
+    void Device::ApplyDepthStencilFormatToggles() {
+        bool supportsD32s8 =
+            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
+        bool supportsD24s8 =
+            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
+        bool supportsS8 = ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_S8_UINT);
+
+        ASSERT(supportsD32s8 || supportsD24s8);
+
+        if (!supportsD24s8) {
+            ForceSetToggle(Toggle::VulkanUseD32S8, true);
+        }
+        if (!supportsD32s8) {
+            ForceSetToggle(Toggle::VulkanUseD32S8, false);
+        }
+        if (!supportsS8) {
+            ForceSetToggle(Toggle::VulkanUseS8, false);
+        }
+    }
+
+    void Device::ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle() {
+        if (!mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+            ForceSetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, false);
+        }
+    }
+
+    VulkanFunctions* Device::GetMutableFunctions() {
+        return const_cast<VulkanFunctions*>(&fn);
+    }
+
+    ResultOrError<VkFence> Device::GetUnusedFence() {
+        if (!mUnusedFences.empty()) {
+            VkFence fence = mUnusedFences.back();
+            DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
+
+            mUnusedFences.pop_back();
+            return fence;
+        }
+
+        VkFenceCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+
+        VkFence fence = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
+                                "vkCreateFence"));
+
+        return fence;
+    }
+
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        ExecutionSerial fenceSerial(0);
+        while (!mFencesInFlight.empty()) {
+            VkFence fence = mFencesInFlight.front().first;
+            ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
+            VkResult result = VkResult::WrapUnsafe(
+                INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
+
+            // Fence are added in order, so we can stop searching as soon
+            // as we see one that's not ready.
+            if (result == VK_NOT_READY) {
+                return fenceSerial;
+            } else {
+                DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
+            }
+
+            // Update fenceSerial since fence is ready.
+            fenceSerial = tentativeSerial;
+
+            mUnusedFences.push_back(fence);
+
+            ASSERT(fenceSerial > GetCompletedCommandSerial());
+            mFencesInFlight.pop();
+        }
+        return fenceSerial;
+    }
+
+    MaybeError Device::PrepareRecordingContext() {
+        ASSERT(!mRecordingContext.used);
+        ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
+        ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
+
+        // First try to recycle unused command pools.
+        if (!mUnusedCommands.empty()) {
+            CommandPoolAndBuffer commands = mUnusedCommands.back();
+            mUnusedCommands.pop_back();
+            DAWN_TRY_WITH_CLEANUP(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
+                                                 "vkResetCommandPool"),
+                                  {
+                                      // vkResetCommandPool failed (it may return out-of-memory).
+                                      // Free the commands in the cleanup step before returning to
+                                      // reclaim memory.
+
+                                      // The VkCommandBuffer memory should be wholly owned by the
+                                      // pool and freed when it is destroyed, but that's not the
+                                      // case in some drivers and they leak memory. So we call
+                                      // FreeCommandBuffers before DestroyCommandPool to be safe.
+                                      // TODO(enga): Only do this on a known list of bad drivers.
+                                      fn.FreeCommandBuffers(mVkDevice, commands.pool, 1,
+                                                            &commands.commandBuffer);
+                                      fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+                                  });
+
+            mRecordingContext.commandBuffer = commands.commandBuffer;
+            mRecordingContext.commandPool = commands.pool;
+        } else {
+            // Create a new command pool for our commands and allocate the command buffer.
+            VkCommandPoolCreateInfo createInfo;
+            createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+            createInfo.pNext = nullptr;
+            createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+            createInfo.queueFamilyIndex = mQueueFamily;
+
+            DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
+                                                         &*mRecordingContext.commandPool),
+                                    "vkCreateCommandPool"));
+
+            VkCommandBufferAllocateInfo allocateInfo;
+            allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+            allocateInfo.pNext = nullptr;
+            allocateInfo.commandPool = mRecordingContext.commandPool;
+            allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+            allocateInfo.commandBufferCount = 1;
+
+            DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
+                                                              &mRecordingContext.commandBuffer),
+                                    "vkAllocateCommandBuffers"));
+        }
+
+        // Start the recording of commands in the command buffer.
+        VkCommandBufferBeginInfo beginInfo;
+        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+        beginInfo.pNext = nullptr;
+        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+        beginInfo.pInheritanceInfo = nullptr;
+
+        return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
+                              "vkBeginCommandBuffer");
+    }
+
+    void Device::RecycleCompletedCommands() {
+        for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
+            mUnusedCommands.push_back(commands);
+        }
+        mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+    }
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer =
+            std::make_unique<StagingBuffer>(size, this);
+        DAWN_TRY(stagingBuffer->Initialize());
+        return std::move(stagingBuffer);
+    }
+
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
+        // calling this function.
+        ASSERT(size != 0);
+
+        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+        ToBackend(destination)
+            ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.
+
+        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+        // buffer.
+        ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+        VkBufferCopy copy;
+        copy.srcOffset = sourceOffset;
+        copy.dstOffset = destinationOffset;
+        copy.size = size;
+
+        this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
+                               ToBackend(source)->GetBufferHandle(),
+                               ToBackend(destination)->GetHandle(), 1, &copy);
+
+        return {};
+    }
+
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.
+
+        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
+        VkImageSubresourceLayers subresource = region.imageSubresource;
+
+        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
+                                          subresource.mipLevel)) {
+            // Since texture has been overwritten, it has been "initialized"
+            dst->texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+        }
+        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+        // texture.
+        ToBackend(dst->texture)
+            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+        VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+        // copy command.
+        this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+                                      ToBackend(source)->GetBufferHandle(), dstImage,
+                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+        return {};
+    }
+
+    MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+                                           ExternalMemoryHandle memoryHandle,
+                                           VkImage image,
+                                           const std::vector<ExternalSemaphoreHandle>& waitHandles,
+                                           VkSemaphore* outSignalSemaphore,
+                                           VkDeviceMemory* outAllocation,
+                                           std::vector<VkSemaphore>* outWaitSemaphores) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
+
+        wgpu::TextureUsage usage = textureDescriptor->usage;
+        if (internalUsageDesc != nullptr) {
+            usage |= internalUsageDesc->internalUsage;
+        }
+
+        // Check services support this combination of handle type / image info
+        DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
+                        "External semaphore usage not supported");
+
+        DAWN_INVALID_IF(
+            !mExternalMemoryService->SupportsImportMemory(
+                VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
+                VK_IMAGE_TILING_OPTIMAL,
+                VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
+                VK_IMAGE_CREATE_ALIAS_BIT_KHR),
+            "External memory usage not supported");
+
+        // Create an external semaphore to signal when the texture is done being used
+        DAWN_TRY_ASSIGN(*outSignalSemaphore,
+                        mExternalSemaphoreService->CreateExportableSemaphore());
+
+        // Import the external image's memory
+        external_memory::MemoryImportParams importParams;
+        DAWN_TRY_ASSIGN(importParams,
+                        mExternalMemoryService->GetMemoryImportParams(descriptor, image));
+        DAWN_TRY_ASSIGN(*outAllocation,
+                        mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
+
+        // Import semaphores we have to wait on before using the texture
+        for (const ExternalSemaphoreHandle& handle : waitHandles) {
+            VkSemaphore semaphore = VK_NULL_HANDLE;
+            DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
+            outWaitSemaphores->push_back(semaphore);
+        }
+
+        return {};
+    }
+
+    bool Device::SignalAndExportExternalTexture(
+        Texture* texture,
+        VkImageLayout desiredLayout,
+        ExternalImageExportInfoVk* info,
+        std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
+        return !ConsumedError([&]() -> MaybeError {
+            DAWN_TRY(ValidateObject(texture));
+
+            VkSemaphore signalSemaphore;
+            VkImageLayout releasedOldLayout;
+            VkImageLayout releasedNewLayout;
+            DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore,
+                                                    &releasedOldLayout, &releasedNewLayout));
+
+            ExternalSemaphoreHandle semaphoreHandle;
+            DAWN_TRY_ASSIGN(semaphoreHandle,
+                            mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
+            semaphoreHandles->push_back(semaphoreHandle);
+            info->releasedOldLayout = releasedOldLayout;
+            info->releasedNewLayout = releasedNewLayout;
+            info->isInitialized =
+                texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
+
+            return {};
+        }());
+    }
+
+    TextureBase* Device::CreateTextureWrappingVulkanImage(
+        const ExternalImageDescriptorVk* descriptor,
+        ExternalMemoryHandle memoryHandle,
+        const std::vector<ExternalSemaphoreHandle>& waitHandles) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        // Initial validation
+        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+            return nullptr;
+        }
+        if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
+                          "validating that a Vulkan image can be wrapped with %s.",
+                          textureDescriptor)) {
+            return nullptr;
+        }
+
+        VkSemaphore signalSemaphore = VK_NULL_HANDLE;
+        VkDeviceMemory allocation = VK_NULL_HANDLE;
+        std::vector<VkSemaphore> waitSemaphores;
+        waitSemaphores.reserve(waitHandles.size());
+
+        // Cleanup in case of a failure, the image creation doesn't acquire the external objects
+        // if a failure happems.
+        Texture* result = nullptr;
+        // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
+        if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+                                                      mExternalMemoryService.get()),
+                          &result) ||
+            ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
+                                              waitHandles, &signalSemaphore, &allocation,
+                                              &waitSemaphores)) ||
+            ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
+                                                     waitSemaphores))) {
+            // Delete the Texture if it was created
+            if (result != nullptr) {
+                result->Release();
+            }
+
+            // Clear the signal semaphore
+            fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+
+            // Clear image memory
+            fn.FreeMemory(GetVkDevice(), allocation, nullptr);
+
+            // Clear any wait semaphores we were able to import
+            for (VkSemaphore semaphore : waitSemaphores) {
+                fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
+            }
+            return nullptr;
+        }
+
+        return result;
+    }
+
+    uint32_t Device::GetComputeSubgroupSize() const {
+        return mComputeSubgroupSize;
+    }
+
+    MaybeError Device::WaitForIdleForDestruction() {
+        // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+        // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
+        // ShutDownImpl
+        if (mRecordingContext.used) {
+            CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
+                                             mRecordingContext.commandBuffer};
+            mUnusedCommands.push_back(commands);
+            mRecordingContext = CommandRecordingContext();
+        }
+
+        VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
+        // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
+        // about, Device lost, which means workloads running on the GPU are no longer accessible
+        // (so they are as good as waited on) or success.
+        DAWN_UNUSED(waitIdleResult);
+
+        // Make sure all fences are complete by explicitly waiting on them all
+        while (!mFencesInFlight.empty()) {
+            VkFence fence = mFencesInFlight.front().first;
+            ExecutionSerial fenceSerial = mFencesInFlight.front().second;
+            ASSERT(fenceSerial > GetCompletedCommandSerial());
+
+            VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
+            do {
+                // If WaitForIdleForDesctruction is called while we are Disconnected, it means that
+                // the device lost came from the ErrorInjector and we need to wait without allowing
+                // any more error to be injected. This is because the device lost was "fake" and
+                // commands might still be running.
+                if (GetState() == State::Disconnected) {
+                    result = VkResult::WrapUnsafe(
+                        fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
+                    continue;
+                }
+
+                result = VkResult::WrapUnsafe(
+                    INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
+                                        VK_ERROR_DEVICE_LOST));
+            } while (result == VK_TIMEOUT);
+            // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
+            // about (and we need to keep going with the destruction of all fences), or device
+            // loss, which means the workload on the GPU is no longer accessible and we can
+            // safely destroy the fence.
+
+            fn.DestroyFence(mVkDevice, fence, nullptr);
+            mFencesInFlight.pop();
+        }
+        return {};
+    }
+
+    void Device::DestroyImpl() {
+        ASSERT(GetState() == State::Disconnected);
+
+        // We failed during initialization so early that we don't even have a VkDevice. There is
+        // nothing to do.
+        if (mVkDevice == VK_NULL_HANDLE) {
+            return;
+        }
+
+        // The deleter is the second thing we initialize. If it is not present, it means that
+        // only the VkDevice was created and nothing else. Destroy the device and do nothing else
+        // because the function pointers might not have been loaded (and there is nothing to
+        // destroy anyway).
+        if (mDeleter == nullptr) {
+            fn.DestroyDevice(mVkDevice, nullptr);
+            mVkDevice = VK_NULL_HANDLE;
+            return;
+        }
+
+        // Enough of the Device's initialization happened that we can now do regular robust
+        // deinitialization.
+
+        // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+        mRecordingContext.used = false;
+        if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
+            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+            // destroyed, but that's not the case in some drivers and the leak memory.
+            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+            // TODO(enga): Only do this on a known list of bad drivers.
+            fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
+                                  &mRecordingContext.commandBuffer);
+            fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+        }
+
+        for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+            fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+        }
+        mRecordingContext.waitSemaphores.clear();
+
+        for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+            fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+        }
+        mRecordingContext.signalSemaphores.clear();
+
+        // Some commands might still be marked as in-flight if we shut down because of a device
+        // loss. Recycle them as unused so that we free them below.
+        RecycleCompletedCommands();
+        ASSERT(mCommandsInFlight.Empty());
+
+        for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+            // destroyed, but that's not the case in some drivers and the leak memory.
+            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+            // TODO(enga): Only do this on a known list of bad drivers.
+            fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+            fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+        }
+        mUnusedCommands.clear();
+
+        // Some fences might still be marked as in-flight if we shut down because of a device loss.
+        // Delete them since at this point all commands are complete.
+        while (!mFencesInFlight.empty()) {
+            fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
+            mFencesInFlight.pop();
+        }
+
+        for (VkFence fence : mUnusedFences) {
+            fn.DestroyFence(mVkDevice, fence, nullptr);
+        }
+        mUnusedFences.clear();
+
+        ExecutionSerial completedSerial = GetCompletedCommandSerial();
+        for (Ref<DescriptorSetAllocator>& allocator :
+             mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+            allocator->FinishDeallocation(completedSerial);
+        }
+
+        // Releasing the uploader enqueues buffers to be released.
+        // Call Tick() again to clear them before releasing the deleter.
+        mResourceMemoryAllocator->Tick(completedSerial);
+        mDeleter->Tick(completedSerial);
+        mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+        // Allow recycled memory to be deleted.
+        mResourceMemoryAllocator->DestroyPool();
+
+        // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
+        // to them are guaranteed to be finished executing.
+        mRenderPassCache = nullptr;
+
+        // We need handle deleting all child objects by calling Tick() again with a large serial to
+        // force all operations to look as if they were completed, and delete all objects before
+        // destroying the Deleter and vkDevice.
+        ASSERT(mDeleter != nullptr);
+        mDeleter->Tick(kMaxExecutionSerial);
+        mDeleter = nullptr;
+
+        // VkQueues are destroyed when the VkDevice is destroyed
+        // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
+        // child objects have been deleted.
+        ASSERT(mVkDevice != VK_NULL_HANDLE);
+        fn.DestroyDevice(mVkDevice, nullptr);
+        mVkDevice = VK_NULL_HANDLE;
+    }
+
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+    }
+
+    float Device::GetTimestampPeriodInNS() const {
+        return mDeviceInfo.properties.limits.timestampPeriod;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/DeviceVk.h b/src/dawn/native/vulkan/DeviceVk.h
new file mode 100644
index 0000000..55697efe
--- /dev/null
+++ b/src/dawn/native/vulkan/DeviceVk.h
@@ -0,0 +1,214 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DEVICEVK_H_
+#define DAWNNATIVE_VULKAN_DEVICEVK_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+#include "dawn/native/vulkan/Forward.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+#include <memory>
+#include <queue>
+
+namespace dawn::native::vulkan {
+
+    class Adapter;
+    class BindGroupLayout;
+    class BufferUploader;
+    class FencedDeleter;
+    class RenderPassCache;
+    class ResourceMemoryAllocator;
+
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+                                                 const DeviceDescriptor* descriptor);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
+        const VulkanFunctions fn;
+
+        VkInstance GetVkInstance() const;
+        const VulkanDeviceInfo& GetDeviceInfo() const;
+        const VulkanGlobalInfo& GetGlobalInfo() const;
+        VkDevice GetVkDevice() const;
+        uint32_t GetGraphicsQueueFamily() const;
+        VkQueue GetQueue() const;
+
+        FencedDeleter* GetFencedDeleter() const;
+        RenderPassCache* GetRenderPassCache() const;
+        ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
+
+        CommandRecordingContext* GetPendingRecordingContext();
+        MaybeError SubmitPendingCommands();
+
+        void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
+
+        // Dawn Native API
+
+        TextureBase* CreateTextureWrappingVulkanImage(
+            const ExternalImageDescriptorVk* descriptor,
+            ExternalMemoryHandle memoryHandle,
+            const std::vector<ExternalSemaphoreHandle>& waitHandles);
+        bool SignalAndExportExternalTexture(Texture* texture,
+                                            VkImageLayout desiredLayout,
+                                            ExternalImageExportInfoVk* info,
+                                            std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
+
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+
+        MaybeError TickImpl() override;
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
+        // needs to be set.
+        uint32_t GetComputeSubgroupSize() const;
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+      private:
+        Device(Adapter* adapter, const DeviceDescriptor* descriptor);
+
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) override;
+        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) override;
+
+        ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
+        void GatherQueueFromDevice();
+
+        uint32_t FindComputeSubgroupSize() const;
+        void InitTogglesFromDriver();
+        void ApplyDepthStencilFormatToggles();
+        void ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+
+        // To make it easier to use fn it is a public const member. However
+        // the Device is allowed to mutate them through these private methods.
+        VulkanFunctions* GetMutableFunctions();
+
+        VulkanDeviceInfo mDeviceInfo = {};
+        VkDevice mVkDevice = VK_NULL_HANDLE;
+        uint32_t mQueueFamily = 0;
+        VkQueue mQueue = VK_NULL_HANDLE;
+        uint32_t mComputeSubgroupSize = 0;
+
+        SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
+            mDescriptorAllocatorsPendingDeallocation;
+        std::unique_ptr<FencedDeleter> mDeleter;
+        std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
+        std::unique_ptr<RenderPassCache> mRenderPassCache;
+
+        std::unique_ptr<external_memory::Service> mExternalMemoryService;
+        std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
+
+        ResultOrError<VkFence> GetUnusedFence();
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+        // We track which operations are in flight on the GPU with an increasing serial.
+        // This works only because we have a single queue. Each submit to a queue is associated
+        // to a serial and a fence, such that when the fence is "ready" we know the operations
+        // have finished.
+        std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
+        // Fences in the unused list aren't reset yet.
+        std::vector<VkFence> mUnusedFences;
+
+        MaybeError PrepareRecordingContext();
+        void RecycleCompletedCommands();
+
+        struct CommandPoolAndBuffer {
+            VkCommandPool pool = VK_NULL_HANDLE;
+            VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+        };
+        SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
+        // Command pools in the unused list haven't been reset yet.
+        std::vector<CommandPoolAndBuffer> mUnusedCommands;
+        // There is always a valid recording context stored in mRecordingContext
+        CommandRecordingContext mRecordingContext;
+
+        MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+                                       ExternalMemoryHandle memoryHandle,
+                                       VkImage image,
+                                       const std::vector<ExternalSemaphoreHandle>& waitHandles,
+                                       VkSemaphore* outSignalSemaphore,
+                                       VkDeviceMemory* outAllocation,
+                                       std::vector<VkSemaphore>* outWaitSemaphores);
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_DEVICEVK_H_
diff --git a/src/dawn/native/vulkan/ExternalHandle.h b/src/dawn/native/vulkan/ExternalHandle.h
new file mode 100644
index 0000000..24edf42
--- /dev/null
+++ b/src/dawn/native/vulkan/ExternalHandle.h
@@ -0,0 +1,26 @@
+#ifndef DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+#if DAWN_PLATFORM_LINUX
+    // File descriptor
+    using ExternalMemoryHandle = int;
+    // File descriptor
+    using ExternalSemaphoreHandle = int;
+#elif DAWN_PLATFORM_FUCHSIA
+    // Really a Zircon vmo handle.
+    using ExternalMemoryHandle = zx_handle_t;
+    // Really a Zircon event handle.
+    using ExternalSemaphoreHandle = zx_handle_t;
+#else
+    // Generic types so that the Null service can compile, not used for real handles
+    using ExternalMemoryHandle = void*;
+    using ExternalSemaphoreHandle = void*;
+#endif
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
diff --git a/src/dawn/native/vulkan/FencedDeleter.cpp b/src/dawn/native/vulkan/FencedDeleter.cpp
new file mode 100644
index 0000000..09c91b4
--- /dev/null
+++ b/src/dawn/native/vulkan/FencedDeleter.cpp
@@ -0,0 +1,183 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/FencedDeleter.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+
+namespace dawn::native::vulkan {
+
+    FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {
+    }
+
+    FencedDeleter::~FencedDeleter() {
+        ASSERT(mBuffersToDelete.Empty());
+        ASSERT(mDescriptorPoolsToDelete.Empty());
+        ASSERT(mFramebuffersToDelete.Empty());
+        ASSERT(mImagesToDelete.Empty());
+        ASSERT(mImageViewsToDelete.Empty());
+        ASSERT(mMemoriesToDelete.Empty());
+        ASSERT(mPipelinesToDelete.Empty());
+        ASSERT(mPipelineLayoutsToDelete.Empty());
+        ASSERT(mQueryPoolsToDelete.Empty());
+        ASSERT(mRenderPassesToDelete.Empty());
+        ASSERT(mSamplersToDelete.Empty());
+        ASSERT(mSemaphoresToDelete.Empty());
+        ASSERT(mShaderModulesToDelete.Empty());
+        ASSERT(mSurfacesToDelete.Empty());
+        ASSERT(mSwapChainsToDelete.Empty());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
+        mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
+        mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
+        mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
+        mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkImage image) {
+        mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkImageView view) {
+        mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
+        mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
+        mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
+        mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
+        mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
+        mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
+        mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
+        mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
+        mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
+        mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
+    }
+
+    void FencedDeleter::Tick(ExecutionSerial completedSerial) {
+        VkDevice vkDevice = mDevice->GetVkDevice();
+        VkInstance instance = mDevice->GetVkInstance();
+
+        // Buffers and images must be deleted before memories because it is invalid to free memory
+        // that still have resources bound to it.
+        for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
+        }
+        mBuffersToDelete.ClearUpTo(completedSerial);
+        for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyImage(vkDevice, image, nullptr);
+        }
+        mImagesToDelete.ClearUpTo(completedSerial);
+
+        for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
+        }
+        mMemoriesToDelete.ClearUpTo(completedSerial);
+
+        for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
+        }
+        mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
+
+        for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
+        }
+        mRenderPassesToDelete.ClearUpTo(completedSerial);
+
+        for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
+        }
+        mFramebuffersToDelete.ClearUpTo(completedSerial);
+
+        for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
+        }
+        mImageViewsToDelete.ClearUpTo(completedSerial);
+
+        for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
+        }
+        mShaderModulesToDelete.ClearUpTo(completedSerial);
+
+        for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
+        }
+        mPipelinesToDelete.ClearUpTo(completedSerial);
+
+        // Vulkan swapchains must be destroyed before their corresponding VkSurface
+        for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
+        }
+        mSwapChainsToDelete.ClearUpTo(completedSerial);
+        for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
+        }
+        mSurfacesToDelete.ClearUpTo(completedSerial);
+
+        for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
+        }
+        mSemaphoresToDelete.ClearUpTo(completedSerial);
+
+        for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
+        }
+        mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
+
+        for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
+        }
+        mQueryPoolsToDelete.ClearUpTo(completedSerial);
+
+        for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
+            mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
+        }
+        mSamplersToDelete.ClearUpTo(completedSerial);
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/FencedDeleter.h b/src/dawn/native/vulkan/FencedDeleter.h
new file mode 100644
index 0000000..bd4c88a
--- /dev/null
+++ b/src/dawn/native/vulkan/FencedDeleter.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_FENCEDDELETER_H_
+#define DAWNNATIVE_VULKAN_FENCEDDELETER_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/IntegerTypes.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class FencedDeleter {
+      public:
+        FencedDeleter(Device* device);
+        ~FencedDeleter();
+
+        void DeleteWhenUnused(VkBuffer buffer);
+        void DeleteWhenUnused(VkDescriptorPool pool);
+        void DeleteWhenUnused(VkDeviceMemory memory);
+        void DeleteWhenUnused(VkFramebuffer framebuffer);
+        void DeleteWhenUnused(VkImage image);
+        void DeleteWhenUnused(VkImageView view);
+        void DeleteWhenUnused(VkPipelineLayout layout);
+        void DeleteWhenUnused(VkRenderPass renderPass);
+        void DeleteWhenUnused(VkPipeline pipeline);
+        void DeleteWhenUnused(VkQueryPool querypool);
+        void DeleteWhenUnused(VkSampler sampler);
+        void DeleteWhenUnused(VkSemaphore semaphore);
+        void DeleteWhenUnused(VkShaderModule module);
+        void DeleteWhenUnused(VkSurfaceKHR surface);
+        void DeleteWhenUnused(VkSwapchainKHR swapChain);
+
+        void Tick(ExecutionSerial completedSerial);
+
+      private:
+        Device* mDevice = nullptr;
+        SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
+        SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
+        SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
+        SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
+        SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
+        SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
+        SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
+        SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
+        SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
+        SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
+        SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
+        SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
+        SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
+        SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
+        SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_FENCEDDELETER_H_
diff --git a/src/dawn/native/vulkan/Forward.h b/src/dawn/native/vulkan/Forward.h
new file mode 100644
index 0000000..35f6ade
--- /dev/null
+++ b/src/dawn/native/vulkan/Forward.h
@@ -0,0 +1,69 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_FORWARD_H_
+#define DAWNNATIVE_VULKAN_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::vulkan {
+
+    class Adapter;
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class ComputePipeline;
+    class Device;
+    class PipelineLayout;
+    class QuerySet;
+    class Queue;
+    class RenderPipeline;
+    class ResourceHeap;
+    class Sampler;
+    class ShaderModule;
+    class StagingBuffer;
+    class SwapChain;
+    class Texture;
+    class TextureView;
+
+    struct VulkanBackendTraits {
+        using AdapterType = Adapter;
+        using BindGroupType = BindGroup;
+        using BindGroupLayoutType = BindGroupLayout;
+        using BufferType = Buffer;
+        using CommandBufferType = CommandBuffer;
+        using ComputePipelineType = ComputePipeline;
+        using DeviceType = Device;
+        using PipelineLayoutType = PipelineLayout;
+        using QuerySetType = QuerySet;
+        using QueueType = Queue;
+        using RenderPipelineType = RenderPipeline;
+        using ResourceHeapType = ResourceHeap;
+        using SamplerType = Sampler;
+        using ShaderModuleType = ShaderModule;
+        using StagingBufferType = StagingBuffer;
+        using SwapChainType = SwapChain;
+        using TextureType = Texture;
+        using TextureViewType = TextureView;
+    };
+
+    template <typename T>
+    auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
+        return ToBackendBase<VulkanBackendTraits>(common);
+    }
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_FORWARD_H_
diff --git a/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp b/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
new file mode 100644
index 0000000..e16ae2c
--- /dev/null
+++ b/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
@@ -0,0 +1,225 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/NativeSwapChainImplVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/TextureVk.h"
+
+#include <limits>
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
+                                   bool turnOffVsync,
+                                   VkPresentModeKHR* presentMode) {
+            if (turnOffVsync) {
+                for (const auto& availablePresentMode : availablePresentModes) {
+                    if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+                        *presentMode = availablePresentMode;
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            *presentMode = VK_PRESENT_MODE_FIFO_KHR;
+            return true;
+        }
+
+        bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
+                                 NativeSwapChainImpl::ChosenConfig* config,
+                                 bool turnOffVsync) {
+            VkPresentModeKHR presentMode;
+            if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
+                return false;
+            }
+            // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
+            // driver. Need to generalize
+            config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
+            config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+            config->format = wgpu::TextureFormat::BGRA8Unorm;
+            config->minImageCount = 3;
+            // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
+            // on Linux
+            config->preTransform = info.capabilities.currentTransform;
+            config->presentMode = presentMode;
+            config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
+            return true;
+        }
+    }  // anonymous namespace
+
+    NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
+        : mSurface(surface), mDevice(device) {
+        // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
+        // will return a correct result before a SwapChain is created.
+        UpdateSurfaceConfig();
+    }
+
+    NativeSwapChainImpl::~NativeSwapChainImpl() {
+        if (mSwapChain != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+            mSwapChain = VK_NULL_HANDLE;
+        }
+        if (mSurface != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
+            mSurface = VK_NULL_HANDLE;
+        }
+    }
+
+    void NativeSwapChainImpl::UpdateSurfaceConfig() {
+        if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
+                                   &mInfo)) {
+            ASSERT(false);
+        }
+
+        if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
+            ASSERT(false);
+        }
+    }
+
+    void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
+        UpdateSurfaceConfig();
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                      WGPUTextureUsage usage,
+                                                      uint32_t width,
+                                                      uint32_t height) {
+        UpdateSurfaceConfig();
+
+        ASSERT(mInfo.capabilities.minImageExtent.width <= width);
+        ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
+        ASSERT(mInfo.capabilities.minImageExtent.height <= height);
+        ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+
+        ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+        // TODO(crbug.com/dawn/269): need to check usage works too
+
+        // Create the swapchain with the configuration we chose
+        VkSwapchainKHR oldSwapchain = mSwapChain;
+        VkSwapchainCreateInfoKHR createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.surface = mSurface;
+        createInfo.minImageCount = mConfig.minImageCount;
+        createInfo.imageFormat = mConfig.nativeFormat;
+        createInfo.imageColorSpace = mConfig.colorSpace;
+        createInfo.imageExtent.width = width;
+        createInfo.imageExtent.height = height;
+        createInfo.imageArrayLayers = 1;
+        createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
+                                                 mDevice->GetValidInternalFormat(mConfig.format));
+        createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = nullptr;
+        createInfo.preTransform = mConfig.preTransform;
+        createInfo.compositeAlpha = mConfig.compositeAlpha;
+        createInfo.presentMode = mConfig.presentMode;
+        createInfo.clipped = false;
+        createInfo.oldSwapchain = oldSwapchain;
+
+        if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
+                                           &*mSwapChain) != VK_SUCCESS) {
+            ASSERT(false);
+        }
+
+        // Gather the swapchain's images. Implementations are allowed to return more images than the
+        // number we asked for.
+        uint32_t count = 0;
+        if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+                                              nullptr) != VK_SUCCESS) {
+            ASSERT(false);
+        }
+
+        ASSERT(count >= mConfig.minImageCount);
+        mSwapChainImages.resize(count);
+        if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+                                              AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
+            ASSERT(false);
+        }
+
+        if (oldSwapchain != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
+        }
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+        // Transiently create a semaphore that will be signaled when the presentation engine is done
+        // with the swapchain image. Further operations on the image will wait for this semaphore.
+        VkSemaphore semaphore = VK_NULL_HANDLE;
+        {
+            VkSemaphoreCreateInfo createInfo;
+            createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+            createInfo.pNext = nullptr;
+            createInfo.flags = 0;
+            if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
+                                            &*semaphore) != VK_SUCCESS) {
+                ASSERT(false);
+            }
+        }
+
+        if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
+                                            std::numeric_limits<uint64_t>::max(), semaphore,
+                                            VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
+            ASSERT(false);
+        }
+
+        nextTexture->texture.u64 =
+#if defined(DAWN_PLATFORM_64_BIT)
+            reinterpret_cast<uint64_t>
+#endif
+            (*mSwapChainImages[mLastImageIndex]);
+        mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError NativeSwapChainImpl::Present() {
+        // This assumes that the image has already been transitioned to the PRESENT layout and
+        // writes were made available to the stage.
+
+        // Assuming that the present queue is the same as the graphics queue, the proper
+        // synchronization has already been done on the queue so we don't need to wait on any
+        // semaphores.
+        VkPresentInfoKHR presentInfo;
+        presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+        presentInfo.pNext = nullptr;
+        presentInfo.waitSemaphoreCount = 0;
+        presentInfo.pWaitSemaphores = nullptr;
+        presentInfo.swapchainCount = 1;
+        presentInfo.pSwapchains = &*mSwapChain;
+        presentInfo.pImageIndices = &mLastImageIndex;
+        presentInfo.pResults = nullptr;
+
+        VkQueue queue = mDevice->GetQueue();
+        if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
+            ASSERT(false);
+        }
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+        return mConfig.format;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/NativeSwapChainImplVk.h b/src/dawn/native/vulkan/NativeSwapChainImplVk.h
new file mode 100644
index 0000000..5291465
--- /dev/null
+++ b/src/dawn/native/vulkan/NativeSwapChainImplVk.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
+#define DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
+
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class NativeSwapChainImpl {
+      public:
+        using WSIContext = DawnWSIContextVulkan;
+
+        NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
+        ~NativeSwapChainImpl();
+
+        void Init(DawnWSIContextVulkan* context);
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage,
+                                     uint32_t width,
+                                     uint32_t height);
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+        DawnSwapChainError Present();
+
+        wgpu::TextureFormat GetPreferredFormat() const;
+
+        struct ChosenConfig {
+            VkFormat nativeFormat;
+            wgpu::TextureFormat format;
+            VkColorSpaceKHR colorSpace;
+            VkSurfaceTransformFlagBitsKHR preTransform;
+            uint32_t minImageCount;
+            VkPresentModeKHR presentMode;
+            VkCompositeAlphaFlagBitsKHR compositeAlpha;
+        };
+
+      private:
+        void UpdateSurfaceConfig();
+
+        VkSurfaceKHR mSurface = VK_NULL_HANDLE;
+        VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+        std::vector<VkImage> mSwapChainImages;
+        uint32_t mLastImageIndex = 0;
+
+        VulkanSurfaceInfo mInfo;
+
+        ChosenConfig mConfig;
+
+        Device* mDevice = nullptr;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
diff --git a/src/dawn/native/vulkan/PipelineLayoutVk.cpp b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
new file mode 100644
index 0000000..245f2c9
--- /dev/null
+++ b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
@@ -0,0 +1,84 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+        Device* device,
+        const PipelineLayoutDescriptor* descriptor) {
+        Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+        DAWN_TRY(layout->Initialize());
+        return layout;
+    }
+
+    MaybeError PipelineLayout::Initialize() {
+        // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
+        // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
+        // this constraints at the Dawn level?
+        uint32_t numSetLayouts = 0;
+        std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
+        for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
+            setLayouts[numSetLayouts] = ToBackend(GetBindGroupLayout(setIndex))->GetHandle();
+            numSetLayouts++;
+        }
+
+        VkPipelineLayoutCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.setLayoutCount = numSetLayouts;
+        createInfo.pSetLayouts = AsVkArray(setLayouts.data());
+        createInfo.pushConstantRangeCount = 0;
+        createInfo.pPushConstantRanges = nullptr;
+
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "CreatePipelineLayout"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    PipelineLayout::~PipelineLayout() = default;
+
+    void PipelineLayout::DestroyImpl() {
+        PipelineLayoutBase::DestroyImpl();
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    VkPipelineLayout PipelineLayout::GetHandle() const {
+        return mHandle;
+    }
+
+    void PipelineLayout::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_PipelineLayout", GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/PipelineLayoutVk.h b/src/dawn/native/vulkan/PipelineLayoutVk.h
new file mode 100644
index 0000000..56d51e5
--- /dev/null
+++ b/src/dawn/native/vulkan/PipelineLayoutVk.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
+#define DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class PipelineLayout final : public PipelineLayoutBase {
+      public:
+        static ResultOrError<Ref<PipelineLayout>> Create(
+            Device* device,
+            const PipelineLayoutDescriptor* descriptor);
+
+        VkPipelineLayout GetHandle() const;
+
+      private:
+        ~PipelineLayout() override;
+        void DestroyImpl() override;
+
+        using PipelineLayoutBase::PipelineLayoutBase;
+        MaybeError Initialize();
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        VkPipelineLayout mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
diff --git a/src/dawn/native/vulkan/QuerySetVk.cpp b/src/dawn/native/vulkan/QuerySetVk.cpp
new file mode 100644
index 0000000..3981793
--- /dev/null
+++ b/src/dawn/native/vulkan/QuerySetVk.cpp
@@ -0,0 +1,117 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/QuerySetVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+        VkQueryType VulkanQueryType(wgpu::QueryType type) {
+            switch (type) {
+                case wgpu::QueryType::Occlusion:
+                    return VK_QUERY_TYPE_OCCLUSION;
+                case wgpu::QueryType::PipelineStatistics:
+                    return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+                case wgpu::QueryType::Timestamp:
+                    return VK_QUERY_TYPE_TIMESTAMP;
+            }
+            UNREACHABLE();
+        }
+
+        VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
+            std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
+            VkQueryPipelineStatisticFlags pipelineStatistics = 0;
+            for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
+                switch (pipelineStatisticsSet[i]) {
+                    case wgpu::PipelineStatisticName::ClipperInvocations:
+                        pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
+                        break;
+                    case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
+                        pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
+                        break;
+                    case wgpu::PipelineStatisticName::ComputeShaderInvocations:
+                        pipelineStatistics |=
+                            VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
+                        break;
+                    case wgpu::PipelineStatisticName::FragmentShaderInvocations:
+                        pipelineStatistics |=
+                            VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
+                        break;
+                    case wgpu::PipelineStatisticName::VertexShaderInvocations:
+                        pipelineStatistics |=
+                            VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
+                        break;
+                }
+            }
+
+            return pipelineStatistics;
+        }
+    }  // anonymous namespace
+
+    // static
+    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                                  const QuerySetDescriptor* descriptor) {
+        Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+        DAWN_TRY(queryset->Initialize());
+        return queryset;
+    }
+
+    MaybeError QuerySet::Initialize() {
+        VkQueryPoolCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+        createInfo.pNext = NULL;
+        createInfo.flags = 0;
+        createInfo.queryType = VulkanQueryType(GetQueryType());
+        createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
+        if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
+            createInfo.pipelineStatistics =
+                VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
+        }
+
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkOOMThenSuccess(
+            device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "vkCreateQueryPool"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    VkQueryPool QuerySet::GetHandle() const {
+        return mHandle;
+    }
+
+    QuerySet::~QuerySet() = default;
+
+    void QuerySet::DestroyImpl() {
+        QuerySetBase::DestroyImpl();
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    void QuerySet::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_QUERY_POOL,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_QuerySet", GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/QuerySetVk.h b/src/dawn/native/vulkan/QuerySetVk.h
new file mode 100644
index 0000000..78a52c0
--- /dev/null
+++ b/src/dawn/native/vulkan/QuerySetVk.h
@@ -0,0 +1,47 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_QUERYSETVK_H_
+#define DAWNNATIVE_VULKAN_QUERYSETVK_H_
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class QuerySet final : public QuerySetBase {
+      public:
+        static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                                   const QuerySetDescriptor* descriptor);
+
+        VkQueryPool GetHandle() const;
+
+      private:
+        ~QuerySet() override;
+        using QuerySetBase::QuerySetBase;
+        MaybeError Initialize();
+
+        // Dawn API
+        void DestroyImpl() override;
+        void SetLabelImpl() override;
+
+        VkQueryPool mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_QUERYSETVK_H_
diff --git a/src/dawn/native/vulkan/QueueVk.cpp b/src/dawn/native/vulkan/QueueVk.cpp
new file mode 100644
index 0000000..875b771
--- /dev/null
+++ b/src/dawn/native/vulkan/QueueVk.cpp
@@ -0,0 +1,59 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/QueueVk.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/vulkan/CommandBufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    Queue* Queue::Create(Device* device) {
+        return new Queue(device);
+    }
+
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    Queue::~Queue() {
+    }
+
+    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+        Device* device = ToBackend(GetDevice());
+
+        DAWN_TRY(device->Tick());
+
+        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+                           "CommandBufferVk::RecordCommands");
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
+        }
+        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
+
+        DAWN_TRY(device->SubmitPendingCommands());
+
+        return {};
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/QueueVk.h b/src/dawn/native/vulkan/QueueVk.h
new file mode 100644
index 0000000..a80b875
--- /dev/null
+++ b/src/dawn/native/vulkan/QueueVk.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_QUEUEVK_H_
+#define DAWNNATIVE_VULKAN_QUEUEVK_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class Queue final : public QueueBase {
+      public:
+        static Queue* Create(Device* device);
+
+      private:
+        Queue(Device* device);
+        ~Queue() override;
+        using QueueBase::QueueBase;
+
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_QUEUEVK_H_
diff --git a/src/dawn/native/vulkan/RenderPassCache.cpp b/src/dawn/native/vulkan/RenderPassCache.cpp
new file mode 100644
index 0000000..f1735ee
--- /dev/null
+++ b/src/dawn/native/vulkan/RenderPassCache.cpp
@@ -0,0 +1,302 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/RenderPassCache.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+        VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
+            switch (op) {
+                case wgpu::LoadOp::Load:
+                    return VK_ATTACHMENT_LOAD_OP_LOAD;
+                case wgpu::LoadOp::Clear:
+                    return VK_ATTACHMENT_LOAD_OP_CLEAR;
+                case wgpu::LoadOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+        VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
+            // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has required
+            // extension.
+            switch (op) {
+                case wgpu::StoreOp::Store:
+                    return VK_ATTACHMENT_STORE_OP_STORE;
+                case wgpu::StoreOp::Discard:
+                    return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+            UNREACHABLE();
+        }
+    }  // anonymous namespace
+
+    // RenderPassCacheQuery
+
+    void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
+                                        wgpu::TextureFormat format,
+                                        wgpu::LoadOp loadOp,
+                                        wgpu::StoreOp storeOp,
+                                        bool hasResolveTarget) {
+        colorMask.set(index);
+        colorFormats[index] = format;
+        colorLoadOp[index] = loadOp;
+        colorStoreOp[index] = storeOp;
+        resolveTargetMask[index] = hasResolveTarget;
+    }
+
+    void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
+                                               wgpu::LoadOp depthLoadOpIn,
+                                               wgpu::StoreOp depthStoreOpIn,
+                                               wgpu::LoadOp stencilLoadOpIn,
+                                               wgpu::StoreOp stencilStoreOpIn,
+                                               bool readOnly) {
+        hasDepthStencil = true;
+        depthStencilFormat = format;
+        depthLoadOp = depthLoadOpIn;
+        depthStoreOp = depthStoreOpIn;
+        stencilLoadOp = stencilLoadOpIn;
+        stencilStoreOp = stencilStoreOpIn;
+        readOnlyDepthStencil = readOnly;
+    }
+
+    void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
+        this->sampleCount = sampleCount;
+    }
+
+    // RenderPassCache
+
+    RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {
+    }
+
+    RenderPassCache::~RenderPassCache() {
+        std::lock_guard<std::mutex> lock(mMutex);
+        for (auto [_, renderPass] : mCache) {
+            mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
+        }
+
+        mCache.clear();
+    }
+
+    ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        auto it = mCache.find(query);
+        if (it != mCache.end()) {
+            return VkRenderPass(it->second);
+        }
+
+        VkRenderPass renderPass;
+        DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
+        mCache.emplace(query, renderPass);
+        return renderPass;
+    }
+
+    ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
+        const RenderPassCacheQuery& query) const {
+        // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
+        // Precompute them as they must be pointer-chained in VkSubpassDescription.
+        // Note that both colorAttachmentRefs and resolveAttachmentRefs can be sparse with holes
+        // filled with VK_ATTACHMENT_UNUSED.
+        ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+            colorAttachmentRefs;
+        ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+            resolveAttachmentRefs;
+        VkAttachmentReference depthStencilAttachmentRef;
+
+        for (ColorAttachmentIndex i(uint8_t(0)); i < kMaxColorAttachmentsTyped; i++) {
+            colorAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+            resolveAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+            // The Khronos Vulkan validation layer will complain if not set
+            colorAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+            resolveAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        }
+
+        // Contains the attachment description that will be chained in the create info
+        // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
+        constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
+        std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
+
+        VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
+
+        uint32_t attachmentCount = 0;
+        ColorAttachmentIndex highestColorAttachmentIndexPlusOne(static_cast<uint8_t>(0));
+        for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+            auto& attachmentRef = colorAttachmentRefs[i];
+            auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+            attachmentRef.attachment = attachmentCount;
+            attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+            attachmentDesc.flags = 0;
+            attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+            attachmentDesc.samples = vkSampleCount;
+            attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
+            attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
+            attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+            attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+            attachmentCount++;
+            highestColorAttachmentIndexPlusOne =
+                ColorAttachmentIndex(static_cast<uint8_t>(static_cast<uint8_t>(i) + 1u));
+        }
+
+        VkAttachmentReference* depthStencilAttachment = nullptr;
+        if (query.hasDepthStencil) {
+            auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+            depthStencilAttachment = &depthStencilAttachmentRef;
+
+            depthStencilAttachmentRef.attachment = attachmentCount;
+            depthStencilAttachmentRef.layout =
+                query.readOnlyDepthStencil ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
+                                           : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+            attachmentDesc.flags = 0;
+            attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
+            attachmentDesc.samples = vkSampleCount;
+
+            attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
+            attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
+            attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
+            attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
+
+            // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
+            // the only subpass's layout.
+            attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
+            attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
+
+            attachmentCount++;
+        }
+
+        for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
+            auto& attachmentRef = resolveAttachmentRefs[i];
+            auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+            attachmentRef.attachment = attachmentCount;
+            attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+            attachmentDesc.flags = 0;
+            attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+            attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
+            attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+            attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+            attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+            attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+            attachmentCount++;
+        }
+
+        // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
+        VkSubpassDescription subpassDesc;
+        subpassDesc.flags = 0;
+        subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+        subpassDesc.inputAttachmentCount = 0;
+        subpassDesc.pInputAttachments = nullptr;
+        subpassDesc.colorAttachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+        subpassDesc.pColorAttachments = colorAttachmentRefs.data();
+        subpassDesc.pResolveAttachments = resolveAttachmentRefs.data();
+        subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
+        subpassDesc.preserveAttachmentCount = 0;
+        subpassDesc.pPreserveAttachments = nullptr;
+
+        // Chain everything in VkRenderPassCreateInfo
+        VkRenderPassCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.attachmentCount = attachmentCount;
+        createInfo.pAttachments = attachmentDescs.data();
+        createInfo.subpassCount = 1;
+        createInfo.pSubpasses = &subpassDesc;
+        createInfo.dependencyCount = 0;
+        createInfo.pDependencies = nullptr;
+
+        // Create the render pass from the zillion parameters
+        VkRenderPass renderPass;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
+                                                             nullptr, &*renderPass),
+                                "CreateRenderPass"));
+        return renderPass;
+    }
+
+    // RenderPassCache
+
+    size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
+        size_t hash = Hash(query.colorMask);
+
+        HashCombine(&hash, Hash(query.resolveTargetMask));
+
+        for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+            HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
+        }
+
+        HashCombine(&hash, query.hasDepthStencil);
+        if (query.hasDepthStencil) {
+            HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
+                        query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
+        }
+
+        HashCombine(&hash, query.sampleCount);
+
+        return hash;
+    }
+
+    bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
+                                                 const RenderPassCacheQuery& b) const {
+        if (a.colorMask != b.colorMask) {
+            return false;
+        }
+
+        if (a.resolveTargetMask != b.resolveTargetMask) {
+            return false;
+        }
+
+        if (a.sampleCount != b.sampleCount) {
+            return false;
+        }
+
+        for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
+            if ((a.colorFormats[i] != b.colorFormats[i]) ||
+                (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
+                (a.colorStoreOp[i] != b.colorStoreOp[i])) {
+                return false;
+            }
+        }
+
+        if (a.hasDepthStencil != b.hasDepthStencil) {
+            return false;
+        }
+
+        if (a.hasDepthStencil) {
+            if ((a.depthStencilFormat != b.depthStencilFormat) ||
+                (a.depthLoadOp != b.depthLoadOp) || (a.stencilLoadOp != b.stencilLoadOp) ||
+                (a.depthStoreOp != b.depthStoreOp) || (a.stencilStoreOp != b.stencilStoreOp) ||
+                (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/RenderPassCache.h b/src/dawn/native/vulkan/RenderPassCache.h
new file mode 100644
index 0000000..aaf9fc8
--- /dev/null
+++ b/src/dawn/native/vulkan/RenderPassCache.h
@@ -0,0 +1,106 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
+#define DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+#include <mutex>
+#include <unordered_map>
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    // This is a key to query the RenderPassCache, it can be sparse meaning that only the
+    // information for bits set in colorMask or hasDepthStencil need to be provided and the rest can
+    // be uninintialized.
+    struct RenderPassCacheQuery {
+        // Use these helpers to build the query, they make sure all relevant data is initialized and
+        // masks set.
+        void SetColor(ColorAttachmentIndex index,
+                      wgpu::TextureFormat format,
+                      wgpu::LoadOp loadOp,
+                      wgpu::StoreOp storeOp,
+                      bool hasResolveTarget);
+        void SetDepthStencil(wgpu::TextureFormat format,
+                             wgpu::LoadOp depthLoadOp,
+                             wgpu::StoreOp depthStoreOp,
+                             wgpu::LoadOp stencilLoadOp,
+                             wgpu::StoreOp stencilStoreOp,
+                             bool readOnly);
+        void SetSampleCount(uint32_t sampleCount);
+
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
+        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
+        ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
+        ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
+
+        bool hasDepthStencil = false;
+        wgpu::TextureFormat depthStencilFormat;
+        wgpu::LoadOp depthLoadOp;
+        wgpu::StoreOp depthStoreOp;
+        wgpu::LoadOp stencilLoadOp;
+        wgpu::StoreOp stencilStoreOp;
+        bool readOnlyDepthStencil;
+
+        uint32_t sampleCount;
+    };
+
+    // Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
+    // render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
+    // when creating render pass and framebuffer so that we can always make sure the order of
+    // attachments in the rendering pipeline matches the one of the framebuffer.
+    // All the operations on RenderPassCache are guaranteed to be thread-safe.
+    // TODO(cwallez@chromium.org): Make it an LRU cache somehow?
+    class RenderPassCache {
+      public:
+        RenderPassCache(Device* device);
+        ~RenderPassCache();
+
+        ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
+
+      private:
+        // Does the actual VkRenderPass creation on a cache miss.
+        ResultOrError<VkRenderPass> CreateRenderPassForQuery(
+            const RenderPassCacheQuery& query) const;
+
+        // Implements the functors necessary for to use RenderPassCacheQueries as unordered_map
+        // keys.
+        struct CacheFuncs {
+            size_t operator()(const RenderPassCacheQuery& query) const;
+            bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
+        };
+        using Cache =
+            std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
+
+        Device* mDevice = nullptr;
+
+        std::mutex mMutex;
+        Cache mCache;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
diff --git a/src/dawn/native/vulkan/RenderPipelineVk.cpp b/src/dawn/native/vulkan/RenderPipelineVk.cpp
new file mode 100644
index 0000000..4f30496
--- /dev/null
+++ b/src/dawn/native/vulkan/RenderPipelineVk.cpp
@@ -0,0 +1,636 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
+            switch (stepMode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return VK_VERTEX_INPUT_RATE_VERTEX;
+                case wgpu::VertexStepMode::Instance:
+                    return VK_VERTEX_INPUT_RATE_INSTANCE;
+            }
+            UNREACHABLE();
+        }
+
+        VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return VK_FORMAT_R8G8_UINT;
+                case wgpu::VertexFormat::Uint8x4:
+                    return VK_FORMAT_R8G8B8A8_UINT;
+                case wgpu::VertexFormat::Sint8x2:
+                    return VK_FORMAT_R8G8_SINT;
+                case wgpu::VertexFormat::Sint8x4:
+                    return VK_FORMAT_R8G8B8A8_SINT;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return VK_FORMAT_R8G8_UNORM;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return VK_FORMAT_R8G8B8A8_UNORM;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return VK_FORMAT_R8G8_SNORM;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return VK_FORMAT_R8G8B8A8_SNORM;
+                case wgpu::VertexFormat::Uint16x2:
+                    return VK_FORMAT_R16G16_UINT;
+                case wgpu::VertexFormat::Uint16x4:
+                    return VK_FORMAT_R16G16B16A16_UINT;
+                case wgpu::VertexFormat::Sint16x2:
+                    return VK_FORMAT_R16G16_SINT;
+                case wgpu::VertexFormat::Sint16x4:
+                    return VK_FORMAT_R16G16B16A16_SINT;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return VK_FORMAT_R16G16_UNORM;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return VK_FORMAT_R16G16B16A16_UNORM;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return VK_FORMAT_R16G16_SNORM;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return VK_FORMAT_R16G16B16A16_SNORM;
+                case wgpu::VertexFormat::Float16x2:
+                    return VK_FORMAT_R16G16_SFLOAT;
+                case wgpu::VertexFormat::Float16x4:
+                    return VK_FORMAT_R16G16B16A16_SFLOAT;
+                case wgpu::VertexFormat::Float32:
+                    return VK_FORMAT_R32_SFLOAT;
+                case wgpu::VertexFormat::Float32x2:
+                    return VK_FORMAT_R32G32_SFLOAT;
+                case wgpu::VertexFormat::Float32x3:
+                    return VK_FORMAT_R32G32B32_SFLOAT;
+                case wgpu::VertexFormat::Float32x4:
+                    return VK_FORMAT_R32G32B32A32_SFLOAT;
+                case wgpu::VertexFormat::Uint32:
+                    return VK_FORMAT_R32_UINT;
+                case wgpu::VertexFormat::Uint32x2:
+                    return VK_FORMAT_R32G32_UINT;
+                case wgpu::VertexFormat::Uint32x3:
+                    return VK_FORMAT_R32G32B32_UINT;
+                case wgpu::VertexFormat::Uint32x4:
+                    return VK_FORMAT_R32G32B32A32_UINT;
+                case wgpu::VertexFormat::Sint32:
+                    return VK_FORMAT_R32_SINT;
+                case wgpu::VertexFormat::Sint32x2:
+                    return VK_FORMAT_R32G32_SINT;
+                case wgpu::VertexFormat::Sint32x3:
+                    return VK_FORMAT_R32G32B32_SINT;
+                case wgpu::VertexFormat::Sint32x4:
+                    return VK_FORMAT_R32G32B32A32_SINT;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
+            switch (topology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+                case wgpu::PrimitiveTopology::LineList:
+                    return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+            }
+            UNREACHABLE();
+        }
+
+        bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
+            // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
+            // primitive restart be only enabled on primitive topologies that support restarting.
+            switch (topology) {
+                case wgpu::PrimitiveTopology::PointList:
+                case wgpu::PrimitiveTopology::LineList:
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return false;
+                case wgpu::PrimitiveTopology::LineStrip:
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return true;
+            }
+            UNREACHABLE();
+        }
+
+        VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
+            switch (face) {
+                case wgpu::FrontFace::CCW:
+                    return VK_FRONT_FACE_COUNTER_CLOCKWISE;
+                case wgpu::FrontFace::CW:
+                    return VK_FRONT_FACE_CLOCKWISE;
+            }
+            UNREACHABLE();
+        }
+
+        VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
+            switch (mode) {
+                case wgpu::CullMode::None:
+                    return VK_CULL_MODE_NONE;
+                case wgpu::CullMode::Front:
+                    return VK_CULL_MODE_FRONT_BIT;
+                case wgpu::CullMode::Back:
+                    return VK_CULL_MODE_BACK_BIT;
+            }
+            UNREACHABLE();
+        }
+
+        VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return VK_BLEND_FACTOR_ZERO;
+                case wgpu::BlendFactor::One:
+                    return VK_BLEND_FACTOR_ONE;
+                case wgpu::BlendFactor::Src:
+                    return VK_BLEND_FACTOR_SRC_COLOR;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return VK_BLEND_FACTOR_SRC_ALPHA;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+                case wgpu::BlendFactor::Dst:
+                    return VK_BLEND_FACTOR_DST_COLOR;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+                case wgpu::BlendFactor::DstAlpha:
+                    return VK_BLEND_FACTOR_DST_ALPHA;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+                case wgpu::BlendFactor::Constant:
+                    return VK_BLEND_FACTOR_CONSTANT_COLOR;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+            }
+            UNREACHABLE();
+        }
+
+        VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return VK_BLEND_OP_ADD;
+                case wgpu::BlendOperation::Subtract:
+                    return VK_BLEND_OP_SUBTRACT;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return VK_BLEND_OP_REVERSE_SUBTRACT;
+                case wgpu::BlendOperation::Min:
+                    return VK_BLEND_OP_MIN;
+                case wgpu::BlendOperation::Max:
+                    return VK_BLEND_OP_MAX;
+            }
+            UNREACHABLE();
+        }
+
+        VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
+                                                   bool isDeclaredInFragmentShader) {
+            // Vulkan and Dawn color write masks match, static assert it and return the mask
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
+                          VK_COLOR_COMPONENT_R_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
+                          VK_COLOR_COMPONENT_G_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
+                          VK_COLOR_COMPONENT_B_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
+                          VK_COLOR_COMPONENT_A_BIT);
+
+            // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
+            // attachment writes are undefined for components which do not correspond to a fragment
+            // shader outputs", we set the color write mask to 0 to prevent such undefined values
+            // being written into the color attachments.
+            return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
+                                              : static_cast<VkColorComponentFlags>(0);
+        }
+
+        VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
+                                                             bool isDeclaredInFragmentShader) {
+            VkPipelineColorBlendAttachmentState attachment;
+            attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
+            if (attachment.blendEnable) {
+                attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
+                attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
+                attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
+                attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
+                attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
+                attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
+            } else {
+                // Swiftshader's Vulkan implementation appears to expect these values to be valid
+                // even when blending is not enabled.
+                attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+                attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+                attachment.colorBlendOp = VK_BLEND_OP_ADD;
+                attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+                attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+                attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+            }
+            attachment.colorWriteMask =
+                VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+            return attachment;
+        }
+
+        VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
+            switch (op) {
+                case wgpu::StencilOperation::Keep:
+                    return VK_STENCIL_OP_KEEP;
+                case wgpu::StencilOperation::Zero:
+                    return VK_STENCIL_OP_ZERO;
+                case wgpu::StencilOperation::Replace:
+                    return VK_STENCIL_OP_REPLACE;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+                case wgpu::StencilOperation::Invert:
+                    return VK_STENCIL_OP_INVERT;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+            }
+            UNREACHABLE();
+        }
+
+        VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
+            const DepthStencilState* descriptor) {
+            VkPipelineDepthStencilStateCreateInfo depthStencilState;
+            depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+            depthStencilState.pNext = nullptr;
+            depthStencilState.flags = 0;
+
+            // Depth writes only occur if depth is enabled
+            depthStencilState.depthTestEnable =
+                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+                 !descriptor->depthWriteEnabled)
+                    ? VK_FALSE
+                    : VK_TRUE;
+            depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
+            depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
+            depthStencilState.depthBoundsTestEnable = false;
+            depthStencilState.minDepthBounds = 0.0f;
+            depthStencilState.maxDepthBounds = 1.0f;
+
+            depthStencilState.stencilTestEnable =
+                StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
+
+            depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
+            depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
+            depthStencilState.front.depthFailOp =
+                VulkanStencilOp(descriptor->stencilFront.depthFailOp);
+            depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
+
+            depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
+            depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
+            depthStencilState.back.depthFailOp =
+                VulkanStencilOp(descriptor->stencilBack.depthFailOp);
+            depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
+
+            // Dawn doesn't have separate front and back stencil masks.
+            depthStencilState.front.compareMask = descriptor->stencilReadMask;
+            depthStencilState.back.compareMask = descriptor->stencilReadMask;
+            depthStencilState.front.writeMask = descriptor->stencilWriteMask;
+            depthStencilState.back.writeMask = descriptor->stencilWriteMask;
+
+            // The stencil reference is always dynamic
+            depthStencilState.front.reference = 0;
+            depthStencilState.back.reference = 0;
+
+            return depthStencilState;
+        }
+
+    }  // anonymous namespace
+
+    // static
+    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    MaybeError RenderPipeline::Initialize() {
+        Device* device = ToBackend(GetDevice());
+
+        // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
+        std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
+        std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
+        std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
+        std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
+        uint32_t stageCount = 0;
+
+        for (auto stage : IterateStages(this->GetStageMask())) {
+            VkPipelineShaderStageCreateInfo shaderStage;
+
+            const ProgrammableStage& programmableStage = GetStage(stage);
+            DAWN_TRY_ASSIGN(shaderStage.module,
+                            ToBackend(programmableStage.module)
+                                ->GetTransformedModuleHandle(programmableStage.entryPoint.c_str(),
+                                                             ToBackend(GetLayout())));
+
+            shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+            shaderStage.pNext = nullptr;
+            shaderStage.flags = 0;
+            shaderStage.pSpecializationInfo = nullptr;
+            shaderStage.pName = programmableStage.entryPoint.c_str();
+
+            switch (stage) {
+                case dawn::native::SingleShaderStage::Vertex: {
+                    shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
+                    break;
+                }
+                case dawn::native::SingleShaderStage::Fragment: {
+                    shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+                    break;
+                }
+                default: {
+                    // For render pipeline only Vertex and Fragment stage is possible
+                    DAWN_UNREACHABLE();
+                    break;
+                }
+            }
+
+            shaderStage.pSpecializationInfo =
+                GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
+                                        &specializationDataEntriesPerStages[stageCount],
+                                        &specializationMapEntriesPerStages[stageCount]);
+
+            DAWN_ASSERT(stageCount < 2);
+            shaderStages[stageCount] = shaderStage;
+            stageCount++;
+        }
+
+        PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
+        VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
+            ComputeVertexInputDesc(&tempAllocations);
+
+        VkPipelineInputAssemblyStateCreateInfo inputAssembly;
+        inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+        inputAssembly.pNext = nullptr;
+        inputAssembly.flags = 0;
+        inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
+        inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
+
+        // A dummy viewport/scissor info. The validation layers force use to provide at least one
+        // scissor and one viewport here, even if we choose to make them dynamic.
+        VkViewport viewportDesc;
+        viewportDesc.x = 0.0f;
+        viewportDesc.y = 0.0f;
+        viewportDesc.width = 1.0f;
+        viewportDesc.height = 1.0f;
+        viewportDesc.minDepth = 0.0f;
+        viewportDesc.maxDepth = 1.0f;
+        VkRect2D scissorRect;
+        scissorRect.offset.x = 0;
+        scissorRect.offset.y = 0;
+        scissorRect.extent.width = 1;
+        scissorRect.extent.height = 1;
+        VkPipelineViewportStateCreateInfo viewport;
+        viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+        viewport.pNext = nullptr;
+        viewport.flags = 0;
+        viewport.viewportCount = 1;
+        viewport.pViewports = &viewportDesc;
+        viewport.scissorCount = 1;
+        viewport.pScissors = &scissorRect;
+
+        VkPipelineRasterizationStateCreateInfo rasterization;
+        rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+        rasterization.pNext = nullptr;
+        rasterization.flags = 0;
+        rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
+        rasterization.rasterizerDiscardEnable = VK_FALSE;
+        rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+        rasterization.cullMode = VulkanCullMode(GetCullMode());
+        rasterization.frontFace = VulkanFrontFace(GetFrontFace());
+        rasterization.depthBiasEnable = IsDepthBiasEnabled();
+        rasterization.depthBiasConstantFactor = GetDepthBias();
+        rasterization.depthBiasClamp = GetDepthBiasClamp();
+        rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
+        rasterization.lineWidth = 1.0f;
+
+        VkPipelineMultisampleStateCreateInfo multisample;
+        multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+        multisample.pNext = nullptr;
+        multisample.flags = 0;
+        multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
+        multisample.sampleShadingEnable = VK_FALSE;
+        multisample.minSampleShading = 0.0f;
+        // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
+        // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
+        // we have to assert that this length is indeed 1.
+        ASSERT(multisample.rasterizationSamples <= 32);
+        VkSampleMask sampleMask = GetSampleMask();
+        multisample.pSampleMask = &sampleMask;
+        multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
+        multisample.alphaToOneEnable = VK_FALSE;
+
+        VkPipelineDepthStencilStateCreateInfo depthStencilState =
+            ComputeDepthStencilDesc(GetDepthStencilState());
+
+        VkPipelineColorBlendStateCreateInfo colorBlend;
+        // colorBlend may hold pointers to elements in colorBlendAttachments, so it must have a
+        // definition scope as same as colorBlend
+        ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
+            colorBlendAttachments;
+        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+            // Initialize the "blend state info" that will be chained in the "create info" from the
+            // data pre-computed in the ColorState
+            for (auto& blend : colorBlendAttachments) {
+                blend.blendEnable = VK_FALSE;
+                blend.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+                blend.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+                blend.colorBlendOp = VK_BLEND_OP_ADD;
+                blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+                blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+                blend.alphaBlendOp = VK_BLEND_OP_ADD;
+                blend.colorWriteMask = 0;
+            }
+
+            const auto& fragmentOutputsWritten =
+                GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
+            ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+                GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                const ColorTargetState* target = GetColorTargetState(i);
+                colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
+            }
+
+            colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+            colorBlend.pNext = nullptr;
+            colorBlend.flags = 0;
+            // LogicOp isn't supported so we disable it.
+            colorBlend.logicOpEnable = VK_FALSE;
+            colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
+            colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+            colorBlend.pAttachments = colorBlendAttachments.data();
+            // The blend constant is always dynamic so we fill in a dummy value
+            colorBlend.blendConstants[0] = 0.0f;
+            colorBlend.blendConstants[1] = 0.0f;
+            colorBlend.blendConstants[2] = 0.0f;
+            colorBlend.blendConstants[3] = 0.0f;
+        }
+
+        // Tag all state as dynamic but stencil masks and depth bias.
+        VkDynamicState dynamicStates[] = {
+            VK_DYNAMIC_STATE_VIEWPORT,     VK_DYNAMIC_STATE_SCISSOR,
+            VK_DYNAMIC_STATE_LINE_WIDTH,   VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+            VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+        };
+        VkPipelineDynamicStateCreateInfo dynamic;
+        dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+        dynamic.pNext = nullptr;
+        dynamic.flags = 0;
+        dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
+        dynamic.pDynamicStates = dynamicStates;
+
+        // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
+        // don't matter so set them all to LoadOp::Load / StoreOp::Store. Whether the render pass
+        // has resolve target and whether depth/stencil attachment is read-only also don't matter,
+        // so set them both to false.
+        VkRenderPass renderPass = VK_NULL_HANDLE;
+        {
+            RenderPassCacheQuery query;
+
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
+                               wgpu::StoreOp::Store, false);
+            }
+
+            if (HasDepthStencilAttachment()) {
+                query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
+                                      wgpu::StoreOp::Store, wgpu::LoadOp::Load,
+                                      wgpu::StoreOp::Store, false);
+            }
+
+            query.SetSampleCount(GetSampleCount());
+
+            DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
+        }
+
+        // The create info chains in a bunch of things created on the stack here or inside state
+        // objects.
+        VkGraphicsPipelineCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.stageCount = stageCount;
+        createInfo.pStages = shaderStages.data();
+        createInfo.pVertexInputState = &vertexInputCreateInfo;
+        createInfo.pInputAssemblyState = &inputAssembly;
+        createInfo.pTessellationState = nullptr;
+        createInfo.pViewportState = &viewport;
+        createInfo.pRasterizationState = &rasterization;
+        createInfo.pMultisampleState = &multisample;
+        createInfo.pDepthStencilState = &depthStencilState;
+        createInfo.pColorBlendState =
+            (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
+        createInfo.pDynamicState = &dynamic;
+        createInfo.layout = ToBackend(GetLayout())->GetHandle();
+        createInfo.renderPass = renderPass;
+        createInfo.subpass = 0;
+        createInfo.basePipelineHandle = VkPipeline{};
+        createInfo.basePipelineIndex = -1;
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
+                                               &createInfo, nullptr, &*mHandle),
+            "CreateGraphicsPipeline"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    void RenderPipeline::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_RenderPipeline", GetLabel());
+    }
+
+    VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
+        PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
+        // Fill in the "binding info" that will be chained in the create info
+        uint32_t bindingCount = 0;
+        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
+
+            VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
+            bindingDesc->binding = static_cast<uint8_t>(slot);
+            bindingDesc->stride = bindingInfo.arrayStride;
+            bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
+
+            bindingCount++;
+        }
+
+        // Fill in the "attribute info" that will be chained in the create info
+        uint32_t attributeCount = 0;
+        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
+
+            VkVertexInputAttributeDescription* attributeDesc =
+                &tempAllocations->attributes[attributeCount];
+            attributeDesc->location = static_cast<uint8_t>(loc);
+            attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
+            attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
+            attributeDesc->offset = attributeInfo.offset;
+
+            attributeCount++;
+        }
+
+        // Build the create info
+        VkPipelineVertexInputStateCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.vertexBindingDescriptionCount = bindingCount;
+        createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
+        createInfo.vertexAttributeDescriptionCount = attributeCount;
+        createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
+        return createInfo;
+    }
+
+    RenderPipeline::~RenderPipeline() = default;
+
+    void RenderPipeline::DestroyImpl() {
+        RenderPipelineBase::DestroyImpl();
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    VkPipeline RenderPipeline::GetHandle() const {
+        return mHandle;
+    }
+
+    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata) {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                            userdata);
+        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/RenderPipelineVk.h b/src/dawn/native/vulkan/RenderPipelineVk.h
new file mode 100644
index 0000000..7d87caca
--- /dev/null
+++ b/src/dawn/native/vulkan/RenderPipelineVk.h
@@ -0,0 +1,59 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
+#define DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+
+        VkPipeline GetHandle() const;
+
+        MaybeError Initialize() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~RenderPipeline() override;
+        void DestroyImpl() override;
+        using RenderPipelineBase::RenderPipelineBase;
+
+        struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
+            std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
+            std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
+        };
+        VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
+            PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
+
+        VkPipeline mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
diff --git a/src/dawn/native/vulkan/ResourceHeapVk.cpp b/src/dawn/native/vulkan/ResourceHeapVk.cpp
new file mode 100644
index 0000000..94ce7fc
--- /dev/null
+++ b/src/dawn/native/vulkan/ResourceHeapVk.cpp
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+
+namespace dawn::native::vulkan {
+
+    ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
+        : mMemory(memory), mMemoryType(memoryType) {
+    }
+
+    VkDeviceMemory ResourceHeap::GetMemory() const {
+        return mMemory;
+    }
+
+    size_t ResourceHeap::GetMemoryType() const {
+        return mMemoryType;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ResourceHeapVk.h b/src/dawn/native/vulkan/ResourceHeapVk.h
new file mode 100644
index 0000000..5b822c8
--- /dev/null
+++ b/src/dawn/native/vulkan/ResourceHeapVk.h
@@ -0,0 +1,39 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceHeap.h"
+
+namespace dawn::native::vulkan {
+
+    // Wrapper for physical memory used with or without a resource object.
+    class ResourceHeap : public ResourceHeapBase {
+      public:
+        ResourceHeap(VkDeviceMemory memory, size_t memoryType);
+        ~ResourceHeap() = default;
+
+        VkDeviceMemory GetMemory() const;
+        size_t GetMemoryType() const;
+
+      private:
+        VkDeviceMemory mMemory = VK_NULL_HANDLE;
+        size_t mMemoryType = 0;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
diff --git a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
new file mode 100644
index 0000000..783300f
--- /dev/null
+++ b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -0,0 +1,293 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/BuddyMemoryAllocator.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+
+        // TODO(crbug.com/dawn/849): This is a hardcoded heurstic to choose when to
+        // suballocate but it should ideally depend on the size of the memory heaps and other
+        // factors.
+        constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull;  // 4MiB
+
+        // Have each bucket of the buddy system allocate at least some resource of the maximum
+        // size
+        constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
+
+    }  // anonymous namespace
+
+    // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
+    // service suballocation requests, but for a single Vulkan memory type.
+
+    class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
+      public:
+        SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
+            : mDevice(device),
+              mMemoryTypeIndex(memoryTypeIndex),
+              mMemoryHeapSize(memoryHeapSize),
+              mPooledMemoryAllocator(this),
+              mBuddySystem(
+                  // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
+                  // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
+                  uint64_t(1) << Log2(mMemoryHeapSize),
+                  // Take the min in the very unlikely case the memory heap is tiny.
+                  std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
+                  &mPooledMemoryAllocator) {
+            ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
+        }
+        ~SingleTypeAllocator() override = default;
+
+        void DestroyPool() {
+            mPooledMemoryAllocator.DestroyPool();
+        }
+
+        ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
+            return mBuddySystem.Allocate(size, alignment);
+        }
+
+        void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
+            mBuddySystem.Deallocate(allocation);
+        }
+
+        // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
+
+        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+            uint64_t size) override {
+            if (size > mMemoryHeapSize) {
+                return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
+            }
+
+            VkMemoryAllocateInfo allocateInfo;
+            allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+            allocateInfo.pNext = nullptr;
+            allocateInfo.allocationSize = size;
+            allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
+
+            VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+
+            // First check OOM that we want to surface to the application.
+            DAWN_TRY(CheckVkOOMThenSuccess(
+                mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
+                                           &*allocatedMemory),
+                "vkAllocateMemory"));
+
+            ASSERT(allocatedMemory != VK_NULL_HANDLE);
+            return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
+        }
+
+        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
+        }
+
+      private:
+        Device* mDevice;
+        size_t mMemoryTypeIndex;
+        VkDeviceSize mMemoryHeapSize;
+        PooledResourceMemoryAllocator mPooledMemoryAllocator;
+        BuddyMemoryAllocator mBuddySystem;
+    };
+
+    // Implementation of ResourceMemoryAllocator
+
+    ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
+        const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+        mAllocatorsPerType.reserve(info.memoryTypes.size());
+
+        for (size_t i = 0; i < info.memoryTypes.size(); i++) {
+            mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
+                mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
+        }
+    }
+
+    ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
+
+    ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
+        const VkMemoryRequirements& requirements,
+        MemoryKind kind) {
+        // The Vulkan spec guarantees at least on memory type is valid.
+        int memoryType = FindBestTypeIndex(requirements, kind);
+        ASSERT(memoryType >= 0);
+
+        VkDeviceSize size = requirements.size;
+
+        // Sub-allocate non-mappable resources because at the moment the mapped pointer
+        // is part of the resource and not the heap, which doesn't match the Vulkan model.
+        // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
+        if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable &&
+            !mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+            // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
+            // hardware puts information on the memory's page table entry and allocating a linear
+            // resource in the same page as a non-linear (aka opaque) resource can cause issues.
+            // Probably because some texture compression flags are stored on the page table entry,
+            // and allocating a linear resource removes these flags.
+            //
+            // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
+            // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
+            // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
+            // GPUs often use a granularity of 64k which will lead to a lot of wasted spec. Revisit
+            // with a more efficient algorithm later.
+            uint64_t alignment =
+                std::max(requirements.alignment,
+                         mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
+
+            ResourceMemoryAllocation subAllocation;
+            DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
+                                               requirements.size, alignment));
+            if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+                return std::move(subAllocation);
+            }
+        }
+
+        // If sub-allocation failed, allocate memory just for it.
+        std::unique_ptr<ResourceHeapBase> resourceHeap;
+        DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
+
+        void* mappedPointer = nullptr;
+        if (kind == MemoryKind::LinearMappable) {
+            DAWN_TRY_WITH_CLEANUP(
+                CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+                                                     ToBackend(resourceHeap.get())->GetMemory(), 0,
+                                                     size, 0, &mappedPointer),
+                               "vkMapMemory"),
+                {
+                    mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap));
+                });
+        }
+
+        AllocationInfo info;
+        info.mMethod = AllocationMethod::kDirect;
+        return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+                                        static_cast<uint8_t*>(mappedPointer));
+    }
+
+    void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
+        switch (allocation->GetInfo().mMethod) {
+            // Some memory allocation can never be initialized, for example when wrapping
+            // swapchain VkImages with a Texture.
+            case AllocationMethod::kInvalid:
+                break;
+
+            // For direct allocation we can put the memory for deletion immediately and the fence
+            // deleter will make sure the resources are freed before the memory.
+            case AllocationMethod::kDirect: {
+                ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
+                allocation->Invalidate();
+                mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
+                delete heap;
+                break;
+            }
+
+            // Suballocations aren't freed immediately, otherwise another resource allocation could
+            // happen just after that aliases the old one and would require a barrier.
+            // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
+            // latency to reclaim memory.
+            case AllocationMethod::kSubAllocated:
+                mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
+                break;
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        // Invalidate the underlying resource heap in case the client accidentally
+        // calls DeallocateMemory again using the same allocation.
+        allocation->Invalidate();
+    }
+
+    void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
+        for (const ResourceMemoryAllocation& allocation :
+             mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
+            ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+            size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+
+            mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
+        }
+
+        mSubAllocationsToDelete.ClearUpTo(completedSerial);
+    }
+
+    int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
+                                                   MemoryKind kind) {
+        const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+        bool mappable = kind == MemoryKind::LinearMappable;
+
+        // Find a suitable memory type for this allocation
+        int bestType = -1;
+        for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+            // Resource must support this memory type
+            if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+                continue;
+            }
+
+            // Mappable resource must be host visible
+            if (mappable &&
+                (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+                continue;
+            }
+
+            // Mappable must also be host coherent.
+            if (mappable &&
+                (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+                continue;
+            }
+
+            // Found the first candidate memory type
+            if (bestType == -1) {
+                bestType = static_cast<int>(i);
+                continue;
+            }
+
+            // For non-mappable resources, favor device local memory.
+            bool currentDeviceLocal =
+                info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            bool bestDeviceLocal =
+                info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
+                if (currentDeviceLocal) {
+                    bestType = static_cast<int>(i);
+                }
+                continue;
+            }
+
+            // All things equal favor the memory in the biggest heap
+            VkDeviceSize bestTypeHeapSize =
+                info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+            VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+            if (candidateHeapSize > bestTypeHeapSize) {
+                bestType = static_cast<int>(i);
+                continue;
+            }
+        }
+
+        return bestType;
+    }
+
+    void ResourceMemoryAllocator::DestroyPool() {
+        for (auto& alloc : mAllocatorsPerType) {
+            alloc->DestroyPool();
+        }
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
new file mode 100644
index 0000000..81864fd
--- /dev/null
+++ b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    // Various kinds of memory that influence the result of the allocation. For example, to take
+    // into account mappability and Vulkan's bufferImageGranularity.
+    enum class MemoryKind {
+        Linear,
+        LinearMappable,
+        Opaque,
+    };
+
+    class ResourceMemoryAllocator {
+      public:
+        ResourceMemoryAllocator(Device* device);
+        ~ResourceMemoryAllocator();
+
+        ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
+                                                         MemoryKind kind);
+        void Deallocate(ResourceMemoryAllocation* allocation);
+
+        void DestroyPool();
+
+        void Tick(ExecutionSerial completedSerial);
+
+        int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
+
+      private:
+        Device* mDevice;
+
+        class SingleTypeAllocator;
+        std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
+
+        SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
diff --git a/src/dawn/native/vulkan/SamplerVk.cpp b/src/dawn/native/vulkan/SamplerVk.cpp
new file mode 100644
index 0000000..c7fc1a3
--- /dev/null
+++ b/src/dawn/native/vulkan/SamplerVk.cpp
@@ -0,0 +1,131 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/SamplerVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+        VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+                case wgpu::AddressMode::ClampToEdge:
+                    return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+            }
+            UNREACHABLE();
+        }
+
+        VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
+            switch (filter) {
+                case wgpu::FilterMode::Linear:
+                    return VK_FILTER_LINEAR;
+                case wgpu::FilterMode::Nearest:
+                    return VK_FILTER_NEAREST;
+            }
+            UNREACHABLE();
+        }
+
+        VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
+            switch (filter) {
+                case wgpu::FilterMode::Linear:
+                    return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+                case wgpu::FilterMode::Nearest:
+                    return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+            }
+            UNREACHABLE();
+        }
+    }  // anonymous namespace
+
+    // static
+    ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+                                                const SamplerDescriptor* descriptor) {
+        Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+        DAWN_TRY(sampler->Initialize(descriptor));
+        return sampler;
+    }
+
+    MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+        VkSamplerCreateInfo createInfo = {};
+        createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
+        createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
+        createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
+        createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
+        createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
+        createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
+        createInfo.mipLodBias = 0.0f;
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
+            createInfo.compareEnable = VK_TRUE;
+        } else {
+            // Still set the compareOp so it's not garbage.
+            createInfo.compareOp = VK_COMPARE_OP_NEVER;
+            createInfo.compareEnable = VK_FALSE;
+        }
+        createInfo.minLod = descriptor->lodMinClamp;
+        createInfo.maxLod = descriptor->lodMaxClamp;
+        createInfo.unnormalizedCoordinates = VK_FALSE;
+
+        Device* device = ToBackend(GetDevice());
+        uint16_t maxAnisotropy = GetMaxAnisotropy();
+        if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
+            createInfo.anisotropyEnable = VK_TRUE;
+            // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
+            createInfo.maxAnisotropy =
+                std::min(static_cast<float>(maxAnisotropy),
+                         device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
+        } else {
+            createInfo.anisotropyEnable = VK_FALSE;
+            createInfo.maxAnisotropy = 1;
+        }
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "CreateSampler"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    Sampler::~Sampler() = default;
+
+    void Sampler::DestroyImpl() {
+        SamplerBase::DestroyImpl();
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    VkSampler Sampler::GetHandle() const {
+        return mHandle;
+    }
+
+    void Sampler::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SAMPLER,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_Sampler", GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/SamplerVk.h b/src/dawn/native/vulkan/SamplerVk.h
new file mode 100644
index 0000000..1b246c9
--- /dev/null
+++ b/src/dawn/native/vulkan/SamplerVk.h
@@ -0,0 +1,48 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SAMPLERVK_H_
+#define DAWNNATIVE_VULKAN_SAMPLERVK_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class Sampler final : public SamplerBase {
+      public:
+        static ResultOrError<Ref<Sampler>> Create(Device* device,
+                                                  const SamplerDescriptor* descriptor);
+
+        VkSampler GetHandle() const;
+
+      private:
+        ~Sampler() override;
+        void DestroyImpl() override;
+        using SamplerBase::SamplerBase;
+        MaybeError Initialize(const SamplerDescriptor* descriptor);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        VkSampler mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_SAMPLERVK_H_
diff --git a/src/dawn/native/vulkan/ShaderModuleVk.cpp b/src/dawn/native/vulkan/ShaderModuleVk.cpp
new file mode 100644
index 0000000..9b8c291
--- /dev/null
+++ b/src/dawn/native/vulkan/ShaderModuleVk.cpp
@@ -0,0 +1,250 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+
+#include "dawn/native/SpirvValidation.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+#include <spirv-tools/libspirv.hpp>
+
+namespace dawn::native::vulkan {
+
+    ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
+        Device* device)
+        : mDevice(device) {
+    }
+
+    ShaderModule::ConcurrentTransformedShaderModuleCache::
+        ~ConcurrentTransformedShaderModuleCache() {
+        std::lock_guard<std::mutex> lock(mMutex);
+        for (const auto& [_, module] : mTransformedShaderModuleCache) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
+        }
+    }
+
+    VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::FindShaderModule(
+        const PipelineLayoutEntryPointPair& key) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        auto iter = mTransformedShaderModuleCache.find(key);
+        if (iter != mTransformedShaderModuleCache.end()) {
+            auto cached = iter->second;
+            return cached;
+        }
+        return VK_NULL_HANDLE;
+    }
+
+    VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGetCachedShaderModule(
+        const PipelineLayoutEntryPointPair& key,
+        VkShaderModule value) {
+        ASSERT(value != VK_NULL_HANDLE);
+        std::lock_guard<std::mutex> lock(mMutex);
+        auto iter = mTransformedShaderModuleCache.find(key);
+        if (iter == mTransformedShaderModuleCache.end()) {
+            mTransformedShaderModuleCache.emplace(key, value);
+            return value;
+        } else {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(value);
+            return iter->second;
+        }
+    }
+
+    // static
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor),
+          mTransformedShaderModuleCache(
+              std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {
+    }
+
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        if (GetDevice()->IsRobustnessEnabled()) {
+            ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+            tint::transform::Robustness robustness;
+            tint::transform::DataMap transformInputs;
+
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
+                                                   transformInputs, nullptr, nullptr));
+            // Rather than use a new ParseResult object, we just reuse the original parseResult
+            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+        }
+
+        return InitializeBase(parseResult);
+    }
+
+    void ShaderModule::DestroyImpl() {
+        ShaderModuleBase::DestroyImpl();
+        // Remove reference to internal cache to trigger cleanup.
+        mTransformedShaderModuleCache = nullptr;
+    }
+
+    ShaderModule::~ShaderModule() = default;
+
+    ResultOrError<VkShaderModule> ShaderModule::GetTransformedModuleHandle(
+        const char* entryPointName,
+        PipelineLayout* layout) {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General,
+                     "ShaderModuleVk::GetTransformedModuleHandle");
+
+        // If the shader was destroyed, we should never call this function.
+        ASSERT(IsAlive());
+
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        auto cacheKey = std::make_pair(layout, entryPointName);
+        VkShaderModule cachedShaderModule =
+            mTransformedShaderModuleCache->FindShaderModule(cacheKey);
+        if (cachedShaderModule != VK_NULL_HANDLE) {
+            return cachedShaderModule;
+        }
+
+        // Creation of VkShaderModule is deferred to this point when using tint generator
+
+        // Remap BindingNumber to BindingIndex in WGSL shader
+        using BindingRemapper = tint::transform::BindingRemapper;
+        using BindingPoint = tint::transform::BindingPoint;
+        BindingRemapper::BindingPoints bindingPoints;
+        BindingRemapper::AccessControls accessControls;
+
+        const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
+
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+            const auto& groupBindingInfo = moduleBindingInfo[group];
+            for (const auto& it : groupBindingInfo) {
+                BindingNumber binding = it.first;
+                BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(binding)};
+
+                BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(bindingIndex)};
+                if (srcBindingPoint != dstBindingPoint) {
+                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                }
+            }
+        }
+
+        tint::transform::Manager transformManager;
+        transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+        // Many Vulkan drivers can't handle multi-entrypoint shader modules.
+        transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+
+        tint::transform::DataMap transformInputs;
+        transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+                                                         std::move(accessControls),
+                                                         /* mayCollide */ false);
+        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
+        // Transform external textures into the binding locations specified in the bgl
+        // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
+        tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+            ExternalTextureBindingExpansionMap expansions =
+                bgl->GetExternalTextureBindingExpansionMap();
+
+            std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+                expansions.begin();
+
+            while (it != expansions.end()) {
+                newBindingsMap[{static_cast<uint32_t>(i),
+                                static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
+                    {static_cast<uint32_t>(i),
+                     static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
+                    {static_cast<uint32_t>(i),
+                     static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
+                it++;
+            }
+        }
+
+        if (!newBindingsMap.empty()) {
+            transformManager.Add<tint::transform::MultiplanarExternalTexture>();
+            transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+                newBindingsMap);
+        }
+
+        tint::Program program;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+            DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
+                                                   transformInputs, nullptr, nullptr));
+        }
+
+        tint::writer::spirv::Options options;
+        options.emit_vertex_point_size = true;
+        options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+        options.use_zero_initialize_workgroup_memory_extension =
+            GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
+
+        std::vector<uint32_t> spirv;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
+            auto result = tint::writer::spirv::Generate(&program, options);
+            DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
+                            result.error);
+
+            spirv = std::move(result.spirv);
+        }
+
+        DAWN_TRY(
+            ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
+
+        VkShaderModuleCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.codeSize = spirv.size() * sizeof(uint32_t);
+        createInfo.pCode = spirv.data();
+
+        Device* device = ToBackend(GetDevice());
+
+        VkShaderModule newHandle = VK_NULL_HANDLE;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
+            DAWN_TRY(CheckVkSuccess(device->fn.CreateShaderModule(
+                                        device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
+                                    "CreateShaderModule"));
+        }
+        if (newHandle != VK_NULL_HANDLE) {
+            newHandle =
+                mTransformedShaderModuleCache->AddOrGetCachedShaderModule(cacheKey, newHandle);
+        }
+
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SHADER_MODULE,
+                     reinterpret_cast<uint64_t&>(newHandle), "Dawn_ShaderModule", GetLabel());
+
+        return newHandle;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ShaderModuleVk.h b/src/dawn/native/vulkan/ShaderModuleVk.h
new file mode 100644
index 0000000..7040b74
--- /dev/null
+++ b/src/dawn/native/vulkan/ShaderModuleVk.h
@@ -0,0 +1,67 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
+#define DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+#include <mutex>
+
+namespace dawn::native::vulkan {
+
+    class Device;
+    class PipelineLayout;
+
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        ResultOrError<VkShaderModule> GetTransformedModuleHandle(const char* entryPointName,
+                                                                 PipelineLayout* layout);
+
+      private:
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+        void DestroyImpl() override;
+
+        // New handles created by GetTransformedModuleHandle at pipeline creation time
+        class ConcurrentTransformedShaderModuleCache {
+          public:
+            explicit ConcurrentTransformedShaderModuleCache(Device* device);
+            ~ConcurrentTransformedShaderModuleCache();
+            VkShaderModule FindShaderModule(const PipelineLayoutEntryPointPair& key);
+            VkShaderModule AddOrGetCachedShaderModule(const PipelineLayoutEntryPointPair& key,
+                                                      VkShaderModule value);
+
+          private:
+            Device* mDevice;
+            std::mutex mMutex;
+            std::unordered_map<PipelineLayoutEntryPointPair,
+                               VkShaderModule,
+                               PipelineLayoutEntryPointPairHashFunc>
+                mTransformedShaderModuleCache;
+        };
+        std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
diff --git a/src/dawn/native/vulkan/StagingBufferVk.cpp b/src/dawn/native/vulkan/StagingBufferVk.cpp
new file mode 100644
index 0000000..fb66315
--- /dev/null
+++ b/src/dawn/native/vulkan/StagingBufferVk.cpp
@@ -0,0 +1,77 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    MaybeError StagingBuffer::Initialize() {
+        VkBufferCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.size = GetSize();
+        createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = 0;
+
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
+            "vkCreateBuffer"));
+
+        VkMemoryRequirements requirements;
+        mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
+
+        DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
+                                         requirements, MemoryKind::LinearMappable));
+
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
+                                         ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
+                                         mAllocation.GetOffset()),
+            "vkBindBufferMemory"));
+
+        mMappedPointer = mAllocation.GetMappedPointer();
+        if (mMappedPointer == nullptr) {
+            return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
+        }
+
+        SetDebugName(mDevice, VK_OBJECT_TYPE_BUFFER, reinterpret_cast<uint64_t&>(mBuffer),
+                     "Dawn_StagingBuffer");
+
+        return {};
+    }
+
+    StagingBuffer::~StagingBuffer() {
+        mMappedPointer = nullptr;
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
+        mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
+    }
+
+    VkBuffer StagingBuffer::GetBufferHandle() const {
+        return mBuffer;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/StagingBufferVk.h b/src/dawn/native/vulkan/StagingBufferVk.h
new file mode 100644
index 0000000..b6ad68b
--- /dev/null
+++ b/src/dawn/native/vulkan/StagingBufferVk.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERVK_H_
+#define DAWNNATIVE_STAGINGBUFFERVK_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/StagingBuffer.h"
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+        ~StagingBuffer() override;
+
+        VkBuffer GetBufferHandle() const;
+
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        VkBuffer mBuffer;
+        ResourceMemoryAllocation mAllocation;
+    };
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_STAGINGBUFFERVK_H_
diff --git a/src/dawn/native/vulkan/SwapChainVk.cpp b/src/dawn/native/vulkan/SwapChainVk.cpp
new file mode 100644
index 0000000..c09cb0d
--- /dev/null
+++ b/src/dawn/native/vulkan/SwapChainVk.cpp
@@ -0,0 +1,701 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/SwapChainVk.h"
+
+#include "dawn/common/Compiler.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <algorithm>
+
+#if defined(DAWN_USE_X11)
+#    include "dawn/native/XlibXcbFunctions.h"
+#endif  // defined(DAWN_USE_X11)
+
+namespace dawn::native::vulkan {
+
+    // OldSwapChain
+
+    // static
+    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new OldSwapChain(device, descriptor));
+    }
+
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        DawnWSIContextVulkan wsiContext = {};
+        im.Init(im.userData, &wsiContext);
+
+        ASSERT(im.textureUsage != WGPUTextureUsage_None);
+        mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+    }
+
+    OldSwapChain::~OldSwapChain() {
+    }
+
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+
+        if (error) {
+            GetDevice()->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+
+        ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
+        VkImage nativeTexture = VkImage::CreateFromHandle(image);
+        return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
+            .Detach();
+    }
+
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+        Device* device = ToBackend(GetDevice());
+
+        // Perform the necessary pipeline barriers for the texture to be used with the usage
+        // requested by the implementation.
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+        ToBackend(view->GetTexture())
+            ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
+
+        DAWN_TRY(device->SubmitPendingCommands());
+
+        return {};
+    }
+
+    // SwapChain
+
+    namespace {
+
+        ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
+            const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
+            const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
+            VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
+
+            // May not be used in the platform-specific switches below.
+            DAWN_UNUSED(info);
+            DAWN_UNUSED(fn);
+            DAWN_UNUSED(instance);
+
+            switch (surface->GetType()) {
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+                case Surface::Type::MetalLayer:
+                    if (info.HasExt(InstanceExt::MetalSurface)) {
+                        VkMetalSurfaceCreateInfoEXT createInfo;
+                        createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
+                        createInfo.pNext = nullptr;
+                        createInfo.flags = 0;
+                        createInfo.pLayer = surface->GetMetalLayer();
+
+                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                        DAWN_TRY(CheckVkSuccess(
+                            fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
+                            "CreateMetalSurface"));
+                        return vkSurface;
+                    }
+                    break;
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+                case Surface::Type::WindowsHWND:
+                    if (info.HasExt(InstanceExt::Win32Surface)) {
+                        VkWin32SurfaceCreateInfoKHR createInfo;
+                        createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+                        createInfo.pNext = nullptr;
+                        createInfo.flags = 0;
+                        createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
+                        createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
+
+                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                        DAWN_TRY(CheckVkSuccess(
+                            fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                            "CreateWin32Surface"));
+                        return vkSurface;
+                    }
+                    break;
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+                case Surface::Type::AndroidWindow: {
+                    if (info.HasExt(InstanceExt::AndroidSurface)) {
+                        ASSERT(surface->GetAndroidNativeWindow() != nullptr);
+
+                        VkAndroidSurfaceCreateInfoKHR createInfo;
+                        createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
+                        createInfo.pNext = nullptr;
+                        createInfo.flags = 0;
+                        createInfo.window =
+                            static_cast<struct ANativeWindow*>(surface->GetAndroidNativeWindow());
+
+                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                        DAWN_TRY(CheckVkSuccess(
+                            fn.CreateAndroidSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                            "CreateAndroidSurfaceKHR"));
+                        return vkSurface;
+                    }
+
+                    break;
+                }
+
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_USE_X11)
+                case Surface::Type::XlibWindow: {
+                    if (info.HasExt(InstanceExt::XlibSurface)) {
+                        VkXlibSurfaceCreateInfoKHR createInfo;
+                        createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+                        createInfo.pNext = nullptr;
+                        createInfo.flags = 0;
+                        createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
+                        createInfo.window = surface->GetXWindow();
+
+                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                        DAWN_TRY(CheckVkSuccess(
+                            fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                            "CreateXlibSurface"));
+                        return vkSurface;
+                    }
+
+                    // Fall back to using XCB surfaces if the Xlib extension isn't available.
+                    // See https://xcb.freedesktop.org/MixingCalls/ for more information about
+                    // interoperability between Xlib and XCB
+                    const XlibXcbFunctions* xlibXcb =
+                        adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
+                    ASSERT(xlibXcb != nullptr);
+
+                    if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
+                        VkXcbSurfaceCreateInfoKHR createInfo;
+                        createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
+                        createInfo.pNext = nullptr;
+                        createInfo.flags = 0;
+                        // The XCB connection lives as long as the X11 display.
+                        createInfo.connection = xlibXcb->xGetXCBConnection(
+                            static_cast<Display*>(surface->GetXDisplay()));
+                        createInfo.window = surface->GetXWindow();
+
+                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                        DAWN_TRY(CheckVkSuccess(
+                            fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                            "CreateXcbSurfaceKHR"));
+                        return vkSurface;
+                    }
+                    break;
+                }
+#endif  // defined(DAWN_USE_X11)
+
+                default:
+                    break;
+            }
+
+            return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
+                                                surface->GetType());
+        }
+
+        VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
+            switch (mode) {
+                case wgpu::PresentMode::Fifo:
+                    return VK_PRESENT_MODE_FIFO_KHR;
+                case wgpu::PresentMode::Immediate:
+                    return VK_PRESENT_MODE_IMMEDIATE_KHR;
+                case wgpu::PresentMode::Mailbox:
+                    return VK_PRESENT_MODE_MAILBOX_KHR;
+            }
+            UNREACHABLE();
+        }
+
+        uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
+            switch (mode) {
+                case VK_PRESENT_MODE_FIFO_KHR:
+                case VK_PRESENT_MODE_IMMEDIATE_KHR:
+                    return 2;
+                case VK_PRESENT_MODE_MAILBOX_KHR:
+                    return 3;
+                default:
+                    break;
+            }
+            UNREACHABLE();
+        }
+
+    }  // anonymous namespace
+
+    // static
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    SwapChain::~SwapChain() = default;
+
+    void SwapChain::DestroyImpl() {
+        SwapChainBase::DestroyImpl();
+        DetachFromSurface();
+    }
+
+    // Note that when we need to re-create the swapchain because it is out of date,
+    // previousSwapChain can be set to `this`.
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        Device* device = ToBackend(GetDevice());
+        Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
+
+        VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
+
+        if (previousSwapChain != nullptr) {
+            // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
+            // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
+
+            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+            // multiple backends one after the other. It probably needs to block until the backend
+            // and GPU are completely finished with the previous swapchain.
+            DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
+                            "Vulkan SwapChain cannot switch backend types from %s to %s.",
+                            previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
+
+            // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+            SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+            // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
+            // Vulkan devices on different VkInstances. Probably needs to block too!
+            VkInstance previousInstance =
+                ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
+            DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
+                            "Vulkan SwapChain cannot switch between Vulkan instances.");
+
+            // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
+            // VkSurfaceKHR provided since they are on the same instance.
+            std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
+
+            // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
+            // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
+            // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
+            // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
+            // using the fenced deleter makes the code simpler).
+            std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
+            ToBackend(previousSwapChain->GetDevice())
+                ->GetFencedDeleter()
+                ->DeleteWhenUnused(previousVkSwapChain);
+        }
+
+        if (mVkSurface == VK_NULL_HANDLE) {
+            DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
+        }
+
+        VulkanSurfaceInfo surfaceInfo;
+        DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
+
+        DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
+
+        // TODO Choose config instead of hardcoding
+        VkSwapchainCreateInfoKHR createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.surface = mVkSurface;
+        createInfo.minImageCount = mConfig.targetImageCount;
+        createInfo.imageFormat = mConfig.format;
+        createInfo.imageColorSpace = mConfig.colorSpace;
+        createInfo.imageExtent = mConfig.extent;
+        createInfo.imageArrayLayers = 1;
+        createInfo.imageUsage = mConfig.usage;
+        createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = nullptr;
+        createInfo.preTransform = mConfig.transform;
+        createInfo.compositeAlpha = mConfig.alphaMode;
+        createInfo.presentMode = mConfig.presentMode;
+        createInfo.clipped = false;
+        createInfo.oldSwapchain = previousVkSwapChain;
+
+        DAWN_TRY(CheckVkSuccess(device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo,
+                                                              nullptr, &*mSwapChain),
+                                "CreateSwapChain"));
+
+        // Gather the swapchain's images. Implementations are allowed to return more images than the
+        // number we asked for.
+        uint32_t count = 0;
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
+            "GetSwapChainImages1"));
+
+        mSwapChainImages.resize(count);
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
+                                             AsVkArray(mSwapChainImages.data())),
+            "GetSwapChainImages2"));
+
+        return {};
+    }
+
+    ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
+        const VulkanSurfaceInfo& surfaceInfo) const {
+        Config config;
+
+        // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
+        // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
+        // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
+        // FIFO.
+        {
+            auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
+                                     VkPresentModeKHR target) -> bool {
+                return std::find(modes.begin(), modes.end(), target) != modes.end();
+            };
+
+            VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
+            const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
+                VK_PRESENT_MODE_IMMEDIATE_KHR,
+                VK_PRESENT_MODE_MAILBOX_KHR,
+                VK_PRESENT_MODE_FIFO_KHR,
+            };
+
+            // Go to the target mode.
+            size_t modeIndex = 0;
+            while (kPresentModeFallbacks[modeIndex] != targetMode) {
+                modeIndex++;
+            }
+
+            // Find the first available fallback.
+            while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
+                modeIndex++;
+            }
+
+            ASSERT(modeIndex < kPresentModeFallbacks.size());
+            config.presentMode = kPresentModeFallbacks[modeIndex];
+        }
+
+        // Choose the target width or do a blit.
+        if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
+            GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
+            GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
+            GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
+            config.needsBlit = true;
+        } else {
+            config.extent.width = GetWidth();
+            config.extent.height = GetHeight();
+        }
+
+        // Choose the target usage or do a blit.
+        VkImageUsageFlags targetUsages =
+            VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
+        VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
+        if (!IsSubset(targetUsages, supportedUsages)) {
+            config.needsBlit = true;
+        } else {
+            config.usage = targetUsages;
+            config.wgpuUsage = GetUsage();
+        }
+
+        // Only support BGRA8Unorm (and RGBA8Unorm on android) with SRGB color space for now.
+        config.wgpuFormat = GetFormat();
+        config.format = VulkanImageFormat(ToBackend(GetDevice()), config.wgpuFormat);
+        config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+
+        bool formatIsSupported = false;
+        for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
+            if (format.format == config.format && format.colorSpace == config.colorSpace) {
+                formatIsSupported = true;
+                break;
+            }
+        }
+        if (!formatIsSupported) {
+            return DAWN_INTERNAL_ERROR(absl::StrFormat(
+                "Vulkan SwapChain must support %s with sRGB colorspace.", config.wgpuFormat));
+        }
+
+        // Only the identity transform with opaque alpha is supported for now.
+        DAWN_INVALID_IF((surfaceInfo.capabilities.supportedTransforms &
+                         VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
+                        "Vulkan SwapChain must support the identity transform.");
+
+        config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+
+        config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+#if !defined(DAWN_PLATFORM_ANDROID)
+        DAWN_INVALID_IF((surfaceInfo.capabilities.supportedCompositeAlpha &
+                            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
+                        "Vulkan SwapChain must support opaque alpha.");
+#else
+        // TODO(dawn:286): investigate composite alpha for WebGPU native
+        VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
+            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+            VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+            VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+        };
+        for (uint32_t i = 0; i < 4; i++) {
+            if (surfaceInfo.capabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
+                config.alphaMode = compositeAlphaFlags[i];
+                break;
+            }
+        }
+#endif  // #if !defined(DAWN_PLATFORM_ANDROID)
+
+        // Choose the number of images for the swapchain= and clamp it to the min and max from the
+        // surface capabilities. maxImageCount = 0 means there is no limit.
+        ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
+               surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
+        uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
+
+        targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
+        if (surfaceInfo.capabilities.maxImageCount != 0) {
+            targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
+        }
+
+        config.targetImageCount = targetCount;
+
+        // Choose a valid config for the swapchain texture that will receive the blit.
+        if (config.needsBlit) {
+            // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
+            // the case it is very likely that the target extent works, but clamp it just in case.
+            // Using the target extent for the blit is better when possible so that texels don't
+            // get stretched. This case is exposed by having the special "-1" value in both
+            // dimensions of the extent.
+            constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
+            if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
+                surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
+                // extent = clamp(targetExtent, minExtent, maxExtent)
+                config.extent.width = GetWidth();
+                config.extent.width =
+                    std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
+                config.extent.width =
+                    std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
+
+                config.extent.height = GetHeight();
+                config.extent.height =
+                    std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
+                config.extent.height =
+                    std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
+            } else {
+                // If it is not an adaptable swapchain, just use the current extent for the blit
+                // texture.
+                config.extent = surfaceInfo.capabilities.currentExtent;
+            }
+
+            // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
+            // then we'll need to have a second fallback that uses a blit shader :(
+            if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
+                return DAWN_INTERNAL_ERROR(
+                    "SwapChain cannot fallback to a blit because of a missing "
+                    "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+            }
+            config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+            config.wgpuUsage = wgpu::TextureUsage::CopyDst;
+        }
+
+        return config;
+    }
+
+    MaybeError SwapChain::PresentImpl() {
+        Device* device = ToBackend(GetDevice());
+
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+        if (mConfig.needsBlit) {
+            // TODO ditto same as present below: eagerly transition the blit texture to CopySrc.
+            mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+                                             mBlitTexture->GetAllSubresources());
+            mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+                                         mTexture->GetAllSubresources());
+
+            VkImageBlit region;
+            region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+            region.srcSubresource.mipLevel = 0;
+            region.srcSubresource.baseArrayLayer = 0;
+            region.srcSubresource.layerCount = 1;
+            region.srcOffsets[0] = {0, 0, 0};
+            region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
+                                    static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
+
+            region.dstSubresource = region.srcSubresource;
+            region.dstOffsets[0] = {0, 0, 0};
+            region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
+                                    static_cast<int32_t>(mTexture->GetHeight()), 1};
+
+            device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
+                                    mBlitTexture->GetCurrentLayoutForSwapChain(),
+                                    mTexture->GetHandle(), mTexture->GetCurrentLayoutForSwapChain(),
+                                    1, &region, VK_FILTER_LINEAR);
+
+            // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
+            // instead of creating a new one every time. This will involve "un-destroying" the
+            // texture or making the blit texture "external".
+            mBlitTexture->APIDestroy();
+            mBlitTexture = nullptr;
+        }
+
+        // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+        // presentable texture to present at the end of submits that use them and ideally even
+        // folding that in the free layout transition at the end of render passes.
+        mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
+                                     mTexture->GetAllSubresources());
+
+        DAWN_TRY(device->SubmitPendingCommands());
+
+        // Assuming that the present queue is the same as the graphics queue, the proper
+        // synchronization has already been done on the queue so we don't need to wait on any
+        // semaphores.
+        // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
+        VkPresentInfoKHR presentInfo;
+        presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+        presentInfo.pNext = nullptr;
+        presentInfo.waitSemaphoreCount = 0;
+        presentInfo.pWaitSemaphores = nullptr;
+        presentInfo.swapchainCount = 1;
+        presentInfo.pSwapchains = &*mSwapChain;
+        presentInfo.pImageIndices = &mLastImageIndex;
+        presentInfo.pResults = nullptr;
+
+        // Free the texture before present so error handling doesn't skip that step.
+        mTexture->APIDestroy();
+        mTexture = nullptr;
+
+        VkResult result =
+            VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
+
+        switch (result) {
+            case VK_SUCCESS:
+            // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
+            // exactly, but can still be used to present to the surface successfully", so we
+            // can also treat it as a "success" error code of vkQueuePresentKHR().
+            case VK_SUBOPTIMAL_KHR:
+                return {};
+
+            // This present cannot be recovered. Re-initialize the VkSwapchain so that future
+            // presents work..
+            case VK_ERROR_OUT_OF_DATE_KHR:
+                return Initialize(this);
+
+            // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+            case VK_ERROR_SURFACE_LOST_KHR:
+            default:
+                return CheckVkSuccess(::VkResult(result), "QueuePresent");
+        }
+    }
+
+    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+        return GetCurrentTextureViewInternal();
+    }
+
+    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
+        Device* device = ToBackend(GetDevice());
+
+        // Transiently create a semaphore that will be signaled when the presentation engine is done
+        // with the swapchain image. Further operations on the image will wait for this semaphore.
+        VkSemaphoreCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+
+        VkSemaphore semaphore = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
+            "CreateSemaphore"));
+
+        VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
+            device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
+            VkFence{}, &mLastImageIndex));
+
+        if (result == VK_SUCCESS) {
+            // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
+            // used instead of directly on the recording context?
+            device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+        } else {
+            // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
+            // we get a chance.
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
+        }
+
+        switch (result) {
+            // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
+            // the swapchain is in a suboptimal state?
+            case VK_SUBOPTIMAL_KHR:
+            case VK_SUCCESS:
+                break;
+
+            case VK_ERROR_OUT_OF_DATE_KHR: {
+                // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
+                // swapchains always return that they are out of date.
+                if (isReentrant) {
+                    // TODO(crbug.com/dawn/269): Allow losing the surface instead?
+                    return DAWN_INTERNAL_ERROR(
+                        "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
+                }
+
+                // Re-initialize the VkSwapchain and try getting the texture again.
+                DAWN_TRY(Initialize(this));
+                return GetCurrentTextureViewInternal(true);
+            }
+
+            // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+            case VK_ERROR_SURFACE_LOST_KHR:
+            default:
+                DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
+        }
+
+        TextureDescriptor textureDesc;
+        textureDesc.size.width = mConfig.extent.width;
+        textureDesc.size.height = mConfig.extent.height;
+        textureDesc.format = mConfig.wgpuFormat;
+        textureDesc.usage = mConfig.wgpuUsage;
+
+        VkImage currentImage = mSwapChainImages[mLastImageIndex];
+        mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
+
+        // In the happy path we can use the swapchain image directly.
+        if (!mConfig.needsBlit) {
+            return mTexture->CreateView();
+        }
+
+        // The blit texture always perfectly matches what the user requested for the swapchain.
+        // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
+        TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
+        DAWN_TRY_ASSIGN(mBlitTexture,
+                        Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
+        return mBlitTexture->CreateView();
+    }
+
+    void SwapChain::DetachFromSurfaceImpl() {
+        if (mTexture != nullptr) {
+            mTexture->APIDestroy();
+            mTexture = nullptr;
+        }
+
+        if (mBlitTexture != nullptr) {
+            mBlitTexture->APIDestroy();
+            mBlitTexture = nullptr;
+        }
+
+        // The swapchain images are destroyed with the swapchain.
+        if (mSwapChain != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+            mSwapChain = VK_NULL_HANDLE;
+        }
+
+        if (mVkSurface != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
+            mVkSurface = VK_NULL_HANDLE;
+        }
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/SwapChainVk.h b/src/dawn/native/vulkan/SwapChainVk.h
new file mode 100644
index 0000000..bfab4b7
--- /dev/null
+++ b/src/dawn/native/vulkan/SwapChainVk.h
@@ -0,0 +1,98 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
+#define DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    class Device;
+    class Texture;
+    struct VulkanSurfaceInfo;
+
+    class OldSwapChain : public OldSwapChainBase {
+      public:
+        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        ~OldSwapChain() override;
+
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* texture) override;
+
+      private:
+        wgpu::TextureUsage mTextureUsage;
+    };
+
+    class SwapChain : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+        ~SwapChain() override;
+
+      private:
+        using NewSwapChainBase::NewSwapChainBase;
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+        void DestroyImpl() override;
+
+        struct Config {
+            // Information that's passed to vulkan swapchain creation.
+            VkPresentModeKHR presentMode;
+            VkExtent2D extent;
+            VkImageUsageFlags usage;
+            VkFormat format;
+            VkColorSpaceKHR colorSpace;
+            uint32_t targetImageCount;
+            VkSurfaceTransformFlagBitsKHR transform;
+            VkCompositeAlphaFlagBitsKHR alphaMode;
+
+            // Redundant information but as WebGPU enums to create the wgpu::Texture that
+            // encapsulates the native swapchain texture.
+            wgpu::TextureUsage wgpuUsage;
+            wgpu::TextureFormat wgpuFormat;
+
+            // Information about the blit workarounds we need to do (if any)
+            bool needsBlit = false;
+        };
+        ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewInternal(bool isReentrant = false);
+
+        // NewSwapChainBase implementation
+        MaybeError PresentImpl() override;
+        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+
+        Config mConfig;
+
+        VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
+        VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+        std::vector<VkImage> mSwapChainImages;
+        uint32_t mLastImageIndex = 0;
+
+        Ref<Texture> mBlitTexture;
+        Ref<Texture> mTexture;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
diff --git a/src/dawn/native/vulkan/TextureVk.cpp b/src/dawn/native/vulkan/TextureVk.cpp
new file mode 100644
index 0000000..e6adf14
--- /dev/null
+++ b/src/dawn/native/vulkan/TextureVk.cpp
@@ -0,0 +1,1430 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/TextureVk.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    namespace {
+        // Converts an Dawn texture dimension to a Vulkan image view type.
+        // Contrary to image types, image view types include arrayness and cubemapness
+        VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
+            switch (dimension) {
+                case wgpu::TextureViewDimension::e1D:
+                    return VK_IMAGE_VIEW_TYPE_1D;
+                case wgpu::TextureViewDimension::e2D:
+                    return VK_IMAGE_VIEW_TYPE_2D;
+                case wgpu::TextureViewDimension::e2DArray:
+                    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+                case wgpu::TextureViewDimension::Cube:
+                    return VK_IMAGE_VIEW_TYPE_CUBE;
+                case wgpu::TextureViewDimension::CubeArray:
+                    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+                case wgpu::TextureViewDimension::e3D:
+                    return VK_IMAGE_VIEW_TYPE_3D;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        // Computes which vulkan access type could be required for the given Dawn usage.
+        // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
+        // the previous usage is readonly because an execution dependency is sufficient.
+        VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
+            VkAccessFlags flags = 0;
+
+            if (usage & wgpu::TextureUsage::CopySrc) {
+                flags |= VK_ACCESS_TRANSFER_READ_BIT;
+            }
+            if (usage & wgpu::TextureUsage::CopyDst) {
+                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+            }
+            if (usage & wgpu::TextureUsage::TextureBinding) {
+                flags |= VK_ACCESS_SHADER_READ_BIT;
+            }
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+            }
+            if (usage & wgpu::TextureUsage::RenderAttachment) {
+                if (format.HasDepthOrStencil()) {
+                    flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+                } else {
+                    flags |=
+                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+                }
+            }
+            if (usage & kReadOnlyRenderAttachment) {
+                flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+            }
+            if (usage & kPresentTextureUsage) {
+                // The present usage is only used internally by the swapchain and is never used in
+                // combination with other usages.
+                ASSERT(usage == kPresentTextureUsage);
+                // The Vulkan spec has the following note:
+                //
+                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
+                //   automatic visibility operations). To achieve this, the dstAccessMask member of
+                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+                //
+                // So on the transition to Present we don't need an access flag. The other
+                // direction doesn't matter because swapchain textures always start a new frame
+                // as uninitialized.
+                flags |= 0;
+            }
+
+            return flags;
+        }
+
+        // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
+        VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
+            VkPipelineStageFlags flags = 0;
+
+            if (usage == wgpu::TextureUsage::None) {
+                // This only happens when a texture is initially created (and for srcAccessMask) in
+                // which case there is no need to wait on anything to stop accessing this texture.
+                return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+            }
+            if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
+                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+            }
+            if (usage & wgpu::TextureUsage::TextureBinding) {
+                // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
+                // introducing FS -> VS dependencies that would prevent parallelization on tiler
+                // GPUs
+                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+            }
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                flags |=
+                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+            }
+            if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
+                if (format.HasDepthOrStencil()) {
+                    flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+                } else {
+                    flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+                }
+            }
+            if (usage & kPresentTextureUsage) {
+                // The present usage is only used internally by the swapchain and is never used in
+                // combination with other usages.
+                ASSERT(usage == kPresentTextureUsage);
+                // The Vulkan spec has the following note:
+                //
+                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
+                //   automatic visibility operations). To achieve this, the dstAccessMask member of
+                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+                //
+                // So on the transition to Present we use the "bottom of pipe" stage. The other
+                // direction doesn't matter because swapchain textures always start a new frame
+                // as uninitialized.
+                flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+            }
+
+            // A zero value isn't a valid pipeline stage mask
+            ASSERT(flags != 0);
+            return flags;
+        }
+
+        VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
+                                                wgpu::TextureUsage lastUsage,
+                                                wgpu::TextureUsage usage,
+                                                const SubresourceRange& range) {
+            VkImageMemoryBarrier barrier;
+            barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+            barrier.pNext = nullptr;
+            barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
+            barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
+            barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
+            barrier.newLayout = VulkanImageLayout(texture, usage);
+            barrier.image = texture->GetHandle();
+            barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
+            barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
+            barrier.subresourceRange.levelCount = range.levelCount;
+            barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
+            barrier.subresourceRange.layerCount = range.layerCount;
+
+            barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+            barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+            return barrier;
+        }
+
+        void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
+            const Extent3D& size = texture.GetSize();
+
+            info->mipLevels = texture.GetNumMipLevels();
+            info->samples = VulkanSampleCount(texture.GetSampleCount());
+
+            // Fill in the image type, and paper over differences in how the array layer count is
+            // specified between WebGPU and Vulkan.
+            switch (texture.GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    info->imageType = VK_IMAGE_TYPE_1D;
+                    info->extent = {size.width, 1, 1};
+                    info->arrayLayers = 1;
+                    break;
+
+                case wgpu::TextureDimension::e2D:
+                    info->imageType = VK_IMAGE_TYPE_2D;
+                    info->extent = {size.width, size.height, 1};
+                    info->arrayLayers = size.depthOrArrayLayers;
+                    break;
+
+                case wgpu::TextureDimension::e3D:
+                    info->imageType = VK_IMAGE_TYPE_3D;
+                    info->extent = {size.width, size.height, size.depthOrArrayLayers};
+                    info->arrayLayers = 1;
+                    break;
+            }
+        }
+
+    }  // namespace
+
+    // Converts Dawn texture format to Vulkan formats.
+    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8Unorm:
+                return VK_FORMAT_R8_UNORM;
+            case wgpu::TextureFormat::R8Snorm:
+                return VK_FORMAT_R8_SNORM;
+            case wgpu::TextureFormat::R8Uint:
+                return VK_FORMAT_R8_UINT;
+            case wgpu::TextureFormat::R8Sint:
+                return VK_FORMAT_R8_SINT;
+
+            case wgpu::TextureFormat::R16Uint:
+                return VK_FORMAT_R16_UINT;
+            case wgpu::TextureFormat::R16Sint:
+                return VK_FORMAT_R16_SINT;
+            case wgpu::TextureFormat::R16Float:
+                return VK_FORMAT_R16_SFLOAT;
+            case wgpu::TextureFormat::RG8Unorm:
+                return VK_FORMAT_R8G8_UNORM;
+            case wgpu::TextureFormat::RG8Snorm:
+                return VK_FORMAT_R8G8_SNORM;
+            case wgpu::TextureFormat::RG8Uint:
+                return VK_FORMAT_R8G8_UINT;
+            case wgpu::TextureFormat::RG8Sint:
+                return VK_FORMAT_R8G8_SINT;
+
+            case wgpu::TextureFormat::R32Uint:
+                return VK_FORMAT_R32_UINT;
+            case wgpu::TextureFormat::R32Sint:
+                return VK_FORMAT_R32_SINT;
+            case wgpu::TextureFormat::R32Float:
+                return VK_FORMAT_R32_SFLOAT;
+            case wgpu::TextureFormat::RG16Uint:
+                return VK_FORMAT_R16G16_UINT;
+            case wgpu::TextureFormat::RG16Sint:
+                return VK_FORMAT_R16G16_SINT;
+            case wgpu::TextureFormat::RG16Float:
+                return VK_FORMAT_R16G16_SFLOAT;
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return VK_FORMAT_R8G8B8A8_UNORM;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return VK_FORMAT_R8G8B8A8_SRGB;
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return VK_FORMAT_R8G8B8A8_SNORM;
+            case wgpu::TextureFormat::RGBA8Uint:
+                return VK_FORMAT_R8G8B8A8_UINT;
+            case wgpu::TextureFormat::RGBA8Sint:
+                return VK_FORMAT_R8G8B8A8_SINT;
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return VK_FORMAT_B8G8R8A8_UNORM;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return VK_FORMAT_B8G8R8A8_SRGB;
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+                return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
+
+            case wgpu::TextureFormat::RG32Uint:
+                return VK_FORMAT_R32G32_UINT;
+            case wgpu::TextureFormat::RG32Sint:
+                return VK_FORMAT_R32G32_SINT;
+            case wgpu::TextureFormat::RG32Float:
+                return VK_FORMAT_R32G32_SFLOAT;
+            case wgpu::TextureFormat::RGBA16Uint:
+                return VK_FORMAT_R16G16B16A16_UINT;
+            case wgpu::TextureFormat::RGBA16Sint:
+                return VK_FORMAT_R16G16B16A16_SINT;
+            case wgpu::TextureFormat::RGBA16Float:
+                return VK_FORMAT_R16G16B16A16_SFLOAT;
+
+            case wgpu::TextureFormat::RGBA32Uint:
+                return VK_FORMAT_R32G32B32A32_UINT;
+            case wgpu::TextureFormat::RGBA32Sint:
+                return VK_FORMAT_R32G32B32A32_SINT;
+            case wgpu::TextureFormat::RGBA32Float:
+                return VK_FORMAT_R32G32B32A32_SFLOAT;
+
+            case wgpu::TextureFormat::Depth16Unorm:
+                return VK_FORMAT_D16_UNORM;
+            case wgpu::TextureFormat::Depth32Float:
+                return VK_FORMAT_D32_SFLOAT;
+            case wgpu::TextureFormat::Depth24Plus:
+                return VK_FORMAT_D32_SFLOAT;
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+                // Depth24PlusStencil8 maps to either of these two formats because only requires
+                // that one of the two be present. The VulkanUseD32S8 toggle combines the wish of
+                // the environment, default to using D32S8, and availability information so we know
+                // that the format is available.
+                if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
+                    return VK_FORMAT_D32_SFLOAT_S8_UINT;
+                } else {
+                    return VK_FORMAT_D24_UNORM_S8_UINT;
+                }
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                return VK_FORMAT_D24_UNORM_S8_UINT;
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                return VK_FORMAT_D32_SFLOAT_S8_UINT;
+            case wgpu::TextureFormat::Stencil8:
+                // Try to use the stencil8 format if possible, otherwise use whatever format we can
+                // use that contains a stencil8 component.
+                if (device->IsToggleEnabled(Toggle::VulkanUseS8)) {
+                    return VK_FORMAT_S8_UINT;
+                } else {
+                    return VulkanImageFormat(device, wgpu::TextureFormat::Depth24PlusStencil8);
+                }
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+                return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+                return VK_FORMAT_BC2_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return VK_FORMAT_BC2_SRGB_BLOCK;
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+                return VK_FORMAT_BC3_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return VK_FORMAT_BC3_SRGB_BLOCK;
+            case wgpu::TextureFormat::BC4RSnorm:
+                return VK_FORMAT_BC4_SNORM_BLOCK;
+            case wgpu::TextureFormat::BC4RUnorm:
+                return VK_FORMAT_BC4_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC5RGSnorm:
+                return VK_FORMAT_BC5_SNORM_BLOCK;
+            case wgpu::TextureFormat::BC5RGUnorm:
+                return VK_FORMAT_BC5_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC6HRGBFloat:
+                return VK_FORMAT_BC6H_SFLOAT_BLOCK;
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return VK_FORMAT_BC6H_UFLOAT_BLOCK;
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return VK_FORMAT_BC7_UNORM_BLOCK;
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return VK_FORMAT_BC7_SRGB_BLOCK;
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+                return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+                return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+                return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+                return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+                return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
+            case wgpu::TextureFormat::EACR11Unorm:
+                return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+            case wgpu::TextureFormat::EACR11Snorm:
+                return VK_FORMAT_EAC_R11_SNORM_BLOCK;
+            case wgpu::TextureFormat::EACRG11Unorm:
+                return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+                return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+                return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+                return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+                return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+                return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+                return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+                return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+                return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+                return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+                return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+                return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+                return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+                return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+                return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+                return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+                return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+                return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+                return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+                return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+                return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+                return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+                return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
+
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+
+            case wgpu::TextureFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
+    // between color and depth attachment usages.
+    VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
+        VkImageUsageFlags flags = 0;
+
+        if (usage & wgpu::TextureUsage::CopySrc) {
+            flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+        }
+        if (usage & wgpu::TextureUsage::CopyDst) {
+            flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+        }
+        if (usage & wgpu::TextureUsage::TextureBinding) {
+            flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+            // If the sampled texture is a depth/stencil texture, its image layout will be set
+            // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
+            // attachment. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
+            if (format.HasDepthOrStencil() && format.isRenderable) {
+                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+            }
+        }
+        if (usage & wgpu::TextureUsage::StorageBinding) {
+            flags |= VK_IMAGE_USAGE_STORAGE_BIT;
+        }
+        if (usage & wgpu::TextureUsage::RenderAttachment) {
+            if (format.HasDepthOrStencil()) {
+                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+            } else {
+                flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+            }
+        }
+        if (usage & kReadOnlyRenderAttachment) {
+            flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+        }
+
+        return flags;
+    }
+
+    // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
+    // layout must match the layout given to various Vulkan operations as well as the layout given
+    // to descriptor set writes.
+    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
+        if (usage == wgpu::TextureUsage::None) {
+            return VK_IMAGE_LAYOUT_UNDEFINED;
+        }
+
+        if (!wgpu::HasZeroOrOneBits(usage)) {
+            // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
+            // appear we might need additional special-casing.
+            ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));
+
+            // WebGPU requires both aspects to be readonly if the attachment's format does have
+            // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
+            // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
+            // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
+            // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
+            // it, and WebGPU doesn't need that currently.
+            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
+        }
+
+        // Usage has a single bit so we can switch on its value directly.
+        switch (usage) {
+            case wgpu::TextureUsage::CopyDst:
+                return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+                // The layout returned here is the one that will be used at bindgroup creation time.
+                // The bindgrpup's layout must match the runtime layout of the image when it is
+                // used via the bindgroup, but we don't know exactly what it will be yet. So we
+                // have to prepare for the pessimistic case.
+            case wgpu::TextureUsage::TextureBinding:
+                // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of texture at the
+                // same time.
+                if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
+                    return VK_IMAGE_LAYOUT_GENERAL;
+                }
+                // The sampled image can be used as a readonly depth/stencil attachment at the same
+                // time if it is a depth/stencil renderable format, so the image layout need to be
+                // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
+                if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
+                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
+                }
+                return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+                // Vulkan texture copy functions require the image to be in _one_  known layout.
+                // Depending on whether parts of the texture have been transitioned to only CopySrc
+                // or a combination with something else, the texture could be in a combination of
+                // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
+                // GENERAL.
+                // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
+                // once and can instead track subresources so we should lift this limitation.
+            case wgpu::TextureUsage::CopySrc:
+                // Read-only and write-only storage textures must use general layout because load
+                // and store operations on storage images can only be done on the images in
+                // VK_IMAGE_LAYOUT_GENERAL layout.
+            case wgpu::TextureUsage::StorageBinding:
+                return VK_IMAGE_LAYOUT_GENERAL;
+
+            case wgpu::TextureUsage::RenderAttachment:
+                if (texture->GetFormat().HasDepthOrStencil()) {
+                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+                } else {
+                    return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+                }
+
+            case kReadOnlyRenderAttachment:
+                return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
+
+            case kPresentTextureUsage:
+                return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+            case wgpu::TextureUsage::None:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
+        switch (sampleCount) {
+            case 1:
+                return VK_SAMPLE_COUNT_1_BIT;
+            case 4:
+                return VK_SAMPLE_COUNT_4_BIT;
+        }
+        UNREACHABLE();
+    }
+
+    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
+                                               const TextureDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "Texture dimension (%s) is not %s.", descriptor->dimension,
+                        wgpu::TextureDimension::e2D);
+
+        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                        descriptor->sampleCount);
+
+        return {};
+    }
+
+    bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+                                const VkImageCreateInfo& imageCreateInfo) {
+        ASSERT(device);
+
+        VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
+        VkImageFormatProperties properties;
+        if (device->fn.GetPhysicalDeviceImageFormatProperties(
+                physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
+                imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
+                &properties) != VK_SUCCESS) {
+            UNREACHABLE();
+        }
+
+        return properties.sampleCounts & imageCreateInfo.samples;
+    }
+
+    // static
+    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                                const TextureDescriptor* descriptor,
+                                                VkImageUsageFlags extraUsages) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+        DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
+        return std::move(texture);
+    }
+
+    // static
+    ResultOrError<Texture*> Texture::CreateFromExternal(
+        Device* device,
+        const ExternalImageDescriptorVk* descriptor,
+        const TextureDescriptor* textureDescriptor,
+        external_memory::Service* externalMemoryService) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
+        DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
+        return texture.Detach();
+    }
+
+    // static
+    Ref<Texture> Texture::CreateForSwapChain(Device* device,
+                                             const TextureDescriptor* descriptor,
+                                             VkImage nativeImage) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+        texture->InitializeForSwapChain(nativeImage);
+        return texture;
+    }
+
+    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+        : TextureBase(device, descriptor, state),
+          // A usage of none will make sure the texture is transitioned before its first use as
+          // required by the Vulkan spec.
+          mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+              (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
+                                                   : GetFormat().aspects),
+              GetArrayLayers(),
+              GetNumMipLevels(),
+              wgpu::TextureUsage::None)) {
+    }
+
+    MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
+        Device* device = ToBackend(GetDevice());
+
+        // Create the Vulkan image "container". We don't need to check that the format supports the
+        // combination of sample, usage etc. because validation should have been done in the Dawn
+        // frontend already based on the minimum supported formats in the Vulkan spec
+        VkImageCreateInfo createInfo = {};
+        FillVulkanCreateInfoSizesAndType(*this, &createInfo);
+
+        PNextChainBuilder createInfoChain(&createInfo);
+
+        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+        createInfo.format = VulkanImageFormat(device, GetFormat().format);
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
+        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+        VkImageFormatListCreateInfo imageFormatListInfo = {};
+        std::vector<VkFormat> viewFormats;
+        if (GetViewFormats().any()) {
+            createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+                createInfoChain.Add(&imageFormatListInfo,
+                                    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+                viewFormats.push_back(VulkanImageFormat(device, GetFormat().format));
+                for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+                    const Format& viewFormat = device->GetValidInternalFormat(i);
+                    viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
+                }
+
+                imageFormatListInfo.viewFormatCount = viewFormats.size();
+                imageFormatListInfo.pViewFormats = viewFormats.data();
+            }
+        }
+
+        ASSERT(IsSampleCountSupported(device, createInfo));
+
+        if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
+            createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+        }
+
+        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
+        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
+        // also required for the implementation of robust resource initialization.
+        createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "CreateImage"));
+
+        // Create the image memory and associate it with the container
+        VkMemoryRequirements requirements;
+        device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+
+        DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
+                                               requirements, MemoryKind::Opaque));
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
+                                       ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+                                       mMemoryAllocation.GetOffset()),
+            "BindImageMemory"));
+
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
+                                  GetAllSubresources(), TextureBase::ClearValue::NonZero));
+        }
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    // Internally managed, but imported from external handle
+    MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+                                               external_memory::Service* externalMemoryService) {
+        Device* device = ToBackend(GetDevice());
+        VkFormat format = VulkanImageFormat(device, GetFormat().format);
+        VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
+        DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
+                                                                    &mSupportsDisjointVkImage),
+                        "Creating an image from external memory is not supported.");
+        // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
+        // multiplanar formats, so we need to correct it to Color here.
+        if (ShouldCombineMultiPlaneBarriers()) {
+            mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+                ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
+                wgpu::TextureUsage::None);
+        }
+
+        mExternalState = ExternalState::PendingAcquire;
+
+        mPendingAcquireOldLayout = descriptor->releasedOldLayout;
+        mPendingAcquireNewLayout = descriptor->releasedNewLayout;
+
+        VkImageCreateInfo baseCreateInfo = {};
+        FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
+
+        PNextChainBuilder createInfoChain(&baseCreateInfo);
+
+        baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+        baseCreateInfo.format = format;
+        baseCreateInfo.usage = usage;
+        baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        baseCreateInfo.queueFamilyIndexCount = 0;
+        baseCreateInfo.pQueueFamilyIndices = nullptr;
+
+        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
+        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
+        // also required for the implementation of robust resource initialization.
+        baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+        VkImageFormatListCreateInfo imageFormatListInfo = {};
+        std::vector<VkFormat> viewFormats;
+        if (GetViewFormats().any()) {
+            baseCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+                createInfoChain.Add(&imageFormatListInfo,
+                                    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+                for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+                    const Format& viewFormat = device->GetValidInternalFormat(i);
+                    viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
+                }
+
+                imageFormatListInfo.viewFormatCount = viewFormats.size();
+                imageFormatListInfo.pViewFormats = viewFormats.data();
+            }
+        }
+
+        DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
+
+        SetLabelHelper("Dawn_ExternalTexture");
+
+        return {};
+    }
+
+    void Texture::InitializeForSwapChain(VkImage nativeImage) {
+        mHandle = nativeImage;
+        SetLabelHelper("Dawn_SwapChainTexture");
+    }
+
+    MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                           VkSemaphore signalSemaphore,
+                                           VkDeviceMemory externalMemoryAllocation,
+                                           std::vector<VkSemaphore> waitSemaphores) {
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
+            "BindImageMemory (external)"));
+
+        // Don't clear imported texture if already initialized
+        if (descriptor->isInitialized) {
+            SetIsSubresourceContentInitialized(true, GetAllSubresources());
+        }
+
+        // Success, acquire all the external objects.
+        mExternalAllocation = externalMemoryAllocation;
+        mSignalSemaphore = signalSemaphore;
+        mWaitRequirements = std::move(waitSemaphores);
+        return {};
+    }
+
+    MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
+                                              VkSemaphore* signalSemaphore,
+                                              VkImageLayout* releasedOldLayout,
+                                              VkImageLayout* releasedNewLayout) {
+        Device* device = ToBackend(GetDevice());
+
+        DAWN_INVALID_IF(mExternalState == ExternalState::Released,
+                        "Can't export a signal semaphore from signaled texture %s.", this);
+
+        DAWN_INVALID_IF(
+            mExternalAllocation == VK_NULL_HANDLE,
+            "Can't export a signal semaphore from destroyed or non-external texture %s.", this);
+
+        ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
+
+        // Release the texture
+        mExternalState = ExternalState::Released;
+
+        Aspect aspects = ComputeAspectsForSubresourceStorage();
+        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+        wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
+
+        VkImageMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.image = GetHandle();
+        barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
+        barrier.subresourceRange.baseMipLevel = 0;
+        barrier.subresourceRange.levelCount = 1;
+        barrier.subresourceRange.baseArrayLayer = 0;
+        barrier.subresourceRange.layerCount = 1;
+
+        barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
+        barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
+                                    // specify the dst access mask on the importing queue.
+
+        barrier.oldLayout = VulkanImageLayout(this, usage);
+        if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
+            // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
+            // special value to indicate no layout transition should be done.
+            barrier.newLayout = barrier.oldLayout;
+        } else {
+            barrier.newLayout = desiredLayout;
+        }
+
+        barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+
+        VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
+        VkPipelineStageFlags dstStages =
+            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
+                                                // the texture, so pass
+                                                // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
+                                                // the barrier happens-before any usage in the
+                                                // importing queue.
+
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                      nullptr, 0, nullptr, 1, &barrier);
+
+        // Queue submit to signal we are done with the texture
+        recordingContext->signalSemaphores.push_back(mSignalSemaphore);
+        DAWN_TRY(device->SubmitPendingCommands());
+
+        // Write out the layouts and signal semaphore
+        *releasedOldLayout = barrier.oldLayout;
+        *releasedNewLayout = barrier.newLayout;
+        *signalSemaphore = mSignalSemaphore;
+
+        mSignalSemaphore = VK_NULL_HANDLE;
+
+        // Destroy the texture so it can't be used again
+        Destroy();
+        return {};
+    }
+
+    Texture::~Texture() {
+    }
+
+    void Texture::SetLabelHelper(const char* prefix) {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE,
+                     reinterpret_cast<uint64_t&>(mHandle), prefix, GetLabel());
+    }
+
+    void Texture::SetLabelImpl() {
+        SetLabelHelper("Dawn_InternalTexture");
+    }
+
+    void Texture::DestroyImpl() {
+        if (GetTextureState() == TextureState::OwnedInternal) {
+            Device* device = ToBackend(GetDevice());
+
+            // For textures created from a VkImage, the allocation if kInvalid so the Device knows
+            // to skip the deallocation of the (absence of) VkDeviceMemory.
+            device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+
+            if (mHandle != VK_NULL_HANDLE) {
+                device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            }
+
+            if (mExternalAllocation != VK_NULL_HANDLE) {
+                device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
+            }
+
+            mHandle = VK_NULL_HANDLE;
+            mExternalAllocation = VK_NULL_HANDLE;
+            // If a signal semaphore exists it should be requested before we delete the texture
+            ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
+        }
+        // For Vulkan, we currently run the base destruction code after the internal changes because
+        // of the dependency on the texture state which the base code overwrites too early.
+        TextureBase::DestroyImpl();
+    }
+
+    VkImage Texture::GetHandle() const {
+        return mHandle;
+    }
+
+    void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                                  std::vector<VkImageMemoryBarrier>* barriers,
+                                                  size_t transitionBarrierStart) {
+        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+
+        // transitionBarrierStart specify the index where barriers for current transition start in
+        // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
+        // have already added into the vector during current transition.
+        ASSERT(barriers->size() - transitionBarrierStart <= 1);
+
+        if (mExternalState == ExternalState::PendingAcquire) {
+            if (barriers->size() == transitionBarrierStart) {
+                barriers->push_back(
+                    BuildMemoryBarrier(this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+                                       SubresourceRange::SingleMipAndLayer(
+                                           0, 0, ComputeAspectsForSubresourceStorage())));
+            }
+
+            VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
+            // Transfer texture from external queue to graphics queue
+            barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+            barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
+
+            // srcAccessMask means nothing when importing. Queue transfers require a barrier on
+            // both the importing and exporting queues. The exporting queue should have specified
+            // this.
+            barrier->srcAccessMask = 0;
+
+            // This should be the first barrier after import.
+            ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
+
+            // Save the desired layout. We may need to transition through an intermediate
+            // |mPendingAcquireLayout| first.
+            VkImageLayout desiredLayout = barrier->newLayout;
+
+            bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
+
+            // We don't care about the pending old layout if the texture is uninitialized. The
+            // driver is free to discard it. Also it is invalid to transition to layout UNDEFINED or
+            // PREINITIALIZED. If the embedder provided no new layout, or we don't care about the
+            // previous contents, we can skip the layout transition.
+            // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkImageMemoryBarrier-newLayout-01198
+            if (!isInitialized || mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
+                mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+                barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+                barrier->newLayout = desiredLayout;
+            } else {
+                barrier->oldLayout = mPendingAcquireOldLayout;
+                barrier->newLayout = mPendingAcquireNewLayout;
+            }
+
+            // If these are unequal, we need an another barrier to transition the layout.
+            if (barrier->newLayout != desiredLayout) {
+                VkImageMemoryBarrier layoutBarrier;
+                layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+                layoutBarrier.pNext = nullptr;
+                layoutBarrier.image = GetHandle();
+                layoutBarrier.subresourceRange = barrier->subresourceRange;
+
+                // Transition from the acquired new layout to the desired layout.
+                layoutBarrier.oldLayout = barrier->newLayout;
+                layoutBarrier.newLayout = desiredLayout;
+
+                // We already transitioned these.
+                layoutBarrier.srcAccessMask = 0;
+                layoutBarrier.dstAccessMask = 0;
+                layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+                layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+
+                barriers->push_back(layoutBarrier);
+            }
+
+            mExternalState = ExternalState::Acquired;
+        }
+
+        mLastExternalState = mExternalState;
+
+        recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
+                                                mWaitRequirements.begin(), mWaitRequirements.end());
+        mWaitRequirements.clear();
+    }
+
+    bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
+        // Reuse the texture directly and avoid encoding barriers when it isn't needed.
+        bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
+        if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
+            return true;
+        }
+        return false;
+    }
+
+    // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
+    // this limitation by combining the usages in the two planes of `textureUsages` into a
+    // single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
+    // for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
+    bool Texture::ShouldCombineDepthStencilBarriers() const {
+        // If the Stencil8 format is being emulated then memory barriers also need to include
+        // the depth aspect. (See: crbug.com/dawn/1331)
+        if (GetFormat().format == wgpu::TextureFormat::Stencil8 &&
+            !GetDevice()->IsToggleEnabled(Toggle::VulkanUseS8)) {
+            return true;
+        }
+        return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
+    }
+
+    // The Vulkan spec requires:
+    // "If image has a single-plane color format or is not disjoint, then the aspectMask member of
+    // subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT.".
+    // For multi-planar formats, we currently only support import them in non-disjoint way.
+    bool Texture::ShouldCombineMultiPlaneBarriers() const {
+        // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
+        ASSERT(!mSupportsDisjointVkImage);
+        return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
+    }
+
+    Aspect Texture::ComputeAspectsForSubresourceStorage() const {
+        if (ShouldCombineDepthStencilBarriers()) {
+            return Aspect::CombinedDepthStencil;
+        }
+        // Force to use Aspect::Color for Aspect::Plane0/1.
+        if (ShouldCombineMultiPlaneBarriers()) {
+            return Aspect::Color;
+        }
+        return GetFormat().aspects;
+    }
+
+    void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                         const TextureSubresourceUsage& textureUsages,
+                                         std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                         VkPipelineStageFlags* srcStages,
+                                         VkPipelineStageFlags* dstStages) {
+        if (ShouldCombineBarriers()) {
+            Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
+            SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
+                                                                  GetNumMipLevels());
+            textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                SubresourceRange updateRange = range;
+                updateRange.aspects = combinedAspect;
+
+                combinedUsages.Update(
+                    updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
+                        *combinedUsage |= usage;
+                    });
+            });
+
+            TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
+                                       dstStages);
+        } else {
+            TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
+                                       dstStages);
+        }
+    }
+
+    void Texture::TransitionUsageForPassImpl(
+        CommandRecordingContext* recordingContext,
+        const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        size_t transitionBarrierStart = imageBarriers->size();
+        const Format& format = GetFormat();
+
+        wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
+        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+
+        mSubresourceLastUsages->Merge(
+            subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
+                                   const wgpu::TextureUsage& newUsage) {
+                if (newUsage == wgpu::TextureUsage::None ||
+                    CanReuseWithoutBarrier(*lastUsage, newUsage)) {
+                    return;
+                }
+
+                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
+
+                allLastUsages |= *lastUsage;
+                allUsages |= newUsage;
+
+                *lastUsage = newUsage;
+            });
+
+        if (mExternalState != ExternalState::InternalOnly) {
+            TweakTransitionForExternalUsage(recordingContext, imageBarriers,
+                                            transitionBarrierStart);
+        }
+
+        *srcStages |= VulkanPipelineStage(allLastUsages, format);
+        *dstStages |= VulkanPipelineStage(allUsages, format);
+    }
+
+    void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                     wgpu::TextureUsage usage,
+                                     const SubresourceRange& range) {
+        std::vector<VkImageMemoryBarrier> barriers;
+
+        VkPipelineStageFlags srcStages = 0;
+        VkPipelineStageFlags dstStages = 0;
+
+        TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
+
+        if (mExternalState != ExternalState::InternalOnly) {
+            TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
+        }
+
+        if (!barriers.empty()) {
+            ASSERT(srcStages != 0 && dstStages != 0);
+            ToBackend(GetDevice())
+                ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                        nullptr, 0, nullptr, barriers.size(), barriers.data());
+        }
+    }
+
+    void Texture::TransitionUsageAndGetResourceBarrier(
+        wgpu::TextureUsage usage,
+        const SubresourceRange& range,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        if (ShouldCombineBarriers()) {
+            SubresourceRange updatedRange = range;
+            updatedRange.aspects = ComputeAspectsForSubresourceStorage();
+            TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
+                                                     dstStages);
+        } else {
+            TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages,
+                                                     dstStages);
+        }
+    }
+
+    void Texture::TransitionUsageAndGetResourceBarrierImpl(
+        wgpu::TextureUsage usage,
+        const SubresourceRange& range,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        ASSERT(imageBarriers != nullptr);
+        const Format& format = GetFormat();
+
+        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+        mSubresourceLastUsages->Update(
+            range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
+                if (CanReuseWithoutBarrier(*lastUsage, usage)) {
+                    return;
+                }
+
+                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
+
+                allLastUsages |= *lastUsage;
+                *lastUsage = usage;
+            });
+
+        *srcStages |= VulkanPipelineStage(allLastUsages, format);
+        *dstStages |= VulkanPipelineStage(usage, format);
+    }
+
+    MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
+                                     const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        Device* device = ToBackend(GetDevice());
+
+        const bool isZero = clearValue == TextureBase::ClearValue::Zero;
+        uint32_t uClearColor = isZero ? 0 : 1;
+        int32_t sClearColor = isZero ? 0 : 1;
+        float fClearColor = isZero ? 0.f : 1.f;
+
+        TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+
+        VkImageSubresourceRange imageRange = {};
+        imageRange.levelCount = 1;
+        imageRange.layerCount = 1;
+
+        if (GetFormat().isCompressed) {
+            if (range.aspects == Aspect::None) {
+                return {};
+            }
+            // need to clear the texture with a copy from buffer
+            ASSERT(range.aspects == Aspect::Color);
+            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
+
+            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+            uint32_t bytesPerRow =
+                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+                      device->GetOptimalBytesPerRowAlignment());
+            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+                                  largestMipSize.depthOrArrayLayers;
+            DynamicUploader* uploader = device->GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                               blockInfo.byteSize));
+            memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
+
+            std::vector<VkBufferImageCopy> regions;
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                Extent3D copySize = GetMipLevelPhysicalSize(level);
+                imageRange.baseMipLevel = level;
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    TextureDataLayout dataLayout;
+                    dataLayout.offset = uploadHandle.startOffset;
+                    dataLayout.rowsPerImage = copySize.height / blockInfo.height;
+                    dataLayout.bytesPerRow = bytesPerRow;
+                    TextureCopy textureCopy;
+                    textureCopy.aspect = range.aspects;
+                    textureCopy.mipLevel = level;
+                    textureCopy.origin = {0, 0, layer};
+                    textureCopy.texture = this;
+
+                    regions.push_back(
+                        ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
+                }
+            }
+            device->fn.CmdCopyBufferToImage(
+                recordingContext->commandBuffer,
+                ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
+                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
+        } else {
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                imageRange.baseMipLevel = level;
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    Aspect aspects = Aspect::None;
+                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+                        aspects |= aspect;
+                    }
+
+                    if (aspects == Aspect::None) {
+                        continue;
+                    }
+
+                    imageRange.aspectMask = VulkanAspectMask(aspects);
+                    imageRange.baseArrayLayer = layer;
+
+                    if (aspects &
+                        (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
+                        VkClearDepthStencilValue clearDepthStencilValue[1];
+                        clearDepthStencilValue[0].depth = fClearColor;
+                        clearDepthStencilValue[0].stencil = uClearColor;
+                        device->fn.CmdClearDepthStencilImage(
+                            recordingContext->commandBuffer, GetHandle(),
+                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
+                            &imageRange);
+                    } else {
+                        ASSERT(aspects == Aspect::Color);
+                        VkClearColorValue clearColorValue;
+                        switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                            case wgpu::TextureComponentType::Float:
+                                clearColorValue.float32[0] = fClearColor;
+                                clearColorValue.float32[1] = fClearColor;
+                                clearColorValue.float32[2] = fClearColor;
+                                clearColorValue.float32[3] = fClearColor;
+                                break;
+                            case wgpu::TextureComponentType::Sint:
+                                clearColorValue.int32[0] = sClearColor;
+                                clearColorValue.int32[1] = sClearColor;
+                                clearColorValue.int32[2] = sClearColor;
+                                clearColorValue.int32[3] = sClearColor;
+                                break;
+                            case wgpu::TextureComponentType::Uint:
+                                clearColorValue.uint32[0] = uClearColor;
+                                clearColorValue.uint32[1] = uClearColor;
+                                clearColorValue.uint32[2] = uClearColor;
+                                clearColorValue.uint32[3] = uClearColor;
+                                break;
+                            case wgpu::TextureComponentType::DepthComparison:
+                                UNREACHABLE();
+                        }
+                        device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
+                                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                      &clearColorValue, 1, &imageRange);
+                    }
+                }
+            }
+        }
+
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            device->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                      const SubresourceRange& range) {
+        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (!IsSubresourceContentInitialized(range)) {
+            // If subresource has not been initialized, clear it to black as it could contain dirty
+            // bits from recycled memory
+            GetDevice()->ConsumedError(
+                ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+        }
+    }
+
+    VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
+        ASSERT(GetFormat().aspects == Aspect::Color);
+        return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
+    }
+
+    // static
+    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                        const TextureViewDescriptor* descriptor) {
+        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+        DAWN_TRY(view->Initialize(descriptor));
+        return view;
+    }
+
+    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+        if ((GetTexture()->GetInternalUsage() &
+             ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
+            // If the texture view has no other usage than CopySrc and CopyDst, then it can't
+            // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
+            // validation errors warn if you create such a vkImageView, so return early.
+            return {};
+        }
+
+        // Texture could be destroyed by the time we make a view.
+        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+            return {};
+        }
+
+        Device* device = ToBackend(GetTexture()->GetDevice());
+
+        VkImageViewCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.image = ToBackend(GetTexture())->GetHandle();
+        createInfo.viewType = VulkanImageViewType(descriptor->dimension);
+
+        const Format& textureFormat = GetTexture()->GetFormat();
+        if (textureFormat.HasStencil() &&
+            (textureFormat.HasDepth() || !device->IsToggleEnabled(Toggle::VulkanUseS8))) {
+            // Unlike multi-planar formats, depth-stencil formats have multiple aspects but are not
+            // created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
+            // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkImageViewCreateInfo.html#VUID-VkImageViewCreateInfo-image-01762
+            // Without, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, the view format must match the texture
+            // format.
+            createInfo.format = VulkanImageFormat(device, textureFormat.format);
+        } else {
+            createInfo.format = VulkanImageFormat(device, descriptor->format);
+        }
+
+        createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+                                                   VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
+
+        const SubresourceRange& subresources = GetSubresourceRange();
+        createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
+        createInfo.subresourceRange.levelCount = subresources.levelCount;
+        createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
+        createInfo.subresourceRange.layerCount = subresources.layerCount;
+        createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "CreateImageView"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    TextureView::~TextureView() {
+    }
+
+    void TextureView::DestroyImpl() {
+        Device* device = ToBackend(GetTexture()->GetDevice());
+
+        if (mHandle != VK_NULL_HANDLE) {
+            device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    VkImageView TextureView::GetHandle() const {
+        return mHandle;
+    }
+
+    void TextureView::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE_VIEW,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_InternalTextureView", GetLabel());
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/TextureVk.h b/src/dawn/native/vulkan/TextureVk.h
new file mode 100644
index 0000000..2452ade
--- /dev/null
+++ b/src/dawn/native/vulkan/TextureVk.h
@@ -0,0 +1,197 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_TEXTUREVK_H_
+#define DAWNNATIVE_VULKAN_TEXTUREVK_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native::vulkan {
+
+    struct CommandRecordingContext;
+    class Device;
+    class Texture;
+
+    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
+    VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
+    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
+    VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
+
+    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
+                                               const TextureDescriptor* descriptor);
+
+    bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+                                const VkImageCreateInfo& imageCreateInfo);
+
+    class Texture final : public TextureBase {
+      public:
+        // Used to create a regular texture from a descriptor.
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor,
+                                                  VkImageUsageFlags extraUsages = 0);
+
+        // Creates a texture and initializes it with a VkImage that references an external memory
+        // object. Before the texture can be used, the VkDeviceMemory associated with the external
+        // image must be bound via Texture::BindExternalMemory.
+        static ResultOrError<Texture*> CreateFromExternal(
+            Device* device,
+            const ExternalImageDescriptorVk* descriptor,
+            const TextureDescriptor* textureDescriptor,
+            external_memory::Service* externalMemoryService);
+
+        // Creates a texture that wraps a swapchain-allocated VkImage.
+        static Ref<Texture> CreateForSwapChain(Device* device,
+                                               const TextureDescriptor* descriptor,
+                                               VkImage nativeImage);
+
+        VkImage GetHandle() const;
+
+        // Transitions the texture to be used as `usage`, recording any necessary barrier in
+        // `commands`.
+        // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+        void TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                wgpu::TextureUsage usage,
+                                const SubresourceRange& range);
+        void TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                    const TextureSubresourceUsage& textureUsages,
+                                    std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                    VkPipelineStageFlags* srcStages,
+                                    VkPipelineStageFlags* dstStages);
+
+        void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                 const SubresourceRange& range);
+
+        VkImageLayout GetCurrentLayoutForSwapChain() const;
+
+        // Binds externally allocated memory to the VkImage and on success, takes ownership of
+        // semaphores.
+        MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                      VkSemaphore signalSemaphore,
+                                      VkDeviceMemory externalMemoryAllocation,
+                                      std::vector<VkSemaphore> waitSemaphores);
+
+        MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
+                                         VkSemaphore* signalSemaphore,
+                                         VkImageLayout* releasedOldLayout,
+                                         VkImageLayout* releasedNewLayout);
+
+        void SetLabelHelper(const char* prefix);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~Texture() override;
+        Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+
+        MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
+        MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+                                          external_memory::Service* externalMemoryService);
+        void InitializeForSwapChain(VkImage nativeImage);
+
+        void DestroyImpl() override;
+        MaybeError ClearTexture(CommandRecordingContext* recordingContext,
+                                const SubresourceRange& range,
+                                TextureBase::ClearValue);
+
+        // Implementation details of the barrier computations for the texture.
+        void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+                                                  const SubresourceRange& range,
+                                                  std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                                  VkPipelineStageFlags* srcStages,
+                                                  VkPipelineStageFlags* dstStages);
+        void TransitionUsageForPassImpl(
+            CommandRecordingContext* recordingContext,
+            const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+            std::vector<VkImageMemoryBarrier>* imageBarriers,
+            VkPipelineStageFlags* srcStages,
+            VkPipelineStageFlags* dstStages);
+        void TransitionUsageAndGetResourceBarrierImpl(
+            wgpu::TextureUsage usage,
+            const SubresourceRange& range,
+            std::vector<VkImageMemoryBarrier>* imageBarriers,
+            VkPipelineStageFlags* srcStages,
+            VkPipelineStageFlags* dstStages);
+        void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                             std::vector<VkImageMemoryBarrier>* barriers,
+                                             size_t transitionBarrierStart);
+        bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
+
+        // In base Vulkan, Depth and stencil can only be transitioned together. This function
+        // indicates whether we should combine depth and stencil barriers to accommodate this
+        // limitation.
+        bool ShouldCombineDepthStencilBarriers() const;
+
+        // This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
+        // VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
+        bool ShouldCombineMultiPlaneBarriers() const;
+
+        bool ShouldCombineBarriers() const {
+            return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
+        }
+
+        // Compute the Aspects of the SubresourceStoage for this texture depending on whether we're
+        // doing the workaround for combined depth and stencil barriers, or combining multi-plane
+        // barriers.
+        Aspect ComputeAspectsForSubresourceStorage() const;
+
+        VkImage mHandle = VK_NULL_HANDLE;
+        ResourceMemoryAllocation mMemoryAllocation;
+        VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
+
+        enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
+        ExternalState mExternalState = ExternalState::InternalOnly;
+        ExternalState mLastExternalState = ExternalState::InternalOnly;
+
+        VkImageLayout mPendingAcquireOldLayout;
+        VkImageLayout mPendingAcquireNewLayout;
+
+        VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
+        std::vector<VkSemaphore> mWaitRequirements;
+
+        // Note that in early Vulkan versions it is not possible to transition depth and stencil
+        // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
+        // storage.
+        std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
+
+        bool mSupportsDisjointVkImage = false;
+    };
+
+    class TextureView final : public TextureViewBase {
+      public:
+        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                      const TextureViewDescriptor* descriptor);
+        VkImageView GetHandle() const;
+
+      private:
+        ~TextureView() override;
+        void DestroyImpl() override;
+        using TextureViewBase::TextureViewBase;
+        MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        VkImageView mHandle = VK_NULL_HANDLE;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_TEXTUREVK_H_
diff --git a/src/dawn/native/vulkan/UtilsVulkan.cpp b/src/dawn/native/vulkan/UtilsVulkan.cpp
new file mode 100644
index 0000000..c5290d9
--- /dev/null
+++ b/src/dawn/native/vulkan/UtilsVulkan.cpp
@@ -0,0 +1,273 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/UtilsVulkan.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/Forward.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
+        switch (op) {
+            case wgpu::CompareFunction::Never:
+                return VK_COMPARE_OP_NEVER;
+            case wgpu::CompareFunction::Less:
+                return VK_COMPARE_OP_LESS;
+            case wgpu::CompareFunction::LessEqual:
+                return VK_COMPARE_OP_LESS_OR_EQUAL;
+            case wgpu::CompareFunction::Greater:
+                return VK_COMPARE_OP_GREATER;
+            case wgpu::CompareFunction::GreaterEqual:
+                return VK_COMPARE_OP_GREATER_OR_EQUAL;
+            case wgpu::CompareFunction::Equal:
+                return VK_COMPARE_OP_EQUAL;
+            case wgpu::CompareFunction::NotEqual:
+                return VK_COMPARE_OP_NOT_EQUAL;
+            case wgpu::CompareFunction::Always:
+                return VK_COMPARE_OP_ALWAYS;
+
+            case wgpu::CompareFunction::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    // Convert Dawn texture aspects to  Vulkan texture aspect flags
+    VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
+        VkImageAspectFlags flags = 0;
+        for (Aspect aspect : IterateEnumMask(aspects)) {
+            switch (aspect) {
+                case Aspect::Color:
+                    flags |= VK_IMAGE_ASPECT_COLOR_BIT;
+                    break;
+                case Aspect::Depth:
+                    flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+                    break;
+                case Aspect::Stencil:
+                    flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
+                    break;
+
+                case Aspect::CombinedDepthStencil:
+                    flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+                    break;
+
+                case Aspect::Plane0:
+                    flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
+                    break;
+                case Aspect::Plane1:
+                    flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
+                    break;
+
+                case Aspect::None:
+                    UNREACHABLE();
+            }
+        }
+        return flags;
+    }
+
+    // Vulkan SPEC requires the source/destination region specified by each element of
+    // pRegions must be a region that is contained within srcImage/dstImage. Here the size of
+    // the image refers to the virtual size, while Dawn validates texture copy extent with the
+    // physical size, so we need to re-calculate the texture copy extent to ensure it should fit
+    // in the virtual size of the subresource.
+    Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+        Extent3D validTextureCopyExtent = copySize;
+        const TextureBase* texture = textureCopy.texture.Get();
+        Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+        ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+        ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+        if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+            ASSERT(texture->GetFormat().isCompressed);
+            validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+        }
+        if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+            ASSERT(texture->GetFormat().isCompressed);
+            validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+        }
+
+        return validTextureCopyExtent;
+    }
+
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize) {
+        TextureDataLayout passDataLayout;
+        passDataLayout.offset = bufferCopy.offset;
+        passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+        passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+        return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+    }
+
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize) {
+        const Texture* texture = ToBackend(textureCopy.texture.Get());
+
+        VkBufferImageCopy region;
+
+        region.bufferOffset = dataLayout.offset;
+        // In Vulkan the row length is in texels while it is in bytes for Dawn
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+        ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
+        region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
+        region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
+
+        region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
+        region.imageSubresource.mipLevel = textureCopy.mipLevel;
+
+        switch (textureCopy.texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+                region.imageOffset.x = textureCopy.origin.x;
+                region.imageOffset.y = 0;
+                region.imageOffset.z = 0;
+                region.imageSubresource.baseArrayLayer = 0;
+                region.imageSubresource.layerCount = 1;
+
+                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+                region.imageExtent.width = copySize.width;
+                region.imageExtent.height = 1;
+                region.imageExtent.depth = 1;
+                break;
+
+            case wgpu::TextureDimension::e2D: {
+                region.imageOffset.x = textureCopy.origin.x;
+                region.imageOffset.y = textureCopy.origin.y;
+                region.imageOffset.z = 0;
+                region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
+                region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
+
+                Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
+                region.imageExtent.width = imageExtent.width;
+                region.imageExtent.height = imageExtent.height;
+                region.imageExtent.depth = 1;
+                break;
+            }
+
+            case wgpu::TextureDimension::e3D: {
+                region.imageOffset.x = textureCopy.origin.x;
+                region.imageOffset.y = textureCopy.origin.y;
+                region.imageOffset.z = textureCopy.origin.z;
+                region.imageSubresource.baseArrayLayer = 0;
+                region.imageSubresource.layerCount = 1;
+
+                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+                region.imageExtent.width = copySize.width;
+                region.imageExtent.height = copySize.height;
+                region.imageExtent.depth = copySize.depthOrArrayLayers;
+                break;
+            }
+        }
+
+        return region;
+    }
+
+    void SetDebugName(Device* device,
+                      VkObjectType objectType,
+                      uint64_t objectHandle,
+                      const char* prefix,
+                      std::string label) {
+        if (!objectHandle) {
+            return;
+        }
+
+        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+            VkDebugUtilsObjectNameInfoEXT objectNameInfo;
+            objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+            objectNameInfo.pNext = nullptr;
+            objectNameInfo.objectType = objectType;
+            objectNameInfo.objectHandle = objectHandle;
+
+            if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+                objectNameInfo.pObjectName = prefix;
+                device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+                return;
+            }
+
+            std::string objectName = prefix;
+            objectName += "_";
+            objectName += label;
+            objectNameInfo.pObjectName = objectName.c_str();
+            device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+        }
+    }
+
+    VkSpecializationInfo* GetVkSpecializationInfo(
+        const ProgrammableStage& programmableStage,
+        VkSpecializationInfo* specializationInfo,
+        std::vector<OverridableConstantScalar>* specializationDataEntries,
+        std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
+        ASSERT(specializationInfo);
+        ASSERT(specializationDataEntries);
+        ASSERT(specializationMapEntries);
+
+        if (programmableStage.constants.size() == 0) {
+            return nullptr;
+        }
+
+        const EntryPointMetadata& entryPointMetaData =
+            programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
+
+        for (const auto& pipelineConstant : programmableStage.constants) {
+            const std::string& identifier = pipelineConstant.first;
+            double value = pipelineConstant.second;
+
+            // This is already validated so `identifier` must exist
+            const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
+
+            specializationMapEntries->push_back(
+                VkSpecializationMapEntry{moduleConstant.id,
+                                         static_cast<uint32_t>(specializationDataEntries->size() *
+                                                               sizeof(OverridableConstantScalar)),
+                                         sizeof(OverridableConstantScalar)});
+
+            OverridableConstantScalar entry{};
+            switch (moduleConstant.type) {
+                case EntryPointMetadata::OverridableConstant::Type::Boolean:
+                    entry.b = static_cast<int32_t>(value);
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Float32:
+                    entry.f32 = static_cast<float>(value);
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Int32:
+                    entry.i32 = static_cast<int32_t>(value);
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Uint32:
+                    entry.u32 = static_cast<uint32_t>(value);
+                    break;
+                default:
+                    UNREACHABLE();
+            }
+            specializationDataEntries->push_back(entry);
+        }
+
+        specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
+        specializationInfo->pMapEntries = specializationMapEntries->data();
+        specializationInfo->dataSize =
+            specializationDataEntries->size() * sizeof(OverridableConstantScalar);
+        specializationInfo->pData = specializationDataEntries->data();
+
+        return specializationInfo;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/UtilsVulkan.h b/src/dawn/native/vulkan/UtilsVulkan.h
new file mode 100644
index 0000000..c18a5b6
--- /dev/null
+++ b/src/dawn/native/vulkan/UtilsVulkan.h
@@ -0,0 +1,123 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_UTILSVULKAN_H_
+#define DAWNNATIVE_VULKAN_UTILSVULKAN_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+    struct ProgrammableStage;
+    union OverridableConstantScalar;
+}  // namespace dawn::native
+
+namespace dawn::native::vulkan {
+
+    class Device;
+
+    // A Helper type used to build a pNext chain of extension structs.
+    // Usage is:
+    //   1) Create instance, passing the address of the first struct in the chain. This requires
+    //      pNext to be nullptr. If you already have a chain you need to pass a pointer to the tail
+    //      of it.
+    //
+    //   2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
+    //
+    //   3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
+    //      with a given VkStructureType value while appending it to the chain.
+    //
+    // Examples:
+    //     VkPhysicalFeatures2 features2 = {
+    //       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+    //       .pNext = nullptr,
+    //     };
+    //
+    //     PNextChainBuilder featuresChain(&features2);
+    //
+    //     featuresChain.Add(&featuresExtensions.subgroupSizeControl,
+    //                       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+    //
+    struct PNextChainBuilder {
+        // Constructor takes the address of a Vulkan structure instance, and
+        // walks its pNext chain to record the current location of its tail.
+        //
+        // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
+        // which is why the VkBaseOutStructure* casts below are necessary.
+        template <typename VK_STRUCT_TYPE>
+        explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
+            : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
+            while (mCurrent->pNext != nullptr) {
+                mCurrent = mCurrent->pNext;
+            }
+        }
+
+        // Add one item to the chain. |vk_struct| must be a Vulkan structure
+        // that is already initialized.
+        template <typename VK_STRUCT_TYPE>
+        void Add(VK_STRUCT_TYPE* vkStruct) {
+            // Sanity checks to ensure proper type safety.
+            static_assert(
+                offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+                    offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+                "Argument type is not a proper Vulkan structure type");
+            vkStruct->pNext = nullptr;
+
+            mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+            mCurrent = mCurrent->pNext;
+        }
+
+        // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
+        template <typename VK_STRUCT_TYPE>
+        void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
+            vkStruct->sType = sType;
+            Add(vkStruct);
+        }
+
+      private:
+        VkBaseOutStructure* mCurrent;
+    };
+
+    VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
+
+    VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
+
+    Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
+
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize);
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize);
+
+    void SetDebugName(Device* device,
+                      VkObjectType objectType,
+                      uint64_t objectHandle,
+                      const char* prefix,
+                      std::string label = "");
+
+    // Returns nullptr or &specializationInfo
+    // specializationInfo, specializationDataEntries, specializationMapEntries needs to
+    // be alive at least until VkSpecializationInfo is passed into Vulkan Create*Pipelines
+    VkSpecializationInfo* GetVkSpecializationInfo(
+        const ProgrammableStage& programmableStage,
+        VkSpecializationInfo* specializationInfo,
+        std::vector<OverridableConstantScalar>* specializationDataEntries,
+        std::vector<VkSpecializationMapEntry>* specializationMapEntries);
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_UTILSVULKAN_H_
diff --git a/src/dawn/native/vulkan/VulkanBackend.cpp b/src/dawn/native/vulkan/VulkanBackend.cpp
new file mode 100644
index 0000000..e8f630a
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanBackend.cpp
@@ -0,0 +1,129 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// VulkanBackend.cpp: contains the definition of symbols exported by VulkanBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+// Include vulkan_platform.h before VulkanBackend.h includes vulkan.h so that we use our version
+// of the non-dispatchable handles.
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/VulkanBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/NativeSwapChainImplVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+
+namespace dawn::native::vulkan {
+
+    VkInstance GetInstance(WGPUDevice device) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        return backendDevice->GetVkInstance();
+    }
+
+    DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
+                                                              const char* pName) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
+    }
+
+    // Explicitly export this function because it uses the "native" type for surfaces while the
+    // header as seen in this file uses the wrapped type.
+    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+    CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
+
+        DawnSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+        impl.textureUsage = WGPUTextureUsage_Present;
+
+        return impl;
+    }
+
+    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+        const DawnSwapChainImplementation* swapChain) {
+        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+    }
+
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {
+    }
+
+#if defined(DAWN_PLATFORM_LINUX)
+    ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
+        : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {
+    }
+
+    ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
+        : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {
+    }
+
+    ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
+        : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {
+    }
+
+    ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
+        : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
+    }
+#endif  // DAWN_PLATFORM_LINUX
+
+    WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
+#if defined(DAWN_PLATFORM_LINUX)
+        switch (descriptor->GetType()) {
+            case ExternalImageType::OpaqueFD:
+            case ExternalImageType::DmaBuf: {
+                Device* backendDevice = ToBackend(FromAPI(device));
+                const ExternalImageDescriptorFD* fdDescriptor =
+                    static_cast<const ExternalImageDescriptorFD*>(descriptor);
+
+                return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
+                    fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
+            }
+            default:
+                return nullptr;
+        }
+#else
+        return nullptr;
+#endif  // DAWN_PLATFORM_LINUX
+    }
+
+    bool ExportVulkanImage(WGPUTexture texture,
+                           VkImageLayout desiredLayout,
+                           ExternalImageExportInfoVk* info) {
+        if (texture == nullptr) {
+            return false;
+        }
+#if defined(DAWN_PLATFORM_LINUX)
+        switch (info->GetType()) {
+            case ExternalImageType::OpaqueFD:
+            case ExternalImageType::DmaBuf: {
+                Texture* backendTexture = ToBackend(FromAPI(texture));
+                Device* device = ToBackend(backendTexture->GetDevice());
+                ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
+
+                return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
+                                                              &fdInfo->semaphoreHandles);
+            }
+            default:
+                return false;
+        }
+#else
+        return false;
+#endif  // DAWN_PLATFORM_LINUX
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanError.cpp b/src/dawn/native/vulkan/VulkanError.cpp
new file mode 100644
index 0000000..49416b9
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanError.cpp
@@ -0,0 +1,109 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <string>
+
+namespace dawn::native::vulkan {
+
+    const char* VkResultAsString(::VkResult result) {
+        // Convert to a int32_t to silence and MSVC warning that the fake errors don't appear in
+        // the original VkResult enum.
+        int32_t code = static_cast<int32_t>(result);
+
+        switch (code) {
+            case VK_SUCCESS:
+                return "VK_SUCCESS";
+            case VK_NOT_READY:
+                return "VK_NOT_READY";
+            case VK_TIMEOUT:
+                return "VK_TIMEOUT";
+            case VK_EVENT_SET:
+                return "VK_EVENT_SET";
+            case VK_EVENT_RESET:
+                return "VK_EVENT_RESET";
+            case VK_INCOMPLETE:
+                return "VK_INCOMPLETE";
+            case VK_ERROR_OUT_OF_HOST_MEMORY:
+                return "VK_ERROR_OUT_OF_HOST_MEMORY";
+            case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+                return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+            case VK_ERROR_INITIALIZATION_FAILED:
+                return "VK_ERROR_INITIALIZATION_FAILED";
+            case VK_ERROR_DEVICE_LOST:
+                return "VK_ERROR_DEVICE_LOST";
+            case VK_ERROR_MEMORY_MAP_FAILED:
+                return "VK_ERROR_MEMORY_MAP_FAILED";
+            case VK_ERROR_LAYER_NOT_PRESENT:
+                return "VK_ERROR_LAYER_NOT_PRESENT";
+            case VK_ERROR_EXTENSION_NOT_PRESENT:
+                return "VK_ERROR_EXTENSION_NOT_PRESENT";
+            case VK_ERROR_FEATURE_NOT_PRESENT:
+                return "VK_ERROR_FEATURE_NOT_PRESENT";
+            case VK_ERROR_INCOMPATIBLE_DRIVER:
+                return "VK_ERROR_INCOMPATIBLE_DRIVER";
+            case VK_ERROR_TOO_MANY_OBJECTS:
+                return "VK_ERROR_TOO_MANY_OBJECTS";
+            case VK_ERROR_FORMAT_NOT_SUPPORTED:
+                return "VK_ERROR_FORMAT_NOT_SUPPORTED";
+            case VK_ERROR_FRAGMENTED_POOL:
+                return "VK_ERROR_FRAGMENTED_POOL";
+
+            case VK_ERROR_SURFACE_LOST_KHR:
+                return "VK_ERROR_SURFACE_LOST_KHR";
+            case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
+                return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
+
+            case VK_FAKE_DEVICE_OOM_FOR_TESTING:
+                return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
+            case VK_FAKE_ERROR_FOR_TESTING:
+                return "VK_FAKE_ERROR_FOR_TESTING";
+            default:
+                return "<Unknown VkResult>";
+        }
+    }
+
+    MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
+        if (DAWN_LIKELY(result == VK_SUCCESS)) {
+            return {};
+        }
+
+        std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+
+        if (result == VK_ERROR_DEVICE_LOST) {
+            return DAWN_DEVICE_LOST_ERROR(message);
+        } else {
+            return DAWN_INTERNAL_ERROR(message);
+        }
+    }
+
+    MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
+        if (DAWN_LIKELY(result == VK_SUCCESS)) {
+            return {};
+        }
+
+        std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+
+        if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
+            result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+            return DAWN_OUT_OF_MEMORY_ERROR(message);
+        } else if (result == VK_ERROR_DEVICE_LOST) {
+            return DAWN_DEVICE_LOST_ERROR(message);
+        } else {
+            return DAWN_INTERNAL_ERROR(message);
+        }
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanError.h b/src/dawn/native/vulkan/VulkanError.h
new file mode 100644
index 0000000..e17e73b
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanError.h
@@ -0,0 +1,50 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANERROR_H_
+#define DAWNNATIVE_VULKAN_VULKANERROR_H_
+
+#include "dawn/native/ErrorInjector.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+
+constexpr VkResult VK_FAKE_ERROR_FOR_TESTING = VK_RESULT_MAX_ENUM;
+constexpr VkResult VK_FAKE_DEVICE_OOM_FOR_TESTING = static_cast<VkResult>(VK_RESULT_MAX_ENUM - 1);
+
+namespace dawn::native::vulkan {
+
+    // Returns a string version of the result.
+    const char* VkResultAsString(::VkResult result);
+
+    MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
+    MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
+
+// Returns a success only if result if VK_SUCCESS, an error with the context and stringified
+// result value instead. Can be used like this:
+//
+//   DAWN_TRY(CheckVkSuccess(vkDoSomething, "doing something"));
+#define CheckVkSuccess(resultIn, contextIn)                            \
+    ::dawn::native::vulkan::CheckVkSuccessImpl(                        \
+        ::dawn::native::vulkan::VkResult::WrapUnsafe(                  \
+            INJECT_ERROR_OR_RUN(resultIn, VK_FAKE_ERROR_FOR_TESTING)), \
+        contextIn)
+
+#define CheckVkOOMThenSuccess(resultIn, contextIn)                                 \
+    ::dawn::native::vulkan::CheckVkOOMThenSuccessImpl(                             \
+        ::dawn::native::vulkan::VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN(          \
+            resultIn, VK_FAKE_DEVICE_OOM_FOR_TESTING, VK_FAKE_ERROR_FOR_TESTING)), \
+        contextIn)
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_VULKANERROR_H_
diff --git a/src/dawn/native/vulkan/VulkanExtensions.cpp b/src/dawn/native/vulkan/VulkanExtensions.cpp
new file mode 100644
index 0000000..3f54e54
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanExtensions.cpp
@@ -0,0 +1,336 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanExtensions.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/vulkan_platform.h"
+
+#include <array>
+#include <limits>
+
+namespace dawn::native::vulkan {
+
+    static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
+    static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
+    static constexpr uint32_t VulkanVersion_1_3 = VK_MAKE_VERSION(1, 3, 0);
+    static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
+
+    // A static array for InstanceExtInfo that can be indexed with InstanceExts.
+    // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
+    // assert will fire if it isn't in the correct order.
+    static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
+    static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
+        //
+        {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+         VulkanVersion_1_1},
+        {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+         VulkanVersion_1_1},
+        {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+         VulkanVersion_1_1},
+
+        {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
+        {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
+        {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
+        {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
+        {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
+        {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
+        {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
+        {InstanceExt::AndroidSurface, "VK_KHR_android_surface", NeverPromoted},
+
+        {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
+        {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
+        //
+    }};
+
+    const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
+        uint32_t index = static_cast<uint32_t>(ext);
+        ASSERT(index < sInstanceExtInfos.size());
+        ASSERT(sInstanceExtInfos[index].index == ext);
+        return sInstanceExtInfos[index];
+    }
+
+    std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
+        std::unordered_map<std::string, InstanceExt> result;
+        for (const InstanceExtInfo& info : sInstanceExtInfos) {
+            result[info.name] = info.index;
+        }
+        return result;
+    }
+
+    InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
+        // We need to check that all transitive dependencies of extensions are advertised.
+        // To do that in a single pass and no data structures, the extensions are topologically
+        // sorted in the definition of InstanceExt.
+        // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
+        // dependency check will first assert all its dependents have been visited.
+        InstanceExtSet visitedSet;
+        InstanceExtSet trimmedSet;
+
+        auto HasDep = [&](InstanceExt ext) -> bool {
+            ASSERT(visitedSet[ext]);
+            return trimmedSet[ext];
+        };
+
+        for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
+            InstanceExt ext = static_cast<InstanceExt>(i);
+
+            bool hasDependencies = false;
+            switch (ext) {
+                case InstanceExt::GetPhysicalDeviceProperties2:
+                case InstanceExt::Surface:
+                case InstanceExt::DebugUtils:
+                case InstanceExt::ValidationFeatures:
+                    hasDependencies = true;
+                    break;
+
+                case InstanceExt::ExternalMemoryCapabilities:
+                case InstanceExt::ExternalSemaphoreCapabilities:
+                    hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
+                    break;
+
+                case InstanceExt::AndroidSurface:
+                case InstanceExt::FuchsiaImagePipeSurface:
+                case InstanceExt::MetalSurface:
+                case InstanceExt::WaylandSurface:
+                case InstanceExt::Win32Surface:
+                case InstanceExt::XcbSurface:
+                case InstanceExt::XlibSurface:
+                    hasDependencies = HasDep(InstanceExt::Surface);
+                    break;
+
+                case InstanceExt::EnumCount:
+                    UNREACHABLE();
+            }
+
+            trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+            visitedSet.set(ext, true);
+        }
+
+        return trimmedSet;
+    }
+
+    void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
+        for (const InstanceExtInfo& info : sInstanceExtInfos) {
+            if (info.versionPromoted <= version) {
+                extensions->set(info.index, true);
+            }
+        }
+    }
+
+    static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
+    static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
+        //
+        {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
+        {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
+        {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
+         VulkanVersion_1_1},
+        {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+         VulkanVersion_1_1},
+        {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
+        {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+         VulkanVersion_1_1},
+        {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+         VulkanVersion_1_1},
+        {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
+        {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
+        {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
+        {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
+
+        {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
+        {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
+        {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
+
+        {DeviceExt::ZeroInitializeWorkgroupMemory, "VK_KHR_zero_initialize_workgroup_memory",
+         VulkanVersion_1_3},
+
+        {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
+        {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
+        {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
+        {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
+        {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
+
+        {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
+        {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
+        {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
+        //
+    }};
+
+    const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
+        uint32_t index = static_cast<uint32_t>(ext);
+        ASSERT(index < sDeviceExtInfos.size());
+        ASSERT(sDeviceExtInfos[index].index == ext);
+        return sDeviceExtInfos[index];
+    }
+
+    std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
+        std::unordered_map<std::string, DeviceExt> result;
+        for (const DeviceExtInfo& info : sDeviceExtInfos) {
+            result[info.name] = info.index;
+        }
+        return result;
+    }
+
+    DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+                                    const InstanceExtSet& instanceExts,
+                                    uint32_t icdVersion) {
+        // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
+        // an explanation of what happens.
+        DeviceExtSet visitedSet;
+        DeviceExtSet trimmedSet;
+
+        auto HasDep = [&](DeviceExt ext) -> bool {
+            ASSERT(visitedSet[ext]);
+            return trimmedSet[ext];
+        };
+
+        for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
+            DeviceExt ext = static_cast<DeviceExt>(i);
+
+            bool hasDependencies = false;
+            switch (ext) {
+                // Happy extensions don't need anybody else!
+                case DeviceExt::BindMemory2:
+                case DeviceExt::GetMemoryRequirements2:
+                case DeviceExt::Maintenance1:
+                case DeviceExt::ImageFormatList:
+                case DeviceExt::StorageBufferStorageClass:
+                    hasDependencies = true;
+                    break;
+
+                // Physical device extensions technically don't require the instance to support
+                // them but VulkanFunctions only loads the function pointers if the instance
+                // advertises the extension. So if we didn't have this check, we'd risk a calling
+                // a nullptr.
+                case DeviceExt::GetPhysicalDeviceProperties2:
+                    hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
+                    break;
+                case DeviceExt::ExternalMemoryCapabilities:
+                    hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
+                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                    break;
+                case DeviceExt::ExternalSemaphoreCapabilities:
+                    hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
+                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                    break;
+
+                case DeviceExt::ImageDrmFormatModifier:
+                    hasDependencies = HasDep(DeviceExt::BindMemory2) &&
+                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+                                      HasDep(DeviceExt::ImageFormatList) &&
+                                      HasDep(DeviceExt::SamplerYCbCrConversion);
+                    break;
+
+                case DeviceExt::Swapchain:
+                    hasDependencies = instanceExts[InstanceExt::Surface];
+                    break;
+
+                case DeviceExt::SamplerYCbCrConversion:
+                    hasDependencies = HasDep(DeviceExt::Maintenance1) &&
+                                      HasDep(DeviceExt::BindMemory2) &&
+                                      HasDep(DeviceExt::GetMemoryRequirements2) &&
+                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                    break;
+
+                case DeviceExt::DriverProperties:
+                case DeviceExt::ShaderFloat16Int8:
+                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                    break;
+
+                case DeviceExt::ExternalMemory:
+                    hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
+                    break;
+
+                case DeviceExt::ExternalSemaphore:
+                    hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
+                    break;
+
+                case DeviceExt::ExternalMemoryFD:
+                case DeviceExt::ExternalMemoryZirconHandle:
+                    hasDependencies = HasDep(DeviceExt::ExternalMemory);
+                    break;
+
+                case DeviceExt::ExternalMemoryDmaBuf:
+                    hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
+                    break;
+
+                case DeviceExt::ExternalSemaphoreFD:
+                case DeviceExt::ExternalSemaphoreZirconHandle:
+                    hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
+                    break;
+
+                case DeviceExt::_16BitStorage:
+                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+                                      HasDep(DeviceExt::StorageBufferStorageClass);
+                    break;
+
+                case DeviceExt::SubgroupSizeControl:
+                    // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
+                    // don't need to check for it as it also requires Vulkan 1.1 in which
+                    // VK_KHR_get_physical_device_properties2 was promoted.
+                    hasDependencies = icdVersion >= VulkanVersion_1_1;
+                    break;
+
+                case DeviceExt::ZeroInitializeWorkgroupMemory:
+                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                    break;
+
+                case DeviceExt::EnumCount:
+                    UNREACHABLE();
+            }
+
+            trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+            visitedSet.set(ext, true);
+        }
+
+        return trimmedSet;
+    }
+
+    void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
+        for (const DeviceExtInfo& info : sDeviceExtInfos) {
+            if (info.versionPromoted <= version) {
+                extensions->set(info.index, true);
+            }
+        }
+    }
+
+    // A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
+    // GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
+    // assert will fire if it isn't in the correct order.
+    static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
+    static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
+        //
+        {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
+        {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
+        {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
+        {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
+        //
+    }};
+
+    const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
+        uint32_t index = static_cast<uint32_t>(layer);
+        ASSERT(index < sVulkanLayerInfos.size());
+        ASSERT(sVulkanLayerInfos[index].layer == layer);
+        return sVulkanLayerInfos[index];
+    }
+
+    std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
+        std::unordered_map<std::string, VulkanLayer> result;
+        for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
+            result[info.name] = info.layer;
+        }
+        return result;
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanExtensions.h b/src/dawn/native/vulkan/VulkanExtensions.h
new file mode 100644
index 0000000..d58c35e
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanExtensions.h
@@ -0,0 +1,166 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+#define DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+
+#include "dawn/common/ityp_bitset.h"
+
+#include <unordered_map>
+
+namespace dawn::native::vulkan {
+
+    // The list of known instance extensions. They must be in dependency order (this is checked
+    // inside EnsureDependencies)
+    enum class InstanceExt {
+        // Promoted to 1.1
+        GetPhysicalDeviceProperties2,
+        ExternalMemoryCapabilities,
+        ExternalSemaphoreCapabilities,
+
+        // Surface extensions
+        Surface,
+        FuchsiaImagePipeSurface,
+        MetalSurface,
+        WaylandSurface,
+        Win32Surface,
+        XcbSurface,
+        XlibSurface,
+        AndroidSurface,
+
+        // Others
+        DebugUtils,
+        ValidationFeatures,
+
+        EnumCount,
+    };
+
+    // A bitset that is indexed with InstanceExt.
+    using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
+
+    // Information about a known instance extension.
+    struct InstanceExtInfo {
+        InstanceExt index;
+        const char* name;
+        // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+        // or NeverPromoted if it was never promoted.
+        uint32_t versionPromoted;
+    };
+
+    // Returns the information about a known InstanceExt
+    const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
+    // Returns a map that maps a Vulkan extension name to its InstanceExt.
+    std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
+
+    // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+    void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
+    // From a set of extensions advertised as supported by the instance (or promoted), remove all
+    // extensions that don't have all their transitive dependencies in advertisedExts.
+    InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
+
+    // The list of known device extensions. They must be in dependency order (this is checked
+    // inside EnsureDependencies)
+    enum class DeviceExt {
+        // Promoted to 1.1
+        BindMemory2,
+        Maintenance1,
+        StorageBufferStorageClass,
+        GetPhysicalDeviceProperties2,
+        GetMemoryRequirements2,
+        ExternalMemoryCapabilities,
+        ExternalSemaphoreCapabilities,
+        ExternalMemory,
+        ExternalSemaphore,
+        _16BitStorage,
+        SamplerYCbCrConversion,
+
+        // Promoted to 1.2
+        DriverProperties,
+        ImageFormatList,
+        ShaderFloat16Int8,
+
+        // Promoted to 1.3
+        ZeroInitializeWorkgroupMemory,
+
+        // External* extensions
+        ExternalMemoryFD,
+        ExternalMemoryDmaBuf,
+        ExternalMemoryZirconHandle,
+        ExternalSemaphoreFD,
+        ExternalSemaphoreZirconHandle,
+
+        // Others
+        ImageDrmFormatModifier,
+        Swapchain,
+        SubgroupSizeControl,
+
+        EnumCount,
+    };
+
+    // A bitset that is indexed with DeviceExt.
+    using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
+
+    // Information about a known device extension.
+    struct DeviceExtInfo {
+        DeviceExt index;
+        const char* name;
+        // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+        // or NeverPromoted if it was never promoted.
+        uint32_t versionPromoted;
+    };
+
+    // Returns the information about a known DeviceExt
+    const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
+    // Returns a map that maps a Vulkan extension name to its DeviceExt.
+    std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
+
+    // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+    void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
+    // From a set of extensions advertised as supported by the device (or promoted), remove all
+    // extensions that don't have all their transitive dependencies in advertisedExts or in
+    // instanceExts.
+    DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+                                    const InstanceExtSet& instanceExts,
+                                    uint32_t icdVersion);
+
+    // The list of all known Vulkan layers.
+    enum class VulkanLayer {
+        Validation,
+        LunargVkTrace,
+        RenderDocCapture,
+
+        // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_image_pipe_swapchain),
+        // which adds an instance extensions (VK_FUCHSIA_image_surface) to all ICDs.
+        FuchsiaImagePipeSwapchain,
+
+        EnumCount,
+    };
+
+    // A bitset that is indexed with VulkanLayer.
+    using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
+
+    // Information about a known layer
+    struct VulkanLayerInfo {
+        VulkanLayer layer;
+        const char* name;
+    };
+
+    // Returns the information about a known VulkanLayer
+    const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
+    // Returns a map that maps a Vulkan layer name to its VulkanLayer.
+    std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
diff --git a/src/dawn/native/vulkan/VulkanFunctions.cpp b/src/dawn/native/vulkan/VulkanFunctions.cpp
new file mode 100644
index 0000000..48e9709
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanFunctions.cpp
@@ -0,0 +1,338 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanFunctions.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+#define GET_GLOBAL_PROC(name)                                                              \
+    do {                                                                                   \
+        name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(nullptr, "vk" #name)); \
+        if (name == nullptr) {                                                             \
+            return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name);       \
+        }                                                                                  \
+    } while (0)
+
+    MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
+        if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
+            return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
+        }
+
+        GET_GLOBAL_PROC(CreateInstance);
+        GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
+        GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
+
+        // Is not available in Vulkan 1.0, so allow nullptr
+        EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
+            GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+
+        return {};
+    }
+
+#define GET_INSTANCE_PROC_BASE(name, procName)                                                  \
+    do {                                                                                        \
+        name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
+        if (name == nullptr) {                                                                  \
+            return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName);        \
+        }                                                                                       \
+    } while (0)
+
+#define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
+#define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
+
+    MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
+                                                  const VulkanGlobalInfo& globalInfo) {
+        // Load this proc first so that we can destroy the instance even if some other
+        // GET_INSTANCE_PROC fails
+        GET_INSTANCE_PROC(DestroyInstance);
+
+        GET_INSTANCE_PROC(CreateDevice);
+        GET_INSTANCE_PROC(DestroyDevice);
+        GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
+        GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
+        GET_INSTANCE_PROC(EnumeratePhysicalDevices);
+        GET_INSTANCE_PROC(GetDeviceProcAddr);
+        GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
+        GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
+        GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
+        GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
+        GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
+        GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+
+        if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
+            GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
+            GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
+            GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
+            GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
+            GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
+            GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
+        }
+
+        // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
+        // support the vendor entrypoint in GetProcAddress.
+        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+            GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
+        } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
+        }
+
+        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+            GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+        } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
+        }
+
+        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+            GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
+            GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
+        } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
+            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
+        }
+
+        if (globalInfo.HasExt(InstanceExt::Surface)) {
+            GET_INSTANCE_PROC(DestroySurfaceKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+        }
+
+#if defined(VK_USE_PLATFORM_FUCHSIA)
+        if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
+            GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
+        }
+#endif  // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+        if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
+            GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
+        }
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+        if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
+            GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
+        }
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+        if (globalInfo.HasExt(InstanceExt::AndroidSurface)) {
+            GET_INSTANCE_PROC(CreateAndroidSurfaceKHR);
+        }
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_USE_X11)
+        if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
+            GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
+        }
+        if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
+            GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
+            GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
+        }
+#endif  // defined(DAWN_USE_X11)
+        return {};
+    }
+
+#define GET_DEVICE_PROC(name)                                                           \
+    do {                                                                                \
+        name = reinterpret_cast<decltype(name)>(GetDeviceProcAddr(device, "vk" #name)); \
+        if (name == nullptr) {                                                          \
+            return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name);    \
+        }                                                                               \
+    } while (0)
+
+    MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
+                                                const VulkanDeviceInfo& deviceInfo) {
+        GET_DEVICE_PROC(AllocateCommandBuffers);
+        GET_DEVICE_PROC(AllocateDescriptorSets);
+        GET_DEVICE_PROC(AllocateMemory);
+        GET_DEVICE_PROC(BeginCommandBuffer);
+        GET_DEVICE_PROC(BindBufferMemory);
+        GET_DEVICE_PROC(BindImageMemory);
+        GET_DEVICE_PROC(CmdBeginQuery);
+        GET_DEVICE_PROC(CmdBeginRenderPass);
+        GET_DEVICE_PROC(CmdBindDescriptorSets);
+        GET_DEVICE_PROC(CmdBindIndexBuffer);
+        GET_DEVICE_PROC(CmdBindPipeline);
+        GET_DEVICE_PROC(CmdBindVertexBuffers);
+        GET_DEVICE_PROC(CmdBlitImage);
+        GET_DEVICE_PROC(CmdClearAttachments);
+        GET_DEVICE_PROC(CmdClearColorImage);
+        GET_DEVICE_PROC(CmdClearDepthStencilImage);
+        GET_DEVICE_PROC(CmdCopyBuffer);
+        GET_DEVICE_PROC(CmdCopyBufferToImage);
+        GET_DEVICE_PROC(CmdCopyImage);
+        GET_DEVICE_PROC(CmdCopyImageToBuffer);
+        GET_DEVICE_PROC(CmdCopyQueryPoolResults);
+        GET_DEVICE_PROC(CmdDispatch);
+        GET_DEVICE_PROC(CmdDispatchIndirect);
+        GET_DEVICE_PROC(CmdDraw);
+        GET_DEVICE_PROC(CmdDrawIndexed);
+        GET_DEVICE_PROC(CmdDrawIndexedIndirect);
+        GET_DEVICE_PROC(CmdDrawIndirect);
+        GET_DEVICE_PROC(CmdEndQuery);
+        GET_DEVICE_PROC(CmdEndRenderPass);
+        GET_DEVICE_PROC(CmdExecuteCommands);
+        GET_DEVICE_PROC(CmdFillBuffer);
+        GET_DEVICE_PROC(CmdNextSubpass);
+        GET_DEVICE_PROC(CmdPipelineBarrier);
+        GET_DEVICE_PROC(CmdPushConstants);
+        GET_DEVICE_PROC(CmdResetEvent);
+        GET_DEVICE_PROC(CmdResetQueryPool);
+        GET_DEVICE_PROC(CmdResolveImage);
+        GET_DEVICE_PROC(CmdSetBlendConstants);
+        GET_DEVICE_PROC(CmdSetDepthBias);
+        GET_DEVICE_PROC(CmdSetDepthBounds);
+        GET_DEVICE_PROC(CmdSetEvent);
+        GET_DEVICE_PROC(CmdSetLineWidth);
+        GET_DEVICE_PROC(CmdSetScissor);
+        GET_DEVICE_PROC(CmdSetStencilCompareMask);
+        GET_DEVICE_PROC(CmdSetStencilReference);
+        GET_DEVICE_PROC(CmdSetStencilWriteMask);
+        GET_DEVICE_PROC(CmdSetViewport);
+        GET_DEVICE_PROC(CmdUpdateBuffer);
+        GET_DEVICE_PROC(CmdWaitEvents);
+        GET_DEVICE_PROC(CmdWriteTimestamp);
+        GET_DEVICE_PROC(CreateBuffer);
+        GET_DEVICE_PROC(CreateBufferView);
+        GET_DEVICE_PROC(CreateCommandPool);
+        GET_DEVICE_PROC(CreateComputePipelines);
+        GET_DEVICE_PROC(CreateDescriptorPool);
+        GET_DEVICE_PROC(CreateDescriptorSetLayout);
+        GET_DEVICE_PROC(CreateEvent);
+        GET_DEVICE_PROC(CreateFence);
+        GET_DEVICE_PROC(CreateFramebuffer);
+        GET_DEVICE_PROC(CreateGraphicsPipelines);
+        GET_DEVICE_PROC(CreateImage);
+        GET_DEVICE_PROC(CreateImageView);
+        GET_DEVICE_PROC(CreatePipelineCache);
+        GET_DEVICE_PROC(CreatePipelineLayout);
+        GET_DEVICE_PROC(CreateQueryPool);
+        GET_DEVICE_PROC(CreateRenderPass);
+        GET_DEVICE_PROC(CreateSampler);
+        GET_DEVICE_PROC(CreateSemaphore);
+        GET_DEVICE_PROC(CreateShaderModule);
+        GET_DEVICE_PROC(DestroyBuffer);
+        GET_DEVICE_PROC(DestroyBufferView);
+        GET_DEVICE_PROC(DestroyCommandPool);
+        GET_DEVICE_PROC(DestroyDescriptorPool);
+        GET_DEVICE_PROC(DestroyDescriptorSetLayout);
+        GET_DEVICE_PROC(DestroyEvent);
+        GET_DEVICE_PROC(DestroyFence);
+        GET_DEVICE_PROC(DestroyFramebuffer);
+        GET_DEVICE_PROC(DestroyImage);
+        GET_DEVICE_PROC(DestroyImageView);
+        GET_DEVICE_PROC(DestroyPipeline);
+        GET_DEVICE_PROC(DestroyPipelineCache);
+        GET_DEVICE_PROC(DestroyPipelineLayout);
+        GET_DEVICE_PROC(DestroyQueryPool);
+        GET_DEVICE_PROC(DestroyRenderPass);
+        GET_DEVICE_PROC(DestroySampler);
+        GET_DEVICE_PROC(DestroySemaphore);
+        GET_DEVICE_PROC(DestroyShaderModule);
+        GET_DEVICE_PROC(DeviceWaitIdle);
+        GET_DEVICE_PROC(EndCommandBuffer);
+        GET_DEVICE_PROC(FlushMappedMemoryRanges);
+        GET_DEVICE_PROC(FreeCommandBuffers);
+        GET_DEVICE_PROC(FreeDescriptorSets);
+        GET_DEVICE_PROC(FreeMemory);
+        GET_DEVICE_PROC(GetBufferMemoryRequirements);
+        GET_DEVICE_PROC(GetDeviceMemoryCommitment);
+        GET_DEVICE_PROC(GetDeviceQueue);
+        GET_DEVICE_PROC(GetEventStatus);
+        GET_DEVICE_PROC(GetFenceStatus);
+        GET_DEVICE_PROC(GetImageMemoryRequirements);
+        GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
+        GET_DEVICE_PROC(GetImageSubresourceLayout);
+        GET_DEVICE_PROC(GetPipelineCacheData);
+        GET_DEVICE_PROC(GetQueryPoolResults);
+        GET_DEVICE_PROC(GetRenderAreaGranularity);
+        GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
+        GET_DEVICE_PROC(MapMemory);
+        GET_DEVICE_PROC(MergePipelineCaches);
+        GET_DEVICE_PROC(QueueBindSparse);
+        GET_DEVICE_PROC(QueueSubmit);
+        GET_DEVICE_PROC(QueueWaitIdle);
+        GET_DEVICE_PROC(ResetCommandBuffer);
+        GET_DEVICE_PROC(ResetCommandPool);
+        GET_DEVICE_PROC(ResetDescriptorPool);
+        GET_DEVICE_PROC(ResetEvent);
+        GET_DEVICE_PROC(ResetFences);
+        GET_DEVICE_PROC(SetEvent);
+        GET_DEVICE_PROC(UnmapMemory);
+        GET_DEVICE_PROC(UpdateDescriptorSets);
+        GET_DEVICE_PROC(WaitForFences);
+
+        if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
+            GET_DEVICE_PROC(GetMemoryFdKHR);
+            GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
+        }
+
+        if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+            GET_DEVICE_PROC(ImportSemaphoreFdKHR);
+            GET_DEVICE_PROC(GetSemaphoreFdKHR);
+        }
+
+        if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
+            GET_DEVICE_PROC(CreateSwapchainKHR);
+            GET_DEVICE_PROC(DestroySwapchainKHR);
+            GET_DEVICE_PROC(GetSwapchainImagesKHR);
+            GET_DEVICE_PROC(AcquireNextImageKHR);
+            GET_DEVICE_PROC(QueuePresentKHR);
+        }
+
+        if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
+            GET_DEVICE_PROC(GetBufferMemoryRequirements2);
+            GET_DEVICE_PROC(GetImageMemoryRequirements2);
+            GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
+        }
+
+#if VK_USE_PLATFORM_FUCHSIA
+        if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
+            GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
+            GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
+        }
+
+        if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+            GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
+            GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
+        }
+#endif
+
+        return {};
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanFunctions.h b/src/dawn/native/vulkan/VulkanFunctions.h
new file mode 100644
index 0000000..0de5192
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanFunctions.h
@@ -0,0 +1,330 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
+#define DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/Error.h"
+
+class DynamicLib;
+
+namespace dawn::native::vulkan {
+
+    struct VulkanGlobalInfo;
+    struct VulkanDeviceInfo;
+
+    // Stores the Vulkan entry points. Also loads them from the dynamic library
+    // and the vkGet*ProcAddress entry points.
+    struct VulkanFunctions {
+        MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
+        MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
+        MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
+
+        // ---------- Global procs
+
+        // Initial proc from which we can get all the others
+        PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
+
+        PFN_vkCreateInstance CreateInstance = nullptr;
+        PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
+        PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
+        // DestroyInstance isn't technically a global proc but we want to be able to use it
+        // before querying the instance procs in case we need to error out during initialization.
+        PFN_vkDestroyInstance DestroyInstance = nullptr;
+
+        // Core Vulkan 1.1
+        PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
+
+        // ---------- Instance procs
+
+        // Core Vulkan 1.0
+        PFN_vkCreateDevice CreateDevice = nullptr;
+        PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
+        PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
+        PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
+        PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
+        PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
+        PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
+        PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties =
+            nullptr;
+        PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
+        PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
+        PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties =
+            nullptr;
+        PFN_vkGetPhysicalDeviceSparseImageFormatProperties
+            GetPhysicalDeviceSparseImageFormatProperties = nullptr;
+        // Not technically an instance proc but we want to be able to use it as soon as the
+        // device is created.
+        PFN_vkDestroyDevice DestroyDevice = nullptr;
+
+        // VK_EXT_debug_utils
+        PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
+        PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
+        PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
+        PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
+        PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
+        PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
+        PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
+        PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
+        PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
+        PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
+        PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
+
+        // VK_KHR_surface
+        PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
+        PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
+        PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR =
+            nullptr;
+        PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
+        PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
+            nullptr;
+
+        // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
+        // present.
+
+        // VK_KHR_external_memory_capabilities
+        PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
+            nullptr;
+
+        // VK_KHR_external_semaphore_capabilities
+        PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
+            GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
+
+        // VK_KHR_get_physical_device_properties2
+        PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
+        PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
+        PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
+        PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
+            nullptr;
+        PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
+            nullptr;
+        PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
+        PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
+            GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
+
+#if defined(VK_USE_PLATFORM_FUCHSIA)
+        // FUCHSIA_image_pipe_surface
+        PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
+#endif  // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+        // EXT_metal_surface
+        PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+        // KHR_win32_surface
+        PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
+        PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
+            GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+        PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR = nullptr;
+#endif  // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_USE_X11)
+        // KHR_xlib_surface
+        PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
+        PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
+            GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
+
+        // KHR_xcb_surface
+        PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
+        PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
+            GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
+#endif  // defined(DAWN_USE_X11)
+
+        // ---------- Device procs
+
+        // Core Vulkan 1.0
+        PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
+        PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
+        PFN_vkAllocateMemory AllocateMemory = nullptr;
+        PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
+        PFN_vkBindBufferMemory BindBufferMemory = nullptr;
+        PFN_vkBindImageMemory BindImageMemory = nullptr;
+        PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
+        PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
+        PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
+        PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
+        PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
+        PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
+        PFN_vkCmdBlitImage CmdBlitImage = nullptr;
+        PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
+        PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
+        PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
+        PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
+        PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
+        PFN_vkCmdCopyImage CmdCopyImage = nullptr;
+        PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
+        PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
+        PFN_vkCmdDispatch CmdDispatch = nullptr;
+        PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
+        PFN_vkCmdDraw CmdDraw = nullptr;
+        PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
+        PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
+        PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
+        PFN_vkCmdEndQuery CmdEndQuery = nullptr;
+        PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
+        PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
+        PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
+        PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
+        PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
+        PFN_vkCmdPushConstants CmdPushConstants = nullptr;
+        PFN_vkCmdResetEvent CmdResetEvent = nullptr;
+        PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
+        PFN_vkCmdResolveImage CmdResolveImage = nullptr;
+        PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
+        PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
+        PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
+        PFN_vkCmdSetEvent CmdSetEvent = nullptr;
+        PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
+        PFN_vkCmdSetScissor CmdSetScissor = nullptr;
+        PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
+        PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
+        PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
+        PFN_vkCmdSetViewport CmdSetViewport = nullptr;
+        PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
+        PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
+        PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
+        PFN_vkCreateBuffer CreateBuffer = nullptr;
+        PFN_vkCreateBufferView CreateBufferView = nullptr;
+        PFN_vkCreateCommandPool CreateCommandPool = nullptr;
+        PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
+        PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
+        PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
+        PFN_vkCreateEvent CreateEvent = nullptr;
+        PFN_vkCreateFence CreateFence = nullptr;
+        PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
+        PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
+        PFN_vkCreateImage CreateImage = nullptr;
+        PFN_vkCreateImageView CreateImageView = nullptr;
+        PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
+        PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
+        PFN_vkCreateQueryPool CreateQueryPool = nullptr;
+        PFN_vkCreateRenderPass CreateRenderPass = nullptr;
+        PFN_vkCreateSampler CreateSampler = nullptr;
+        PFN_vkCreateSemaphore CreateSemaphore = nullptr;
+        PFN_vkCreateShaderModule CreateShaderModule = nullptr;
+        PFN_vkDestroyBuffer DestroyBuffer = nullptr;
+        PFN_vkDestroyBufferView DestroyBufferView = nullptr;
+        PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
+        PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
+        PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
+        PFN_vkDestroyEvent DestroyEvent = nullptr;
+        PFN_vkDestroyFence DestroyFence = nullptr;
+        PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
+        PFN_vkDestroyImage DestroyImage = nullptr;
+        PFN_vkDestroyImageView DestroyImageView = nullptr;
+        PFN_vkDestroyPipeline DestroyPipeline = nullptr;
+        PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
+        PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
+        PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
+        PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
+        PFN_vkDestroySampler DestroySampler = nullptr;
+        PFN_vkDestroySemaphore DestroySemaphore = nullptr;
+        PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
+        PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
+        PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
+        PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
+        PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
+        PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
+        PFN_vkFreeMemory FreeMemory = nullptr;
+        PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
+        PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
+        PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
+        PFN_vkGetEventStatus GetEventStatus = nullptr;
+        PFN_vkGetFenceStatus GetFenceStatus = nullptr;
+        PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
+        PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
+        PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
+        PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
+        PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
+        PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
+        PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
+        PFN_vkMapMemory MapMemory = nullptr;
+        PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
+        PFN_vkQueueBindSparse QueueBindSparse = nullptr;
+        PFN_vkQueueSubmit QueueSubmit = nullptr;
+        PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
+        PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
+        PFN_vkResetCommandPool ResetCommandPool = nullptr;
+        PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
+        PFN_vkResetEvent ResetEvent = nullptr;
+        PFN_vkResetFences ResetFences = nullptr;
+        PFN_vkSetEvent SetEvent = nullptr;
+        PFN_vkUnmapMemory UnmapMemory = nullptr;
+        PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
+        PFN_vkWaitForFences WaitForFences = nullptr;
+
+        // VK_KHR_external_memory_fd
+        PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
+        PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
+
+        // VK_KHR_external_semaphore_fd
+        PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
+        PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
+
+        // VK_KHR_get_memory_requirements2
+        PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
+        PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
+        PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
+
+        // VK_KHR_swapchain
+        PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
+        PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
+        PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
+        PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
+        PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
+
+#if VK_USE_PLATFORM_FUCHSIA
+        // VK_FUCHSIA_external_memory
+        PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
+        PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
+            nullptr;
+
+        // VK_FUCHSIA_external_semaphore
+        PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
+        PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
+#endif
+    };
+
+    // Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
+    // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
+    // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
+    // about handling error cases.
+    class VkResult {
+      public:
+        constexpr static VkResult WrapUnsafe(::VkResult value) {
+            return VkResult(value);
+        }
+
+        constexpr operator ::VkResult() const {
+            return mValue;
+        }
+
+      private:
+        // Private. Use VkResult::WrapUnsafe instead.
+        constexpr VkResult(::VkResult value) : mValue(value) {
+        }
+
+        ::VkResult mValue;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
diff --git a/src/dawn/native/vulkan/VulkanInfo.cpp b/src/dawn/native/vulkan/VulkanInfo.cpp
new file mode 100644
index 0000000..a734a9b
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanInfo.cpp
@@ -0,0 +1,334 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <cstring>
+
+namespace dawn::native::vulkan {
+
+    namespace {
+        ResultOrError<InstanceExtSet> GatherInstanceExtensions(
+            const char* layerName,
+            const dawn::native::vulkan::VulkanFunctions& vkFunctions,
+            const std::unordered_map<std::string, InstanceExt>& knownExts) {
+            uint32_t count = 0;
+            VkResult vkResult = VkResult::WrapUnsafe(
+                vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
+            if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
+            }
+
+            std::vector<VkExtensionProperties> extensions(count);
+            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceExtensionProperties(
+                                        layerName, &count, extensions.data()),
+                                    "vkEnumerateInstanceExtensionProperties"));
+
+            InstanceExtSet result;
+            for (const VkExtensionProperties& extension : extensions) {
+                auto it = knownExts.find(extension.extensionName);
+                if (it != knownExts.end()) {
+                    result.set(it->second, true);
+                }
+            }
+
+            return result;
+        }
+
+    }  // namespace
+
+    bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
+        return extensions[ext];
+    }
+
+    bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
+        return extensions[ext];
+    }
+
+    ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
+        VulkanGlobalInfo info = {};
+        // Gather info on available API version
+        {
+            info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+            if (vkFunctions.EnumerateInstanceVersion != nullptr) {
+                DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
+                                        "vkEnumerateInstanceVersion"));
+            }
+        }
+
+        // Gather the info about the instance layers
+        {
+            uint32_t count = 0;
+            VkResult result =
+                VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
+            // From the Vulkan spec result should be success if there are 0 layers,
+            // incomplete otherwise. This means that both values represent a success.
+            // This is the same for all Enumarte functions
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
+            }
+
+            std::vector<VkLayerProperties> layersProperties(count);
+            DAWN_TRY(CheckVkSuccess(
+                vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
+                "vkEnumerateInstanceLayerProperties"));
+
+            std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
+            for (const VkLayerProperties& layer : layersProperties) {
+                auto it = knownLayers.find(layer.layerName);
+                if (it != knownLayers.end()) {
+                    info.layers.set(it->second, true);
+                }
+            }
+        }
+
+        // Gather the info about the instance extensions
+        {
+            std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
+
+            DAWN_TRY_ASSIGN(info.extensions,
+                            GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
+            MarkPromotedExtensions(&info.extensions, info.apiVersion);
+            info.extensions = EnsureDependencies(info.extensions);
+
+            for (VulkanLayer layer : IterateBitSet(info.layers)) {
+                DAWN_TRY_ASSIGN(info.layerExtensions[layer],
+                                GatherInstanceExtensions(GetVulkanLayerInfo(layer).name,
+                                                         vkFunctions, knownExts));
+                MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
+                info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
+            }
+        }
+
+        return std::move(info);
+    }
+
+    ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+        VkInstance instance,
+        const VulkanFunctions& vkFunctions) {
+        uint32_t count = 0;
+        VkResult result =
+            VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+        if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+            return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
+        }
+
+        std::vector<VkPhysicalDevice> physicalDevices(count);
+        DAWN_TRY(CheckVkSuccess(
+            vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
+            "vkEnumeratePhysicalDevices"));
+
+        return std::move(physicalDevices);
+    }
+
+    ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+        VulkanDeviceInfo info = {};
+        VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+        const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
+        const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+        // Query the device properties first to get the ICD's `apiVersion`
+        vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
+
+        // Gather info about device memory.
+        {
+            VkPhysicalDeviceMemoryProperties memory;
+            vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
+
+            info.memoryTypes.assign(memory.memoryTypes,
+                                    memory.memoryTypes + memory.memoryTypeCount);
+            info.memoryHeaps.assign(memory.memoryHeaps,
+                                    memory.memoryHeaps + memory.memoryHeapCount);
+        }
+
+        // Gather info about device queue families
+        {
+            uint32_t count = 0;
+            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
+
+            info.queueFamilies.resize(count);
+            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
+                                                               info.queueFamilies.data());
+        }
+
+        // Gather the info about the device layers
+        {
+            uint32_t count = 0;
+            VkResult result = VkResult::WrapUnsafe(
+                vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
+            }
+
+            info.layers.resize(count);
+            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
+                                        physicalDevice, &count, info.layers.data()),
+                                    "vkEnumerateDeviceLayerProperties"));
+        }
+
+        // Gather the info about the device extensions
+        {
+            uint32_t count = 0;
+            VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
+                physicalDevice, nullptr, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
+            }
+
+            std::vector<VkExtensionProperties> extensionsProperties;
+            extensionsProperties.resize(count);
+            DAWN_TRY(
+                CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+                                   physicalDevice, nullptr, &count, extensionsProperties.data()),
+                               "vkEnumerateDeviceExtensionProperties"));
+
+            std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+
+            for (const VkExtensionProperties& extension : extensionsProperties) {
+                auto it = knownExts.find(extension.extensionName);
+                if (it != knownExts.end()) {
+                    info.extensions.set(it->second, true);
+                }
+            }
+
+            MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
+            info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
+                                                 info.properties.apiVersion);
+        }
+
+        // Gather general and extension features and properties
+        //
+        // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
+        // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
+        // because these extensions (transitively) depend on it in `EnsureDependencies`
+        VkPhysicalDeviceFeatures2 features2 = {};
+        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+        features2.pNext = nullptr;
+        PNextChainBuilder featuresChain(&features2);
+
+        VkPhysicalDeviceProperties2 properties2 = {};
+        properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+        features2.pNext = nullptr;
+        PNextChainBuilder propertiesChain(&properties2);
+
+        if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
+            featuresChain.Add(&info.shaderFloat16Int8Features,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+        }
+
+        if (info.extensions[DeviceExt::_16BitStorage]) {
+            featuresChain.Add(&info._16BitStorageFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+        }
+
+        if (info.extensions[DeviceExt::SubgroupSizeControl]) {
+            featuresChain.Add(&info.subgroupSizeControlFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+            propertiesChain.Add(
+                &info.subgroupSizeControlProperties,
+                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
+        }
+
+        if (info.extensions[DeviceExt::DriverProperties]) {
+            propertiesChain.Add(&info.driverProperties,
+                                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
+        }
+
+        // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
+        // that features no covered by VkPhysicalDevice{Features,Properties} can be queried.
+        //
+        // Note that info.properties has already been filled at the start of this function to get
+        // `apiVersion`.
+        ASSERT(info.properties.apiVersion != 0);
+        if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
+            vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
+            vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
+            info.features = features2.features;
+        } else {
+            ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
+            vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
+        }
+
+        // TODO(cwallez@chromium.org): gather info about formats
+
+        return std::move(info);
+    }
+
+    ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+                                                       VkSurfaceKHR surface) {
+        VulkanSurfaceInfo info = {};
+
+        VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+        const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+        // Get the surface capabilities
+        DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
+                                    physicalDevice, surface, &info.capabilities),
+                                "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
+
+        // Query which queue families support presenting this surface
+        {
+            size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
+            info.supportedQueueFamilies.resize(nQueueFamilies, false);
+
+            for (uint32_t i = 0; i < nQueueFamilies; ++i) {
+                VkBool32 supported = VK_FALSE;
+                DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
+                                            physicalDevice, i, surface, &supported),
+                                        "vkGetPhysicalDeviceSurfaceSupportKHR"));
+
+                info.supportedQueueFamilies[i] = (supported == VK_TRUE);
+            }
+        }
+
+        // Gather supported formats
+        {
+            uint32_t count = 0;
+            VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+                physicalDevice, surface, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
+            }
+
+            info.formats.resize(count);
+            DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+                                        physicalDevice, surface, &count, info.formats.data()),
+                                    "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+        }
+
+        // Gather supported presents modes
+        {
+            uint32_t count = 0;
+            VkResult result =
+                VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+                    physicalDevice, surface, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
+            }
+
+            info.presentModes.resize(count);
+            DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+                                        physicalDevice, surface, &count, info.presentModes.data()),
+                                    "vkGetPhysicalDeviceSurfacePresentModesKHR"));
+        }
+
+        return std::move(info);
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanInfo.h b/src/dawn/native/vulkan/VulkanInfo.h
new file mode 100644
index 0000000..5d87fcd
--- /dev/null
+++ b/src/dawn/native/vulkan/VulkanInfo.h
@@ -0,0 +1,91 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANINFO_H_
+#define DAWNNATIVE_VULKAN_VULKANINFO_H_
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/vulkan/VulkanExtensions.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    class Adapter;
+    class Backend;
+    struct VulkanFunctions;
+
+    // Global information - gathered before the instance is created
+    struct VulkanGlobalKnobs {
+        VulkanLayerSet layers;
+        ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
+            layerExtensions;
+
+        // During information gathering `extensions` only contains the instance's extensions but
+        // during the instance creation logic it becomes the OR of the instance's extensions and
+        // the selected layers' extensions.
+        InstanceExtSet extensions;
+        bool HasExt(InstanceExt ext) const;
+    };
+
+    struct VulkanGlobalInfo : VulkanGlobalKnobs {
+        uint32_t apiVersion;
+    };
+
+    // Device information - gathered before the device is created.
+    struct VulkanDeviceKnobs {
+        VkPhysicalDeviceFeatures features;
+        VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
+        VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
+        VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
+        VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR
+            zeroInitializeWorkgroupMemoryFeatures;
+
+        bool HasExt(DeviceExt ext) const;
+        DeviceExtSet extensions;
+    };
+
+    struct VulkanDeviceInfo : VulkanDeviceKnobs {
+        VkPhysicalDeviceProperties properties;
+        VkPhysicalDeviceDriverProperties driverProperties;
+        VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
+
+        std::vector<VkQueueFamilyProperties> queueFamilies;
+
+        std::vector<VkMemoryType> memoryTypes;
+        std::vector<VkMemoryHeap> memoryHeaps;
+
+        std::vector<VkLayerProperties> layers;
+        // TODO(cwallez@chromium.org): layer instance extensions
+    };
+
+    struct VulkanSurfaceInfo {
+        VkSurfaceCapabilitiesKHR capabilities;
+        std::vector<VkSurfaceFormatKHR> formats;
+        std::vector<VkPresentModeKHR> presentModes;
+        std::vector<bool> supportedQueueFamilies;
+    };
+
+    ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
+    ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+        VkInstance instance,
+        const VulkanFunctions& vkFunctions);
+    ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+    ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+                                                       VkSurfaceKHR surface);
+}  // namespace dawn::native::vulkan
+
+#endif  // DAWNNATIVE_VULKAN_VULKANINFO_H_
diff --git a/src/dawn/native/vulkan/external_memory/MemoryService.h b/src/dawn/native/vulkan/external_memory/MemoryService.h
new file mode 100644
index 0000000..034bada
--- /dev/null
+++ b/src/dawn/native/vulkan/external_memory/MemoryService.h
@@ -0,0 +1,78 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+
+namespace dawn::native::vulkan {
+    class Device;
+    struct VulkanDeviceInfo;
+}  // namespace dawn::native::vulkan
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    struct MemoryImportParams {
+        VkDeviceSize allocationSize;
+        uint32_t memoryTypeIndex;
+    };
+
+    class Service {
+      public:
+        explicit Service(Device* device);
+        ~Service();
+
+        static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
+
+        // True if the device reports it supports importing external memory.
+        bool SupportsImportMemory(VkFormat format,
+                                  VkImageType type,
+                                  VkImageTiling tiling,
+                                  VkImageUsageFlags usage,
+                                  VkImageCreateFlags flags);
+
+        // True if the device reports it supports creating VkImages from external memory.
+        bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                 VkFormat format,
+                                 VkImageUsageFlags usage,
+                                 bool* supportsDisjoint);
+
+        // Returns the parameters required for importing memory
+        ResultOrError<MemoryImportParams> GetMemoryImportParams(
+            const ExternalImageDescriptor* descriptor,
+            VkImage image);
+
+        // Given an external handle pointing to memory, import it into a VkDeviceMemory
+        ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
+                                                   const MemoryImportParams& importParams,
+                                                   VkImage image);
+
+        // Create a VkImage for the given handle type
+        ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
+                                           const VkImageCreateInfo& baseCreateInfo);
+
+      private:
+        Device* mDevice = nullptr;
+
+        // True if early checks pass that determine if the service is supported
+        bool mSupported = false;
+    };
+
+}}  // namespace dawn::native::vulkan::external_memory
+
+#endif  // DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
new file mode 100644
index 0000000..675e782
--- /dev/null
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -0,0 +1,357 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    namespace {
+
+        bool GetFormatModifierProps(const VulkanFunctions& fn,
+                                    VkPhysicalDevice physicalDevice,
+                                    VkFormat format,
+                                    uint64_t modifier,
+                                    VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
+            std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
+            VkFormatProperties2 formatProps = {};
+            formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+            PNextChainBuilder formatPropsChain(&formatProps);
+
+            VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
+            formatModifierPropsList.drmFormatModifierCount = 0;
+            formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
+            formatPropsChain.Add(&formatModifierPropsList,
+                                 VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
+
+            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+
+            uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
+            formatModifierPropsVector.resize(modifierCount);
+            formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
+
+            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+            for (const auto& props : formatModifierPropsVector) {
+                if (props.drmFormatModifier == modifier) {
+                    *formatModifierProps = props;
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        // Some modifiers use multiple planes (for example, see the comment for
+        // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
+        ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
+                                                      VkPhysicalDevice physicalDevice,
+                                                      VkFormat format,
+                                                      uint64_t modifier) {
+            VkDrmFormatModifierPropertiesEXT props;
+            if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
+                return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
+            }
+            return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
+        }
+
+        bool IsMultiPlanarVkFormat(VkFormat format) {
+            switch (format) {
+                case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+                case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+                case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+                case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+                case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
+                case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+                case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+                case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+                case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+                case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
+                    return true;
+
+                default:
+                    return false;
+            }
+        }
+
+        bool SupportsDisjoint(const VulkanFunctions& fn,
+                              VkPhysicalDevice physicalDevice,
+                              VkFormat format,
+                              uint64_t modifier) {
+            if (IsMultiPlanarVkFormat(format)) {
+                VkDrmFormatModifierPropertiesEXT props;
+                return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
+                        (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
+            }
+            return false;
+        }
+
+    }  // anonymous namespace
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
+               deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {
+        return mSupported && (!IsMultiPlanarVkFormat(format) ||
+                              (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
+                               mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {
+        *supportsDisjoint = false;
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+        if (descriptor->GetType() != ExternalImageType::DmaBuf) {
+            return false;
+        }
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+
+        // Verify plane count for the modifier.
+        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+        uint32_t planeCount = 0;
+        if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
+                                                         dmaBufDescriptor->drmModifier),
+                                   &planeCount)) {
+            return false;
+        }
+        if (planeCount == 0) {
+            return false;
+        }
+        // Only support the NV12 multi-planar format for now.
+        if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
+            return false;
+        }
+        *supportsDisjoint =
+            SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
+
+        // Verify that the format modifier of the external memory and the requested Vulkan format
+        // are actually supported together in a dma-buf import.
+        VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
+        imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+        imageFormatInfo.format = format;
+        imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+        imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+        imageFormatInfo.usage = usage;
+        imageFormatInfo.flags = 0;
+        PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
+
+        VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
+        externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+        imageFormatInfoChain.Add(&externalImageFormatInfo,
+                                 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
+
+        VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
+        drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+        drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        imageFormatInfoChain.Add(
+            &drmModifierInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
+
+        // For mutable vkimage of multi-planar format, we also need to make sure the each
+        // plane's view format can be supported.
+        std::array<VkFormat, 2> viewFormats;
+        VkImageFormatListCreateInfo imageFormatListInfo = {};
+
+        if (planeCount > 1) {
+            ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
+            viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
+            imageFormatListInfo.viewFormatCount = 2;
+            imageFormatListInfo.pViewFormats = viewFormats.data();
+            imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            imageFormatInfoChain.Add(&imageFormatListInfo,
+                                     VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+        }
+
+        VkImageFormatProperties2 imageFormatProps = {};
+        imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+        PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
+
+        VkExternalImageFormatProperties externalImageFormatProps = {};
+        imageFormatPropsChain.Add(&externalImageFormatProps,
+                                  VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
+
+        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            physicalDevice, &imageFormatInfo, &imageFormatProps));
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+        VkExternalMemoryFeatureFlags featureFlags =
+            externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
+        return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                        "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
+
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+        VkDevice device = mDevice->GetVkDevice();
+
+        // Get the valid memory types for the VkImage.
+        VkMemoryRequirements memoryRequirements;
+        mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
+
+        VkMemoryFdPropertiesKHR fdProperties;
+        fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+        fdProperties.pNext = nullptr;
+
+        // Get the valid memory types that the external memory can be imported as.
+        mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+                                             dmaBufDescriptor->memoryFD, &fdProperties);
+        // Choose the best memory type that satisfies both the image's constraint and the
+        // import's constraint.
+        memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
+        int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
+            memoryRequirements, MemoryKind::Opaque);
+        DAWN_INVALID_IF(memoryTypeIndex == -1,
+                        "Unable to find an appropriate memory type for import.");
+
+        MemoryImportParams params = {memoryRequirements.size,
+                                     static_cast<uint32_t>(memoryTypeIndex)};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {
+        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+        VkMemoryAllocateInfo memoryAllocateInfo = {};
+        memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        memoryAllocateInfo.allocationSize = importParams.allocationSize;
+        memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+        PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
+
+        VkImportMemoryFdInfoKHR importMemoryFdInfo;
+        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+        importMemoryFdInfo.fd = handle;
+        memoryAllocateInfoChain.Add(&importMemoryFdInfo,
+                                    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
+
+        VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
+        memoryDedicatedAllocateInfo.image = image;
+        memoryDedicatedAllocateInfo.buffer = VkBuffer{};
+        memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
+                                    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(
+            CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
+                                                      nullptr, &*allocatedMemory),
+                           "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                        "ExternalImageDescriptor is not a dma-buf descriptor.");
+
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+        VkDevice device = mDevice->GetVkDevice();
+
+        uint32_t planeCount;
+        DAWN_TRY_ASSIGN(planeCount,
+                        GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
+                                              dmaBufDescriptor->drmModifier));
+
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+        createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+
+        PNextChainBuilder createInfoChain(&createInfo);
+
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
+        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+        createInfoChain.Add(&externalMemoryImageCreateInfo,
+                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+        // For single plane formats.
+        VkSubresourceLayout planeLayout = {};
+        planeLayout.offset = 0;
+        planeLayout.size = 0;  // VK_EXT_image_drm_format_modifier mandates size = 0.
+        planeLayout.rowPitch = dmaBufDescriptor->stride;
+        planeLayout.arrayPitch = 0;  // Not an array texture
+        planeLayout.depthPitch = 0;  // Not a depth texture
+
+        VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
+        explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+        explicitCreateInfo.drmFormatModifierPlaneCount = 1;
+        explicitCreateInfo.pPlaneLayouts = &planeLayout;
+
+        // For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
+        // due to the lack of knowledge about the required 'offset'. Alternatively
+        // VkImageDrmFormatModifierListCreateInfoEXT can be used to create image with the DRM format
+        // modifier.
+        VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
+        listCreateInfo.drmFormatModifierCount = 1;
+        listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
+
+        if (planeCount > 1) {
+            // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
+            // VkImageView can be plane's format which might differ from the image's format.
+            createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            createInfoChain.Add(&listCreateInfo,
+                                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
+        } else {
+            createInfoChain.Add(
+                &explicitCreateInfo,
+                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
+        }
+
+        // Create a new VkImage with tiling equal to the DRM format modifier.
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
+                                "CreateImage"));
+        return image;
+    }
+
+}}  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
new file mode 100644
index 0000000..7b3c239
--- /dev/null
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
@@ -0,0 +1,65 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device) : mDevice(device) {
+        DAWN_UNUSED(mDevice);
+        DAWN_UNUSED(mSupported);
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return false;
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {
+        return false;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {
+        *supportsDisjoint = false;
+        return false;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+}}  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
new file mode 100644
index 0000000..ad54617
--- /dev/null
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -0,0 +1,160 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+        externalFormatInfo.pNext = nullptr;
+        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+        VkPhysicalDeviceImageFormatInfo2 formatInfo;
+        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+        formatInfo.pNext = &externalFormatInfo;
+        formatInfo.format = format;
+        formatInfo.type = type;
+        formatInfo.tiling = tiling;
+        formatInfo.usage = usage;
+        formatInfo.flags = flags;
+
+        VkExternalImageFormatProperties externalFormatProperties;
+        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+        externalFormatProperties.pNext = nullptr;
+
+        VkImageFormatProperties2 formatProperties;
+        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+        formatProperties.pNext = &externalFormatProperties;
+
+        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
+
+        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+        VkFlags memoryFlags =
+            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {
+        *supportsDisjoint = false;
+        return mSupported;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
+                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+
+        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                     opaqueFDDescriptor->memoryTypeIndex};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {
+        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+        VkMemoryRequirements requirements;
+        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+        DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+                        "Requested allocation size (%u) is smaller than the image requires (%u).",
+                        importParams.allocationSize, requirements.size);
+
+        VkImportMemoryFdInfoKHR importMemoryFdInfo;
+        importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+        importMemoryFdInfo.pNext = nullptr;
+        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+        importMemoryFdInfo.fd = handle;
+
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &importMemoryFdInfo;
+        allocateInfo.allocationSize = importParams.allocationSize;
+        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                           nullptr, &*allocatedMemory),
+                                "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+        externalMemoryImageCreateInfo.pNext = nullptr;
+        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+        PNextChainBuilder createInfoChain(&createInfo);
+        createInfoChain.Add(&externalMemoryImageCreateInfo,
+                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+        ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+            "CreateImage"));
+        return image;
+    }
+
+}}  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
new file mode 100644
index 0000000..96c04a7
--- /dev/null
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -0,0 +1,162 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+        externalFormatInfo.pNext = nullptr;
+        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+        VkPhysicalDeviceImageFormatInfo2 formatInfo;
+        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+        formatInfo.pNext = &externalFormatInfo;
+        formatInfo.format = format;
+        formatInfo.type = type;
+        formatInfo.tiling = tiling;
+        formatInfo.usage = usage;
+        formatInfo.flags = flags;
+
+        VkExternalImageFormatProperties externalFormatProperties;
+        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+        externalFormatProperties.pNext = nullptr;
+
+        VkImageFormatProperties2 formatProperties;
+        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+        formatProperties.pNext = &externalFormatProperties;
+
+        VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+
+        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+        VkFlags memoryFlags =
+            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {
+        *supportsDisjoint = false;
+        return mSupported;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {
+        DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
+                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+
+        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                     opaqueFDDescriptor->memoryTypeIndex};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {
+        DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
+
+        VkMemoryRequirements requirements;
+        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+        DAWN_INVALID_IF(
+            requirements.size > importParams.allocationSize,
+            "Requested allocation size (%u) is smaller than the required image size (%u).",
+            importParams.allocationSize, requirements.size);
+
+        VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
+        importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
+        importMemoryHandleInfo.pNext = nullptr;
+        importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+        importMemoryHandleInfo.handle = handle;
+
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &importMemoryHandleInfo;
+        allocateInfo.allocationSize = importParams.allocationSize;
+        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                           nullptr, &*allocatedMemory),
+                                "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+        externalMemoryImageCreateInfo.pNext = nullptr;
+        externalMemoryImageCreateInfo.handleTypes =
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+        PNextChainBuilder createInfoChain(&createInfo);
+        createInfoChain.Add(&externalMemoryImageCreateInfo,
+                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+        ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+            "CreateImage"));
+        return image;
+    }
+
+}}  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h b/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
new file mode 100644
index 0000000..c1f69f1
--- /dev/null
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
@@ -0,0 +1,60 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+    class Device;
+}  // namespace dawn::native::vulkan
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+    class Service {
+      public:
+        explicit Service(Device* device);
+        ~Service();
+
+        static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                                 VkPhysicalDevice physicalDevice,
+                                 const VulkanFunctions& fn);
+
+        // True if the device reports it supports this feature
+        bool Supported();
+
+        // Given an external handle, import it into a VkSemaphore
+        ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
+
+        // Create a VkSemaphore that is exportable into an external handle later
+        ResultOrError<VkSemaphore> CreateExportableSemaphore();
+
+        // Export a VkSemaphore into an external handle
+        ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
+
+      private:
+        Device* mDevice = nullptr;
+
+        // True if early checks pass that determine if the service is supported
+        bool mSupported = false;
+    };
+
+}}  // namespace dawn::native::vulkan::external_semaphore
+
+#endif  // DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
new file mode 100644
index 0000000..7e2b619
--- /dev/null
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
@@ -0,0 +1,137 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+static constexpr VkExternalSemaphoreHandleTypeFlagBits kHandleType =
+#if defined(DAWN_USE_SYNC_FDS)
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+#else
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+#endif  // defined(DAWN_USE_SYNC_FDS)
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+    Service::Service(Device* device)
+        : mDevice(device),
+          mSupported(CheckSupport(device->GetDeviceInfo(),
+                                  ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+                                  device->fn)) {
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                               VkPhysicalDevice physicalDevice,
+                               const VulkanFunctions& fn) {
+        if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+        semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+        semaphoreInfo.pNext = nullptr;
+        semaphoreInfo.handleType = kHandleType;
+
+        VkExternalSemaphorePropertiesKHR semaphoreProperties;
+        semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+        semaphoreProperties.pNext = nullptr;
+
+        fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+                                                        &semaphoreProperties);
+
+        VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+                                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+        return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+    }
+
+    bool Service::Supported() {
+        return mSupported;
+    }
+
+    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+        DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
+
+        VkSemaphore semaphore = VK_NULL_HANDLE;
+        VkSemaphoreCreateInfo info;
+        info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        info.pNext = nullptr;
+        info.flags = 0;
+
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+            "vkCreateSemaphore"));
+
+        VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
+        importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+        importSemaphoreFdInfo.pNext = nullptr;
+        importSemaphoreFdInfo.semaphore = semaphore;
+        importSemaphoreFdInfo.flags = 0;
+        importSemaphoreFdInfo.handleType = kHandleType;
+        importSemaphoreFdInfo.fd = handle;
+
+        MaybeError status = CheckVkSuccess(
+            mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
+            "vkImportSemaphoreFdKHR");
+
+        if (status.IsError()) {
+            mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+            DAWN_TRY(std::move(status));
+        }
+
+        return semaphore;
+    }
+
+    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+        VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+        exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+        exportSemaphoreInfo.pNext = nullptr;
+        exportSemaphoreInfo.handleTypes = kHandleType;
+
+        VkSemaphoreCreateInfo semaphoreCreateInfo;
+        semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+        semaphoreCreateInfo.flags = 0;
+
+        VkSemaphore signalSemaphore;
+        DAWN_TRY(
+            CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+                                                       nullptr, &*signalSemaphore),
+                           "vkCreateSemaphore"));
+        return signalSemaphore;
+    }
+
+    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+        VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
+        semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+        semaphoreGetFdInfo.pNext = nullptr;
+        semaphoreGetFdInfo.semaphore = semaphore;
+        semaphoreGetFdInfo.handleType = kHandleType;
+
+        int fd = -1;
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
+            "vkGetSemaphoreFdKHR"));
+
+        ASSERT(fd >= 0);
+        return fd;
+    }
+
+}}  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
new file mode 100644
index 0000000..3146e37
--- /dev/null
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
@@ -0,0 +1,50 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+    Service::Service(Device* device) : mDevice(device) {
+        DAWN_UNUSED(mDevice);
+        DAWN_UNUSED(mSupported);
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                               VkPhysicalDevice physicalDevice,
+                               const VulkanFunctions& fn) {
+        return false;
+    }
+
+    bool Service::Supported() {
+        return false;
+    }
+
+    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+    }
+
+    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+    }
+
+    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+    }
+
+}}  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
new file mode 100644
index 0000000..03fa79c
--- /dev/null
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -0,0 +1,135 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+    Service::Service(Device* device)
+        : mDevice(device),
+          mSupported(CheckSupport(device->GetDeviceInfo(),
+                                  ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+                                  device->fn)) {
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                               VkPhysicalDevice physicalDevice,
+                               const VulkanFunctions& fn) {
+        if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+        semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+        semaphoreInfo.pNext = nullptr;
+        semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+        VkExternalSemaphorePropertiesKHR semaphoreProperties;
+        semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+        semaphoreProperties.pNext = nullptr;
+
+        fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+                                                        &semaphoreProperties);
+
+        VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+                                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+        return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+    }
+
+    bool Service::Supported() {
+        return mSupported;
+    }
+
+    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+        DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID,
+                        "Importing a semaphore with an invalid handle.");
+
+        VkSemaphore semaphore = VK_NULL_HANDLE;
+        VkSemaphoreCreateInfo info;
+        info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        info.pNext = nullptr;
+        info.flags = 0;
+
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+            "vkCreateSemaphore"));
+
+        VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
+        importSemaphoreHandleInfo.sType =
+            VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
+        importSemaphoreHandleInfo.pNext = nullptr;
+        importSemaphoreHandleInfo.semaphore = semaphore;
+        importSemaphoreHandleInfo.flags = 0;
+        importSemaphoreHandleInfo.handleType =
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+        importSemaphoreHandleInfo.handle = handle;
+
+        MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
+                                               mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
+                                           "vkImportSemaphoreZirconHandleFUCHSIA");
+
+        if (status.IsError()) {
+            mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+            DAWN_TRY(std::move(status));
+        }
+
+        return semaphore;
+    }
+
+    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+        VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+        exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+        exportSemaphoreInfo.pNext = nullptr;
+        exportSemaphoreInfo.handleTypes =
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+        VkSemaphoreCreateInfo semaphoreCreateInfo;
+        semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+        semaphoreCreateInfo.flags = 0;
+
+        VkSemaphore signalSemaphore;
+        DAWN_TRY(
+            CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+                                                       nullptr, &*signalSemaphore),
+                           "vkCreateSemaphore"));
+        return signalSemaphore;
+    }
+
+    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+        VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
+        semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
+        semaphoreGetHandleInfo.pNext = nullptr;
+        semaphoreGetHandleInfo.semaphore = semaphore;
+        semaphoreGetHandleInfo.handleType =
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+        zx_handle_t handle = ZX_HANDLE_INVALID;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
+                                    mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
+                                "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
+
+        ASSERT(handle != ZX_HANDLE_INVALID);
+        return handle;
+    }
+
+}}  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/webgpu_absl_format.cpp b/src/dawn/native/webgpu_absl_format.cpp
new file mode 100644
index 0000000..e42ec89
--- /dev/null
+++ b/src/dawn/native/webgpu_absl_format.cpp
@@ -0,0 +1,441 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/webgpu_absl_format.h"
+
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/native/Subresource.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/VertexFormat.h"
+
+namespace dawn::native {
+
+    //
+    // Structs
+    //
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Color* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append(absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b,
+                                  value->a));
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Extent3D* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]",
+                                  value->width, value->height, value->depthOrArrayLayers));
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Origin3D* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const BindingInfo& value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        static const auto* const fmt =
+            new absl::ParsedFormat<'u', 's', 's', 's'>("{ binding: %u, visibility: %s, %s: %s }");
+        switch (value.bindingType) {
+            case BindingInfoType::Buffer:
+                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
+                                          value.visibility, value.bindingType, value.buffer));
+                break;
+            case BindingInfoType::Sampler:
+                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
+                                          value.visibility, value.bindingType, value.sampler));
+                break;
+            case BindingInfoType::Texture:
+                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
+                                          value.visibility, value.bindingType, value.texture));
+                break;
+            case BindingInfoType::StorageTexture:
+                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
+                                          value.visibility, value.bindingType,
+                                          value.storageTexture));
+                break;
+            case BindingInfoType::ExternalTexture:
+                break;
+        }
+        return {true};
+    }
+
+    //
+    // Objects
+    //
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const DeviceBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append("[Device");
+        const std::string& label = value->GetLabel();
+        if (!label.empty()) {
+            s->Append(absl::StrFormat(" \"%s\"", label));
+        }
+        s->Append("]");
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const ApiObjectBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append("[");
+        if (value->IsError()) {
+            s->Append("Invalid ");
+        }
+        s->Append(ObjectTypeAsString(value->GetType()));
+        const std::string& label = value->GetLabel();
+        if (!label.empty()) {
+            s->Append(absl::StrFormat(" \"%s\"", label));
+        }
+        s->Append("]");
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const TextureViewBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+        s->Append("[");
+        if (value->IsError()) {
+            s->Append("Invalid ");
+        }
+        s->Append(ObjectTypeAsString(value->GetType()));
+        const std::string& label = value->GetLabel();
+        if (!label.empty()) {
+            s->Append(absl::StrFormat(" \"%s\"", label));
+        }
+        const std::string& textureLabel = value->GetTexture()->GetLabel();
+        if (!textureLabel.empty()) {
+            s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
+        }
+        s->Append("]");
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const AttachmentState* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == nullptr) {
+            s->Append("[null]");
+            return {true};
+        }
+
+        s->Append("{ colorFormats: [");
+
+        ColorAttachmentIndex nextColorIndex(uint8_t(0));
+
+        bool needsComma = false;
+        for (ColorAttachmentIndex i : IterateBitSet(value->GetColorAttachmentsMask())) {
+            while (nextColorIndex < i) {
+                s->Append(absl::StrFormat("%s, ", wgpu::TextureFormat::Undefined));
+                nextColorIndex++;
+                needsComma = false;
+            }
+
+            if (needsComma) {
+                s->Append(", ");
+            }
+
+            s->Append(absl::StrFormat("%s", value->GetColorAttachmentFormat(i)));
+
+            nextColorIndex++;
+            needsComma = true;
+        }
+
+        s->Append("], ");
+
+        if (value->HasDepthStencilAttachment()) {
+            s->Append(absl::StrFormat("depthStencilFormat: %s, ", value->GetDepthStencilFormat()));
+        }
+
+        s->Append(absl::StrFormat("sampleCount: %u }", value->GetSampleCount()));
+
+        return {true};
+    }
+
+    //
+    // Enums
+    //
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+    AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
+        if (value == Aspect::None) {
+            s->Append("None");
+            return {true};
+        }
+
+        bool first = true;
+
+        if (value & Aspect::Color) {
+            first = false;
+            s->Append("Color");
+            value &= ~Aspect::Color;
+        }
+
+        if (value & Aspect::Depth) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("Depth");
+            value &= ~Aspect::Depth;
+        }
+
+        if (value & Aspect::Stencil) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("Stencil");
+            value &= ~Aspect::Stencil;
+        }
+
+        // Output any remaining flags as a hex value
+        if (static_cast<bool>(value)) {
+            if (!first) {
+                s->Append("|");
+            }
+            s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
+        }
+
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        SampleTypeBit value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        if (value == SampleTypeBit::None) {
+            s->Append("None");
+            return {true};
+        }
+
+        bool first = true;
+
+        if (value & SampleTypeBit::Float) {
+            first = false;
+            s->Append("Float");
+            value &= ~SampleTypeBit::Float;
+        }
+
+        if (value & SampleTypeBit::UnfilterableFloat) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("UnfilterableFloat");
+            value &= ~SampleTypeBit::UnfilterableFloat;
+        }
+
+        if (value & SampleTypeBit::Depth) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("Depth");
+            value &= ~SampleTypeBit::Depth;
+        }
+
+        if (value & SampleTypeBit::Sint) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("Sint");
+            value &= ~SampleTypeBit::Sint;
+        }
+
+        if (value & SampleTypeBit::Uint) {
+            if (!first) {
+                s->Append("|");
+            }
+            first = false;
+            s->Append("Uint");
+            value &= ~SampleTypeBit::Uint;
+        }
+
+        // Output any remaining flags as a hex value
+        if (static_cast<bool>(value)) {
+            if (!first) {
+                s->Append("|");
+            }
+            s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
+        }
+
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        BindingInfoType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case BindingInfoType::Buffer:
+                s->Append("buffer");
+                break;
+            case BindingInfoType::Sampler:
+                s->Append("sampler");
+                break;
+            case BindingInfoType::Texture:
+                s->Append("texture");
+                break;
+            case BindingInfoType::StorageTexture:
+                s->Append("storageTexture");
+                break;
+            case BindingInfoType::ExternalTexture:
+                s->Append("externalTexture");
+                break;
+        }
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        SingleShaderStage value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case SingleShaderStage::Compute:
+                s->Append("Compute");
+                break;
+            case SingleShaderStage::Vertex:
+                s->Append("Vertex");
+                break;
+            case SingleShaderStage::Fragment:
+                s->Append("Fragment");
+                break;
+        }
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        VertexFormatBaseType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case VertexFormatBaseType::Float:
+                s->Append("Float");
+                break;
+            case VertexFormatBaseType::Uint:
+                s->Append("Uint");
+                break;
+            case VertexFormatBaseType::Sint:
+                s->Append("Sint");
+                break;
+        }
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterStageComponentType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case InterStageComponentType::Float:
+                s->Append("Float");
+                break;
+            case InterStageComponentType::Uint:
+                s->Append("Uint");
+                break;
+            case InterStageComponentType::Sint:
+                s->Append("Sint");
+                break;
+        }
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterpolationType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case InterpolationType::Perspective:
+                s->Append("Perspective");
+                break;
+            case InterpolationType::Linear:
+                s->Append("Linear");
+                break;
+            case InterpolationType::Flat:
+                s->Append("Flat");
+                break;
+        }
+        return {true};
+    }
+
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterpolationSampling value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s) {
+        switch (value) {
+            case InterpolationSampling::None:
+                s->Append("None");
+                break;
+            case InterpolationSampling::Center:
+                s->Append("Center");
+                break;
+            case InterpolationSampling::Centroid:
+                s->Append("Centroid");
+                break;
+            case InterpolationSampling::Sample:
+                s->Append("Sample");
+                break;
+        }
+        return {true};
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/native/webgpu_absl_format.h b/src/dawn/native/webgpu_absl_format.h
new file mode 100644
index 0000000..ca7ddb5
--- /dev/null
+++ b/src/dawn/native/webgpu_absl_format.h
@@ -0,0 +1,134 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_WEBGPUABSLFORMAT_H_
+#define DAWNNATIVE_WEBGPUABSLFORMAT_H_
+
+#include "absl/strings/str_format.h"
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/webgpu_absl_format_autogen.h"
+
+namespace dawn::native {
+
+    //
+    // Structs
+    //
+
+    struct Color;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Color* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    struct Extent3D;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Extent3D* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    struct Origin3D;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const Origin3D* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    struct BindingInfo;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const BindingInfo& value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    //
+    // Objects
+    //
+
+    class DeviceBase;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const DeviceBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    class ApiObjectBase;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const ApiObjectBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    // Special case for TextureViews, since frequently the texture will be the
+    // thing that's labeled.
+    class TextureViewBase;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const TextureViewBase* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    class AttachmentState;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        const AttachmentState* value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    //
+    // Enums
+    //
+
+    enum class Aspect : uint8_t;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+    AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
+
+    enum class BindingInfoType;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        BindingInfoType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class SampleTypeBit : uint8_t;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        SampleTypeBit value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class SingleShaderStage;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        SingleShaderStage value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class VertexFormatBaseType;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        VertexFormatBaseType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class InterStageComponentType;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterStageComponentType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class InterpolationType;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterpolationType value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+    enum class InterpolationSampling;
+    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+        InterpolationSampling value,
+        const absl::FormatConversionSpec& spec,
+        absl::FormatSink* s);
+
+}  // namespace dawn::native
+
+#endif  // DAWNNATIVE_WEBGPUABSLFORMAT_H_
diff --git a/src/dawn/node/CMakeLists.txt b/src/dawn/node/CMakeLists.txt
new file mode 100644
index 0000000..0cdeaa2
--- /dev/null
+++ b/src/dawn/node/CMakeLists.txt
@@ -0,0 +1,124 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(GEN_DIR         "${CMAKE_CURRENT_BINARY_DIR}/gen")
+set(IDLGEN_TOOL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tools/src/cmd/idlgen")
+
+# idlgen() is a function that uses the tools/cmd/idlgen/main.go tool to generate
+# code from an IDL file and template.
+# idlgen() accepts the following named arguments:
+#   TEMPLATE <path> - (required) the path to the root .tmpl file. If the
+#                     template imports other templates, then these should be
+#                     added to the DEPENDS argument list.
+#   OUTPUT <path>   - (required) the output file path.
+#   IDLS <paths>    - (at least one required) the list of input WebIDL files.
+#   DEPENDS <paths> - an optional list of additional file dependencies used.
+function(idlgen)
+    cmake_parse_arguments(IDLGEN
+        ""                # options
+        "TEMPLATE;OUTPUT" # one_value_keywords
+        "IDLS;DEPENDS"    # multi_value_keywords
+        ${ARGN})
+
+    if(NOT IDLGEN_TEMPLATE)
+        message(FATAL_ERROR "idlgen() missing TEMPLATE argument")
+    endif()
+    if(NOT IDLGEN_OUTPUT)
+        message(FATAL_ERROR "idlgen() missing OUTPUT argument")
+    endif()
+    if(NOT IDLGEN_IDLS)
+        message(FATAL_ERROR "idlgen() missing IDLS argument(s)")
+    endif()
+    add_custom_command(
+        COMMAND ${GO_EXECUTABLE} "run" "main.go"
+                "--template" "${IDLGEN_TEMPLATE}"
+                "--output"   "${IDLGEN_OUTPUT}"
+                ${IDLGEN_IDLS}
+        DEPENDS "${IDLGEN_TOOL_DIR}/main.go"
+                ${IDLGEN_TEMPLATE}
+                ${IDLGEN_DEPENDS}
+                ${IDLGEN_IDLS}
+        OUTPUT  ${IDLGEN_OUTPUT}
+        WORKING_DIRECTORY ${IDLGEN_TOOL_DIR}
+        COMMENT "Generating ${IDLGEN_OUTPUT}"
+    )
+endfunction()
+
+add_subdirectory(binding)
+add_subdirectory(interop)
+
+add_library(dawn_node SHARED
+    "Module.cpp"
+)
+set_target_properties(dawn_node PROPERTIES
+    PREFIX ""
+    OUTPUT_NAME "dawn"
+    SUFFIX ".node"
+    RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
+    LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
+    CXX_STANDARD 17
+)
+target_link_libraries(dawn_node dawn_node_binding dawn_node_interop dawn_native dawncpp dawn_proc)
+target_include_directories(dawn_node PRIVATE
+    "${CMAKE_SOURCE_DIR}"
+    "${NODE_API_HEADERS_DIR}/include"
+    "${NODE_ADDON_API_DIR}"
+    "${GEN_DIR}"
+)
+
+# To reduce the build dependencies for compiling the dawn.node targets, we do
+# not use cmake-js for building, but instead just depend on node_api_headers.
+# As the name suggests, node_api_headers contains just the *headers* of Napi,
+# and does not provide a library to link against.
+# Fortunately node_api_headers provides a list of Napi symbols exported by Node,
+# which we can use to either produce weak-symbol stubs (unix) or generate a .lib
+# (Windows).
+
+# Parse the Napi symbols from ${NODE_API_HEADERS_DIR}/symbols.js
+file(READ "${NODE_API_HEADERS_DIR}/symbols.js" NAPI_SYMBOLS_JS_CONTENT)
+string(REGEX MATCHALL "napi_[a-z0-9_]*" NAPI_SYMBOLS "${NAPI_SYMBOLS_JS_CONTENT}")
+
+if (WIN32)
+    # Generate the NapiSymbols.def file from the Napi symbol list
+    set(NAPI_SYMBOLS_DEF "${GEN_DIR}/NapiSymbols.def")
+    list(TRANSFORM NAPI_SYMBOLS PREPEND "  ")
+    list(TRANSFORM NAPI_SYMBOLS APPEND "\n")
+    string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
+    string(PREPEND NAPI_SYMBOLS "LIBRARY node.exe\nEXPORTS\n")
+    file(GENERATE OUTPUT "${NAPI_SYMBOLS_DEF}" CONTENT "${NAPI_SYMBOLS}")
+    # Generate the NapiSymbols.lib from the NapiSymbols.def file
+    set(NAPI_SYMBOLS_LIB "${GEN_DIR}/NapiSymbols.lib")
+    # Resolve path to lib.exe
+    get_filename_component(VS_BIN_DIR "${CMAKE_LINKER}" DIRECTORY)
+    set(LIB_EXE "${VS_BIN_DIR}/lib.exe")
+    add_custom_command(
+        COMMAND "${LIB_EXE}"
+                "/DEF:${NAPI_SYMBOLS_DEF}"
+                "/OUT:${NAPI_SYMBOLS_LIB}"
+        DEPENDS "${NAPI_SYMBOLS_DEF}"
+        OUTPUT  "${NAPI_SYMBOLS_LIB}"
+        COMMENT "Generating ${NAPI_SYMBOLS_LIB}"
+    )
+    add_custom_target(napi-symbols DEPENDS "${NAPI_SYMBOLS_LIB}")
+    add_dependencies(dawn_node napi-symbols)
+    target_link_libraries(dawn_node "${NAPI_SYMBOLS_LIB}")
+else()
+    # Generate the NapiSymbols.h file from the Napi symbol list
+    set(NAPI_SYMBOLS_H "${GEN_DIR}/NapiSymbols.h")
+    list(TRANSFORM NAPI_SYMBOLS PREPEND "NAPI_SYMBOL(")
+    list(TRANSFORM NAPI_SYMBOLS APPEND ")\n")
+    string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
+    file(GENERATE OUTPUT "${NAPI_SYMBOLS_H}" CONTENT "${NAPI_SYMBOLS}")
+    target_sources(dawn_node PRIVATE "NapiSymbols.cpp")
+endif()
diff --git a/src/dawn/node/Module.cpp b/src/dawn/node/Module.cpp
new file mode 100644
index 0000000..f87631b
--- /dev/null
+++ b/src/dawn/node/Module.cpp
@@ -0,0 +1,65 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_proc.h"
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/binding/GPU.h"
+
+namespace {
+    Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
+        const auto& env = info.Env();
+
+        std::tuple<std::vector<std::string>> args;
+        auto res = wgpu::interop::FromJS(info, args);
+        if (res != wgpu::interop::Success) {
+            Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
+            return env.Undefined();
+        }
+
+        wgpu::binding::Flags flags;
+
+        // Parse out the key=value flags out of the input args array
+        for (const auto& arg : std::get<0>(args)) {
+            const size_t sep_index = arg.find("=");
+            if (sep_index == std::string::npos) {
+                Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
+                    .ThrowAsJavaScriptException();
+                return env.Undefined();
+            }
+            flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
+        }
+
+        // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
+        return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+    }
+
+}  // namespace
+
+// Initialize() initializes the Dawn node module, registering all the WebGPU
+// types into the global object, and adding the 'create' function on the exported
+// object.
+Napi::Object Initialize(Napi::Env env, Napi::Object exports) {
+    // Begin by setting the Dawn procedure function pointers.
+    dawnProcSetProcs(&dawn::native::GetProcs());
+
+    // Register all the interop types
+    wgpu::interop::Initialize(env);
+
+    // Export function that creates and returns the wgpu::interop::GPU interface
+    exports.Set(Napi::String::New(env, "create"), Napi::Function::New<CreateGPU>(env));
+
+    return exports;
+}
+
+NODE_API_MODULE(addon, Initialize)
diff --git a/src/dawn/node/NapiSymbols.cpp b/src/dawn/node/NapiSymbols.cpp
new file mode 100644
index 0000000..a557eca
--- /dev/null
+++ b/src/dawn/node/NapiSymbols.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/utils/Debug.h"
+
+// To reduce the build dependencies for compiling the dawn.node targets, we do
+// not use cmake-js for building, but instead just depend on node_api_headers.
+// As the name suggests, node_api_headers contains just the *headers* of Napi,
+// and does not provide a library to link against.
+// Fortunately node_api_headers provides a list of Napi symbols exported by Node,
+// which we can use to produce weak-symbol stubs.
+
+#ifdef _WIN32
+#    error "NapiSymbols.cpp is not used on Windows"
+#endif
+
+#define NAPI_SYMBOL(NAME)                                                              \
+    __attribute__((weak)) void NAME() {                                                \
+        UNREACHABLE(                                                                   \
+            "#NAME is a weak stub, and should have been runtime replaced by the node " \
+            "implementation");                                                         \
+    }
+
+extern "C" {
+// List of Napi symbols generated from the node_api_headers/symbols.js file
+#include "NapiSymbols.h"
+}
diff --git a/src/dawn/node/OWNERS b/src/dawn/node/OWNERS
new file mode 100644
index 0000000..d19725d
--- /dev/null
+++ b/src/dawn/node/OWNERS
@@ -0,0 +1,2 @@
+amaiorano@google.com
+bclayton@google.com
diff --git a/src/dawn/node/README.md b/src/dawn/node/README.md
new file mode 100644
index 0000000..1d31764
--- /dev/null
+++ b/src/dawn/node/README.md
@@ -0,0 +1,150 @@
+# Dawn bindings for NodeJS
+
+Note: This code is currently WIP. There are a number of [known issues](#known-issues).
+
+## Building
+
+## System requirements
+
+- [CMake 3.10](https://cmake.org/download/) or greater
+- [Go 1.13](https://golang.org/dl/) or greater
+
+## Install `depot_tools`
+
+Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
+
+[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+### Fetch dependencies
+
+First, the steps are similar to [`doc/building.md`](../../docs/dawn/building.md), but instead of the `Get the code` step, run:
+
+```sh
+# Clone the repo as "dawn"
+git clone https://dawn.googlesource.com/dawn dawn && cd dawn
+
+# Bootstrap the NodeJS binding gclient configuration
+cp scripts/standalone-with-node.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+Optionally, on Linux install X11-xcb support:
+
+```sh
+sudo apt-get install libx11-xcb-dev
+```
+
+If you don't have those supporting libraries, then you must use the
+`-DDAWN_USE_X11=OFF` flag on Cmake.
+
+### Build
+
+Currently, the node bindings can only be built with CMake:
+
+```sh
+mkdir <build-output-path>
+cd <build-output-path>
+cmake <dawn-root-path> -GNinja -DDAWN_BUILD_NODE_BINDINGS=1 -DDAWN_ENABLE_PIC=1 -DDAWN_USE_X11=OFF
+ninja dawn.node
+```
+
+### Running WebGPU CTS
+
+1. [Build](#build) the `dawn.node` NodeJS module.
+2. Checkout the [WebGPU CTS repo](https://github.com/gpuweb/cts)
+  - Run `npm install` from inside the CTS directory to install its dependencies
+
+```sh
+./src/dawn/node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> [WebGPU CTS query]
+```
+
+If this fails with the error message `TypeError: expander is not a function or its return value is not iterable`, try appending `--build=false` to the start of the `run-cts` command line flags.
+
+To test against SwiftShader instead of the default Vulkan device, prefix `./src/dawn/node/tools/run-cts` with `VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json` and append `--flag=dawn-backend=vulkan` to the start of run-cts command line flags. For example:
+
+```sh
+VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json ./src/dawn/node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> --flag=dawn-backend=vulkan [WebGPU CTS query]
+```
+
+The `--flag` parameter must be passed in multiple times, once for each flag begin set. Here are some common arguments:
+* `dawn-backend=<null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles>`
+* `dlldir=<path>` - used to add an extra DLL search path on Windows, primarily to load the right d3dcompiler_47.dll
+* `enable-dawn-features=<features>` - enable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Toggles.cpp), e.g. `dump_shaders`
+* `disable-dawn-features=<features>` - disable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Toggles.cpp)
+
+For example, on Windows, to use the d3dcompiler_47.dll from a Chromium checkout, and to dump shader output, we could run the following using Git Bash:
+
+```sh
+./src/dawn/node/tools/run-cts --verbose --dawn-node=/c/src/dawn/build/Debug/dawn.node --cts=/c/src/gpuweb-cts --flag=dlldir="C:\src\chromium\src\out\Release" --flag=enable-dawn-features=dump_shaders 'webgpu:shader,execution,builtin,abs:integer_builtin_functions,abs_unsigned:storageClass="storage";storageMode="read_write";containerType="vector";isAtomic=false;baseType="u32";type="vec2%3Cu32%3E"'
+```
+
+Note that we pass `--verbose` above so that all test output, including the dumped shader, is written to stdout.
+
+### Testing against a `run-cts` expectations file
+
+You can write out an expectations file with the `--output <path>` command line flag, and then compare this snapshot to a later run with `--expect <path>`.
+
+## Debugging TypeScript with VSCode
+
+Open or create the `.vscode/launch.json` file, and add:
+
+```json
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Debug with node",
+            "type": "node",
+            "request": "launch",
+            "outFiles": [ "./**/*.js" ],
+            "args": [
+                "-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
+                "--", "dummy-arg",
+                "--gpu-provider",
+                "[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
+                "[test-query]", // REPLACE: [test-query]
+            ],
+            "cwd": "[cts-root]" // REPLACE: [cts-root]
+        }
+    ]
+}
+```
+
+Replacing:
+
+- `[cts-root]` with the path to the CTS root directory. If you are editing the `.vscode/launch.json` from within the CTS workspace, then you may use `${workspaceFolder}`.
+- `[path-to-dawn.node]` this the path to the `dawn.node` module built by the [build step](#Build)
+- `test-query` with the test query string. Example: `webgpu:shader,execution,builtin,abs:*`
+
+## Debugging dawn-node issues in gdb/lldb
+
+It is possible to run the CTS with dawn-node directly similarly to Debugging TypeScript with VSCode:
+
+```sh
+cd <cts-root-dir>
+[path-to-node] \ # for example <dawn-root-dir>/third_party/node/<arch>/node
+    -e "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');" \
+    -- \
+    dummy-arg \
+    --gpu-provider [path to dawn.node] \
+    [test-query]
+```
+
+This command is then possible to run in your debugger of choice.
+
+## Known issues
+
+- Many WebGPU CTS tests are currently known to fail
+- Dawn uses special token values for some parameters / fields. These are currently passed straight through to dawn from the JavaScript. discussions: [1](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn/node/binding/Converter.cpp#167), [2](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn/node/binding/Converter.cpp#928), [3](https://dawn-review.googlesource.com/c/dawn/+/64909/4/src/dawn/node/binding/GPUTexture.cpp#42)
+- Backend validation is currently always set to 'full' to aid in debugging. This can be extremely slow. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn/node/binding/GPU.cpp#25)
+- Attempting to call `new T` in JavaScript, where `T` is an IDL interface type, should result in a TypeError "Illegal constructor". [discussion](https://dawn-review.googlesource.com/c/dawn/+/64902/9/src/dawn/node/interop/WebGPU.cpp.tmpl#293)
+- `GPUDevice` currently maintains a list of "lost promises". This should return the same promise. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64906/6/src/dawn/node/binding/GPUDevice.h#107)
+
+## Remaining work
+
+- Investigate CTS failures that are not expected to fail.
+- Generated includes live in `src/` for `dawn/node`, but outside for Dawn. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64903/9/src/dawn/node/interop/CMakeLists.txt#56)
+- Hook up to presubmit bots (CQ / Kokoro)
+- `binding::GPU` will require significant rework [once Dawn implements the device / adapter creation path properly](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn/node/binding/GPU.cpp).
diff --git a/src/dawn/node/binding/AsyncRunner.cpp b/src/dawn/node/binding/AsyncRunner.cpp
new file mode 100644
index 0000000..a978fa8
--- /dev/null
+++ b/src/dawn/node/binding/AsyncRunner.cpp
@@ -0,0 +1,60 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/AsyncRunner.h"
+
+#include <cassert>
+#include <limits>
+
+namespace wgpu::binding {
+
+    AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
+    }
+
+    void AsyncRunner::Begin() {
+        assert(count_ != std::numeric_limits<decltype(count_)>::max());
+        if (count_++ == 0) {
+            QueueTick();
+        }
+    }
+
+    void AsyncRunner::End() {
+        assert(count_ > 0);
+        count_--;
+    }
+
+    void AsyncRunner::QueueTick() {
+        // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
+        // called.
+        if (tick_queued_) {
+            return;
+        }
+        tick_queued_ = true;
+        env_.Global()
+            .Get("setImmediate")
+            .As<Napi::Function>()
+            .Call({
+                // TODO(crbug.com/dawn/1127): Create once, reuse.
+                Napi::Function::New(env_,
+                                    [this](const Napi::CallbackInfo&) {
+                                        tick_queued_ = false;
+                                        if (count_ > 0) {
+                                            device_.Tick();
+                                            QueueTick();
+                                        }
+                                    }),
+            });
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/AsyncRunner.h b/src/dawn/node/binding/AsyncRunner.h
new file mode 100644
index 0000000..9ed6e5c
--- /dev/null
+++ b/src/dawn/node/binding/AsyncRunner.h
@@ -0,0 +1,77 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+#define DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+
+#include <stdint.h>
+#include <memory>
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+
+namespace wgpu::binding {
+
+    // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
+    // tasks in flight.
+    class AsyncRunner {
+      public:
+        AsyncRunner(Napi::Env env, wgpu::Device device);
+
+        // Begin() should be called when a new asynchronous task is started.
+        // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
+        // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
+        // thread is idle. This will be repeatedly called until the number of executing asynchronous
+        // tasks reaches 0 again.
+        void Begin();
+
+        // End() should be called once the asynchronous task has finished.
+        // Every call to Begin() should eventually result in a call to End().
+        void End();
+
+      private:
+        void QueueTick();
+        Napi::Env env_;
+        wgpu::Device const device_;
+        uint64_t count_ = 0;
+        bool tick_queued_ = false;
+    };
+
+    // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
+    // AsyncRunner::End() on destruction.
+    class AsyncTask {
+      public:
+        inline AsyncTask(AsyncTask&&) = default;
+
+        // Constructor.
+        // Calls AsyncRunner::Begin()
+        inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
+            runner_->Begin();
+        };
+
+        // Destructor.
+        // Calls AsyncRunner::End()
+        inline ~AsyncTask() {
+            runner_->End();
+        }
+
+      private:
+        AsyncTask(const AsyncTask&) = delete;
+        AsyncTask& operator=(const AsyncTask&) = delete;
+        std::shared_ptr<AsyncRunner> runner_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_ASYNC_RUNNER_H_
diff --git a/src/dawn/node/binding/CMakeLists.txt b/src/dawn/node/binding/CMakeLists.txt
new file mode 100644
index 0000000..1113a5d
--- /dev/null
+++ b/src/dawn/node/binding/CMakeLists.txt
@@ -0,0 +1,82 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_node_binding STATIC
+    "AsyncRunner.cpp"
+    "AsyncRunner.h"
+    "Converter.cpp"
+    "Converter.h"
+    "Errors.cpp"
+    "Errors.h"
+    "Flags.cpp"
+    "Flags.h"
+    "GPU.cpp"
+    "GPU.h"
+    "GPUAdapter.cpp"
+    "GPUAdapter.h"
+    "GPUBindGroup.cpp"
+    "GPUBindGroup.h"
+    "GPUBindGroupLayout.cpp"
+    "GPUBindGroupLayout.h"
+    "GPUBuffer.cpp"
+    "GPUBuffer.h"
+    "GPUCommandBuffer.cpp"
+    "GPUCommandBuffer.h"
+    "GPUCommandEncoder.cpp"
+    "GPUCommandEncoder.h"
+    "GPUComputePassEncoder.cpp"
+    "GPUComputePassEncoder.h"
+    "GPUComputePipeline.cpp"
+    "GPUComputePipeline.h"
+    "GPUDevice.cpp"
+    "GPUDevice.h"
+    "GPUPipelineLayout.cpp"
+    "GPUPipelineLayout.h"
+    "GPUQuerySet.cpp"
+    "GPUQuerySet.h"
+    "GPUQueue.cpp"
+    "GPUQueue.h"
+    "GPURenderBundle.cpp"
+    "GPURenderBundle.h"
+    "GPURenderBundleEncoder.cpp"
+    "GPURenderBundleEncoder.h"
+    "GPURenderPassEncoder.cpp"
+    "GPURenderPassEncoder.h"
+    "GPURenderPipeline.cpp"
+    "GPURenderPipeline.h"
+    "GPUSampler.cpp"
+    "GPUSampler.h"
+    "GPUShaderModule.cpp"
+    "GPUShaderModule.h"
+    "GPUSupportedLimits.cpp"
+    "GPUSupportedLimits.h"
+    "GPUTexture.cpp"
+    "GPUTexture.h"
+    "GPUTextureView.cpp"
+    "GPUTextureView.h"
+)
+
+target_include_directories(dawn_node_binding
+    PRIVATE
+        "${CMAKE_SOURCE_DIR}"
+        "${NODE_API_HEADERS_DIR}/include"
+        "${NODE_ADDON_API_DIR}"
+        "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_binding
+    PRIVATE
+        dawncpp
+        dawn_node_interop
+)
diff --git a/src/dawn/node/binding/Converter.cpp b/src/dawn/node/binding/Converter.cpp
new file mode 100644
index 0000000..9c116f2
--- /dev/null
+++ b/src/dawn/node/binding/Converter.cpp
@@ -0,0 +1,1221 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Converter.h"
+
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+#include "src/dawn/node/binding/GPUSampler.h"
+#include "src/dawn/node/binding/GPUShaderModule.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/binding/GPUTextureView.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    Converter::~Converter() {
+        for (auto& free : free_) {
+            free();
+        }
+    }
+
+    bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
+        out = {};
+        if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
+            out.depthOrArrayLayers = dict->depthOrArrayLayers;
+            out.width = dict->width;
+            out.height = dict->height;
+            return true;
+        }
+        if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
+            switch (vec->size()) {
+                default:
+                case 3:
+                    out.depthOrArrayLayers = (*vec)[2];
+                case 2:  // fallthrough
+                    out.height = (*vec)[1];
+                case 1:  // fallthrough
+                    out.width = (*vec)[0];
+                    return true;
+                case 0:
+                    break;
+            }
+        }
+        Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
+        out = {};
+        out.x = in.x;
+        out.y = in.y;
+        out.z = in.z;
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
+        out = {};
+        if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
+            out.r = dict->r;
+            out.g = dict->g;
+            out.b = dict->b;
+            out.a = dict->a;
+            return true;
+        }
+        if (auto* vec = std::get_if<std::vector<double>>(&in)) {
+            switch (vec->size()) {
+                default:
+                case 4:
+                    out.a = (*vec)[3];
+                case 3:  // fallthrough
+                    out.b = (*vec)[2];
+                case 2:  // fallthrough
+                    out.g = (*vec)[1];
+                case 1:  // fallthrough
+                    out.r = (*vec)[0];
+                    return true;
+                case 0:
+                    break;
+            }
+        }
+        Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::Origin3D& out,
+                            const std::vector<interop::GPUIntegerCoordinate>& in) {
+        out = {};
+        switch (in.size()) {
+            default:
+            case 3:
+                out.z = in[2];
+            case 2:  // fallthrough
+                out.y = in[1];
+            case 1:  // fallthrough
+                out.x = in[0];
+            case 0:
+                break;
+        }
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
+        out = wgpu::TextureAspect::All;
+        switch (in) {
+            case interop::GPUTextureAspect::kAll:
+                out = wgpu::TextureAspect::All;
+                return true;
+            case interop::GPUTextureAspect::kStencilOnly:
+                out = wgpu::TextureAspect::StencilOnly;
+                return true;
+            case interop::GPUTextureAspect::kDepthOnly:
+                out = wgpu::TextureAspect::DepthOnly;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
+        out = {};
+        return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
+               Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+    }
+
+    bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
+        out = {};
+        out.buffer = *in.buffer.As<GPUBuffer>();
+        return Convert(out.layout.offset, in.offset) &&
+               Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
+               Convert(out.layout.rowsPerImage, in.rowsPerImage);
+    }
+
+    bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
+        out = {};
+        if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
+            std::visit(
+                [&](auto&& v) {
+                    auto arr = v.ArrayBuffer();
+                    out.data = arr.Data();
+                    out.size = arr.ByteLength();
+                    out.bytesPerElement = v.ElementSize();
+                },
+                *view);
+            return true;
+        }
+        if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
+            out.data = arr->Data();
+            out.size = arr->ByteLength();
+            out.bytesPerElement = 1;
+            return true;
+        }
+        Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
+        out = {};
+        return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
+               Convert(out.rowsPerImage, in.rowsPerImage);
+    }
+
+    bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
+        out = wgpu::TextureFormat::Undefined;
+        switch (in) {
+            case interop::GPUTextureFormat::kR8Unorm:
+                out = wgpu::TextureFormat::R8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kR8Snorm:
+                out = wgpu::TextureFormat::R8Snorm;
+                return true;
+            case interop::GPUTextureFormat::kR8Uint:
+                out = wgpu::TextureFormat::R8Uint;
+                return true;
+            case interop::GPUTextureFormat::kR8Sint:
+                out = wgpu::TextureFormat::R8Sint;
+                return true;
+            case interop::GPUTextureFormat::kR16Uint:
+                out = wgpu::TextureFormat::R16Uint;
+                return true;
+            case interop::GPUTextureFormat::kR16Sint:
+                out = wgpu::TextureFormat::R16Sint;
+                return true;
+            case interop::GPUTextureFormat::kR16Float:
+                out = wgpu::TextureFormat::R16Float;
+                return true;
+            case interop::GPUTextureFormat::kRg8Unorm:
+                out = wgpu::TextureFormat::RG8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kRg8Snorm:
+                out = wgpu::TextureFormat::RG8Snorm;
+                return true;
+            case interop::GPUTextureFormat::kRg8Uint:
+                out = wgpu::TextureFormat::RG8Uint;
+                return true;
+            case interop::GPUTextureFormat::kRg8Sint:
+                out = wgpu::TextureFormat::RG8Sint;
+                return true;
+            case interop::GPUTextureFormat::kR32Uint:
+                out = wgpu::TextureFormat::R32Uint;
+                return true;
+            case interop::GPUTextureFormat::kR32Sint:
+                out = wgpu::TextureFormat::R32Sint;
+                return true;
+            case interop::GPUTextureFormat::kR32Float:
+                out = wgpu::TextureFormat::R32Float;
+                return true;
+            case interop::GPUTextureFormat::kRg16Uint:
+                out = wgpu::TextureFormat::RG16Uint;
+                return true;
+            case interop::GPUTextureFormat::kRg16Sint:
+                out = wgpu::TextureFormat::RG16Sint;
+                return true;
+            case interop::GPUTextureFormat::kRg16Float:
+                out = wgpu::TextureFormat::RG16Float;
+                return true;
+            case interop::GPUTextureFormat::kRgba8Unorm:
+                out = wgpu::TextureFormat::RGBA8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kRgba8UnormSrgb:
+                out = wgpu::TextureFormat::RGBA8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kRgba8Snorm:
+                out = wgpu::TextureFormat::RGBA8Snorm;
+                return true;
+            case interop::GPUTextureFormat::kRgba8Uint:
+                out = wgpu::TextureFormat::RGBA8Uint;
+                return true;
+            case interop::GPUTextureFormat::kRgba8Sint:
+                out = wgpu::TextureFormat::RGBA8Sint;
+                return true;
+            case interop::GPUTextureFormat::kBgra8Unorm:
+                out = wgpu::TextureFormat::BGRA8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kBgra8UnormSrgb:
+                out = wgpu::TextureFormat::BGRA8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kRgb9E5Ufloat:
+                out = wgpu::TextureFormat::RGB9E5Ufloat;
+                return true;
+            case interop::GPUTextureFormat::kRgb10A2Unorm:
+                out = wgpu::TextureFormat::RGB10A2Unorm;
+                return true;
+            case interop::GPUTextureFormat::kRg11B10Ufloat:
+                out = wgpu::TextureFormat::RG11B10Ufloat;
+                return true;
+            case interop::GPUTextureFormat::kRg32Uint:
+                out = wgpu::TextureFormat::RG32Uint;
+                return true;
+            case interop::GPUTextureFormat::kRg32Sint:
+                out = wgpu::TextureFormat::RG32Sint;
+                return true;
+            case interop::GPUTextureFormat::kRg32Float:
+                out = wgpu::TextureFormat::RG32Float;
+                return true;
+            case interop::GPUTextureFormat::kRgba16Uint:
+                out = wgpu::TextureFormat::RGBA16Uint;
+                return true;
+            case interop::GPUTextureFormat::kRgba16Sint:
+                out = wgpu::TextureFormat::RGBA16Sint;
+                return true;
+            case interop::GPUTextureFormat::kRgba16Float:
+                out = wgpu::TextureFormat::RGBA16Float;
+                return true;
+            case interop::GPUTextureFormat::kRgba32Uint:
+                out = wgpu::TextureFormat::RGBA32Uint;
+                return true;
+            case interop::GPUTextureFormat::kRgba32Sint:
+                out = wgpu::TextureFormat::RGBA32Sint;
+                return true;
+            case interop::GPUTextureFormat::kRgba32Float:
+                out = wgpu::TextureFormat::RGBA32Float;
+                return true;
+            case interop::GPUTextureFormat::kStencil8:
+                out = wgpu::TextureFormat::Stencil8;
+                return true;
+            case interop::GPUTextureFormat::kDepth16Unorm:
+                out = wgpu::TextureFormat::Depth16Unorm;
+                return true;
+            case interop::GPUTextureFormat::kDepth24Plus:
+                out = wgpu::TextureFormat::Depth24Plus;
+                return true;
+            case interop::GPUTextureFormat::kDepth24PlusStencil8:
+                out = wgpu::TextureFormat::Depth24PlusStencil8;
+                return true;
+            case interop::GPUTextureFormat::kDepth32Float:
+                out = wgpu::TextureFormat::Depth32Float;
+                return true;
+            case interop::GPUTextureFormat::kDepth24UnormStencil8:
+                out = wgpu::TextureFormat::Depth24UnormStencil8;
+                return true;
+            case interop::GPUTextureFormat::kDepth32FloatStencil8:
+                out = wgpu::TextureFormat::Depth32FloatStencil8;
+                return true;
+            case interop::GPUTextureFormat::kBc1RgbaUnorm:
+                out = wgpu::TextureFormat::BC1RGBAUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
+                out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kBc2RgbaUnorm:
+                out = wgpu::TextureFormat::BC2RGBAUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
+                out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kBc3RgbaUnorm:
+                out = wgpu::TextureFormat::BC3RGBAUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
+                out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kBc4RUnorm:
+                out = wgpu::TextureFormat::BC4RUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc4RSnorm:
+                out = wgpu::TextureFormat::BC4RSnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc5RgUnorm:
+                out = wgpu::TextureFormat::BC5RGUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc5RgSnorm:
+                out = wgpu::TextureFormat::BC5RGSnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc6HRgbUfloat:
+                out = wgpu::TextureFormat::BC6HRGBUfloat;
+                return true;
+            case interop::GPUTextureFormat::kBc6HRgbFloat:
+                out = wgpu::TextureFormat::BC6HRGBFloat;
+                return true;
+            case interop::GPUTextureFormat::kBc7RgbaUnorm:
+                out = wgpu::TextureFormat::BC7RGBAUnorm;
+                return true;
+            case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
+                out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
+                out = wgpu::TextureFormat::ETC2RGB8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
+                out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
+                out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
+                out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
+                out = wgpu::TextureFormat::ETC2RGBA8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
+                out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kEacR11Unorm:
+                out = wgpu::TextureFormat::EACR11Unorm;
+                return true;
+            case interop::GPUTextureFormat::kEacR11Snorm:
+                out = wgpu::TextureFormat::EACR11Snorm;
+                return true;
+            case interop::GPUTextureFormat::kEacRg11Unorm:
+                out = wgpu::TextureFormat::EACRG11Unorm;
+                return true;
+            case interop::GPUTextureFormat::kEacRg11Snorm:
+                out = wgpu::TextureFormat::EACRG11Snorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc4X4Unorm:
+                out = wgpu::TextureFormat::ASTC4x4Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
+                out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc5X4Unorm:
+                out = wgpu::TextureFormat::ASTC5x4Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
+                out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc5X5Unorm:
+                out = wgpu::TextureFormat::ASTC5x5Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
+                out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc6X5Unorm:
+                out = wgpu::TextureFormat::ASTC6x5Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
+                out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc6X6Unorm:
+                out = wgpu::TextureFormat::ASTC6x6Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
+                out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X5Unorm:
+                out = wgpu::TextureFormat::ASTC8x5Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
+                out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X6Unorm:
+                out = wgpu::TextureFormat::ASTC8x6Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
+                out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X8Unorm:
+                out = wgpu::TextureFormat::ASTC8x8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
+                out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X5Unorm:
+                out = wgpu::TextureFormat::ASTC10x5Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
+                out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X6Unorm:
+                out = wgpu::TextureFormat::ASTC10x6Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
+                out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X8Unorm:
+                out = wgpu::TextureFormat::ASTC10x8Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
+                out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X10Unorm:
+                out = wgpu::TextureFormat::ASTC10x10Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
+                out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc12X10Unorm:
+                out = wgpu::TextureFormat::ASTC12x10Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
+                out = wgpu::TextureFormat::ASTC12x10UnormSrgb;
+                return true;
+            case interop::GPUTextureFormat::kAstc12X12Unorm:
+                out = wgpu::TextureFormat::ASTC12x12Unorm;
+                return true;
+            case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
+                out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
+        out = static_cast<wgpu::TextureUsage>(in.value);
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
+        out = static_cast<wgpu::ColorWriteMask>(in.value);
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
+        out = static_cast<wgpu::BufferUsage>(in.value);
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
+        out = static_cast<wgpu::MapMode>(in.value);
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
+        out = static_cast<wgpu::ShaderStage>(in.value);
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
+        out = wgpu::TextureDimension::e1D;
+        switch (in) {
+            case interop::GPUTextureDimension::k1D:
+                out = wgpu::TextureDimension::e1D;
+                return true;
+            case interop::GPUTextureDimension::k2D:
+                out = wgpu::TextureDimension::e2D;
+                return true;
+            case interop::GPUTextureDimension::k3D:
+                out = wgpu::TextureDimension::e3D;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::TextureViewDimension& out,
+                            const interop::GPUTextureViewDimension& in) {
+        out = wgpu::TextureViewDimension::Undefined;
+        switch (in) {
+            case interop::GPUTextureViewDimension::k1D:
+                out = wgpu::TextureViewDimension::e1D;
+                return true;
+            case interop::GPUTextureViewDimension::k2D:
+                out = wgpu::TextureViewDimension::e2D;
+                return true;
+            case interop::GPUTextureViewDimension::k2DArray:
+                out = wgpu::TextureViewDimension::e2DArray;
+                return true;
+            case interop::GPUTextureViewDimension::kCube:
+                out = wgpu::TextureViewDimension::Cube;
+                return true;
+            case interop::GPUTextureViewDimension::kCubeArray:
+                out = wgpu::TextureViewDimension::CubeArray;
+                return true;
+            case interop::GPUTextureViewDimension::k3D:
+                out = wgpu::TextureViewDimension::e3D;
+                return true;
+            default:
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
+                            const interop::GPUProgrammableStage& in) {
+        out = {};
+        out.module = *in.module.As<GPUShaderModule>();
+
+        // Replace nulls in the entryPoint name with another character that's disallowed in
+        // identifiers. This is so that using "main\0" doesn't match an entryPoint named "main".
+        // TODO(dawn:1345): Replace with a way to size strings explicitly in webgpu.h
+        char* entryPoint = Allocate<char>(in.entryPoint.size() + 1);
+        entryPoint[in.entryPoint.size()] = '\0';
+        for (size_t i = 0; i < in.entryPoint.size(); i++) {
+            if (in.entryPoint[i] == '\0') {
+                entryPoint[i] = '#';
+            } else {
+                entryPoint[i] = in.entryPoint[i];
+            }
+        }
+        out.entryPoint = entryPoint;
+
+        return Convert(out.constants, out.constantCount, in.constants);
+    }
+
+    bool Converter::Convert(wgpu::ConstantEntry& out,
+                            const std::string& in_name,
+                            wgpu::interop::GPUPipelineConstantValue in_value) {
+        out.key = in_name.c_str();
+        out.value = in_value;
+        return true;
+    }
+
+    bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
+        out = {};
+        return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
+               Convert(out.srcFactor, in.srcFactor);
+    }
+
+    bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
+        out = wgpu::BlendFactor::Zero;
+        switch (in) {
+            case interop::GPUBlendFactor::kZero:
+                out = wgpu::BlendFactor::Zero;
+                return true;
+            case interop::GPUBlendFactor::kOne:
+                out = wgpu::BlendFactor::One;
+                return true;
+            case interop::GPUBlendFactor::kSrc:
+                out = wgpu::BlendFactor::Src;
+                return true;
+            case interop::GPUBlendFactor::kOneMinusSrc:
+                out = wgpu::BlendFactor::OneMinusSrc;
+                return true;
+            case interop::GPUBlendFactor::kSrcAlpha:
+                out = wgpu::BlendFactor::SrcAlpha;
+                return true;
+            case interop::GPUBlendFactor::kOneMinusSrcAlpha:
+                out = wgpu::BlendFactor::OneMinusSrcAlpha;
+                return true;
+            case interop::GPUBlendFactor::kDst:
+                out = wgpu::BlendFactor::Dst;
+                return true;
+            case interop::GPUBlendFactor::kOneMinusDst:
+                out = wgpu::BlendFactor::OneMinusDst;
+                return true;
+            case interop::GPUBlendFactor::kDstAlpha:
+                out = wgpu::BlendFactor::DstAlpha;
+                return true;
+            case interop::GPUBlendFactor::kOneMinusDstAlpha:
+                out = wgpu::BlendFactor::OneMinusDstAlpha;
+                return true;
+            case interop::GPUBlendFactor::kSrcAlphaSaturated:
+                out = wgpu::BlendFactor::SrcAlphaSaturated;
+                return true;
+            case interop::GPUBlendFactor::kConstant:
+                out = wgpu::BlendFactor::Constant;
+                return true;
+            case interop::GPUBlendFactor::kOneMinusConstant:
+                out = wgpu::BlendFactor::OneMinusConstant;
+                return true;
+            default:
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
+        out = wgpu::BlendOperation::Add;
+        switch (in) {
+            case interop::GPUBlendOperation::kAdd:
+                out = wgpu::BlendOperation::Add;
+                return true;
+            case interop::GPUBlendOperation::kSubtract:
+                out = wgpu::BlendOperation::Subtract;
+                return true;
+            case interop::GPUBlendOperation::kReverseSubtract:
+                out = wgpu::BlendOperation::ReverseSubtract;
+                return true;
+            case interop::GPUBlendOperation::kMin:
+                out = wgpu::BlendOperation::Min;
+                return true;
+            case interop::GPUBlendOperation::kMax:
+                out = wgpu::BlendOperation::Max;
+                return true;
+            default:
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
+        out = {};
+        return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
+    }
+
+    bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
+        out = {};
+        return Convert(out.topology, in.topology) &&
+               Convert(out.stripIndexFormat, in.stripIndexFormat) &&
+               Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
+    }
+
+    bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
+        out = {};
+        return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
+               Convert(out.writeMask, in.writeMask);
+    }
+
+    bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
+        out = {};
+        return Convert(out.format, in.format) &&
+               Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
+               Convert(out.depthCompare, in.depthCompare) &&
+               Convert(out.stencilFront, in.stencilFront) &&
+               Convert(out.stencilBack, in.stencilBack) &&
+               Convert(out.stencilReadMask, in.stencilReadMask) &&
+               Convert(out.stencilWriteMask, in.stencilWriteMask) &&
+               Convert(out.depthBias, in.depthBias) &&
+               Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
+               Convert(out.depthBiasClamp, in.depthBiasClamp);
+    }
+
+    bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
+        out = {};
+        return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
+               Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
+    }
+
+    bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
+        out = {};
+        return Convert(out.targets, out.targetCount, in.targets) &&  //
+               Convert(out.module, in.module) &&                     //
+               Convert(out.entryPoint, in.entryPoint) &&             //
+               Convert(out.constants, out.constantCount, in.constants);
+    }
+
+    bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
+        out = wgpu::PrimitiveTopology::LineList;
+        switch (in) {
+            case interop::GPUPrimitiveTopology::kPointList:
+                out = wgpu::PrimitiveTopology::PointList;
+                return true;
+            case interop::GPUPrimitiveTopology::kLineList:
+                out = wgpu::PrimitiveTopology::LineList;
+                return true;
+            case interop::GPUPrimitiveTopology::kLineStrip:
+                out = wgpu::PrimitiveTopology::LineStrip;
+                return true;
+            case interop::GPUPrimitiveTopology::kTriangleList:
+                out = wgpu::PrimitiveTopology::TriangleList;
+                return true;
+            case interop::GPUPrimitiveTopology::kTriangleStrip:
+                out = wgpu::PrimitiveTopology::TriangleStrip;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
+        out = wgpu::FrontFace::CW;
+        switch (in) {
+            case interop::GPUFrontFace::kCw:
+                out = wgpu::FrontFace::CW;
+                return true;
+            case interop::GPUFrontFace::kCcw:
+                out = wgpu::FrontFace::CCW;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
+        out = wgpu::CullMode::None;
+        switch (in) {
+            case interop::GPUCullMode::kNone:
+                out = wgpu::CullMode::None;
+                return true;
+            case interop::GPUCullMode::kFront:
+                out = wgpu::CullMode::Front;
+                return true;
+            case interop::GPUCullMode::kBack:
+                out = wgpu::CullMode::Back;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
+        out = wgpu::CompareFunction::Undefined;
+        switch (in) {
+            case interop::GPUCompareFunction::kNever:
+                out = wgpu::CompareFunction::Never;
+                return true;
+            case interop::GPUCompareFunction::kLess:
+                out = wgpu::CompareFunction::Less;
+                return true;
+            case interop::GPUCompareFunction::kLessEqual:
+                out = wgpu::CompareFunction::LessEqual;
+                return true;
+            case interop::GPUCompareFunction::kGreater:
+                out = wgpu::CompareFunction::Greater;
+                return true;
+            case interop::GPUCompareFunction::kGreaterEqual:
+                out = wgpu::CompareFunction::GreaterEqual;
+                return true;
+            case interop::GPUCompareFunction::kEqual:
+                out = wgpu::CompareFunction::Equal;
+                return true;
+            case interop::GPUCompareFunction::kNotEqual:
+                out = wgpu::CompareFunction::NotEqual;
+                return true;
+            case interop::GPUCompareFunction::kAlways:
+                out = wgpu::CompareFunction::Always;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
+        out = wgpu::IndexFormat::Undefined;
+        switch (in) {
+            case interop::GPUIndexFormat::kUint16:
+                out = wgpu::IndexFormat::Uint16;
+                return true;
+            case interop::GPUIndexFormat::kUint32:
+                out = wgpu::IndexFormat::Uint32;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
+        out = wgpu::StencilOperation::Zero;
+        switch (in) {
+            case interop::GPUStencilOperation::kKeep:
+                out = wgpu::StencilOperation::Keep;
+                return true;
+            case interop::GPUStencilOperation::kZero:
+                out = wgpu::StencilOperation::Zero;
+                return true;
+            case interop::GPUStencilOperation::kReplace:
+                out = wgpu::StencilOperation::Replace;
+                return true;
+            case interop::GPUStencilOperation::kInvert:
+                out = wgpu::StencilOperation::Invert;
+                return true;
+            case interop::GPUStencilOperation::kIncrementClamp:
+                out = wgpu::StencilOperation::IncrementClamp;
+                return true;
+            case interop::GPUStencilOperation::kDecrementClamp:
+                out = wgpu::StencilOperation::DecrementClamp;
+                return true;
+            case interop::GPUStencilOperation::kIncrementWrap:
+                out = wgpu::StencilOperation::IncrementWrap;
+                return true;
+            case interop::GPUStencilOperation::kDecrementWrap:
+                out = wgpu::StencilOperation::DecrementWrap;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
+        return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
+               Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+    }
+
+    bool Converter::Convert(wgpu::VertexBufferLayout& out,
+                            const interop::GPUVertexBufferLayout& in) {
+        out = {};
+        return Convert(out.attributes, out.attributeCount, in.attributes) &&
+               Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+    }
+
+    bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
+        out = {};
+        return Convert(out.module, in.module) &&
+               Convert(out.buffers, out.bufferCount, in.buffers) &&
+               Convert(out.entryPoint, in.entryPoint) &&
+               Convert(out.constants, out.constantCount, in.constants);
+    }
+
+    bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
+        out = wgpu::VertexStepMode::Instance;
+        switch (in) {
+            case interop::GPUVertexStepMode::kInstance:
+                out = wgpu::VertexStepMode::Instance;
+                return true;
+            case interop::GPUVertexStepMode::kVertex:
+                out = wgpu::VertexStepMode::Vertex;
+                return true;
+            default:
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
+        return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
+               Convert(out.shaderLocation, in.shaderLocation);
+    }
+
+    bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
+        out = wgpu::VertexFormat::Undefined;
+        switch (in) {
+            case interop::GPUVertexFormat::kUint8X2:
+                out = wgpu::VertexFormat::Uint8x2;
+                return true;
+            case interop::GPUVertexFormat::kUint8X4:
+                out = wgpu::VertexFormat::Uint8x4;
+                return true;
+            case interop::GPUVertexFormat::kSint8X2:
+                out = wgpu::VertexFormat::Sint8x2;
+                return true;
+            case interop::GPUVertexFormat::kSint8X4:
+                out = wgpu::VertexFormat::Sint8x4;
+                return true;
+            case interop::GPUVertexFormat::kUnorm8X2:
+                out = wgpu::VertexFormat::Unorm8x2;
+                return true;
+            case interop::GPUVertexFormat::kUnorm8X4:
+                out = wgpu::VertexFormat::Unorm8x4;
+                return true;
+            case interop::GPUVertexFormat::kSnorm8X2:
+                out = wgpu::VertexFormat::Snorm8x2;
+                return true;
+            case interop::GPUVertexFormat::kSnorm8X4:
+                out = wgpu::VertexFormat::Snorm8x4;
+                return true;
+            case interop::GPUVertexFormat::kUint16X2:
+                out = wgpu::VertexFormat::Uint16x2;
+                return true;
+            case interop::GPUVertexFormat::kUint16X4:
+                out = wgpu::VertexFormat::Uint16x4;
+                return true;
+            case interop::GPUVertexFormat::kSint16X2:
+                out = wgpu::VertexFormat::Sint16x2;
+                return true;
+            case interop::GPUVertexFormat::kSint16X4:
+                out = wgpu::VertexFormat::Sint16x4;
+                return true;
+            case interop::GPUVertexFormat::kUnorm16X2:
+                out = wgpu::VertexFormat::Unorm16x2;
+                return true;
+            case interop::GPUVertexFormat::kUnorm16X4:
+                out = wgpu::VertexFormat::Unorm16x4;
+                return true;
+            case interop::GPUVertexFormat::kSnorm16X2:
+                out = wgpu::VertexFormat::Snorm16x2;
+                return true;
+            case interop::GPUVertexFormat::kSnorm16X4:
+                out = wgpu::VertexFormat::Snorm16x4;
+                return true;
+            case interop::GPUVertexFormat::kFloat16X2:
+                out = wgpu::VertexFormat::Float16x2;
+                return true;
+            case interop::GPUVertexFormat::kFloat16X4:
+                out = wgpu::VertexFormat::Float16x4;
+                return true;
+            case interop::GPUVertexFormat::kFloat32:
+                out = wgpu::VertexFormat::Float32;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X2:
+                out = wgpu::VertexFormat::Float32x2;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X3:
+                out = wgpu::VertexFormat::Float32x3;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X4:
+                out = wgpu::VertexFormat::Float32x4;
+                return true;
+            case interop::GPUVertexFormat::kUint32:
+                out = wgpu::VertexFormat::Uint32;
+                return true;
+            case interop::GPUVertexFormat::kUint32X2:
+                out = wgpu::VertexFormat::Uint32x2;
+                return true;
+            case interop::GPUVertexFormat::kUint32X3:
+                out = wgpu::VertexFormat::Uint32x3;
+                return true;
+            case interop::GPUVertexFormat::kUint32X4:
+                out = wgpu::VertexFormat::Uint32x4;
+                return true;
+            case interop::GPUVertexFormat::kSint32:
+                out = wgpu::VertexFormat::Sint32;
+                return true;
+            case interop::GPUVertexFormat::kSint32X2:
+                out = wgpu::VertexFormat::Sint32x2;
+                return true;
+            case interop::GPUVertexFormat::kSint32X3:
+                out = wgpu::VertexFormat::Sint32x3;
+                return true;
+            case interop::GPUVertexFormat::kSint32X4:
+                out = wgpu::VertexFormat::Sint32x4;
+                return true;
+            default:
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
+                            const interop::GPURenderPassColorAttachment& in) {
+        out = {};
+        return Convert(out.view, in.view) &&                    //
+               Convert(out.resolveTarget, in.resolveTarget) &&  //
+               Convert(out.clearValue, in.clearValue) &&        //
+               Convert(out.loadOp, in.loadOp) &&                //
+               Convert(out.storeOp, in.storeOp);
+    }
+
+    bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                            const interop::GPURenderPassDepthStencilAttachment& in) {
+        out = {};
+        return Convert(out.view, in.view) &&                            //
+               Convert(out.depthClearValue, in.depthClearValue) &&      //
+               Convert(out.depthLoadOp, in.depthLoadOp) &&              //
+               Convert(out.depthStoreOp, in.depthStoreOp) &&            //
+               Convert(out.depthReadOnly, in.depthReadOnly) &&          //
+               Convert(out.stencilClearValue, in.stencilClearValue) &&  //
+               Convert(out.stencilLoadOp, in.stencilLoadOp) &&          //
+               Convert(out.stencilStoreOp, in.stencilStoreOp) &&        //
+               Convert(out.stencilReadOnly, in.stencilReadOnly);
+    }
+
+    bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
+        out = wgpu::LoadOp::Clear;
+        switch (in) {
+            case interop::GPULoadOp::kLoad:
+                out = wgpu::LoadOp::Load;
+                return true;
+            case interop::GPULoadOp::kClear:
+                out = wgpu::LoadOp::Clear;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
+        out = wgpu::StoreOp::Store;
+        switch (in) {
+            case interop::GPUStoreOp::kStore:
+                out = wgpu::StoreOp::Store;
+                return true;
+            case interop::GPUStoreOp::kDiscard:
+                out = wgpu::StoreOp::Discard;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
+        out = {};
+        if (!Convert(out.binding, in.binding)) {
+            return false;
+        }
+
+        if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
+            return Convert(out.sampler, *res);
+        }
+        if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
+            return Convert(out.textureView, *res);
+        }
+        if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
+            auto buffer = res->buffer.As<GPUBuffer>();
+            out.size = wgpu::kWholeSize;
+            if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
+                return false;
+            }
+            out.buffer = *buffer;
+            return true;
+        }
+        if (auto* res =
+                std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
+            // TODO(crbug.com/dawn/1129): External textures
+            UNIMPLEMENTED();
+        }
+        Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
+                            const interop::GPUBindGroupLayoutEntry& in) {
+        // TODO(crbug.com/dawn/1129): External textures
+        return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
+               Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
+               Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+    }
+
+    bool Converter::Convert(wgpu::BufferBindingLayout& out,
+                            const interop::GPUBufferBindingLayout& in) {
+        return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
+               Convert(out.minBindingSize, in.minBindingSize);
+    }
+
+    bool Converter::Convert(wgpu::SamplerBindingLayout& out,
+                            const interop::GPUSamplerBindingLayout& in) {
+        return Convert(out.type, in.type);
+    }
+
+    bool Converter::Convert(wgpu::TextureBindingLayout& out,
+                            const interop::GPUTextureBindingLayout& in) {
+        return Convert(out.sampleType, in.sampleType) &&
+               Convert(out.viewDimension, in.viewDimension) &&
+               Convert(out.multisampled, in.multisampled);
+    }
+
+    bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
+                            const interop::GPUStorageTextureBindingLayout& in) {
+        return Convert(out.access, in.access) && Convert(out.format, in.format) &&
+               Convert(out.viewDimension, in.viewDimension);
+    }
+
+    bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
+        out = wgpu::BufferBindingType::Undefined;
+        switch (in) {
+            case interop::GPUBufferBindingType::kUniform:
+                out = wgpu::BufferBindingType::Uniform;
+                return true;
+            case interop::GPUBufferBindingType::kStorage:
+                out = wgpu::BufferBindingType::Storage;
+                return true;
+            case interop::GPUBufferBindingType::kReadOnlyStorage:
+                out = wgpu::BufferBindingType::ReadOnlyStorage;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUBufferBindingType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
+        out = wgpu::TextureSampleType::Undefined;
+        switch (in) {
+            case interop::GPUTextureSampleType::kFloat:
+                out = wgpu::TextureSampleType::Float;
+                return true;
+            case interop::GPUTextureSampleType::kUnfilterableFloat:
+                out = wgpu::TextureSampleType::UnfilterableFloat;
+                return true;
+            case interop::GPUTextureSampleType::kDepth:
+                out = wgpu::TextureSampleType::Depth;
+                return true;
+            case interop::GPUTextureSampleType::kSint:
+                out = wgpu::TextureSampleType::Sint;
+                return true;
+            case interop::GPUTextureSampleType::kUint:
+                out = wgpu::TextureSampleType::Uint;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureSampleType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::SamplerBindingType& out,
+                            const interop::GPUSamplerBindingType& in) {
+        out = wgpu::SamplerBindingType::Undefined;
+        switch (in) {
+            case interop::GPUSamplerBindingType::kFiltering:
+                out = wgpu::SamplerBindingType::Filtering;
+                return true;
+            case interop::GPUSamplerBindingType::kNonFiltering:
+                out = wgpu::SamplerBindingType::NonFiltering;
+                return true;
+            case interop::GPUSamplerBindingType::kComparison:
+                out = wgpu::SamplerBindingType::Comparison;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StorageTextureAccess& out,
+                            const interop::GPUStorageTextureAccess& in) {
+        out = wgpu::StorageTextureAccess::Undefined;
+        switch (in) {
+            case interop::GPUStorageTextureAccess::kWriteOnly:
+                out = wgpu::StorageTextureAccess::WriteOnly;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
+        out = wgpu::QueryType::Occlusion;
+        switch (in) {
+            case interop::GPUQueryType::kOcclusion:
+                out = wgpu::QueryType::Occlusion;
+                return true;
+            case interop::GPUQueryType::kTimestamp:
+                out = wgpu::QueryType::Timestamp;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
+        out = wgpu::AddressMode::Repeat;
+        switch (in) {
+            case interop::GPUAddressMode::kClampToEdge:
+                out = wgpu::AddressMode::ClampToEdge;
+                return true;
+            case interop::GPUAddressMode::kRepeat:
+                out = wgpu::AddressMode::Repeat;
+                return true;
+            case interop::GPUAddressMode::kMirrorRepeat:
+                out = wgpu::AddressMode::MirrorRepeat;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
+        out = wgpu::FilterMode::Nearest;
+        switch (in) {
+            case interop::GPUFilterMode::kNearest:
+                out = wgpu::FilterMode::Nearest;
+                return true;
+            case interop::GPUFilterMode::kLinear:
+                out = wgpu::FilterMode::Linear;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
+                            const interop::GPUComputePipelineDescriptor& in) {
+        return Convert(out.label, in.label) &&    //
+               Convert(out.layout, in.layout) &&  //
+               Convert(out.compute, in.compute);
+    }
+
+    bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
+                            const interop::GPURenderPipelineDescriptor& in) {
+        wgpu::RenderPipelineDescriptor desc{};
+        return Convert(out.label, in.label) &&                //
+               Convert(out.layout, in.layout) &&              //
+               Convert(out.vertex, in.vertex) &&              //
+               Convert(out.primitive, in.primitive) &&        //
+               Convert(out.depthStencil, in.depthStencil) &&  //
+               Convert(out.multisample, in.multisample) &&    //
+               Convert(out.fragment, in.fragment);
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Converter.h b/src/dawn/node/binding/Converter.h
new file mode 100644
index 0000000..17754cb
--- /dev/null
+++ b/src/dawn/node/binding/Converter.h
@@ -0,0 +1,410 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_CONVERTER_H_
+#define DAWN_NODE_BINDING_CONVERTER_H_
+
+#include <functional>
+#include <type_traits>
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
+    // binding implementation type.
+    template <typename T>
+    struct ImplOfTraits {};
+
+    // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
+    // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
+#define DECLARE_IMPL(NAME)               \
+    class NAME;                          \
+    template <>                          \
+    struct ImplOfTraits<interop::NAME> { \
+        using type = binding::NAME;      \
+    }
+
+    // Declare the interop interface to binding implementations
+    DECLARE_IMPL(GPUBindGroup);
+    DECLARE_IMPL(GPUBindGroupLayout);
+    DECLARE_IMPL(GPUBuffer);
+    DECLARE_IMPL(GPUPipelineLayout);
+    DECLARE_IMPL(GPUQuerySet);
+    DECLARE_IMPL(GPURenderBundle);
+    DECLARE_IMPL(GPURenderPipeline);
+    DECLARE_IMPL(GPUSampler);
+    DECLARE_IMPL(GPUShaderModule);
+    DECLARE_IMPL(GPUTexture);
+    DECLARE_IMPL(GPUTextureView);
+#undef DECLARE_IMPL
+
+    // Helper for obtaining the binding implementation type from the interop interface type
+    template <typename T>
+    using ImplOf = typename ImplOfTraits<T>::type;
+
+    // Converter is a utility class for converting IDL generated interop types into Dawn types.
+    // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
+    // heap allocations for conversions of vector or optional types. These pointers are
+    // automatically freed when the Converter is destructed.
+    class Converter {
+      public:
+        Converter(Napi::Env e) : env(e) {
+        }
+        ~Converter();
+
+        // Conversion function. Converts the interop type IN to the Dawn type OUT.
+        // Returns true on success, false on failure.
+        template <typename OUT, typename IN>
+        [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
+            return Convert(std::forward<OUT>(out), std::forward<IN>(in));
+        }
+
+        // Vector conversion function. Converts the vector of interop type IN to a pointer of
+        // elements of Dawn type OUT, which is assigned to 'out_els'.
+        // out_count is assigned the number of elements in 'in'.
+        // Returns true on success, false on failure.
+        // The pointer assigned to 'out_els' is valid until the Converter is destructed.
+        template <typename OUT, typename IN>
+        [[nodiscard]] inline bool operator()(OUT*& out_els,
+                                             uint32_t& out_count,
+                                             const std::vector<IN>& in) {
+            return Convert(out_els, out_count, in);
+        }
+
+        // Returns the Env that this Converter was constructed with.
+        inline Napi::Env Env() const {
+            return env;
+        }
+
+        // BufferSource is the converted type of interop::BufferSource.
+        struct BufferSource {
+            void* data;
+            size_t size;             // in bytes
+            size_t bytesPerElement;  // 1 for ArrayBuffers
+        };
+
+      private:
+        // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
+        [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
+
+        [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
+
+        [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
+
+        [[nodiscard]] bool Convert(wgpu::Origin3D& out,
+                                   const std::vector<interop::GPUIntegerCoordinate>& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
+
+        [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
+                                   const interop::GPUImageCopyTexture& in);
+
+        [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
+                                   const interop::GPUImageCopyBuffer& in);
+
+        [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
+                                   const interop::GPUImageDataLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
+                                   const interop::GPUTextureUsageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
+                                   const interop::GPUColorWriteFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
+                                   const interop::GPUTextureDimension& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
+                                   const interop::GPUTextureViewDimension& in);
+
+        [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
+                                   const interop::GPUProgrammableStage& in);
+
+        [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
+                                   const std::string& in_name,
+                                   wgpu::interop::GPUPipelineConstantValue in_value);
+
+        [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
+
+        [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
+
+        [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
+                                   const interop::GPUColorTargetState& in);
+
+        [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
+                                   const interop::GPUDepthStencilState& in);
+
+        [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
+                                   const interop::GPUMultisampleState& in);
+
+        [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
+
+        [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
+                                   const interop::GPUPrimitiveTopology& in);
+
+        [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
+
+        [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
+                                   const interop::GPUCompareFunction& in);
+
+        [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
+                                   const interop::GPUStencilOperation& in);
+
+        [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
+                                   const interop::GPUStencilFaceState& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
+                                   const interop::GPUVertexBufferLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
+                                   const interop::GPUVertexAttribute& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
+                                   const interop::GPURenderPassColorAttachment& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                                   const interop::GPURenderPassDepthStencilAttachment& in);
+
+        [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
+
+        [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
+
+        [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
+
+        [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
+                                   const interop::GPUBindGroupLayoutEntry& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
+                                   const interop::GPUBufferBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
+                                   const interop::GPUSamplerBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
+                                   const interop::GPUTextureBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
+                                   const interop::GPUStorageTextureBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
+                                   const interop::GPUBufferBindingType& in);
+
+        [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
+                                   const interop::GPUSamplerBindingType& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
+                                   const interop::GPUTextureSampleType& in);
+
+        [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
+                                   const interop::GPUStorageTextureAccess& in);
+
+        [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
+
+        [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
+                                   const interop::GPUComputePipelineDescriptor& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
+                                   const interop::GPURenderPipelineDescriptor& in);
+
+        // std::string to C string
+        inline bool Convert(const char*& out, const std::string& in) {
+            out = in.c_str();
+            return true;
+        }
+
+        // Pass-through (no conversion)
+        template <typename T>
+        inline bool Convert(T& out, const T& in) {
+            out = in;
+            return true;
+        }
+
+        // Integral number conversion, with dynamic limit checking
+        template <typename OUT,
+                  typename IN,
+                  typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
+        inline bool Convert(OUT& out, const IN& in) {
+            out = static_cast<OUT>(in);
+            if (static_cast<IN>(out) != in) {
+                Napi::Error::New(env, "Integer value (" + std::to_string(in) +
+                                          ") cannot be converted to the Dawn data type without "
+                                          "truncation of the value")
+                    .ThrowAsJavaScriptException();
+                return false;
+            }
+            return true;
+        }
+
+        // ClampedInteger<T>
+        template <typename T>
+        inline bool Convert(T& out, const interop::ClampedInteger<T>& in) {
+            out = in;
+            return true;
+        }
+
+        // EnforceRangeInteger<T>
+        template <typename T>
+        inline bool Convert(T& out, const interop::EnforceRangeInteger<T>& in) {
+            out = in;
+            return true;
+        }
+
+        template <typename OUT, typename... IN_TYPES>
+        inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
+            return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+        }
+
+        // If the std::optional does not have a value, then Convert() simply returns true and 'out'
+        // is not assigned a new value.
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT& out, const std::optional<IN>& in) {
+            if (in.has_value()) {
+                return Convert(out, in.value());
+            }
+            return true;
+        }
+
+        // std::optional -> T*
+        // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
+        // whether 'in' has a value.
+        template <typename OUT,
+                  typename IN,
+                  typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
+        inline bool Convert(OUT*& out, const std::optional<IN>& in) {
+            if (in.has_value()) {
+                auto* el = Allocate<std::remove_const_t<OUT>>();
+                if (!Convert(*el, in.value())) {
+                    return false;
+                }
+                out = el;
+            } else {
+                out = nullptr;
+            }
+            return true;
+        }
+
+        // interop::Interface -> Dawn object
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
+            using Impl = ImplOf<IN>;
+            out = *in.template As<Impl>();
+            if (!out) {
+                LOG("Dawn object has been destroyed. This should not happen");
+                return false;
+            }
+            return true;
+        }
+
+        // vector -> raw pointer + count
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
+            if (in.size() == 0) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+            for (size_t i = 0; i < in.size(); i++) {
+                if (!Convert(els[i], in[i])) {
+                    return false;
+                }
+            }
+            out_els = els;
+            return Convert(out_count, in.size());
+        }
+
+        // unordered_map -> raw pointer + count
+        template <typename OUT, typename IN_KEY, typename IN_VALUE>
+        inline bool Convert(OUT*& out_els,
+                            uint32_t& out_count,
+                            const std::unordered_map<IN_KEY, IN_VALUE>& in) {
+            if (in.size() == 0) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+            size_t i = 0;
+            for (auto& [key, value] : in) {
+                if (!Convert(els[i++], key, value)) {
+                    return false;
+                }
+            }
+            out_els = els;
+            return Convert(out_count, in.size());
+        }
+
+        // std::optional<T> -> raw pointer + count
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
+            if (!in.has_value()) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            return Convert(out_els, out_count, in.value());
+        }
+
+        Napi::Env env;
+
+        // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
+        // the first element. The array is freed when the Converter is destructed.
+        template <typename T>
+        T* Allocate(size_t n = 1) {
+            auto* ptr = new T[n]{};
+            free_.emplace_back([ptr] { delete[] ptr; });
+            return ptr;
+        }
+
+        std::vector<std::function<void()>> free_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_CONVERTER_H_
diff --git a/src/dawn/node/binding/Errors.cpp b/src/dawn/node/binding/Errors.cpp
new file mode 100644
index 0000000..62def5d
--- /dev/null
+++ b/src/dawn/node/binding/Errors.cpp
@@ -0,0 +1,179 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Errors.h"
+
+namespace wgpu::binding {
+
+    namespace {
+        constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
+        constexpr char kWrongDocumentError[] = "WrongDocumentError";
+        constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
+        constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
+        constexpr char kNotFoundError[] = "NotFoundError";
+        constexpr char kNotSupportedError[] = "NotSupportedError";
+        constexpr char kInUseAttributeError[] = "InUseAttributeError";
+        constexpr char kInvalidStateError[] = "InvalidStateError";
+        constexpr char kSyntaxError[] = "SyntaxError";
+        constexpr char kInvalidModificationError[] = "InvalidModificationError";
+        constexpr char kNamespaceError[] = "NamespaceError";
+        constexpr char kSecurityError[] = "SecurityError";
+        constexpr char kNetworkError[] = "NetworkError";
+        constexpr char kAbortError[] = "AbortError";
+        constexpr char kURLMismatchError[] = "URLMismatchError";
+        constexpr char kQuotaExceededError[] = "QuotaExceededError";
+        constexpr char kTimeoutError[] = "TimeoutError";
+        constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
+        constexpr char kDataCloneError[] = "DataCloneError";
+        constexpr char kEncodingError[] = "EncodingError";
+        constexpr char kNotReadableError[] = "NotReadableError";
+        constexpr char kUnknownError[] = "UnknownError";
+        constexpr char kConstraintError[] = "ConstraintError";
+        constexpr char kDataError[] = "DataError";
+        constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
+        constexpr char kReadOnlyError[] = "ReadOnlyError";
+        constexpr char kVersionError[] = "VersionError";
+        constexpr char kOperationError[] = "OperationError";
+        constexpr char kNotAllowedError[] = "NotAllowedError";
+
+        static Napi::Error New(Napi::Env env,
+                               std::string name,
+                               std::string message,
+                               unsigned short code = 0) {
+            auto err = Napi::Error::New(env);
+            err.Set("name", name);
+            err.Set("message", message.empty() ? name : message);
+            err.Set("code", static_cast<double>(code));
+            return err;
+        }
+
+    }  // namespace
+
+    Napi::Error Errors::HierarchyRequestError(Napi::Env env, std::string message) {
+        return New(env, kHierarchyRequestError, message);
+    }
+
+    Napi::Error Errors::WrongDocumentError(Napi::Env env, std::string message) {
+        return New(env, kWrongDocumentError, message);
+    }
+
+    Napi::Error Errors::InvalidCharacterError(Napi::Env env, std::string message) {
+        return New(env, kInvalidCharacterError, message);
+    }
+
+    Napi::Error Errors::NoModificationAllowedError(Napi::Env env, std::string message) {
+        return New(env, kNoModificationAllowedError, message);
+    }
+
+    Napi::Error Errors::NotFoundError(Napi::Env env, std::string message) {
+        return New(env, kNotFoundError, message);
+    }
+
+    Napi::Error Errors::NotSupportedError(Napi::Env env, std::string message) {
+        return New(env, kNotSupportedError, message);
+    }
+
+    Napi::Error Errors::InUseAttributeError(Napi::Env env, std::string message) {
+        return New(env, kInUseAttributeError, message);
+    }
+
+    Napi::Error Errors::InvalidStateError(Napi::Env env, std::string message) {
+        return New(env, kInvalidStateError, message);
+    }
+
+    Napi::Error Errors::SyntaxError(Napi::Env env, std::string message) {
+        return New(env, kSyntaxError, message);
+    }
+
+    Napi::Error Errors::InvalidModificationError(Napi::Env env, std::string message) {
+        return New(env, kInvalidModificationError, message);
+    }
+
+    Napi::Error Errors::NamespaceError(Napi::Env env, std::string message) {
+        return New(env, kNamespaceError, message);
+    }
+
+    Napi::Error Errors::SecurityError(Napi::Env env, std::string message) {
+        return New(env, kSecurityError, message);
+    }
+
+    Napi::Error Errors::NetworkError(Napi::Env env, std::string message) {
+        return New(env, kNetworkError, message);
+    }
+
+    Napi::Error Errors::AbortError(Napi::Env env, std::string message) {
+        return New(env, kAbortError, message);
+    }
+
+    Napi::Error Errors::URLMismatchError(Napi::Env env, std::string message) {
+        return New(env, kURLMismatchError, message);
+    }
+
+    Napi::Error Errors::QuotaExceededError(Napi::Env env, std::string message) {
+        return New(env, kQuotaExceededError, message);
+    }
+
+    Napi::Error Errors::TimeoutError(Napi::Env env, std::string message) {
+        return New(env, kTimeoutError, message);
+    }
+
+    Napi::Error Errors::InvalidNodeTypeError(Napi::Env env, std::string message) {
+        return New(env, kInvalidNodeTypeError, message);
+    }
+
+    Napi::Error Errors::DataCloneError(Napi::Env env, std::string message) {
+        return New(env, kDataCloneError, message);
+    }
+
+    Napi::Error Errors::EncodingError(Napi::Env env, std::string message) {
+        return New(env, kEncodingError, message);
+    }
+
+    Napi::Error Errors::NotReadableError(Napi::Env env, std::string message) {
+        return New(env, kNotReadableError, message);
+    }
+
+    Napi::Error Errors::UnknownError(Napi::Env env, std::string message) {
+        return New(env, kUnknownError, message);
+    }
+
+    Napi::Error Errors::ConstraintError(Napi::Env env, std::string message) {
+        return New(env, kConstraintError, message);
+    }
+
+    Napi::Error Errors::DataError(Napi::Env env, std::string message) {
+        return New(env, kDataError, message);
+    }
+
+    Napi::Error Errors::TransactionInactiveError(Napi::Env env, std::string message) {
+        return New(env, kTransactionInactiveError, message);
+    }
+
+    Napi::Error Errors::ReadOnlyError(Napi::Env env, std::string message) {
+        return New(env, kReadOnlyError, message);
+    }
+
+    Napi::Error Errors::VersionError(Napi::Env env, std::string message) {
+        return New(env, kVersionError, message);
+    }
+
+    Napi::Error Errors::OperationError(Napi::Env env, std::string message) {
+        return New(env, kOperationError, message);
+    }
+
+    Napi::Error Errors::NotAllowedError(Napi::Env env, std::string message) {
+        return New(env, kNotAllowedError, message);
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Errors.h b/src/dawn/node/binding/Errors.h
new file mode 100644
index 0000000..742df1f
--- /dev/null
+++ b/src/dawn/node/binding/Errors.h
@@ -0,0 +1,60 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ERRORS_H_
+#define DAWN_NODE_BINDING_ERRORS_H_
+
+#include "napi.h"
+
+namespace wgpu::binding {
+
+    // Errors contains static helper methods for creating DOMException error
+    // messages as documented at:
+    // https://heycam.github.io/webidl/#idl-DOMException-error-names
+    class Errors {
+      public:
+        static Napi::Error HierarchyRequestError(Napi::Env, std::string message = {});
+        static Napi::Error WrongDocumentError(Napi::Env, std::string message = {});
+        static Napi::Error InvalidCharacterError(Napi::Env, std::string message = {});
+        static Napi::Error NoModificationAllowedError(Napi::Env, std::string message = {});
+        static Napi::Error NotFoundError(Napi::Env, std::string message = {});
+        static Napi::Error NotSupportedError(Napi::Env, std::string message = {});
+        static Napi::Error InUseAttributeError(Napi::Env, std::string message = {});
+        static Napi::Error InvalidStateError(Napi::Env, std::string message = {});
+        static Napi::Error SyntaxError(Napi::Env, std::string message = {});
+        static Napi::Error InvalidModificationError(Napi::Env, std::string message = {});
+        static Napi::Error NamespaceError(Napi::Env, std::string message = {});
+        static Napi::Error SecurityError(Napi::Env, std::string message = {});
+        static Napi::Error NetworkError(Napi::Env, std::string message = {});
+        static Napi::Error AbortError(Napi::Env, std::string message = {});
+        static Napi::Error URLMismatchError(Napi::Env, std::string message = {});
+        static Napi::Error QuotaExceededError(Napi::Env, std::string message = {});
+        static Napi::Error TimeoutError(Napi::Env, std::string message = {});
+        static Napi::Error InvalidNodeTypeError(Napi::Env, std::string message = {});
+        static Napi::Error DataCloneError(Napi::Env, std::string message = {});
+        static Napi::Error EncodingError(Napi::Env, std::string message = {});
+        static Napi::Error NotReadableError(Napi::Env, std::string message = {});
+        static Napi::Error UnknownError(Napi::Env, std::string message = {});
+        static Napi::Error ConstraintError(Napi::Env, std::string message = {});
+        static Napi::Error DataError(Napi::Env, std::string message = {});
+        static Napi::Error TransactionInactiveError(Napi::Env, std::string message = {});
+        static Napi::Error ReadOnlyError(Napi::Env, std::string message = {});
+        static Napi::Error VersionError(Napi::Env, std::string message = {});
+        static Napi::Error OperationError(Napi::Env, std::string message = {});
+        static Napi::Error NotAllowedError(Napi::Env, std::string message = {});
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_ERRORS_H_
diff --git a/src/dawn/node/binding/Flags.cpp b/src/dawn/node/binding/Flags.cpp
new file mode 100644
index 0000000..40b0560
--- /dev/null
+++ b/src/dawn/node/binding/Flags.cpp
@@ -0,0 +1,29 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Flags.h"
+
+namespace wgpu::binding {
+    void Flags::Set(const std::string& key, const std::string& value) {
+        flags_[key] = value;
+    }
+
+    std::optional<std::string> Flags::Get(const std::string& key) const {
+        auto iter = flags_.find(key);
+        if (iter != flags_.end()) {
+            return iter->second;
+        }
+        return {};
+    }
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Flags.h b/src/dawn/node/binding/Flags.h
new file mode 100644
index 0000000..89b7b43
--- /dev/null
+++ b/src/dawn/node/binding/Flags.h
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_FLAGS_H_
+#define DAWN_NODE_BINDING_FLAGS_H_
+
+#include <optional>
+#include <string>
+#include <unordered_map>
+
+namespace wgpu::binding {
+    // Flags maintains a key-value mapping of input flags passed into the module's create()
+    // function, used to configure dawn_node.
+    class Flags {
+      public:
+        void Set(const std::string& key, const std::string& value);
+        std::optional<std::string> Get(const std::string& key) const;
+
+      private:
+        std::unordered_map<std::string, std::string> flags_;
+    };
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_FLAGS_H_
diff --git a/src/dawn/node/binding/GPU.cpp b/src/dawn/node/binding/GPU.cpp
new file mode 100644
index 0000000..43472ec
--- /dev/null
+++ b/src/dawn/node/binding/GPU.cpp
@@ -0,0 +1,165 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPU.h"
+
+#include "src/dawn/node/binding/GPUAdapter.h"
+
+#include <cstdlib>
+
+#if defined(_WIN32)
+#    include <Windows.h>
+#endif
+
+namespace {
+    std::string GetEnvVar(const char* varName) {
+#if defined(_WIN32)
+        // Use _dupenv_s to avoid unsafe warnings about std::getenv
+        char* value = nullptr;
+        _dupenv_s(&value, nullptr, varName);
+        if (value) {
+            std::string result = value;
+            free(value);
+            return result;
+        }
+        return "";
+#else
+        if (auto* val = std::getenv(varName)) {
+            return val;
+        }
+        return "";
+#endif
+    }
+
+    void SetDllDir(const char* dir) {
+        (void)dir;
+#if defined(_WIN32)
+        ::SetDllDirectory(dir);
+#endif
+    }
+
+}  // namespace
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPU
+    ////////////////////////////////////////////////////////////////////////////////
+    GPU::GPU(Flags flags) : flags_(std::move(flags)) {
+        // TODO: Disable in 'release'
+        instance_.EnableBackendValidation(true);
+        instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
+
+        // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
+        if (auto dir = flags_.Get("dlldir")) {
+            SetDllDir(dir->c_str());
+        }
+        instance_.DiscoverDefaultAdapters();
+    }
+
+    interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
+        Napi::Env env,
+        interop::GPURequestAdapterOptions options) {
+        auto promise = interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(
+            env, PROMISE_INFO);
+
+        if (options.forceFallbackAdapter) {
+            // Software adapters are not currently supported.
+            promise.Resolve({});
+            return promise;
+        }
+
+        auto adapters = instance_.GetAdapters();
+        if (adapters.empty()) {
+            promise.Resolve({});
+            return promise;
+        }
+
+#if defined(_WIN32)
+        constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
+#elif defined(__linux__)
+        constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
+#elif defined(__APPLE__)
+        constexpr auto defaultBackendType = wgpu::BackendType::Metal;
+#else
+#    error "Unsupported platform"
+#endif
+
+        auto targetBackendType = defaultBackendType;
+        std::string forceBackend;
+
+        // Check for override from env var
+        if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
+            forceBackend = envVar;
+        }
+
+        // Check for override from flag
+        if (auto f = flags_.Get("dawn-backend")) {
+            forceBackend = *f;
+        }
+
+        std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
+                       [](char c) { return std::tolower(c); });
+
+        // Default to first adapter if a backend is not specified
+        size_t adapterIndex = 0;
+
+        if (!forceBackend.empty()) {
+            if (forceBackend == "null") {
+                targetBackendType = wgpu::BackendType::Null;
+            } else if (forceBackend == "webgpu") {
+                targetBackendType = wgpu::BackendType::WebGPU;
+            } else if (forceBackend == "d3d11") {
+                targetBackendType = wgpu::BackendType::D3D11;
+            } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
+                targetBackendType = wgpu::BackendType::D3D12;
+            } else if (forceBackend == "metal") {
+                targetBackendType = wgpu::BackendType::Metal;
+            } else if (forceBackend == "vulkan" || forceBackend == "vk") {
+                targetBackendType = wgpu::BackendType::Vulkan;
+            } else if (forceBackend == "opengl" || forceBackend == "gl") {
+                targetBackendType = wgpu::BackendType::OpenGL;
+            } else if (forceBackend == "opengles" || forceBackend == "gles") {
+                targetBackendType = wgpu::BackendType::OpenGLES;
+            } else {
+                promise.Reject("unknown backend '" + forceBackend + "'");
+                return promise;
+            }
+        }
+
+        bool found = false;
+        for (size_t i = 0; i < adapters.size(); ++i) {
+            wgpu::AdapterProperties props;
+            adapters[i].GetProperties(&props);
+            if (props.backendType == targetBackendType) {
+                adapterIndex = i;
+                found = true;
+                break;
+            }
+        }
+        if (!found) {
+            if (!forceBackend.empty()) {
+                promise.Reject("backend '" + forceBackend + "' not found");
+            } else {
+                promise.Reject("no suitable backends found");
+            }
+            return promise;
+        }
+
+        auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
+        promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+        return promise;
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPU.h b/src/dawn/node/binding/GPU.h
new file mode 100644
index 0000000..de6b140
--- /dev/null
+++ b/src/dawn/node/binding/GPU.h
@@ -0,0 +1,42 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPU_H_
+#define DAWN_NODE_BINDING_GPU_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+    // GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
+    class GPU final : public interop::GPU {
+      public:
+        GPU(Flags flags);
+
+        // interop::GPU interface compliance
+        interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
+            Napi::Env env,
+            interop::GPURequestAdapterOptions options) override;
+
+      private:
+        const Flags flags_;
+        dawn::native::Instance instance_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPU_H_
diff --git a/src/dawn/node/binding/GPUAdapter.cpp b/src/dawn/node/binding/GPUAdapter.cpp
new file mode 100644
index 0000000..d604431
--- /dev/null
+++ b/src/dawn/node/binding/GPUAdapter.cpp
@@ -0,0 +1,271 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUAdapter.h"
+
+#include <unordered_set>
+
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/binding/GPUDevice.h"
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+
+namespace {
+    // TODO(amaiorano): Move to utility header
+    std::vector<std::string> Split(const std::string& s, char delim) {
+        if (s.empty())
+            return {};
+
+        std::vector<std::string> result;
+        const size_t lastIndex = s.length() - 1;
+        size_t startIndex = 0;
+        size_t i = startIndex;
+
+        while (i <= lastIndex) {
+            if (s[i] == delim) {
+                auto token = s.substr(startIndex, i - startIndex);
+                if (!token.empty())  // Discard empty tokens
+                    result.push_back(token);
+                startIndex = i + 1;
+            } else if (i == lastIndex) {
+                auto token = s.substr(startIndex, i - startIndex + 1);
+                if (!token.empty())  // Discard empty tokens
+                    result.push_back(token);
+            }
+            ++i;
+        }
+        return result;
+    }
+}  // namespace
+
+#define FOR_EACH_LIMIT(X)                        \
+    X(maxTextureDimension1D)                     \
+    X(maxTextureDimension2D)                     \
+    X(maxTextureDimension3D)                     \
+    X(maxTextureArrayLayers)                     \
+    X(maxBindGroups)                             \
+    X(maxDynamicUniformBuffersPerPipelineLayout) \
+    X(maxDynamicStorageBuffersPerPipelineLayout) \
+    X(maxSampledTexturesPerShaderStage)          \
+    X(maxSamplersPerShaderStage)                 \
+    X(maxStorageBuffersPerShaderStage)           \
+    X(maxStorageTexturesPerShaderStage)          \
+    X(maxUniformBuffersPerShaderStage)           \
+    X(maxUniformBufferBindingSize)               \
+    X(maxStorageBufferBindingSize)               \
+    X(minUniformBufferOffsetAlignment)           \
+    X(minStorageBufferOffsetAlignment)           \
+    X(maxVertexBuffers)                          \
+    X(maxVertexAttributes)                       \
+    X(maxVertexBufferArrayStride)                \
+    X(maxInterStageShaderComponents)             \
+    X(maxComputeWorkgroupStorageSize)            \
+    X(maxComputeInvocationsPerWorkgroup)         \
+    X(maxComputeWorkgroupSizeX)                  \
+    X(maxComputeWorkgroupSizeY)                  \
+    X(maxComputeWorkgroupSizeZ)                  \
+    X(maxComputeWorkgroupsPerDimension)
+
+namespace wgpu::binding {
+
+    namespace {
+
+        ////////////////////////////////////////////////////////////////////////////////
+        // wgpu::binding::<anon>::Features
+        // Implements interop::GPUSupportedFeatures
+        ////////////////////////////////////////////////////////////////////////////////
+        class Features : public interop::GPUSupportedFeatures {
+          public:
+            Features(WGPUDeviceProperties properties) {
+                if (properties.depth24UnormStencil8) {
+                    enabled_.emplace(interop::GPUFeatureName::kDepth24UnormStencil8);
+                }
+                if (properties.depth32FloatStencil8) {
+                    enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
+                }
+                if (properties.timestampQuery) {
+                    enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+                }
+                if (properties.textureCompressionBC) {
+                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
+                }
+                if (properties.textureCompressionETC2) {
+                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
+                }
+                if (properties.textureCompressionASTC) {
+                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
+                }
+                if (properties.timestampQuery) {
+                    enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+                }
+
+                // TODO(dawn:1123) add support for these extensions when possible.
+                // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
+                // wgpu::interop::GPUFeatureName::kDepthClipControl
+            }
+
+            bool has(interop::GPUFeatureName feature) {
+                return enabled_.count(feature) != 0;
+            }
+
+            // interop::GPUSupportedFeatures compliance
+            bool has(Napi::Env, std::string name) override {
+                interop::GPUFeatureName feature;
+                if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
+                    return has(feature);
+                }
+                return false;
+            }
+            std::vector<std::string> keys(Napi::Env) override {
+                std::vector<std::string> out;
+                out.reserve(enabled_.size());
+                for (auto feature : enabled_) {
+                    out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
+                }
+                return out;
+            }
+
+          private:
+            std::unordered_set<interop::GPUFeatureName> enabled_;
+        };
+
+    }  // namespace
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUAdapter
+    // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags)
+        : adapter_(a), flags_(flags) {
+    }
+
+    std::string GPUAdapter::getName(Napi::Env) {
+        return "dawn-adapter";
+    }
+
+    interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
+        return interop::GPUSupportedFeatures::Create<Features>(env,
+                                                               adapter_.GetAdapterProperties());
+    }
+
+    interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
+        WGPUSupportedLimits limits{};
+        if (!adapter_.GetLimits(&limits)) {
+            Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
+        }
+
+        wgpu::SupportedLimits wgpuLimits{};
+
+#define COPY_LIMIT(LIMIT) wgpuLimits.limits.LIMIT = limits.limits.LIMIT;
+        FOR_EACH_LIMIT(COPY_LIMIT)
+#undef COPY_LIMIT
+
+        return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
+    }
+
+    bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
+        Napi::Env env,
+        interop::GPUDeviceDescriptor descriptor) {
+        wgpu::DeviceDescriptor desc{};  // TODO(crbug.com/dawn/1133): Fill in.
+        interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
+
+        std::vector<wgpu::FeatureName> requiredFeatures;
+        // See src/dawn/native/Features.cpp for enum <-> string mappings.
+        for (auto required : descriptor.requiredFeatures) {
+            switch (required) {
+                case interop::GPUFeatureName::kTextureCompressionBc:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
+                    continue;
+                case interop::GPUFeatureName::kTextureCompressionEtc2:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
+                    continue;
+                case interop::GPUFeatureName::kTextureCompressionAstc:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
+                    continue;
+                case interop::GPUFeatureName::kTimestampQuery:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
+                    continue;
+                case interop::GPUFeatureName::kDepth24UnormStencil8:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::Depth24UnormStencil8);
+                    continue;
+                case interop::GPUFeatureName::kDepth32FloatStencil8:
+                    requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
+                    continue;
+                case interop::GPUFeatureName::kDepthClipControl:
+                case interop::GPUFeatureName::kIndirectFirstInstance:
+                    // TODO(dawn:1123) Add support for these extensions when possible.
+                    continue;
+            }
+            UNIMPLEMENTED("required: ", required);
+        }
+
+        wgpu::RequiredLimits limits;
+#define COPY_LIMIT(LIMIT)                                        \
+    if (descriptor.requiredLimits.count(#LIMIT)) {               \
+        limits.limits.LIMIT = descriptor.requiredLimits[#LIMIT]; \
+        descriptor.requiredLimits.erase(#LIMIT);                 \
+    }
+        FOR_EACH_LIMIT(COPY_LIMIT)
+#undef COPY_LIMIT
+
+        for (auto [key, _] : descriptor.requiredLimits) {
+            promise.Reject(binding::Errors::OperationError(env, "Unknown limit \"" + key + "\""));
+            return promise;
+        }
+
+        // Propogate enabled/disabled dawn features
+        // Note: DawnDeviceTogglesDescriptor::forceEnabledToggles and forceDisabledToggles are
+        // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
+        // call by storing them on the stack.
+        std::vector<std::string> enabledToggles;
+        std::vector<std::string> disabledToggles;
+        std::vector<const char*> forceEnabledToggles;
+        std::vector<const char*> forceDisabledToggles;
+        if (auto values = flags_.Get("enable-dawn-features")) {
+            enabledToggles = Split(*values, ',');
+            for (auto& t : enabledToggles) {
+                forceEnabledToggles.emplace_back(t.c_str());
+            }
+        }
+        if (auto values = flags_.Get("disable-dawn-features")) {
+            disabledToggles = Split(*values, ',');
+            for (auto& t : disabledToggles) {
+                forceDisabledToggles.emplace_back(t.c_str());
+            }
+        }
+
+        desc.requiredFeaturesCount = requiredFeatures.size();
+        desc.requiredFeatures = requiredFeatures.data();
+        desc.requiredLimits = &limits;
+
+        DawnTogglesDeviceDescriptor togglesDesc = {};
+        desc.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+        togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+        togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+        togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+
+        auto wgpu_device = adapter_.CreateDevice(&desc);
+        if (wgpu_device) {
+            promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
+        } else {
+            promise.Reject(binding::Errors::OperationError(env, "failed to create device"));
+        }
+        return promise;
+    }
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUAdapter.h b/src/dawn/node/binding/GPUAdapter.h
new file mode 100644
index 0000000..de03234
--- /dev/null
+++ b/src/dawn/node/binding/GPUAdapter.h
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUADAPTER_H_
+#define DAWN_NODE_BINDING_GPUADAPTER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+    class Flags;
+
+    // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
+    class GPUAdapter final : public interop::GPUAdapter {
+      public:
+        GPUAdapter(dawn::native::Adapter a, const Flags& flags);
+
+        // interop::GPUAdapter interface compliance
+        std::string getName(Napi::Env) override;
+        interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+        interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+        bool getIsFallbackAdapter(Napi::Env) override;
+        interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
+            Napi::Env env,
+            interop::GPUDeviceDescriptor descriptor) override;
+
+      private:
+        dawn::native::Adapter adapter_;
+        const Flags& flags_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUADAPTER_H_
diff --git a/src/dawn/node/binding/GPUBindGroup.cpp b/src/dawn/node/binding/GPUBindGroup.cpp
new file mode 100644
index 0000000..1170cef
--- /dev/null
+++ b/src/dawn/node/binding/GPUBindGroup.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBindGroup.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUBindGroup
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUBindGroup::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUBindGroup::setLabel(Napi::Env,
+                                std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBindGroup.h b/src/dawn/node/binding/GPUBindGroup.h
new file mode 100644
index 0000000..54b005a
--- /dev/null
+++ b/src/dawn/node/binding/GPUBindGroup.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUP_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUP_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
+    class GPUBindGroup final : public interop::GPUBindGroup {
+      public:
+        GPUBindGroup(wgpu::BindGroup group);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::BindGroup&() const {
+            return group_;
+        }
+
+        // interop::GPUBindGroup interface compliance
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::BindGroup group_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUBINDGROUP_H_
diff --git a/src/dawn/node/binding/GPUBindGroupLayout.cpp b/src/dawn/node/binding/GPUBindGroupLayout.cpp
new file mode 100644
index 0000000..5642150
--- /dev/null
+++ b/src/dawn/node/binding/GPUBindGroupLayout.cpp
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUBindGroupLayout
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
+        : layout_(std::move(layout)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUBindGroupLayout::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUBindGroupLayout::setLabel(Napi::Env,
+                                      std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBindGroupLayout.h b/src/dawn/node/binding/GPUBindGroupLayout.h
new file mode 100644
index 0000000..0382c47
--- /dev/null
+++ b/src/dawn/node/binding/GPUBindGroupLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
+    // wgpu::BindGroupLayout.
+    class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
+      public:
+        GPUBindGroupLayout(wgpu::BindGroupLayout layout);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::BindGroupLayout&() const {
+            return layout_;
+        }
+
+        // interop::GPUBindGroupLayout interface compliance
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::BindGroupLayout layout_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
diff --git a/src/dawn/node/binding/GPUBuffer.cpp b/src/dawn/node/binding/GPUBuffer.cpp
new file mode 100644
index 0000000..ebafaa5e
--- /dev/null
+++ b/src/dawn/node/binding/GPUBuffer.cpp
@@ -0,0 +1,169 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBuffer.h"
+
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUBuffer
+    // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
+    // robustly passing, pull out validation and see what / if breaks.
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
+                         wgpu::BufferDescriptor desc,
+                         wgpu::Device device,
+                         std::shared_ptr<AsyncRunner> async)
+        : buffer_(std::move(buffer)),
+          desc_(desc),
+          device_(std::move(device)),
+          async_(std::move(async)) {
+        if (desc.mappedAtCreation) {
+            state_ = State::MappedAtCreation;
+        }
+    }
+
+    interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
+                                               interop::GPUMapModeFlags mode,
+                                               interop::GPUSize64 offset,
+                                               std::optional<interop::GPUSize64> size) {
+        wgpu::MapMode md{};
+        Converter conv(env);
+        if (!conv(md, mode)) {
+            interop::Promise<void> promise(env, PROMISE_INFO);
+            promise.Reject(Errors::OperationError(env));
+            return promise;
+        }
+
+        if (state_ != State::Unmapped) {
+            interop::Promise<void> promise(env, PROMISE_INFO);
+            promise.Reject(Errors::OperationError(env));
+            device_.InjectError(wgpu::ErrorType::Validation,
+                                "mapAsync called on buffer that is not in the unmapped state");
+            return promise;
+        }
+
+        struct Context {
+            Napi::Env env;
+            interop::Promise<void> promise;
+            AsyncTask task;
+            State& state;
+        };
+        auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_, state_};
+        auto promise = ctx->promise;
+
+        uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+
+        state_ = State::MappingPending;
+
+        buffer_.MapAsync(
+            md, offset, s,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+                c->state = State::Unmapped;
+                switch (status) {
+                    case WGPUBufferMapAsyncStatus_Force32:
+                        UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+                        break;
+                    case WGPUBufferMapAsyncStatus_Success:
+                        c->promise.Resolve();
+                        c->state = State::Mapped;
+                        break;
+                    case WGPUBufferMapAsyncStatus_Error:
+                        c->promise.Reject(Errors::OperationError(c->env));
+                        break;
+                    case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
+                    case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
+                        c->promise.Reject(Errors::AbortError(c->env));
+                        break;
+                    case WGPUBufferMapAsyncStatus_Unknown:
+                    case WGPUBufferMapAsyncStatus_DeviceLost:
+                        // TODO: The spec is a bit vague around what the promise should do
+                        // here.
+                        c->promise.Reject(Errors::UnknownError(c->env));
+                        break;
+                }
+            },
+            ctx);
+
+        return promise;
+    }
+
+    interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
+                                                   interop::GPUSize64 offset,
+                                                   std::optional<interop::GPUSize64> size) {
+        if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
+            Errors::OperationError(env).ThrowAsJavaScriptException();
+            return {};
+        }
+
+        uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+
+        uint64_t start = offset;
+        uint64_t end = offset + s;
+        for (auto& mapping : mapped_) {
+            if (mapping.Intersects(start, end)) {
+                Errors::OperationError(env).ThrowAsJavaScriptException();
+                return {};
+            }
+        }
+
+        auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
+                        ? buffer_.GetMappedRange(offset, s)
+                        : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
+        if (!ptr) {
+            Errors::OperationError(env).ThrowAsJavaScriptException();
+            return {};
+        }
+        auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
+        // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
+        mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
+        return array_buffer;
+    }
+
+    void GPUBuffer::unmap(Napi::Env env) {
+        if (state_ == State::Destroyed) {
+            device_.InjectError(wgpu::ErrorType::Validation,
+                                "unmap() called on a destroyed buffer");
+            return;
+        }
+
+        for (auto& mapping : mapped_) {
+            mapping.buffer.Value().Detach();
+        }
+        mapped_.clear();
+        buffer_.Unmap();
+        state_ = State::Unmapped;
+    }
+
+    void GPUBuffer::destroy(Napi::Env) {
+        buffer_.Destroy();
+        state_ = State::Destroyed;
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUBuffer::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUBuffer::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBuffer.h b/src/dawn/node/binding/GPUBuffer.h
new file mode 100644
index 0000000..994bd1c
--- /dev/null
+++ b/src/dawn/node/binding/GPUBuffer.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBUFFER_H_
+#define DAWN_NODE_BINDING_GPUBUFFER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
+    class GPUBuffer final : public interop::GPUBuffer {
+      public:
+        GPUBuffer(wgpu::Buffer buffer,
+                  wgpu::BufferDescriptor desc,
+                  wgpu::Device device,
+                  std::shared_ptr<AsyncRunner> async);
+
+        // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
+        const wgpu::BufferDescriptor& Desc() const {
+            return desc_;
+        }
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::Buffer&() const {
+            return buffer_;
+        }
+
+        // interop::GPUBuffer interface compliance
+        interop::Promise<void> mapAsync(Napi::Env env,
+                                        interop::GPUMapModeFlags mode,
+                                        interop::GPUSize64 offset,
+                                        std::optional<interop::GPUSize64> size) override;
+        interop::ArrayBuffer getMappedRange(Napi::Env env,
+                                            interop::GPUSize64 offset,
+                                            std::optional<interop::GPUSize64> size) override;
+        void unmap(Napi::Env) override;
+        void destroy(Napi::Env) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        struct Mapping {
+            uint64_t start;
+            uint64_t end;
+            inline bool Intersects(uint64_t s, uint64_t e) const {
+                return s < end && e > start;
+            }
+            Napi::Reference<interop::ArrayBuffer> buffer;
+        };
+
+        // https://www.w3.org/TR/webgpu/#buffer-interface
+        enum class State {
+            Unmapped,
+            Mapped,
+            MappedAtCreation,
+            MappingPending,
+            Destroyed,
+        };
+
+        wgpu::Buffer buffer_;
+        wgpu::BufferDescriptor const desc_;
+        wgpu::Device const device_;
+        std::shared_ptr<AsyncRunner> async_;
+        State state_ = State::Unmapped;
+        std::vector<Mapping> mapped_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUBUFFER_H_
diff --git a/src/dawn/node/binding/GPUCommandBuffer.cpp b/src/dawn/node/binding/GPUCommandBuffer.cpp
new file mode 100644
index 0000000..ac86c1d
--- /dev/null
+++ b/src/dawn/node/binding/GPUCommandBuffer.cpp
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUCommandBuffer
+    ////////////////////////////////////////////////////////////////////////////////
+
+    GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUCommandBuffer::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUCommandBuffer::setLabel(Napi::Env,
+                                    std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUCommandBuffer.h b/src/dawn/node/binding/GPUCommandBuffer.h
new file mode 100644
index 0000000..34604dc
--- /dev/null
+++ b/src/dawn/node/binding/GPUCommandBuffer.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
+    // wgpu::CommandBuffer.
+    class GPUCommandBuffer final : public interop::GPUCommandBuffer {
+      public:
+        GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::CommandBuffer&() const {
+            return cmd_buf_;
+        }
+
+        // interop::GPUCommandBuffer interface compliance
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::CommandBuffer cmd_buf_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
diff --git a/src/dawn/node/binding/GPUCommandEncoder.cpp b/src/dawn/node/binding/GPUCommandEncoder.cpp
new file mode 100644
index 0000000..b707966
--- /dev/null
+++ b/src/dawn/node/binding/GPUCommandEncoder.cpp
@@ -0,0 +1,216 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUCommandEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPU.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/binding/GPUComputePassEncoder.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPURenderPassEncoder.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUCommandEncoder
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
+    }
+
+    interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
+        Napi::Env env,
+        interop::GPURenderPassDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::RenderPassDescriptor desc{};
+        // TODO(dawn:1250) handle timestampWrites
+        if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
+            !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
+            !conv(desc.label, descriptor.label) ||
+            !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
+            return {};
+        }
+
+        return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
+            env, enc_.BeginRenderPass(&desc));
+    }
+
+    interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
+        Napi::Env env,
+        interop::GPUComputePassDescriptor descriptor) {
+        wgpu::ComputePassDescriptor desc{};
+        // TODO(dawn:1250) handle timestampWrites
+        return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
+            env, enc_.BeginComputePass(&desc));
+    }
+
+    void GPUCommandEncoder::clearBuffer(Napi::Env env,
+                                        interop::Interface<interop::GPUBuffer> buffer,
+                                        interop::GPUSize64 offset,
+                                        std::optional<interop::GPUSize64> size) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t s = wgpu::kWholeSize;
+        if (!conv(b, buffer) ||  //
+            !conv(s, size)) {
+            return;
+        }
+
+        enc_.ClearBuffer(b, offset, s);
+    }
+
+    void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
+                                               interop::Interface<interop::GPUBuffer> source,
+                                               interop::GPUSize64 sourceOffset,
+                                               interop::Interface<interop::GPUBuffer> destination,
+                                               interop::GPUSize64 destinationOffset,
+                                               interop::GPUSize64 size) {
+        Converter conv(env);
+
+        wgpu::Buffer src{};
+        wgpu::Buffer dst{};
+        if (!conv(src, source) ||  //
+            !conv(dst, destination)) {
+            return;
+        }
+
+        enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+    }
+
+    void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
+                                                interop::GPUImageCopyBuffer source,
+                                                interop::GPUImageCopyTexture destination,
+                                                interop::GPUExtent3D copySize) {
+        Converter conv(env);
+
+        wgpu::ImageCopyBuffer src{};
+        wgpu::ImageCopyTexture dst{};
+        wgpu::Extent3D size{};
+        if (!conv(src, source) ||       //
+            !conv(dst, destination) ||  //
+            !conv(size, copySize)) {
+            return;
+        }
+
+        enc_.CopyBufferToTexture(&src, &dst, &size);
+    }
+
+    void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
+                                                interop::GPUImageCopyTexture source,
+                                                interop::GPUImageCopyBuffer destination,
+                                                interop::GPUExtent3D copySize) {
+        Converter conv(env);
+
+        wgpu::ImageCopyTexture src{};
+        wgpu::ImageCopyBuffer dst{};
+        wgpu::Extent3D size{};
+        if (!conv(src, source) ||       //
+            !conv(dst, destination) ||  //
+            !conv(size, copySize)) {
+            return;
+        }
+
+        enc_.CopyTextureToBuffer(&src, &dst, &size);
+    }
+
+    void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
+                                                 interop::GPUImageCopyTexture source,
+                                                 interop::GPUImageCopyTexture destination,
+                                                 interop::GPUExtent3D copySize) {
+        Converter conv(env);
+
+        wgpu::ImageCopyTexture src{};
+        wgpu::ImageCopyTexture dst{};
+        wgpu::Extent3D size{};
+        if (!conv(src, source) ||       //
+            !conv(dst, destination) ||  //
+            !conv(size, copySize)) {
+            return;
+        }
+
+        enc_.CopyTextureToTexture(&src, &dst, &size);
+    }
+
+    void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+        enc_.PushDebugGroup(groupLabel.c_str());
+    }
+
+    void GPUCommandEncoder::popDebugGroup(Napi::Env) {
+        enc_.PopDebugGroup();
+    }
+
+    void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+        enc_.InsertDebugMarker(markerLabel.c_str());
+    }
+
+    void GPUCommandEncoder::writeTimestamp(Napi::Env env,
+                                           interop::Interface<interop::GPUQuerySet> querySet,
+                                           interop::GPUSize32 queryIndex) {
+        Converter conv(env);
+
+        wgpu::QuerySet q{};
+        if (!conv(q, querySet)) {
+            return;
+        }
+
+        enc_.WriteTimestamp(q, queryIndex);
+    }
+
+    void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
+                                            interop::Interface<interop::GPUQuerySet> querySet,
+                                            interop::GPUSize32 firstQuery,
+                                            interop::GPUSize32 queryCount,
+                                            interop::Interface<interop::GPUBuffer> destination,
+                                            interop::GPUSize64 destinationOffset) {
+        Converter conv(env);
+
+        wgpu::QuerySet q{};
+        uint32_t f = 0;
+        uint32_t c = 0;
+        wgpu::Buffer b{};
+        uint64_t o = 0;
+
+        if (!conv(q, querySet) ||     //
+            !conv(f, firstQuery) ||   //
+            !conv(c, queryCount) ||   //
+            !conv(b, destination) ||  //
+            !conv(o, destinationOffset)) {
+            return;
+        }
+
+        enc_.ResolveQuerySet(q, f, c, b, o);
+    }
+
+    interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
+        Napi::Env env,
+        interop::GPUCommandBufferDescriptor descriptor) {
+        wgpu::CommandBufferDescriptor desc{};
+        return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUCommandEncoder::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUCommandEncoder::setLabel(Napi::Env,
+                                     std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUCommandEncoder.h b/src/dawn/node/binding/GPUCommandEncoder.h
new file mode 100644
index 0000000..3b3179a
--- /dev/null
+++ b/src/dawn/node/binding/GPUCommandEncoder.h
@@ -0,0 +1,84 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
+    // wgpu::CommandEncoder.
+    class GPUCommandEncoder final : public interop::GPUCommandEncoder {
+      public:
+        GPUCommandEncoder(wgpu::CommandEncoder enc);
+
+        // interop::GPUCommandEncoder interface compliance
+        interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
+            Napi::Env,
+            interop::GPURenderPassDescriptor descriptor) override;
+        interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
+            Napi::Env,
+            interop::GPUComputePassDescriptor descriptor) override;
+        void clearBuffer(Napi::Env,
+                         interop::Interface<interop::GPUBuffer> buffer,
+                         interop::GPUSize64 offset,
+                         std::optional<interop::GPUSize64> size) override;
+        void copyBufferToBuffer(Napi::Env,
+                                interop::Interface<interop::GPUBuffer> source,
+                                interop::GPUSize64 sourceOffset,
+                                interop::Interface<interop::GPUBuffer> destination,
+                                interop::GPUSize64 destinationOffset,
+                                interop::GPUSize64 size) override;
+        void copyBufferToTexture(Napi::Env,
+                                 interop::GPUImageCopyBuffer source,
+                                 interop::GPUImageCopyTexture destination,
+                                 interop::GPUExtent3D copySize) override;
+        void copyTextureToBuffer(Napi::Env,
+                                 interop::GPUImageCopyTexture source,
+                                 interop::GPUImageCopyBuffer destination,
+                                 interop::GPUExtent3D copySize) override;
+        void copyTextureToTexture(Napi::Env,
+                                  interop::GPUImageCopyTexture source,
+                                  interop::GPUImageCopyTexture destination,
+                                  interop::GPUExtent3D copySize) override;
+        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+        void popDebugGroup(Napi::Env) override;
+        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+        void writeTimestamp(Napi::Env,
+                            interop::Interface<interop::GPUQuerySet> querySet,
+                            interop::GPUSize32 queryIndex) override;
+        void resolveQuerySet(Napi::Env,
+                             interop::Interface<interop::GPUQuerySet> querySet,
+                             interop::GPUSize32 firstQuery,
+                             interop::GPUSize32 queryCount,
+                             interop::Interface<interop::GPUBuffer> destination,
+                             interop::GPUSize64 destinationOffset) override;
+        interop::Interface<interop::GPUCommandBuffer> finish(
+            Napi::Env env,
+            interop::GPUCommandBufferDescriptor descriptor) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::CommandEncoder enc_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
diff --git a/src/dawn/node/binding/GPUComputePassEncoder.cpp b/src/dawn/node/binding/GPUComputePassEncoder.cpp
new file mode 100644
index 0000000..b08518e
--- /dev/null
+++ b/src/dawn/node/binding/GPUComputePassEncoder.cpp
@@ -0,0 +1,128 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUComputePassEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUComputePassEncoder
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
+        : enc_(std::move(enc)) {
+    }
+
+    void GPUComputePassEncoder::setPipeline(
+        Napi::Env,
+        interop::Interface<interop::GPUComputePipeline> pipeline) {
+        enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+    }
+
+    void GPUComputePassEncoder::dispatch(Napi::Env,
+                                         interop::GPUSize32 workgroupCountX,
+                                         interop::GPUSize32 workgroupCountY,
+                                         interop::GPUSize32 workgroupCountZ) {
+        enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
+    }
+
+    void GPUComputePassEncoder::dispatchIndirect(
+        Napi::Env,
+        interop::Interface<interop::GPUBuffer> indirectBuffer,
+        interop::GPUSize64 indirectOffset) {
+        enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+    }
+
+    void GPUComputePassEncoder::end(Napi::Env) {
+        enc_.End();
+    }
+
+    void GPUComputePassEncoder::setBindGroup(
+        Napi::Env env,
+        interop::GPUIndex32 index,
+        interop::Interface<interop::GPUBindGroup> bindGroup,
+        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        uint32_t* offsets = nullptr;
+        uint32_t num_offsets = 0;
+        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, num_offsets, offsets);
+    }
+
+    void GPUComputePassEncoder::setBindGroup(Napi::Env env,
+                                             interop::GPUIndex32 index,
+                                             interop::Interface<interop::GPUBindGroup> bindGroup,
+                                             interop::Uint32Array dynamicOffsetsData,
+                                             interop::GPUSize64 dynamicOffsetsDataStart,
+                                             interop::GPUSize32 dynamicOffsetsDataLength) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        if (!conv(bg, bindGroup)) {
+            return;
+        }
+
+        if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+            Napi::RangeError::New(env,
+                                  "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        if (dynamicOffsetsDataLength >
+            dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+            Napi::RangeError::New(env,
+                                  "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+                                  "bound of dynamicOffsetData")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+    }
+
+    void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+        enc_.PushDebugGroup(groupLabel.c_str());
+    }
+
+    void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
+        enc_.PopDebugGroup();
+    }
+
+    void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+        enc_.InsertDebugMarker(markerLabel.c_str());
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUComputePassEncoder::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUComputePassEncoder::setLabel(Napi::Env,
+                                         std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUComputePassEncoder.h b/src/dawn/node/binding/GPUComputePassEncoder.h
new file mode 100644
index 0000000..8b325cb
--- /dev/null
+++ b/src/dawn/node/binding/GPUComputePassEncoder.h
@@ -0,0 +1,69 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
+    // wgpu::ComputePassEncoder.
+    class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
+      public:
+        GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::ComputePassEncoder&() const {
+            return enc_;
+        }
+
+        // interop::GPUComputePassEncoder interface compliance
+        void setPipeline(Napi::Env,
+                         interop::Interface<interop::GPUComputePipeline> pipeline) override;
+        void dispatch(Napi::Env,
+                      interop::GPUSize32 workgroupCountX,
+                      interop::GPUSize32 workgroupCountY,
+                      interop::GPUSize32 workgroupCountZ) override;
+        void dispatchIndirect(Napi::Env,
+                              interop::Interface<interop::GPUBuffer> indirectBuffer,
+                              interop::GPUSize64 indirectOffset) override;
+        void end(Napi::Env) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          interop::Uint32Array dynamicOffsetsData,
+                          interop::GPUSize64 dynamicOffsetsDataStart,
+                          interop::GPUSize32 dynamicOffsetsDataLength) override;
+        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+        void popDebugGroup(Napi::Env) override;
+        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::ComputePassEncoder enc_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
diff --git a/src/dawn/node/binding/GPUComputePipeline.cpp b/src/dawn/node/binding/GPUComputePipeline.cpp
new file mode 100644
index 0000000..9ae0924
--- /dev/null
+++ b/src/dawn/node/binding/GPUComputePipeline.cpp
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUComputePipeline
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
+        : pipeline_(std::move(pipeline)) {
+    }
+
+    interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
+        Napi::Env env,
+        uint32_t index) {
+        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+            env, pipeline_.GetBindGroupLayout(index));
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUComputePipeline::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUComputePipeline::setLabel(Napi::Env,
+                                      std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUComputePipeline.h b/src/dawn/node/binding/GPUComputePipeline.h
new file mode 100644
index 0000000..0410db0
--- /dev/null
+++ b/src/dawn/node/binding/GPUComputePipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUPIPELINE_H_
+#define DAWN_NODE_BINDING_GPUPIPELINE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
+    // wgpu::ComputePipeline.
+    class GPUComputePipeline final : public interop::GPUComputePipeline {
+      public:
+        GPUComputePipeline(wgpu::ComputePipeline pipeline);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::ComputePipeline&() const {
+            return pipeline_;
+        }
+
+        // interop::GPUComputePipeline interface compliance
+        interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+                                                                           uint32_t index) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::ComputePipeline pipeline_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUPIPELINE_H_
diff --git a/src/dawn/node/binding/GPUDevice.cpp b/src/dawn/node/binding/GPUDevice.cpp
new file mode 100644
index 0000000..8175c31
--- /dev/null
+++ b/src/dawn/node/binding/GPUDevice.cpp
@@ -0,0 +1,530 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUDevice.h"
+
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/binding/GPUCommandEncoder.h"
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPUQueue.h"
+#include "src/dawn/node/binding/GPURenderBundleEncoder.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/binding/GPUSampler.h"
+#include "src/dawn/node/binding/GPUShaderModule.h"
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    namespace {
+
+        class DeviceLostInfo : public interop::GPUDeviceLostInfo {
+          public:
+            DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+                : reason_(reason), message_(message) {
+            }
+            std::variant<interop::GPUDeviceLostReason, interop::UndefinedType> getReason(
+                Napi::Env env) override {
+                return reason_;
+            }
+            std::string getMessage(Napi::Env) override {
+                return message_;
+            }
+
+          private:
+            interop::GPUDeviceLostReason reason_;
+            std::string message_;
+        };
+
+        class OOMError : public interop::GPUOutOfMemoryError {};
+        class ValidationError : public interop::GPUValidationError {
+          public:
+            ValidationError(std::string message) : message_(std::move(message)) {
+            }
+
+            std::string getMessage(Napi::Env) override {
+                return message_;
+            };
+
+          private:
+            std::string message_;
+        };
+
+    }  // namespace
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUDevice
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
+        : env_(env),
+          device_(device),
+          async_(std::make_shared<AsyncRunner>(env, device)),
+          lost_promise_(env, PROMISE_INFO) {
+        device_.SetLoggingCallback(
+            [](WGPULoggingType type, char const* message, void* userdata) {
+                std::cout << type << ": " << message << std::endl;
+            },
+            nullptr);
+        device_.SetUncapturedErrorCallback(
+            [](WGPUErrorType type, char const* message, void* userdata) {
+                std::cout << type << ": " << message << std::endl;
+            },
+            nullptr);
+
+        device_.SetDeviceLostCallback(
+            [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
+                auto r = interop::GPUDeviceLostReason::kDestroyed;
+                switch (reason) {
+                    case WGPUDeviceLostReason_Force32:
+                        UNREACHABLE("WGPUDeviceLostReason_Force32");
+                        break;
+                    case WGPUDeviceLostReason_Destroyed:
+                    case WGPUDeviceLostReason_Undefined:
+                        r = interop::GPUDeviceLostReason::kDestroyed;
+                        break;
+                }
+                auto* self = static_cast<GPUDevice*>(userdata);
+                if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
+                    self->lost_promise_.Resolve(
+                        interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
+                }
+            },
+            this);
+    }
+
+    GPUDevice::~GPUDevice() {
+    }
+
+    interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
+        class Features : public interop::GPUSupportedFeatures {
+          public:
+            bool has(Napi::Env, std::string feature) override {
+                UNIMPLEMENTED();
+            }
+            std::vector<std::string> keys(Napi::Env) override {
+                UNIMPLEMENTED();
+            }
+        };
+        return interop::GPUSupportedFeatures::Create<Features>(env);
+    }
+
+    interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
+        wgpu::SupportedLimits limits{};
+        if (!device_.GetLimits(&limits)) {
+            Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
+        }
+        return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
+    }
+
+    interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
+        return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+    }
+
+    void GPUDevice::destroy(Napi::Env env) {
+        if (lost_promise_.GetState() == interop::PromiseState::Pending) {
+            lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+                env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
+        }
+        device_.Destroy();
+    }
+
+    interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
+        Napi::Env env,
+        interop::GPUBufferDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::BufferDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) ||
+            !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
+            !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
+            return {};
+        }
+        return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
+                                                     device_, async_);
+    }
+
+    interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
+        Napi::Env env,
+        interop::GPUTextureDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::TextureDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) ||  //
+            !conv(desc.size, descriptor.size) ||                                           //
+            !conv(desc.dimension, descriptor.dimension) ||                                 //
+            !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||                         //
+            !conv(desc.sampleCount, descriptor.sampleCount) ||                             //
+            !conv(desc.format, descriptor.format)) {
+            return {};
+        }
+        return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+    }
+
+    interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
+        Napi::Env env,
+        interop::GPUSamplerDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::SamplerDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) ||                //
+            !conv(desc.addressModeU, descriptor.addressModeU) ||  //
+            !conv(desc.addressModeV, descriptor.addressModeV) ||  //
+            !conv(desc.addressModeW, descriptor.addressModeW) ||  //
+            !conv(desc.magFilter, descriptor.magFilter) ||        //
+            !conv(desc.minFilter, descriptor.minFilter) ||        //
+            !conv(desc.mipmapFilter, descriptor.mipmapFilter) ||  //
+            !conv(desc.lodMinClamp, descriptor.lodMinClamp) ||    //
+            !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) ||    //
+            !conv(desc.compare, descriptor.compare) ||            //
+            !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
+            return {};
+        }
+        return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+    }
+
+    interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
+        Napi::Env,
+        interop::GPUExternalTextureDescriptor descriptor) {
+        UNIMPLEMENTED();
+    }
+
+    interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
+        Napi::Env env,
+        interop::GPUBindGroupLayoutDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::BindGroupLayoutDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) ||
+            !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+            return {};
+        }
+
+        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+            env, device_.CreateBindGroupLayout(&desc));
+    }
+
+    interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
+        Napi::Env env,
+        interop::GPUPipelineLayoutDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::PipelineLayoutDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) ||
+            !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
+            return {};
+        }
+
+        return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
+            env, device_.CreatePipelineLayout(&desc));
+    }
+
+    interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
+        Napi::Env env,
+        interop::GPUBindGroupDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::BindGroupDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
+            !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+            return {};
+        }
+
+        return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+    }
+
+    interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
+        Napi::Env env,
+        interop::GPUShaderModuleDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
+        wgpu::ShaderModuleDescriptor sm_desc{};
+        if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
+            return {};
+        }
+        sm_desc.nextInChain = &wgsl_desc;
+
+        return interop::GPUShaderModule::Create<GPUShaderModule>(
+            env, device_.CreateShaderModule(&sm_desc), async_);
+    }
+
+    interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
+        Napi::Env env,
+        interop::GPUComputePipelineDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::ComputePipelineDescriptor desc{};
+        if (!conv(desc, descriptor)) {
+            return {};
+        }
+
+        return interop::GPUComputePipeline::Create<GPUComputePipeline>(
+            env, device_.CreateComputePipeline(&desc));
+    }
+
+    interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
+        Napi::Env env,
+        interop::GPURenderPipelineDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::RenderPipelineDescriptor desc{};
+        if (!conv(desc, descriptor)) {
+            return {};
+        }
+
+        return interop::GPURenderPipeline::Create<GPURenderPipeline>(
+            env, device_.CreateRenderPipeline(&desc));
+    }
+
+    interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+    GPUDevice::createComputePipelineAsync(Napi::Env env,
+                                          interop::GPUComputePipelineDescriptor descriptor) {
+        using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
+
+        Converter conv(env);
+
+        wgpu::ComputePipelineDescriptor desc{};
+        if (!conv(desc, descriptor)) {
+            Promise promise(env, PROMISE_INFO);
+            promise.Reject(Errors::OperationError(env));
+            return promise;
+        }
+
+        struct Context {
+            Napi::Env env;
+            Promise promise;
+            AsyncTask task;
+        };
+        auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        device_.CreateComputePipelineAsync(
+            &desc,
+            [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+               char const* message, void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+                switch (status) {
+                    case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+                        c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
+                            c->env, pipeline));
+                        break;
+                    default:
+                        c->promise.Reject(Errors::OperationError(c->env));
+                        break;
+                }
+            },
+            ctx);
+
+        return promise;
+    }
+
+    interop::Promise<interop::Interface<interop::GPURenderPipeline>>
+    GPUDevice::createRenderPipelineAsync(Napi::Env env,
+                                         interop::GPURenderPipelineDescriptor descriptor) {
+        using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
+
+        Converter conv(env);
+
+        wgpu::RenderPipelineDescriptor desc{};
+        if (!conv(desc, descriptor)) {
+            Promise promise(env, PROMISE_INFO);
+            promise.Reject(Errors::OperationError(env));
+            return promise;
+        }
+
+        struct Context {
+            Napi::Env env;
+            Promise promise;
+            AsyncTask task;
+        };
+        auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        device_.CreateRenderPipelineAsync(
+            &desc,
+            [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+               char const* message, void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+                switch (status) {
+                    case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+                        c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
+                            c->env, pipeline));
+                        break;
+                    default:
+                        c->promise.Reject(Errors::OperationError(c->env));
+                        break;
+                }
+            },
+            ctx);
+
+        return promise;
+    }
+
+    interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
+        Napi::Env env,
+        interop::GPUCommandEncoderDescriptor descriptor) {
+        wgpu::CommandEncoderDescriptor desc{};
+        return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
+            env, device_.CreateCommandEncoder(&desc));
+    }
+
+    interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
+        Napi::Env env,
+        interop::GPURenderBundleEncoderDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::RenderBundleEncoderDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) ||
+            !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
+            !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
+            !conv(desc.sampleCount, descriptor.sampleCount) ||
+            !conv(desc.depthReadOnly, descriptor.depthReadOnly) ||
+            !conv(desc.stencilReadOnly, descriptor.stencilReadOnly)) {
+            return {};
+        }
+
+        return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
+            env, device_.CreateRenderBundleEncoder(&desc));
+    }
+
+    interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
+        Napi::Env env,
+        interop::GPUQuerySetDescriptor descriptor) {
+        Converter conv(env);
+
+        wgpu::QuerySetDescriptor desc{};
+        if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
+            !conv(desc.count, descriptor.count)) {
+            return {};
+        }
+
+        return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+    }
+
+    interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
+        Napi::Env env) {
+        return lost_promise_;
+    }
+
+    void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
+        wgpu::ErrorFilter f;
+        switch (filter) {
+            case interop::GPUErrorFilter::kOutOfMemory:
+                f = wgpu::ErrorFilter::OutOfMemory;
+                break;
+            case interop::GPUErrorFilter::kValidation:
+                f = wgpu::ErrorFilter::Validation;
+                break;
+            default:
+                Napi::Error::New(env, "unhandled GPUErrorFilter value")
+                    .ThrowAsJavaScriptException();
+                return;
+        }
+        device_.PushErrorScope(f);
+    }
+
+    interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
+        using Promise = interop::Promise<std::optional<interop::GPUError>>;
+        struct Context {
+            Napi::Env env;
+            Promise promise;
+            AsyncTask task;
+        };
+        auto* ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        device_.PopErrorScope(
+            [](WGPUErrorType type, char const* message, void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+                auto env = c->env;
+                switch (type) {
+                    case WGPUErrorType::WGPUErrorType_NoError:
+                        c->promise.Resolve({});
+                        break;
+                    case WGPUErrorType::WGPUErrorType_OutOfMemory:
+                        c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
+                        break;
+                    case WGPUErrorType::WGPUErrorType_Validation:
+                        c->promise.Resolve(
+                            interop::GPUValidationError::Create<ValidationError>(env, message));
+                        break;
+                    case WGPUErrorType::WGPUErrorType_Unknown:
+                    case WGPUErrorType::WGPUErrorType_DeviceLost:
+                        c->promise.Reject(Errors::OperationError(env, message));
+                        break;
+                    default:
+                        c->promise.Reject("unhandled error type");
+                        break;
+                }
+            },
+            ctx);
+
+        return promise;
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUDevice::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    };
+
+    void GPUDevice::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    };
+
+    interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
+        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+        UNIMPLEMENTED();
+    }
+
+    void GPUDevice::setOnuncapturederror(Napi::Env,
+                                         interop::Interface<interop::EventHandler> value) {
+        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+        UNIMPLEMENTED();
+    }
+
+    void GPUDevice::addEventListener(
+        Napi::Env,
+        std::string type,
+        std::optional<interop::Interface<interop::EventListener>> callback,
+        std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
+        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+        UNIMPLEMENTED();
+    }
+
+    void GPUDevice::removeEventListener(
+        Napi::Env,
+        std::string type,
+        std::optional<interop::Interface<interop::EventListener>> callback,
+        std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
+        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+        UNIMPLEMENTED();
+    }
+
+    bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
+        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUDevice.h b/src/dawn/node/binding/GPUDevice.h
new file mode 100644
index 0000000..e3215bd
--- /dev/null
+++ b/src/dawn/node/binding/GPUDevice.h
@@ -0,0 +1,115 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUDEVICE_H_
+#define DAWN_NODE_BINDING_GPUDEVICE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+    // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
+    class GPUDevice final : public interop::GPUDevice {
+      public:
+        GPUDevice(Napi::Env env, wgpu::Device device);
+        ~GPUDevice();
+
+        // interop::GPUDevice interface compliance
+        interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+        interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+        interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
+        void destroy(Napi::Env) override;
+        interop::Interface<interop::GPUBuffer> createBuffer(
+            Napi::Env env,
+            interop::GPUBufferDescriptor descriptor) override;
+        interop::Interface<interop::GPUTexture> createTexture(
+            Napi::Env,
+            interop::GPUTextureDescriptor descriptor) override;
+        interop::Interface<interop::GPUSampler> createSampler(
+            Napi::Env,
+            interop::GPUSamplerDescriptor descriptor) override;
+        interop::Interface<interop::GPUExternalTexture> importExternalTexture(
+            Napi::Env,
+            interop::GPUExternalTextureDescriptor descriptor) override;
+        interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
+            Napi::Env,
+            interop::GPUBindGroupLayoutDescriptor descriptor) override;
+        interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
+            Napi::Env,
+            interop::GPUPipelineLayoutDescriptor descriptor) override;
+        interop::Interface<interop::GPUBindGroup> createBindGroup(
+            Napi::Env,
+            interop::GPUBindGroupDescriptor descriptor) override;
+        interop::Interface<interop::GPUShaderModule> createShaderModule(
+            Napi::Env,
+            interop::GPUShaderModuleDescriptor descriptor) override;
+        interop::Interface<interop::GPUComputePipeline> createComputePipeline(
+            Napi::Env,
+            interop::GPUComputePipelineDescriptor descriptor) override;
+        interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
+            Napi::Env,
+            interop::GPURenderPipelineDescriptor descriptor) override;
+        interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+        createComputePipelineAsync(Napi::Env env,
+                                   interop::GPUComputePipelineDescriptor descriptor) override;
+        interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
+            Napi::Env env,
+            interop::GPURenderPipelineDescriptor descriptor) override;
+        interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
+            Napi::Env env,
+            interop::GPUCommandEncoderDescriptor descriptor) override;
+        interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
+            Napi::Env,
+            interop::GPURenderBundleEncoderDescriptor descriptor) override;
+        interop::Interface<interop::GPUQuerySet> createQuerySet(
+            Napi::Env,
+            interop::GPUQuerySetDescriptor descriptor) override;
+        interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
+            Napi::Env env) override;
+        void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
+        interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+        interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
+        void setOnuncapturederror(Napi::Env,
+                                  interop::Interface<interop::EventHandler> value) override;
+        void addEventListener(
+            Napi::Env,
+            std::string type,
+            std::optional<interop::Interface<interop::EventListener>> callback,
+            std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
+        void removeEventListener(
+            Napi::Env,
+            std::string type,
+            std::optional<interop::Interface<interop::EventListener>> callback,
+            std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
+        bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
+
+      private:
+        void QueueTick();
+
+        Napi::Env env_;
+        wgpu::Device device_;
+        std::shared_ptr<AsyncRunner> async_;
+
+        // This promise's JS object lives as long as the device because it is stored in .lost
+        // of the wrapper JS object.
+        interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUDEVICE_H_
diff --git a/src/dawn/node/binding/GPUPipelineLayout.cpp b/src/dawn/node/binding/GPUPipelineLayout.cpp
new file mode 100644
index 0000000..4e0b5a9
--- /dev/null
+++ b/src/dawn/node/binding/GPUPipelineLayout.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUPipelineLayout
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUPipelineLayout::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUPipelineLayout::setLabel(Napi::Env,
+                                     std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUPipelineLayout.h b/src/dawn/node/binding/GPUPipelineLayout.h
new file mode 100644
index 0000000..a9c3bb6
--- /dev/null
+++ b/src/dawn/node/binding/GPUPipelineLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+#define DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
+    // wgpu::PipelineLayout.
+    class GPUPipelineLayout final : public interop::GPUPipelineLayout {
+      public:
+        GPUPipelineLayout(wgpu::PipelineLayout layout);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::PipelineLayout&() const {
+            return layout_;
+        }
+
+        // interop::GPUPipelineLayout interface compliance
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::PipelineLayout layout_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
diff --git a/src/dawn/node/binding/GPUQuerySet.cpp b/src/dawn/node/binding/GPUQuerySet.cpp
new file mode 100644
index 0000000..e9f0e3f
--- /dev/null
+++ b/src/dawn/node/binding/GPUQuerySet.cpp
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUQuerySet.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUQuerySet
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
+    }
+
+    void GPUQuerySet::destroy(Napi::Env) {
+        query_set_.Destroy();
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUQuerySet::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUQuerySet::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUQuerySet.h b/src/dawn/node/binding/GPUQuerySet.h
new file mode 100644
index 0000000..2ae99eb
--- /dev/null
+++ b/src/dawn/node/binding/GPUQuerySet.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUERYSET_H_
+#define DAWN_NODE_BINDING_GPUQUERYSET_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
+    class GPUQuerySet final : public interop::GPUQuerySet {
+      public:
+        GPUQuerySet(wgpu::QuerySet query_set);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::QuerySet&() const {
+            return query_set_;
+        }
+
+        // interop::GPUQuerySet interface compliance
+        void destroy(Napi::Env) override;
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::QuerySet query_set_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUQUERYSET_H_
diff --git a/src/dawn/node/binding/GPUQueue.cpp b/src/dawn/node/binding/GPUQueue.cpp
new file mode 100644
index 0000000..0e3a0be
--- /dev/null
+++ b/src/dawn/node/binding/GPUQueue.cpp
@@ -0,0 +1,161 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUQueue.h"
+
+#include <cassert>
+#include <limits>
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUQueue
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
+        : queue_(std::move(queue)), async_(std::move(async)) {
+    }
+
+    void GPUQueue::submit(
+        Napi::Env env,
+        std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
+        std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
+        for (size_t i = 0; i < commandBuffers.size(); i++) {
+            bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
+        }
+        Converter conv(env);
+        uint32_t bufs_size;
+        if (!conv(bufs_size, bufs.size())) {
+            return;
+        }
+        queue_.Submit(bufs_size, bufs.data());
+    }
+
+    interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
+        struct Context {
+            Napi::Env env;
+            interop::Promise<void> promise;
+            AsyncTask task;
+        };
+        auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        queue_.OnSubmittedWorkDone(
+            0,
+            [](WGPUQueueWorkDoneStatus status, void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+                if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
+                    Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
+                        .ThrowAsJavaScriptException();
+                }
+                c->promise.Resolve();
+            },
+            ctx);
+
+        return promise;
+    }
+
+    void GPUQueue::writeBuffer(Napi::Env env,
+                               interop::Interface<interop::GPUBuffer> buffer,
+                               interop::GPUSize64 bufferOffset,
+                               interop::BufferSource data,
+                               interop::GPUSize64 dataOffsetElements,
+                               std::optional<interop::GPUSize64> sizeElements) {
+        wgpu::Buffer buf = *buffer.As<GPUBuffer>();
+        Converter::BufferSource src{};
+        Converter conv(env);
+        if (!conv(src, data)) {
+            return;
+        }
+
+        // Note that in the JS semantics of WebGPU, writeBuffer works in number of elements of the
+        // typed arrays.
+        if (dataOffsetElements > uint64_t(src.size / src.bytesPerElement)) {
+            binding::Errors::OperationError(env, "dataOffset is larger than data's size.")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+        uint64_t dataOffset = dataOffsetElements * src.bytesPerElement;
+        src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
+        src.size -= dataOffset;
+
+        // Size defaults to dataSize - dataOffset. Instead of computing in elements, we directly
+        // use it in bytes, and convert the provided value, if any, in bytes.
+        uint64_t size64 = uint64_t(src.size);
+        if (sizeElements.has_value()) {
+            if (sizeElements.value() > std::numeric_limits<uint64_t>::max() / src.bytesPerElement) {
+                binding::Errors::OperationError(env, "size overflows.")
+                    .ThrowAsJavaScriptException();
+                return;
+            }
+            size64 = sizeElements.value() * src.bytesPerElement;
+        }
+
+        if (size64 > uint64_t(src.size)) {
+            binding::Errors::OperationError(env, "size + dataOffset is larger than data's size.")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        if (size64 % 4 != 0) {
+            binding::Errors::OperationError(env, "size is not a multiple of 4 bytes.")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        assert(size64 <= std::numeric_limits<size_t>::max());
+        queue_.WriteBuffer(buf, bufferOffset, src.data, static_cast<size_t>(size64));
+    }
+
+    void GPUQueue::writeTexture(Napi::Env env,
+                                interop::GPUImageCopyTexture destination,
+                                interop::BufferSource data,
+                                interop::GPUImageDataLayout dataLayout,
+                                interop::GPUExtent3D size) {
+        wgpu::ImageCopyTexture dst{};
+        Converter::BufferSource src{};
+        wgpu::TextureDataLayout layout{};
+        wgpu::Extent3D sz{};
+        Converter conv(env);
+        if (!conv(dst, destination) ||    //
+            !conv(src, data) ||           //
+            !conv(layout, dataLayout) ||  //
+            !conv(sz, size)) {
+            return;
+        }
+
+        queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+    }
+
+    void GPUQueue::copyExternalImageToTexture(Napi::Env,
+                                              interop::GPUImageCopyExternalImage source,
+                                              interop::GPUImageCopyTextureTagged destination,
+                                              interop::GPUExtent3D copySize) {
+        UNIMPLEMENTED();
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUQueue::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUQueue::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUQueue.h b/src/dawn/node/binding/GPUQueue.h
new file mode 100644
index 0000000..0c5ba7d
--- /dev/null
+++ b/src/dawn/node/binding/GPUQueue.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUEUE_H_
+#define DAWN_NODE_BINDING_GPUQUEUE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
+    class GPUQueue final : public interop::GPUQueue {
+      public:
+        GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
+
+        // interop::GPUQueue interface compliance
+        void submit(
+            Napi::Env,
+            std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
+        interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
+        void writeBuffer(Napi::Env,
+                         interop::Interface<interop::GPUBuffer> buffer,
+                         interop::GPUSize64 bufferOffset,
+                         interop::BufferSource data,
+                         interop::GPUSize64 dataOffset,
+                         std::optional<interop::GPUSize64> size) override;
+        void writeTexture(Napi::Env,
+                          interop::GPUImageCopyTexture destination,
+                          interop::BufferSource data,
+                          interop::GPUImageDataLayout dataLayout,
+                          interop::GPUExtent3D size) override;
+        void copyExternalImageToTexture(Napi::Env,
+                                        interop::GPUImageCopyExternalImage source,
+                                        interop::GPUImageCopyTextureTagged destination,
+                                        interop::GPUExtent3D copySize) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::Queue queue_;
+        std::shared_ptr<AsyncRunner> async_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUQUEUE_H_
diff --git a/src/dawn/node/binding/GPURenderBundle.cpp b/src/dawn/node/binding/GPURenderBundle.cpp
new file mode 100644
index 0000000..b741817
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderBundle.cpp
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderBundle.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPURenderBundle
+    ////////////////////////////////////////////////////////////////////////////////
+    GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPURenderBundle::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPURenderBundle::setLabel(Napi::Env,
+                                   std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderBundle.h b/src/dawn/node/binding/GPURenderBundle.h
new file mode 100644
index 0000000..8d9b2c7
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderBundle.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
+    // wgpu::RenderBundle.
+    class GPURenderBundle final : public interop::GPURenderBundle {
+      public:
+        GPURenderBundle(wgpu::RenderBundle bundle);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::RenderBundle&() const {
+            return bundle_;
+        }
+
+        // interop::GPURenderBundle interface compliance
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::RenderBundle bundle_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
diff --git a/src/dawn/node/binding/GPURenderBundleEncoder.cpp b/src/dawn/node/binding/GPURenderBundleEncoder.cpp
new file mode 100644
index 0000000..87ecd34
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderBundleEncoder.cpp
@@ -0,0 +1,193 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderBundleEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPURenderBundleEncoder
+    ////////////////////////////////////////////////////////////////////////////////
+    GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
+        : enc_(std::move(enc)) {
+    }
+
+    interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
+        Napi::Env env,
+        interop::GPURenderBundleDescriptor descriptor) {
+        wgpu::RenderBundleDescriptor desc{};
+
+        return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+    }
+
+    void GPURenderBundleEncoder::setBindGroup(
+        Napi::Env env,
+        interop::GPUIndex32 index,
+        interop::Interface<interop::GPUBindGroup> bindGroup,
+        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        uint32_t* offsets = nullptr;
+        uint32_t num_offsets = 0;
+        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, num_offsets, offsets);
+    }
+
+    void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
+                                              interop::GPUIndex32 index,
+                                              interop::Interface<interop::GPUBindGroup> bindGroup,
+                                              interop::Uint32Array dynamicOffsetsData,
+                                              interop::GPUSize64 dynamicOffsetsDataStart,
+                                              interop::GPUSize32 dynamicOffsetsDataLength) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        if (!conv(bg, bindGroup)) {
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+    }
+
+    void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+        enc_.PushDebugGroup(groupLabel.c_str());
+    }
+
+    void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
+        enc_.PopDebugGroup();
+    }
+
+    void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+        enc_.InsertDebugMarker(markerLabel.c_str());
+    }
+
+    void GPURenderBundleEncoder::setPipeline(
+        Napi::Env env,
+        interop::Interface<interop::GPURenderPipeline> pipeline) {
+        Converter conv(env);
+
+        wgpu::RenderPipeline p{};
+        if (!conv(p, pipeline)) {
+            return;
+        }
+
+        enc_.SetPipeline(p);
+    }
+
+    void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
+                                                interop::Interface<interop::GPUBuffer> buffer,
+                                                interop::GPUIndexFormat indexFormat,
+                                                interop::GPUSize64 offset,
+                                                std::optional<interop::GPUSize64> size) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        wgpu::IndexFormat f{};
+        uint64_t o = 0;
+        uint64_t s = wgpu::kWholeSize;
+        if (!conv(b, buffer) ||       //
+            !conv(f, indexFormat) ||  //
+            !conv(o, offset) ||       //
+            !conv(s, size)) {
+            return;
+        }
+
+        enc_.SetIndexBuffer(b, f, o, s);
+    }
+
+    void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
+                                                 interop::GPUIndex32 slot,
+                                                 interop::Interface<interop::GPUBuffer> buffer,
+                                                 interop::GPUSize64 offset,
+                                                 std::optional<interop::GPUSize64> size) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t s = wgpu::kWholeSize;
+        if (!conv(b, buffer) || !conv(s, size)) {
+            return;
+        }
+        enc_.SetVertexBuffer(slot, b, offset, s);
+    }
+
+    void GPURenderBundleEncoder::draw(Napi::Env env,
+                                      interop::GPUSize32 vertexCount,
+                                      interop::GPUSize32 instanceCount,
+                                      interop::GPUSize32 firstVertex,
+                                      interop::GPUSize32 firstInstance) {
+        enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+    }
+
+    void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
+                                             interop::GPUSize32 indexCount,
+                                             interop::GPUSize32 instanceCount,
+                                             interop::GPUSize32 firstIndex,
+                                             interop::GPUSignedOffset32 baseVertex,
+                                             interop::GPUSize32 firstInstance) {
+        enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+    }
+
+    void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
+                                              interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                              interop::GPUSize64 indirectOffset) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t o = 0;
+
+        if (!conv(b, indirectBuffer) ||  //
+            !conv(o, indirectOffset)) {
+            return;
+        }
+        enc_.DrawIndirect(b, o);
+    }
+
+    void GPURenderBundleEncoder::drawIndexedIndirect(
+        Napi::Env env,
+        interop::Interface<interop::GPUBuffer> indirectBuffer,
+        interop::GPUSize64 indirectOffset) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t o = 0;
+
+        if (!conv(b, indirectBuffer) ||  //
+            !conv(o, indirectOffset)) {
+            return;
+        }
+        enc_.DrawIndexedIndirect(b, o);
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPURenderBundleEncoder::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPURenderBundleEncoder::setLabel(Napi::Env,
+                                          std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderBundleEncoder.h b/src/dawn/node/binding/GPURenderBundleEncoder.h
new file mode 100644
index 0000000..8c879b5
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderBundleEncoder.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
+    // wgpu::RenderBundleEncoder.
+    class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
+      public:
+        GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
+
+        // interop::GPURenderBundleEncoder interface compliance
+        interop::Interface<interop::GPURenderBundle> finish(
+            Napi::Env,
+            interop::GPURenderBundleDescriptor descriptor) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          interop::Uint32Array dynamicOffsetsData,
+                          interop::GPUSize64 dynamicOffsetsDataStart,
+                          interop::GPUSize32 dynamicOffsetsDataLength) override;
+        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+        void popDebugGroup(Napi::Env) override;
+        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+        void setPipeline(Napi::Env,
+                         interop::Interface<interop::GPURenderPipeline> pipeline) override;
+        void setIndexBuffer(Napi::Env,
+                            interop::Interface<interop::GPUBuffer> buffer,
+                            interop::GPUIndexFormat indexFormat,
+                            interop::GPUSize64 offset,
+                            std::optional<interop::GPUSize64> size) override;
+        void setVertexBuffer(Napi::Env,
+                             interop::GPUIndex32 slot,
+                             interop::Interface<interop::GPUBuffer> buffer,
+                             interop::GPUSize64 offset,
+                             std::optional<interop::GPUSize64> size) override;
+        void draw(Napi::Env,
+                  interop::GPUSize32 vertexCount,
+                  interop::GPUSize32 instanceCount,
+                  interop::GPUSize32 firstVertex,
+                  interop::GPUSize32 firstInstance) override;
+        void drawIndexed(Napi::Env,
+                         interop::GPUSize32 indexCount,
+                         interop::GPUSize32 instanceCount,
+                         interop::GPUSize32 firstIndex,
+                         interop::GPUSignedOffset32 baseVertex,
+                         interop::GPUSize32 firstInstance) override;
+        void drawIndirect(Napi::Env,
+                          interop::Interface<interop::GPUBuffer> indirectBuffer,
+                          interop::GPUSize64 indirectOffset) override;
+        void drawIndexedIndirect(Napi::Env,
+                                 interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                 interop::GPUSize64 indirectOffset) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::RenderBundleEncoder enc_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
diff --git a/src/dawn/node/binding/GPURenderPassEncoder.cpp b/src/dawn/node/binding/GPURenderPassEncoder.cpp
new file mode 100644
index 0000000..0297d65
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderPassEncoder.cpp
@@ -0,0 +1,255 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderPassEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPURenderPassEncoder
+    ////////////////////////////////////////////////////////////////////////////////
+    GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
+    }
+
+    void GPURenderPassEncoder::setViewport(Napi::Env,
+                                           float x,
+                                           float y,
+                                           float width,
+                                           float height,
+                                           float minDepth,
+                                           float maxDepth) {
+        enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+    }
+
+    void GPURenderPassEncoder::setScissorRect(Napi::Env,
+                                              interop::GPUIntegerCoordinate x,
+                                              interop::GPUIntegerCoordinate y,
+                                              interop::GPUIntegerCoordinate width,
+                                              interop::GPUIntegerCoordinate height) {
+        enc_.SetScissorRect(x, y, width, height);
+    }
+
+    void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
+        Converter conv(env);
+
+        wgpu::Color c{};
+        if (!conv(c, color)) {
+            return;
+        }
+
+        enc_.SetBlendConstant(&c);
+    }
+
+    void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
+        enc_.SetStencilReference(reference);
+    }
+
+    void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
+        enc_.BeginOcclusionQuery(queryIndex);
+    }
+
+    void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
+        enc_.EndOcclusionQuery();
+    }
+
+    void GPURenderPassEncoder::executeBundles(
+        Napi::Env env,
+        std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
+        Converter conv(env);
+
+        wgpu::RenderBundle* bundles = nullptr;
+        uint32_t bundleCount = 0;
+        if (!conv(bundles, bundleCount, bundles_in)) {
+            return;
+        }
+
+        enc_.ExecuteBundles(bundleCount, bundles);
+    }
+
+    void GPURenderPassEncoder::end(Napi::Env) {
+        enc_.End();
+    }
+
+    void GPURenderPassEncoder::setBindGroup(
+        Napi::Env env,
+        interop::GPUIndex32 index,
+        interop::Interface<interop::GPUBindGroup> bindGroup,
+        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        uint32_t* offsets = nullptr;
+        uint32_t num_offsets = 0;
+        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, num_offsets, offsets);
+    }
+
+    void GPURenderPassEncoder::setBindGroup(Napi::Env env,
+                                            interop::GPUIndex32 index,
+                                            interop::Interface<interop::GPUBindGroup> bindGroup,
+                                            interop::Uint32Array dynamicOffsetsData,
+                                            interop::GPUSize64 dynamicOffsetsDataStart,
+                                            interop::GPUSize32 dynamicOffsetsDataLength) {
+        Converter conv(env);
+
+        wgpu::BindGroup bg{};
+        if (!conv(bg, bindGroup)) {
+            return;
+        }
+
+        if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+            Napi::RangeError::New(env,
+                                  "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        if (dynamicOffsetsDataLength >
+            dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+            Napi::RangeError::New(env,
+                                  "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+                                  "bound of dynamicOffsetData")
+                .ThrowAsJavaScriptException();
+            return;
+        }
+
+        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+    }
+
+    void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+        enc_.PushDebugGroup(groupLabel.c_str());
+    }
+
+    void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
+        enc_.PopDebugGroup();
+    }
+
+    void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+        enc_.InsertDebugMarker(markerLabel.c_str());
+    }
+
+    void GPURenderPassEncoder::setPipeline(
+        Napi::Env env,
+        interop::Interface<interop::GPURenderPipeline> pipeline) {
+        Converter conv(env);
+        wgpu::RenderPipeline rp{};
+        if (!conv(rp, pipeline)) {
+            return;
+        }
+        enc_.SetPipeline(rp);
+    }
+
+    void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
+                                              interop::Interface<interop::GPUBuffer> buffer,
+                                              interop::GPUIndexFormat indexFormat,
+                                              interop::GPUSize64 offset,
+                                              std::optional<interop::GPUSize64> size) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        wgpu::IndexFormat f;
+        uint64_t s = wgpu::kWholeSize;
+        if (!conv(b, buffer) ||       //
+            !conv(f, indexFormat) ||  //
+            !conv(s, size)) {
+            return;
+        }
+        enc_.SetIndexBuffer(b, f, offset, s);
+    }
+
+    void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
+                                               interop::GPUIndex32 slot,
+                                               interop::Interface<interop::GPUBuffer> buffer,
+                                               interop::GPUSize64 offset,
+                                               std::optional<interop::GPUSize64> size) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t s = wgpu::kWholeSize;
+        if (!conv(b, buffer) || !conv(s, size)) {
+            return;
+        }
+        enc_.SetVertexBuffer(slot, b, offset, s);
+    }
+
+    void GPURenderPassEncoder::draw(Napi::Env env,
+                                    interop::GPUSize32 vertexCount,
+                                    interop::GPUSize32 instanceCount,
+                                    interop::GPUSize32 firstVertex,
+                                    interop::GPUSize32 firstInstance) {
+        enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+    }
+
+    void GPURenderPassEncoder::drawIndexed(Napi::Env env,
+                                           interop::GPUSize32 indexCount,
+                                           interop::GPUSize32 instanceCount,
+                                           interop::GPUSize32 firstIndex,
+                                           interop::GPUSignedOffset32 baseVertex,
+                                           interop::GPUSize32 firstInstance) {
+        enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+    }
+
+    void GPURenderPassEncoder::drawIndirect(Napi::Env env,
+                                            interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                            interop::GPUSize64 indirectOffset) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t o = 0;
+
+        if (!conv(b, indirectBuffer) ||  //
+            !conv(o, indirectOffset)) {
+            return;
+        }
+        enc_.DrawIndirect(b, o);
+    }
+
+    void GPURenderPassEncoder::drawIndexedIndirect(
+        Napi::Env env,
+        interop::Interface<interop::GPUBuffer> indirectBuffer,
+        interop::GPUSize64 indirectOffset) {
+        Converter conv(env);
+
+        wgpu::Buffer b{};
+        uint64_t o = 0;
+
+        if (!conv(b, indirectBuffer) ||  //
+            !conv(o, indirectOffset)) {
+            return;
+        }
+        enc_.DrawIndexedIndirect(b, o);
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPURenderPassEncoder::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPURenderPassEncoder::setLabel(Napi::Env,
+                                        std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderPassEncoder.h b/src/dawn/node/binding/GPURenderPassEncoder.h
new file mode 100644
index 0000000..bbd8012
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderPassEncoder.h
@@ -0,0 +1,108 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
+    // wgpu::RenderPassEncoder.
+    class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
+      public:
+        GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::RenderPassEncoder&() const {
+            return enc_;
+        }
+
+        // interop::GPURenderPassEncoder interface compliance
+        void setViewport(Napi::Env,
+                         float x,
+                         float y,
+                         float width,
+                         float height,
+                         float minDepth,
+                         float maxDepth) override;
+        void setScissorRect(Napi::Env,
+                            interop::GPUIntegerCoordinate x,
+                            interop::GPUIntegerCoordinate y,
+                            interop::GPUIntegerCoordinate width,
+                            interop::GPUIntegerCoordinate height) override;
+        void setBlendConstant(Napi::Env, interop::GPUColor color) override;
+        void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
+        void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
+        void endOcclusionQuery(Napi::Env) override;
+        void executeBundles(
+            Napi::Env,
+            std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
+        void end(Napi::Env) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          interop::Uint32Array dynamicOffsetsData,
+                          interop::GPUSize64 dynamicOffsetsDataStart,
+                          interop::GPUSize32 dynamicOffsetsDataLength) override;
+        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+        void popDebugGroup(Napi::Env) override;
+        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+        void setPipeline(Napi::Env,
+                         interop::Interface<interop::GPURenderPipeline> pipeline) override;
+        void setIndexBuffer(Napi::Env,
+                            interop::Interface<interop::GPUBuffer> buffer,
+                            interop::GPUIndexFormat indexFormat,
+                            interop::GPUSize64 offset,
+                            std::optional<interop::GPUSize64> size) override;
+        void setVertexBuffer(Napi::Env,
+                             interop::GPUIndex32 slot,
+                             interop::Interface<interop::GPUBuffer> buffer,
+                             interop::GPUSize64 offset,
+                             std::optional<interop::GPUSize64> size) override;
+        void draw(Napi::Env,
+                  interop::GPUSize32 vertexCount,
+                  interop::GPUSize32 instanceCount,
+                  interop::GPUSize32 firstVertex,
+                  interop::GPUSize32 firstInstance) override;
+        void drawIndexed(Napi::Env,
+                         interop::GPUSize32 indexCount,
+                         interop::GPUSize32 instanceCount,
+                         interop::GPUSize32 firstIndex,
+                         interop::GPUSignedOffset32 baseVertex,
+                         interop::GPUSize32 firstInstance) override;
+        void drawIndirect(Napi::Env,
+                          interop::Interface<interop::GPUBuffer> indirectBuffer,
+                          interop::GPUSize64 indirectOffset) override;
+        void drawIndexedIndirect(Napi::Env,
+                                 interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                 interop::GPUSize64 indirectOffset) override;
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::RenderPassEncoder enc_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
diff --git a/src/dawn/node/binding/GPURenderPipeline.cpp b/src/dawn/node/binding/GPURenderPipeline.cpp
new file mode 100644
index 0000000..8618f1f
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderPipeline.cpp
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPURenderPipeline
+    ////////////////////////////////////////////////////////////////////////////////
+    GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
+        : pipeline_(std::move(pipeline)) {
+    }
+
+    interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
+        Napi::Env env,
+        uint32_t index) {
+        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+            env, pipeline_.GetBindGroupLayout(index));
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPURenderPipeline::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPURenderPipeline::setLabel(Napi::Env,
+                                     std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderPipeline.h b/src/dawn/node/binding/GPURenderPipeline.h
new file mode 100644
index 0000000..ad8b0af
--- /dev/null
+++ b/src/dawn/node/binding/GPURenderPipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+#define DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
+    // wgpu::RenderPipeline.
+    class GPURenderPipeline final : public interop::GPURenderPipeline {
+      public:
+        GPURenderPipeline(wgpu::RenderPipeline pipeline);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::RenderPipeline&() const {
+            return pipeline_;
+        }
+
+        // interop::GPURenderPipeline interface compliance
+        interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+                                                                           uint32_t index) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::RenderPipeline pipeline_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
diff --git a/src/dawn/node/binding/GPUSampler.cpp b/src/dawn/node/binding/GPUSampler.cpp
new file mode 100644
index 0000000..c1076e9
--- /dev/null
+++ b/src/dawn/node/binding/GPUSampler.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUSampler.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUSampler
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUSampler::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUSampler::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUSampler.h b/src/dawn/node/binding/GPUSampler.h
new file mode 100644
index 0000000..e6d792e
--- /dev/null
+++ b/src/dawn/node/binding/GPUSampler.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSAMPLER_H_
+#define DAWN_NODE_BINDING_GPUSAMPLER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+    // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
+    class GPUSampler final : public interop::GPUSampler {
+      public:
+        GPUSampler(wgpu::Sampler sampler);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::Sampler&() const {
+            return sampler_;
+        }
+
+        // interop::GPUSampler interface compliance
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::Sampler sampler_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUSAMPLER_H_
diff --git a/src/dawn/node/binding/GPUShaderModule.cpp b/src/dawn/node/binding/GPUShaderModule.cpp
new file mode 100644
index 0000000..a599f05
--- /dev/null
+++ b/src/dawn/node/binding/GPUShaderModule.cpp
@@ -0,0 +1,126 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUShaderModule.h"
+
+#include <memory>
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUShaderModule
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
+        : shader_(std::move(shader)), async_(std::move(async)) {
+    }
+
+    interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
+    GPUShaderModule::compilationInfo(Napi::Env env) {
+        struct GPUCompilationMessage : public interop::GPUCompilationMessage {
+            WGPUCompilationMessage message;
+
+            GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {
+            }
+            std::string getMessage(Napi::Env) override {
+                return message.message;
+            }
+            interop::GPUCompilationMessageType getType(Napi::Env) override {
+                switch (message.type) {
+                    case WGPUCompilationMessageType_Error:
+                        return interop::GPUCompilationMessageType::kError;
+                    case WGPUCompilationMessageType_Warning:
+                        return interop::GPUCompilationMessageType::kWarning;
+                    case WGPUCompilationMessageType_Info:
+                        return interop::GPUCompilationMessageType::kInfo;
+                    default:
+                        UNIMPLEMENTED();
+                }
+            }
+            uint64_t getLineNum(Napi::Env) override {
+                return message.lineNum;
+            }
+            uint64_t getLinePos(Napi::Env) override {
+                return message.linePos;
+            }
+            uint64_t getOffset(Napi::Env) override {
+                return message.offset;
+            }
+            uint64_t getLength(Napi::Env) override {
+                return message.length;
+            }
+        };
+
+        using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+
+        struct GPUCompilationInfo : public interop::GPUCompilationInfo {
+            std::vector<Napi::ObjectReference> messages;
+
+            GPUCompilationInfo(Napi::Env env, Messages msgs) {
+                messages.reserve(msgs.size());
+                for (auto& msg : msgs) {
+                    messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
+                }
+            }
+            Messages getMessages(Napi::Env) override {
+                Messages out;
+                out.reserve(messages.size());
+                for (auto& msg : messages) {
+                    out.emplace_back(msg.Value());
+                }
+                return out;
+            }
+        };
+
+        using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
+
+        struct Context {
+            Napi::Env env;
+            Promise promise;
+            AsyncTask task;
+        };
+        auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        shader_.GetCompilationInfo(
+            [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
+               void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+                Messages messages(compilationInfo->messageCount);
+                for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
+                    auto& msg = compilationInfo->messages[i];
+                    messages[i] =
+                        interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
+                }
+
+                c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+                    c->env, c->env, std::move(messages)));
+            },
+            ctx);
+
+        return promise;
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUShaderModule::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUShaderModule::setLabel(Napi::Env,
+                                   std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUShaderModule.h b/src/dawn/node/binding/GPUShaderModule.h
new file mode 100644
index 0000000..df2bdc2
--- /dev/null
+++ b/src/dawn/node/binding/GPUShaderModule.h
@@ -0,0 +1,50 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+#define DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
+    // wgpu::ShaderModule.
+    class GPUShaderModule final : public interop::GPUShaderModule {
+      public:
+        GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::ShaderModule&() const {
+            return shader_;
+        }
+
+        // interop::GPUShaderModule interface compliance
+        interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
+            Napi::Env) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::ShaderModule shader_;
+        std::shared_ptr<AsyncRunner> async_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUSHADERMODULE_H_
diff --git a/src/dawn/node/binding/GPUSupportedLimits.cpp b/src/dawn/node/binding/GPUSupportedLimits.cpp
new file mode 100644
index 0000000..23c19b2
--- /dev/null
+++ b/src/dawn/node/binding/GPUSupportedLimits.cpp
@@ -0,0 +1,131 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUSupportedLimits
+    ////////////////////////////////////////////////////////////////////////////////
+
+    GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits)
+        : limits_(std::move(limits)) {
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
+        return limits_.limits.maxTextureDimension1D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
+        return limits_.limits.maxTextureDimension2D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
+        return limits_.limits.maxTextureDimension3D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
+        return limits_.limits.maxTextureArrayLayers;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
+        return limits_.limits.maxBindGroups;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
+        return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
+        return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
+        return limits_.limits.maxSampledTexturesPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxSamplersPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxStorageBuffersPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
+        return limits_.limits.maxStorageTexturesPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxUniformBuffersPerShaderStage;
+    }
+
+    uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
+        return limits_.limits.maxUniformBufferBindingSize;
+    }
+
+    uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
+        return limits_.limits.maxStorageBufferBindingSize;
+    }
+
+    uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
+        return limits_.limits.minUniformBufferOffsetAlignment;
+    }
+
+    uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
+        return limits_.limits.minStorageBufferOffsetAlignment;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
+        return limits_.limits.maxVertexBuffers;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
+        return limits_.limits.maxVertexAttributes;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
+        return limits_.limits.maxVertexBufferArrayStride;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
+        return limits_.limits.maxInterStageShaderComponents;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupStorageSize;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
+        return limits_.limits.maxComputeInvocationsPerWorkgroup;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeX;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeY;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeZ;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupsPerDimension;
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUSupportedLimits.h b/src/dawn/node/binding/GPUSupportedLimits.h
new file mode 100644
index 0000000..e571c67
--- /dev/null
+++ b/src/dawn/node/binding/GPUSupportedLimits.h
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+#define DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
+    class GPUSupportedLimits final : public interop::GPUSupportedLimits {
+      public:
+        GPUSupportedLimits(wgpu::SupportedLimits);
+
+        // interop::GPUSupportedLimits interface compliance
+        uint32_t getMaxTextureDimension1D(Napi::Env) override;
+        uint32_t getMaxTextureDimension2D(Napi::Env) override;
+        uint32_t getMaxTextureDimension3D(Napi::Env) override;
+        uint32_t getMaxTextureArrayLayers(Napi::Env) override;
+        uint32_t getMaxBindGroups(Napi::Env) override;
+        uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
+        uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
+        uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
+        uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
+        uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
+        uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
+        uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
+        uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
+        uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
+        uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
+        uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
+        uint32_t getMaxVertexBuffers(Napi::Env) override;
+        uint32_t getMaxVertexAttributes(Napi::Env) override;
+        uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
+        uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
+        uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
+
+      private:
+        wgpu::SupportedLimits limits_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
diff --git a/src/dawn/node/binding/GPUTexture.cpp b/src/dawn/node/binding/GPUTexture.cpp
new file mode 100644
index 0000000..ac0465f
--- /dev/null
+++ b/src/dawn/node/binding/GPUTexture.cpp
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUTexture.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/binding/GPUTextureView.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUTexture
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
+    }
+
+    interop::Interface<interop::GPUTextureView> GPUTexture::createView(
+        Napi::Env env,
+        interop::GPUTextureViewDescriptor descriptor) {
+        if (!texture_) {
+            Errors::OperationError(env).ThrowAsJavaScriptException();
+            return {};
+        }
+
+        wgpu::TextureViewDescriptor desc{};
+        Converter conv(env);
+        if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) ||        //
+            !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||      //
+            !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) ||    //
+            !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) ||  //
+            !conv(desc.format, descriptor.format) ||                    //
+            !conv(desc.dimension, descriptor.dimension) ||              //
+            !conv(desc.aspect, descriptor.aspect)) {
+            return {};
+        }
+        return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+    }
+
+    void GPUTexture::destroy(Napi::Env) {
+        texture_.Destroy();
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUTexture::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUTexture::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    }
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUTexture.h b/src/dawn/node/binding/GPUTexture.h
new file mode 100644
index 0000000..dda0829
--- /dev/null
+++ b/src/dawn/node/binding/GPUTexture.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTURE_H_
+#define DAWN_NODE_BINDING_GPUTEXTURE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
+    class GPUTexture final : public interop::GPUTexture {
+      public:
+        GPUTexture(wgpu::Texture texture);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::Texture&() const {
+            return texture_;
+        }
+
+        // interop::GPUTexture interface compliance
+        interop::Interface<interop::GPUTextureView> createView(
+            Napi::Env,
+            interop::GPUTextureViewDescriptor descriptor) override;
+        void destroy(Napi::Env) override;
+        std::variant<std::string, interop::UndefinedType>getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType>value) override;
+
+      private:
+        wgpu::Texture texture_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUTEXTURE_H_
diff --git a/src/dawn/node/binding/GPUTextureView.cpp b/src/dawn/node/binding/GPUTextureView.cpp
new file mode 100644
index 0000000..7998f6c
--- /dev/null
+++ b/src/dawn/node/binding/GPUTextureView.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUTextureView.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::bindings::GPUTextureView
+    ////////////////////////////////////////////////////////////////////////////////
+    GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
+    }
+
+    std::variant<std::string, interop::UndefinedType> GPUTextureView::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUTextureView::setLabel(Napi::Env,
+                                  std::variant<std::string, interop::UndefinedType> value) {
+        UNIMPLEMENTED();
+    };
+
+}  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUTextureView.h b/src/dawn/node/binding/GPUTextureView.h
new file mode 100644
index 0000000..ceb750a
--- /dev/null
+++ b/src/dawn/node/binding/GPUTextureView.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+#define DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
+    // wgpu::TextureView.
+    class GPUTextureView final : public interop::GPUTextureView {
+      public:
+        GPUTextureView(wgpu::TextureView view);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::TextureView&() const {
+            return view_;
+        }
+
+        // interop::GPUTextureView interface compliance
+        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+
+      private:
+        wgpu::TextureView view_;
+    };
+
+}  // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
diff --git a/src/dawn/node/interop/Browser.idl b/src/dawn/node/interop/Browser.idl
new file mode 100644
index 0000000..b36c667
--- /dev/null
+++ b/src/dawn/node/interop/Browser.idl
@@ -0,0 +1,88 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// An IDL file that provides stub definitions for dictionaries and interfaces
+// used by the webgpu.idl file
+//
+// The [LegacyNoInterfaceObject] annotation asks idlgen to not create a global constructor for
+// an interface. It is a real WebIDL annotation but we use it liberally here.
+// https://webidl.spec.whatwg.org/#LegacyNoInterfaceObject
+
+dictionary EventInit {
+  boolean bubbles = false;
+  boolean cancelable = false;
+  boolean composed = false;
+};
+
+[LegacyNoInterfaceObject] interface Navigator {
+  readonly attribute DOMString vendorSub;
+  readonly attribute DOMString productSub;
+  readonly attribute DOMString vendor;
+};
+
+[LegacyNoInterfaceObject] interface Event {
+  readonly attribute boolean bubbles;
+  readonly attribute boolean cancelable;
+  attribute boolean returnValue;
+};
+
+[LegacyNoInterfaceObject] interface WorkerNavigator{};
+
+[LegacyNoInterfaceObject] interface EventListener {
+  undefined handleEvent(Event event);
+};
+
+[LegacyNoInterfaceObject] interface EventTarget {
+  undefined addEventListener(DOMString type, EventListener? callback, optional (AddEventListenerOptions or boolean) options);
+  undefined removeEventListener(DOMString type, EventListener? callback, optional (EventListenerOptions or boolean) options);
+  boolean dispatchEvent(Event event);
+};
+
+dictionary EventListenerOptions { boolean capture = false; };
+
+dictionary AddEventListenerOptions : EventListenerOptions {
+  boolean passive = false;
+  boolean once = false;
+};
+
+[LegacyNoInterfaceObject] interface HTMLVideoElement {
+  attribute unsigned long width;
+  attribute unsigned long height;
+  readonly attribute unsigned long videoWidth;
+  readonly attribute unsigned long videoHeight;
+  attribute DOMString poster;
+};
+
+typedef(Int8Array or Int16Array or Int32Array or Uint8Array or Uint16Array or
+        Uint32Array or Float32Array or Float64Array or
+        DataView) ArrayBufferView;
+
+typedef(ArrayBufferView or ArrayBuffer) BufferSource;
+
+[LegacyNoInterfaceObject] interface ImageBitmap {
+  readonly attribute unsigned long width;
+  readonly attribute unsigned long height;
+};
+
+[LegacyNoInterfaceObject] interface HTMLCanvasElement {
+  attribute unsigned long width;
+  attribute unsigned long height;
+};
+
+[LegacyNoInterfaceObject] interface OffscreenCanvas {
+  attribute unsigned long width;
+  attribute unsigned long height;
+};
+
+[LegacyNoInterfaceObject] interface EventHandler{};
diff --git a/src/dawn/node/interop/CMakeLists.txt b/src/dawn/node/interop/CMakeLists.txt
new file mode 100644
index 0000000..98b5695
--- /dev/null
+++ b/src/dawn/node/interop/CMakeLists.txt
@@ -0,0 +1,62 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Paths to generated files
+set(INTEROP_GEN_DIR    "${GEN_DIR}/src/dawn/node/interop")
+set(INTEROP_WEBGPU_H   "${INTEROP_GEN_DIR}/WebGPU.h")
+set(INTEROP_WEBGPU_CPP "${INTEROP_GEN_DIR}/WebGPU.cpp")
+
+idlgen(
+    TEMPLATE
+        "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.h.tmpl"
+    IDLS
+        "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+        "${WEBGPU_IDL_PATH}"
+    DEPENDS
+        "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+    OUTPUT
+        "${INTEROP_WEBGPU_H}"
+)
+
+idlgen(
+    TEMPLATE
+        "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.cpp.tmpl"
+    IDLS
+        "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+        "${WEBGPU_IDL_PATH}"
+    DEPENDS
+        "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+    OUTPUT
+        "${INTEROP_WEBGPU_CPP}"
+)
+
+add_library(dawn_node_interop STATIC
+    "Core.cpp"
+    "Core.h"
+    "${INTEROP_WEBGPU_H}"
+    "${INTEROP_WEBGPU_CPP}"
+)
+
+target_include_directories(dawn_node_interop
+    PRIVATE
+        "${CMAKE_SOURCE_DIR}"
+        "${NODE_API_HEADERS_DIR}/include"
+        "${NODE_ADDON_API_DIR}"
+        "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_interop
+    PRIVATE
+        dawncpp
+)
diff --git a/src/dawn/node/interop/Core.cpp b/src/dawn/node/interop/Core.cpp
new file mode 100644
index 0000000..151d852
--- /dev/null
+++ b/src/dawn/node/interop/Core.cpp
@@ -0,0 +1,170 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/interop/Core.h"
+
+namespace wgpu::interop {
+
+    Result Success;
+
+    Result Error(std::string msg) {
+        return {msg};
+    }
+
+    Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
+        if (value.IsBoolean()) {
+            out = value.ToBoolean();
+            return Success;
+        }
+        return Error("value is not a boolean");
+    }
+    Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
+        if (value.IsString()) {
+            out = value.ToString();
+            return Success;
+        }
+        return Error("value is not a string");
+    }
+    Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Int32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Uint32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Int32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Uint32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Int32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Uint32Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().Int64Value();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
+        if (value.IsNumber()) {
+            // Note that the JS Number type only stores doubles, so the max integer
+            // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
+            // with 1 implicit bit). This is why there's no UInt64Value() function.
+            out = static_cast<uint64_t>(value.ToNumber().Int64Value());
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().FloatValue();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
+        if (value.IsNumber()) {
+            out = value.ToNumber().DoubleValue();
+            return Success;
+        }
+        return Error("value is not a number");
+    }
+    Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
+        return Napi::Value::From(env, value);
+    }
+
+    Result Converter<UndefinedType>::FromJS(Napi::Env, Napi::Value value, UndefinedType&) {
+        if (value.IsUndefined()) {
+            return Success;
+        }
+        return Error("value is undefined");
+    }
+    Napi::Value Converter<UndefinedType>::ToJS(Napi::Env env, UndefinedType) {
+        return env.Undefined();
+    }
+
+}  // namespace wgpu::interop
diff --git a/src/dawn/node/interop/Core.h b/src/dawn/node/interop/Core.h
new file mode 100644
index 0000000..49ebb40
--- /dev/null
+++ b/src/dawn/node/interop/Core.h
@@ -0,0 +1,810 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides core interop helpers used by the code generated by the
+// templates.
+
+#ifndef DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+#define DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+
+#include <cstdint>
+#include <optional>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "napi.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+#define ENABLE_INTEROP_LOGGING 0  // Enable for verbose interop logging
+
+#if ENABLE_INTEROP_LOGGING
+#    define INTEROP_LOG(...) LOG(__VA_ARGS__)
+#else
+#    define INTEROP_LOG(...)
+#endif
+
+// A helper macro for constructing a PromiseInfo with the current file, function and line.
+// See PromiseInfo
+#define PROMISE_INFO                     \
+    ::wgpu::interop::PromiseInfo {       \
+        __FILE__, __FUNCTION__, __LINE__ \
+    }
+
+namespace wgpu::interop {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Primitive JavaScript types
+    ////////////////////////////////////////////////////////////////////////////////
+    using Object = Napi::Object;
+    using ArrayBuffer = Napi::ArrayBuffer;
+    using Int8Array = Napi::TypedArrayOf<int8_t>;
+    using Int16Array = Napi::TypedArrayOf<int16_t>;
+    using Int32Array = Napi::TypedArrayOf<int32_t>;
+    using Uint8Array = Napi::TypedArrayOf<uint8_t>;
+    using Uint16Array = Napi::TypedArrayOf<uint16_t>;
+    using Uint32Array = Napi::TypedArrayOf<uint32_t>;
+    using Float32Array = Napi::TypedArrayOf<float>;
+    using Float64Array = Napi::TypedArrayOf<double>;
+    using DataView = Napi::TypedArray;
+
+    // Datatype used for undefined values.
+    struct UndefinedType {};
+    static constexpr UndefinedType Undefined;
+
+    template <typename T>
+    using FrozenArray = std::vector<T>;
+
+    // A wrapper class for integers that's as transparent as possible and is used to distinguish
+    // that the type is tagged with the [Clamp] WebIDL attribute.
+    template <typename T>
+    struct ClampedInteger {
+        static_assert(std::is_integral_v<T>);
+
+        using IntegerType = T;
+        ClampedInteger() : value(0) {
+        }
+        ClampedInteger(T value) : value(value) {
+        }
+        operator T() const {
+            return value;
+        }
+        T value;
+    };
+
+    // A wrapper class for integers that's as transparent as possible and is used to distinguish
+    // that the type is tagged with the [EnforceRange] WebIDL attribute.
+    template <typename T>
+    struct EnforceRangeInteger {
+        static_assert(std::is_integral_v<T>);
+
+        using IntegerType = T;
+        EnforceRangeInteger() : value(0) {
+        }
+        EnforceRangeInteger(T value) : value(value) {
+        }
+        operator T() const {
+            return value;
+        }
+        T value;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Result
+    ////////////////////////////////////////////////////////////////////////////////
+
+    // Result is used to hold an success / error state by functions that perform JS <-> C++
+    // conversion
+    struct [[nodiscard]] Result {
+        // Returns true if the operation succeeded, false if there was an error
+        inline operator bool() const {
+            return error.empty();
+        }
+
+        // If Result is an error, then a new Error is returned with the
+        // stringified values append to the error message.
+        // If Result is a success, then a success Result is returned.
+        template <typename... VALUES>
+        Result Append(VALUES && ... values) {
+            if (*this) {
+                return *this;
+            }
+            std::stringstream ss;
+            ss << error << "\n";
+            utils::Write(ss, std::forward<VALUES>(values)...);
+            return {ss.str()};
+        }
+
+        // The error message, if the operation failed.
+        std::string error;
+    };
+
+    // A successful result
+    extern Result Success;
+
+    // Returns a Result with the given error message
+    Result Error(std::string msg);
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Interface<T>
+    ////////////////////////////////////////////////////////////////////////////////
+
+    // Interface<T> is a templated wrapper around a JavaScript object, which
+    // implements the template-generated interface type T. Interfaces are returned
+    // by either calling T::Bind() or T::Create().
+    template <typename T>
+    class Interface {
+      public:
+        // Constructs an Interface with no JS object.
+        inline Interface() {
+        }
+
+        // Constructs an Interface wrapping the given JS object.
+        // The JS object must have been created with a call to T::Bind().
+        explicit inline Interface(Napi::Object o) : object(o) {
+        }
+
+        // Implicit conversion operators to Napi objects.
+        inline operator napi_value() const {
+            return object;
+        }
+        inline operator const Napi::Value&() const {
+            return object;
+        }
+        inline operator const Napi::Object&() const {
+            return object;
+        }
+
+        // Member and dereference operators
+        inline T* operator->() const {
+            return T::Unwrap(object);
+        }
+        inline T* operator*() const {
+            return T::Unwrap(object);
+        }
+
+        // As<IMPL>() returns the unwrapped object cast to the implementation type.
+        // The interface implementation *must* be of the template type IMPL.
+        template <typename IMPL>
+        inline IMPL* As() const {
+            return static_cast<IMPL*>(T::Unwrap(object));
+        }
+
+      private:
+        Napi::Object object;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Promise<T>
+    ////////////////////////////////////////////////////////////////////////////////
+
+    // Info holds details about where the promise was constructed.
+    // Used for printing debug messages when a promise is finalized without being resolved
+    // or rejected.
+    // Use the PROMISE_INFO macro to populate this structure.
+    struct PromiseInfo {
+        const char* file = nullptr;
+        const char* function = nullptr;
+        int line = 0;
+    };
+
+    enum class PromiseState {
+        Pending,
+        Resolved,
+        Rejected,
+    };
+
+    namespace detail {
+        // Base class for Promise<T> specializations.
+        class PromiseBase {
+          public:
+            // Implicit conversion operators to Napi promises.
+            inline operator napi_value() const {
+                return state_->deferred.Promise();
+            }
+            inline operator Napi::Value() const {
+                return state_->deferred.Promise();
+            }
+            inline operator Napi::Promise() const {
+                return state_->deferred.Promise();
+            }
+
+            // Reject() rejects the promise with the given failure value.
+            void Reject(Napi::Value value) const {
+                state_->deferred.Reject(value);
+                state_->state = PromiseState::Rejected;
+            }
+            void Reject(Napi::Error err) const {
+                Reject(err.Value());
+            }
+            void Reject(std::string err) const {
+                Reject(Napi::Error::New(state_->deferred.Env(), err));
+            }
+
+            PromiseState GetState() const {
+                return state_->state;
+            }
+
+          protected:
+            void Resolve(Napi::Value value) const {
+                state_->deferred.Resolve(value);
+                state_->state = PromiseState::Resolved;
+            }
+
+            struct State {
+                Napi::Promise::Deferred deferred;
+                PromiseInfo info;
+                PromiseState state = PromiseState::Pending;
+            };
+
+            PromiseBase(Napi::Env env, const PromiseInfo& info)
+                : state_(new State{Napi::Promise::Deferred::New(env), info}) {
+                state_->deferred.Promise().AddFinalizer(
+                    [](Napi::Env, State* state) {
+                        if (state->state == PromiseState::Pending) {
+                            ::wgpu::utils::Fatal("Promise not resolved or rejected",
+                                                 state->info.file, state->info.line,
+                                                 state->info.function);
+                        }
+                        delete state;
+                    },
+                    state_);
+            }
+
+            State* const state_;
+        };
+    }  // namespace detail
+
+    // Promise<T> is a templated wrapper around a JavaScript promise, which can
+    // resolve to the template type T.
+    template <typename T>
+    class Promise : public detail::PromiseBase {
+      public:
+        // Constructor
+        Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
+        }
+
+        // Resolve() fulfills the promise with the given value.
+        void Resolve(T&& value) const {
+            PromiseBase::Resolve(ToJS(state_->deferred.Env(), std::forward<T>(value)));
+        }
+    };
+
+    // Specialization for Promises that resolve with no value
+    template <>
+    class Promise<void> : public detail::PromiseBase {
+      public:
+        // Constructor
+        Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
+        }
+
+        // Resolve() fulfills the promise.
+        void Resolve() const {
+            PromiseBase::Resolve(state_->deferred.Env().Undefined());
+        }
+    };
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Converter<T>
+    ////////////////////////////////////////////////////////////////////////////////
+
+    // Converter<T> is specialized for each type T which can be converted from C++
+    // to JavaScript, or JavaScript to C++.
+    // Each specialization of Converter<T> is expected to have two static methods
+    // with the signatures:
+    //
+    //  // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
+    //  static Result FromJS(Napi::Env, Napi::Value in, T& out);
+    //
+    //  // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
+    //  // this value.
+    //  static Napi::Value ToJS(Napi::Env, T in);
+    template <typename T>
+    class Converter {};
+
+    template <>
+    class Converter<Napi::Object> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
+            if (value.IsObject()) {
+                out = value.ToObject();
+                return Success;
+            }
+            return Error("value is not an object");
+        }
+        static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
+            return value;
+        }
+    };
+
+    template <>
+    class Converter<ArrayBuffer> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
+            if (value.IsArrayBuffer()) {
+                out = value.As<ArrayBuffer>();
+                return Success;
+            }
+            return Error("value is not a ArrayBuffer");
+        };
+        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
+            return value;
+        }
+    };
+
+    template <>
+    class Converter<Napi::TypedArray> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
+            if (value.IsTypedArray()) {
+                out = value.As<Napi::TypedArray>();
+                return Success;
+            }
+            return Error("value is not a TypedArray");
+        };
+        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
+            return value;
+        }
+    };
+
+    template <typename T>
+    class Converter<Napi::TypedArrayOf<T>> {
+      public:
+        // clang-format off
+        // The Napi element type of T
+        static constexpr napi_typedarray_type element_type =
+              std::is_same<T, int8_t>::value   ? napi_int8_array
+            : std::is_same<T, uint8_t>::value  ? napi_uint8_array
+            : std::is_same<T, int16_t>::value  ? napi_int16_array
+            : std::is_same<T, uint16_t>::value ? napi_uint16_array
+            : std::is_same<T, int32_t>::value  ? napi_int32_array
+            : std::is_same<T, uint32_t>::value ? napi_uint32_array
+            : std::is_same<T, float>::value    ? napi_float32_array
+            : std::is_same<T, double>::value   ? napi_float64_array
+            : std::is_same<T, int64_t>::value  ? napi_bigint64_array
+            : std::is_same<T, uint64_t>::value ? napi_biguint64_array
+            : static_cast<napi_typedarray_type>(-1);
+        // clang-format on
+        static_assert(static_cast<int>(element_type) >= 0,
+                      "unsupported T type for Napi::TypedArrayOf<T>");
+        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
+            if (value.IsTypedArray()) {
+                auto arr = value.As<Napi::TypedArrayOf<T>>();
+                if (arr.TypedArrayType() == element_type) {
+                    out = arr;
+                    return Success;
+                }
+                return Error("value is not a TypedArray of the correct element type");
+            }
+            return Error("value is not a TypedArray");
+        };
+        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
+            return value;
+        }
+    };
+
+    template <>
+    class Converter<std::string> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, std::string&);
+        static Napi::Value ToJS(Napi::Env, std::string);
+    };
+
+    template <>
+    class Converter<bool> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, bool&);
+        static Napi::Value ToJS(Napi::Env, bool);
+    };
+
+    template <>
+    class Converter<int8_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, int8_t&);
+        static Napi::Value ToJS(Napi::Env, int8_t);
+    };
+
+    template <>
+    class Converter<uint8_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
+        static Napi::Value ToJS(Napi::Env, uint8_t);
+    };
+
+    template <>
+    class Converter<int16_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, int16_t&);
+        static Napi::Value ToJS(Napi::Env, int16_t);
+    };
+
+    template <>
+    class Converter<uint16_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
+        static Napi::Value ToJS(Napi::Env, uint16_t);
+    };
+
+    template <>
+    class Converter<int32_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, int32_t&);
+        static Napi::Value ToJS(Napi::Env, int32_t);
+    };
+
+    template <>
+    class Converter<uint32_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
+        static Napi::Value ToJS(Napi::Env, uint32_t);
+    };
+
+    template <>
+    class Converter<int64_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, int64_t&);
+        static Napi::Value ToJS(Napi::Env, int64_t);
+    };
+
+    template <>
+    class Converter<uint64_t> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
+        static Napi::Value ToJS(Napi::Env, uint64_t);
+    };
+
+    template <>
+    class Converter<float> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, float&);
+        static Napi::Value ToJS(Napi::Env, float);
+    };
+
+    template <>
+    class Converter<double> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, double&);
+        static Napi::Value ToJS(Napi::Env, double);
+    };
+
+    // [Clamp]ed integers must convert values outside of the integer range by clamping them.
+    template <typename T>
+    class Converter<ClampedInteger<T>> {
+      public:
+        static Result FromJS(Napi::Env env, Napi::Value value, ClampedInteger<T>& out) {
+            double doubleValue;
+            Result res = Converter<double>::FromJS(env, value, doubleValue);
+            if (!res) {
+                return res;
+            }
+
+            // Check for clamping first.
+            constexpr T kMin = std::numeric_limits<T>::min();
+            constexpr T kMax = std::numeric_limits<T>::max();
+            if (doubleValue < kMin) {
+                out = kMin;
+                return Success;
+            }
+            if (doubleValue > kMax) {
+                out = kMax;
+                return Success;
+            }
+
+            // Yay, no clamping! We can convert the integer type as usual.
+            T correctValue;
+            res = Converter<T>::FromJS(env, value, correctValue);
+            if (!res) {
+                return res;
+            }
+            out = correctValue;
+            return Success;
+        }
+        static Napi::Value ToJS(Napi::Env env, const ClampedInteger<T>& value) {
+            return Converter<T>::ToJS(env, value.value);
+        }
+    };
+
+    // [EnforceRange] integers cause a TypeError when converted from out of range values
+    template <typename T>
+    class Converter<EnforceRangeInteger<T>> {
+      public:
+        static Result FromJS(Napi::Env env, Napi::Value value, EnforceRangeInteger<T>& out) {
+            double doubleValue;
+            Result res = Converter<double>::FromJS(env, value, doubleValue);
+            if (!res) {
+                return res;
+            }
+
+            // Check for out of range and throw a type error.
+            constexpr T kMin = std::numeric_limits<T>::min();
+            constexpr T kMax = std::numeric_limits<T>::max();
+            if (!(kMin <= doubleValue && doubleValue <= kMax)) {
+                return Error("Values are out of the range of that integer.");
+            }
+
+            // Yay, no error! We can convert the integer type as usual.
+            T correctValue;
+            res = Converter<T>::FromJS(env, value, correctValue);
+            if (!res) {
+                return res;
+            }
+            out = correctValue;
+            return Success;
+        }
+        static Napi::Value ToJS(Napi::Env env, const EnforceRangeInteger<T>& value) {
+            return Converter<T>::ToJS(env, value.value);
+        }
+    };
+
+    template <>
+    class Converter<UndefinedType> {
+      public:
+        static Result FromJS(Napi::Env, Napi::Value, UndefinedType&);
+        static Napi::Value ToJS(Napi::Env, UndefinedType);
+    };
+
+    template <typename T>
+    class Converter<Interface<T>> {
+      public:
+        static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
+            if (!value.IsObject()) {
+                return Error("value is not object");
+            }
+            auto obj = value.As<Napi::Object>();
+            if (!T::Unwrap(obj)) {
+                return Error("object is not of the correct interface type");
+            }
+            out = Interface<T>(obj);
+            return Success;
+        }
+        static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
+            return {env, value};
+        }
+    };
+
+    template <typename T>
+    class Converter<std::optional<T>> {
+      public:
+        static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
+            if (value.IsNull() || value.IsUndefined()) {
+                out.reset();
+                return Success;
+            }
+            T v{};
+            auto res = Converter<T>::FromJS(env, value, v);
+            if (!res) {
+                return res;
+            }
+            out = std::move(v);
+            return Success;
+        }
+        static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
+            if (value.has_value()) {
+                return Converter<T>::ToJS(env, value.value());
+            }
+            return env.Null();
+        }
+    };
+
+    template <typename T>
+    class Converter<std::vector<T>> {
+      public:
+        static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
+            if (!value.IsArray()) {
+                return Error("value is not an array");
+            }
+            auto arr = value.As<Napi::Array>();
+            std::vector<T> vec(arr.Length());
+            for (size_t i = 0; i < vec.size(); i++) {
+                auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
+                if (!res) {
+                    return res.Append("for array element ", i);
+                }
+            }
+            out = std::move(vec);
+            return Success;
+        }
+        static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
+            auto arr = Napi::Array::New(env, vec.size());
+            for (size_t i = 0; i < vec.size(); i++) {
+                arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
+            }
+            return arr;
+        }
+    };
+
+    template <typename K, typename V>
+    class Converter<std::unordered_map<K, V>> {
+      public:
+        static inline Result FromJS(Napi::Env env,
+                                    Napi::Value value,
+                                    std::unordered_map<K, V>& out) {
+            if (!value.IsObject()) {
+                return Error("value is not an object");
+            }
+            auto obj = value.ToObject();
+            auto keys = obj.GetPropertyNames();
+            std::unordered_map<K, V> map(keys.Length());
+            for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
+                K key{};
+                V value{};
+                auto key_res = Converter<K>::FromJS(env, keys[i], key);
+                if (!key_res) {
+                    return key_res.Append("for object key");
+                }
+                auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
+                if (!value_res) {
+                    return value_res.Append("for object value of key: ", key);
+                }
+                map[key] = value;
+            }
+            out = std::move(map);
+            return Success;
+        }
+        static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
+            auto obj = Napi::Object::New(env);
+            for (auto it : value) {
+                obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
+            }
+            return obj;
+        }
+    };
+
+    template <typename... TYPES>
+    class Converter<std::variant<TYPES...>> {
+        template <typename TY>
+        static inline Result TryFromJS(Napi::Env env,
+                                       Napi::Value value,
+                                       std::variant<TYPES...>& out) {
+            TY v{};
+            auto res = Converter<TY>::FromJS(env, value, v);
+            if (!res) {
+                return Error("no possible types matched");
+            }
+            out = std::move(v);
+            return Success;
+        }
+
+        template <typename T0, typename T1, typename... TN>
+        static inline Result TryFromJS(Napi::Env env,
+                                       Napi::Value value,
+                                       std::variant<TYPES...>& out) {
+            if (TryFromJS<T0>(env, value, out)) {
+                return Success;
+            }
+            return TryFromJS<T1, TN...>(env, value, out);
+        }
+
+      public:
+        static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+            return TryFromJS<TYPES...>(env, value, out);
+        }
+        static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
+            return std::visit(
+                [&](auto&& v) {
+                    using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
+                    return Converter<T>::ToJS(env, v);
+                },
+                value);
+        }
+    };
+
+    template <typename T>
+    class Converter<Promise<T>> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
+            UNIMPLEMENTED();
+        }
+        static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
+            return promise;
+        }
+    };
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Helpers
+    ////////////////////////////////////////////////////////////////////////////////
+
+    // FromJS() is a helper function which delegates to
+    // Converter<T>::FromJS()
+    template <typename T>
+    inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
+        return Converter<T>::FromJS(env, value, out);
+    }
+
+    // FromJSOptional() is similar to FromJS(), but if 'value' is either null
+    // or undefined then 'out' is left unassigned.
+    template <typename T>
+    inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+        if (value.IsNull() || value.IsUndefined()) {
+            return Success;
+        }
+        return Converter<T>::FromJS(env, value, out);
+    }
+
+    // ToJS() is a helper function which delegates to Converter<T>::ToJS()
+    template <typename T>
+    inline Napi::Value ToJS(Napi::Env env, T&& value) {
+        return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
+            env, std::forward<T>(value));
+    }
+
+    // DefaultedParameter can be used in the tuple parameter types passed to
+    // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
+    // that have a default value. If the argument is omitted in the call, then
+    // DefaultedParameter::default_value will be assigned to
+    // DefaultedParameter::value.
+    template <typename T>
+    struct DefaultedParameter {
+        T value;          // The argument value assigned by FromJS()
+        T default_value;  // The default value if no argument supplied
+
+        // Implicit conversion operator. Returns value.
+        inline operator const T&() const {
+            return value;
+        }
+    };
+
+    // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
+    template <typename T>
+    struct IsDefaultedParameter {
+        static constexpr bool value = false;
+    };
+    template <typename T>
+    struct IsDefaultedParameter<DefaultedParameter<T>> {
+        static constexpr bool value = true;
+    };
+
+    // FromJS() is a helper function for bulk converting the arguments of 'info'.
+    // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
+    // Parameters may be of the templated DefaultedParameter type, in which case
+    // the parameter will default to the default-value if omitted.
+    template <typename PARAM_TYPES, int BASE_INDEX = 0>
+    inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
+        if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
+            using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
+            auto& value = info[BASE_INDEX];
+            auto& out = std::get<BASE_INDEX>(args);
+            if constexpr (IsDefaultedParameter<T>::value) {
+                // Parameter has a default value.
+                // Check whether the argument was provided.
+                if (value.IsNull() || value.IsUndefined()) {
+                    // Use default value for this parameter
+                    out.value = out.default_value;
+                } else {
+                    // Argument was provided
+                    auto res = FromJS(info.Env(), value, out.value);
+                    if (!res) {
+                        return res;
+                    }
+                }
+            } else {
+                // Parameter does not have a default value.
+                auto res = FromJS(info.Env(), value, out);
+                if (!res) {
+                    return res;
+                }
+            }
+            // Convert the rest of the arguments
+            return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
+        } else {
+            return Success;
+        }
+    }
+
+}  // namespace wgpu::interop
+
+#endif  //  DAWN_NODE_INTEROP_CORE_WEBGPU_H_
diff --git a/src/dawn/node/interop/WebGPU.cpp.tmpl b/src/dawn/node/interop/WebGPU.cpp.tmpl
new file mode 100644
index 0000000..f711b1e
--- /dev/null
+++ b/src/dawn/node/interop/WebGPU.cpp.tmpl
@@ -0,0 +1,409 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go to generate
+the WebGPU.cpp source file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+  types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+  used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#include "src/dawn/node/interop/WebGPU.h"
+
+#include <unordered_map>
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu {
+namespace interop {
+
+namespace {
+
+{{template "Wrappers" $}}
+
+}  // namespace
+
+{{ range $ := .Declarations}}
+{{-        if IsDictionary $}}{{template "Dictionary" $}}
+{{-   else if IsInterface  $}}{{template "Interface"  $}}
+{{-   else if IsEnum       $}}{{template "Enum"       $}}
+{{-   end}}
+{{- end}}
+
+
+void Initialize(Napi::Env env) {
+  auto* wrapper = Wrappers::Init(env);
+  auto global = env.Global();
+{{- range $ := .Declarations}}
+{{-   if IsInterfaceOrNamespace $}}
+{{-     if not (HasAnnotation $ "LegacyNoInterfaceObject")}}
+  global.Set(Napi::String::New(env, "{{$.Name}}"), wrapper->{{$.Name}}_ctor.Value());
+{{-     end}}
+{{-   end}}
+{{- end}}
+}
+
+}  // namespace interop
+}  // namespace wgpu
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrappers emits the C++ 'Wrappers' class, which holds all the interface and
+-- namespace interop wrapper classes.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrappers"}}
+// Wrappers holds all the Napi class constructors, and Napi::ObjectWrap type
+// declarations, for each of the WebIDL interface and namespace types.
+class Wrappers {
+  Wrappers(Napi::Env env) {
+{{-   range $ := .Declarations}}
+{{-     if IsInterfaceOrNamespace $}}
+    {{$.Name}}_ctor = Napi::Persistent(W{{$.Name}}::Class(env));
+{{-     end}}
+{{-   end}}
+  }
+
+  static Wrappers* instance;
+
+public:
+{{-   range $ := .Declarations}}
+{{-     if IsInterfaceOrNamespace $}}{{template "Wrapper" $}}
+{{-     end}}
+{{-   end}}
+
+  // Allocates and constructs the Wrappers instance
+  static Wrappers* Init(Napi::Env env) {
+    instance = new Wrappers(env);
+    return instance;
+  }
+
+  // Destructs and frees the Wrappers instance
+  static void Term(Napi::Env env) {
+    delete instance;
+    instance = nullptr;
+  }
+
+  static Wrappers* For(Napi::Env env) {
+    // Currently Napi only actually supports a single Env, so there's no point
+    // maintaining a map of Env to Wrapper. Note: This might not always be true.
+    return instance;
+  }
+
+{{   range $ := .Declarations}}
+{{-     if IsInterfaceOrNamespace $}}
+  Napi::FunctionReference {{$.Name}}_ctor;
+{{-     end}}
+{{-   end}}
+};
+
+Wrappers* Wrappers::instance = nullptr;
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrapper emits the C++ wrapper class for the given ast.Interface or
+-- ast.Namespace.
+-- This wrapper class inherits from Napi::ObjectWrap, which binds the lifetime
+-- of the JavaScript object to the lifetime of the wrapper class instance.
+-- If the wrapper is for an interface, the wrapper object holds a unique_ptr to
+-- the interface implementation, and delegates all exposed method calls on to
+-- the implementation.
+-- See: https://github.com/nodejs/node-addon-api/blob/main/doc/object_wrap.md
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrapper"}}
+  struct W{{$.Name}} : public Napi::ObjectWrap<W{{$.Name}}> {
+{{-  if IsInterface $}}
+    std::unique_ptr<{{$.Name}}> impl;
+{{-  end}}
+    static Napi::Function Class(Napi::Env env) {
+      return DefineClass(env, "{{$.Name}}", {
+{{   if $s := SetlikeOf $}}
+        InstanceMethod("has", &W{{$.Name}}::has),
+        InstanceMethod("keys", &W{{$.Name}}::keys),
+{{-  end}}
+{{-  range $m := MethodsOf $}}
+        InstanceMethod("{{$m.Name}}", &W{{$.Name}}::{{$m.Name}}),
+{{-  end}}
+{{-  range $a := AttributesOf $}}
+{{-   if not (HasAnnotation $a "SameObject")}}
+        InstanceAccessor("{{$a.Name}}", &W{{$.Name}}::get{{Title $a.Name}},
+{{-    if $a.Readonly}} nullptr{{else}} &W{{$.Name}}::set{{Title $a.Name}}{{end -}}
+        ),
+{{-   end}}
+{{-  end}}
+{{-  range $c := ConstantsOf $}}
+        StaticValue("{{$c.Name}}", ToJS(env, {{$.Name}}::{{$c.Name}}), napi_default_jsproperty),
+{{-  end}}
+      });
+    }
+
+    W{{$.Name}}(const Napi::CallbackInfo& info) : ObjectWrap(info) {}
+
+{{   if $s := SetlikeOf $}}
+    Napi::Value has(const Napi::CallbackInfo& info) {
+      std::tuple<{{template "Type" $s.Elem}}> args;
+      auto res = FromJS(info, args);
+      if (res) {
+          return ToJS(info.Env(), impl->has(info.Env(), std::get<0>(args)));
+      }
+      Napi::TypeError::New(info.Env(), res.error).ThrowAsJavaScriptException();
+      return {};
+    }
+    Napi::Value keys(const Napi::CallbackInfo& info) {
+      return ToJS(info.Env(), impl->keys(info.Env()));
+    }
+{{-  end}}
+{{-  range $m := MethodsOf $}}
+    Napi::Value {{$m.Name}}(const Napi::CallbackInfo& info) {
+      std::string error;
+{{-    range $overload_idx, $o := $m.Overloads}}
+{{- $overloaded := gt (len $m.Overloads) 1}}
+      { {{if $overloaded}}// Overload {{$overload_idx}}{{end}}
+        std::tuple<
+{{-        range $i, $p := $o.Parameters}}
+{{-          if $i}}, {{end}}
+{{-          if      $p.Init    }}DefaultedParameter<{{template "Type" $p.Type}}>
+{{-          else if $p.Optional}}std::optional<{{template "Type" $p.Type}}>
+{{-          else               }}{{template "Type" $p.Type}}
+{{-          end}}
+{{-        end}}> args;
+
+{{-        range $i, $p := $o.Parameters}}
+{{-          if $p.Init}}
+        std::get<{{$i}} /* {{$p.Name}} */>(args).default_value = {{Eval "Literal" "Value" $p.Init "Type" $p.Type}};
+{{-          end}}
+{{-        end}}
+
+        auto res = FromJS(info, args);
+        if (res) {
+          {{/* indent */}}INTEROP_LOG(
+{{-        range $i, $p := $o.Parameters}}
+{{-          if $i}}, ", {{$p.Name}}: "{{else}}"{{$p.Name}}: "{{end}}, std::get<{{$i}}>(args)
+{{-        end}});
+          {{/* indent */}}
+{{-      if not (IsUndefinedType $o.Type) }}auto result = {{end -}}
+          impl->{{$o.Name}}(info.Env(){{range $i, $_ := $o.Parameters}}, std::get<{{$i}}>(args){{end}});
+          {{/* indent */ -}}
+{{-      if   IsUndefinedType $o.Type}}return info.Env().Undefined();
+{{-      else                        }}return ToJS(info.Env(), result);
+{{-      end                         }}
+        }
+        error = {{if $overloaded}}"\noverload {{$overload_idx}} failed to match:\n" + {{end}}res.error;
+      }
+{{-    end}}
+      Napi::TypeError::New(info.Env(), "no overload matched for {{$m.Name}}:\n" + error).ThrowAsJavaScriptException();
+      return {};
+    }
+{{-  end}}
+
+{{-  range $a := AttributesOf $}}
+{{-   if not (HasAnnotation $a "SameObject")}}
+    Napi::Value get{{Title $a.Name}}(const Napi::CallbackInfo& info) {
+      return ToJS(info.Env(), impl->get{{Title $a.Name}}(info.Env()));
+    }
+{{-   if not $a.Readonly}}
+    void set{{Title $a.Name}}(const Napi::CallbackInfo& info, const Napi::Value& value) {
+      {{template "Type" $a.Type}} v{};
+      auto res = FromJS(info.Env(), value, v);
+      if (res) {
+        impl->set{{Title $a.Name}}(info.Env(), std::move(v));
+      } else {
+        res = res.Append("invalid value to {{$a.Name}}");
+        Napi::TypeError::New(info.Env(), res.error).ThrowAsJavaScriptException();
+      }
+    }
+{{-    end}}
+{{-   end}}
+{{-  end}}
+  };
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ method implementations and associated functions of
+-- the interop type that defines the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+  auto object = value.ToObject();
+  Result res;
+{{- template "DictionaryMembersFromJS" $}};
+  return Success;
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+  auto object = Napi::Object::New(env);
+{{- template "DictionaryMembersToJS" $}}
+  return object;
+}
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& dict) {
+    o << "{{$.Name}} {";
+{{-    range $i, $m := $.Members}}
+    o << {{if $i}}", "{{else}}" "{{end}} << "{{$m.Name}}: ";
+    utils::Write(o, dict.{{$m.Name}});
+{{-    end          }}
+    o << "}" << std::endl;
+    return o;
+}
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersFromJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields from JavaScript to C++. Each call to ToJS() is
+-- emitted as a separate statement, and requires a 'Result res' local to be
+-- declared
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersFromJS"}}
+{{-    if $.Inherits}}{{template "DictionaryMembersFromJS" (Lookup $.Inherits)}}{{end}}
+{{-    range $i, $m := $.Members}}
+  {{/* indent */}}
+{{-      if   $m.Init }}res = interop::FromJSOptional(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{-      else         }}res = interop::FromJS(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{-      end          }}
+  if (!res) {
+    return res.Append("while converting member '{{$m.Name}}'");
+  }
+{{-    end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersToJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields to JavaScript from C++. Each call to ToJS() is
+-- emitted as a separate statement
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersToJS"}}
+{{-    if $.Inherits}}{{template "DictionaryMembersToJS" (Lookup $.Inherits)}}{{end}}
+{{-    range $m := $.Members}}
+  object.Set(Napi::String::New(env, "{{$m.Name}}"), interop::ToJS(env, value.{{$m.Name}}));
+{{-    end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ method implementations that define the given
+-- ast.Interface.
+-- Note: Most of the actual binding logic lives in the interface wrapper class.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+{{$.Name}}::{{$.Name}}() = default;
+
+{{$.Name}}* {{$.Name}}::Unwrap(Napi::Object object) {
+  auto* wrappers = Wrappers::For(object.Env());
+  if (!object.InstanceOf(wrappers->{{$.Name}}_ctor.Value())) {
+    return nullptr;
+  }
+  return Wrappers::W{{$.Name}}::Unwrap(object)->impl.get();
+}
+
+Interface<{{$.Name}}> {{$.Name}}::Bind(Napi::Env env, std::unique_ptr<{{$.Name}}>&& impl) {
+  auto* wrappers = Wrappers::For(env);
+  auto object = wrappers->{{$.Name}}_ctor.New({});
+  auto* wrapper = Wrappers::W{{$.Name}}::Unwrap(object);
+  wrapper->impl = std::move(impl);
+
+{{- /*Add the [SameObject] members as read-only property on the JS object.*/ -}}
+{{- range $a := AttributesOf $}}
+{{-   if HasAnnotation $a "SameObject"}}
+  object.DefineProperty(Napi::PropertyDescriptor::Value(
+    "{{$a.Name}}", ToJS(env, wrapper->impl->get{{Title $a.Name}}(env))
+  ));
+{{-   end}}
+{{- end}}
+
+  return Interface<{{$.Name}}>(object);
+}
+
+{{$.Name}}::~{{$.Name}}() = default;
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ associated functions of the interop type that defines the
+-- given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+bool Converter<{{$.Name}}>::FromString(std::string str, {{$.Name}}& out) {
+{{-  range $e := $.Values}}
+  if (str == {{$e.Value}}) {
+    out = {{$.Name}}::{{EnumEntryName $e.Value}};
+    return true;
+  }
+{{-  end}}
+  return false;
+}
+
+const char* Converter<{{$.Name}}>::ToString({{$.Name}} value) {
+  switch (value) {
+{{-  range $e := $.Values}}
+  case {{$.Name}}::{{EnumEntryName $e.Value}}:
+    return {{$e.Value}};
+{{-  end}}
+  }
+  return nullptr;
+}
+
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+  std::string str = value.ToString();
+  if (FromString(str, out)) {
+    return Success;
+  }
+  return Error(str + " is not a valid enum value of {{$.Name}}");
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+  switch (value) {
+{{-  range $e := $.Values}}
+  case {{$.Name}}::{{EnumEntryName $e.Value}}:
+    return Napi::String::New(env, {{$e.Value}});
+{{-  end}}
+  }
+  return env.Undefined();
+}
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}} value) {
+  if (auto* s = Converter<{{$.Name}}>::ToString(value)) {
+    return o << s;
+  }
+  return o << "undefined<{{$.Name}}>";
+}
+
+{{end}}
diff --git a/src/dawn/node/interop/WebGPU.h.tmpl b/src/dawn/node/interop/WebGPU.h.tmpl
new file mode 100644
index 0000000..628c569
--- /dev/null
+++ b/src/dawn/node/interop/WebGPU.h.tmpl
@@ -0,0 +1,286 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go to generate
+the WebGPU.h header file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+  types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+  used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#ifndef DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+#define DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+#include "src/dawn/node/interop/Core.h"
+
+namespace wgpu {
+namespace interop {
+
+// Initialize() registers the WebGPU types with the Napi environment.
+void Initialize(Napi::Env env);
+
+{{  range $ := .Declarations}}
+{{-        if IsDictionary $}}{{template "Dictionary" $}}
+{{-   else if IsNamespace  $}}{{template "Namespace" $}}
+{{-   else if IsInterface  $}}{{template "Interface" $}}
+{{-   else if IsEnum       $}}{{template "Enum" $}}
+{{-   else if IsTypedef    $}}{{template "Typedef" $}}
+{{-   end}}
+{{- end}}
+
+}  // namespace interop
+}  // namespace wgpu
+
+#endif // DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ header declaration that defines the interop type for
+-- the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+// dictionary {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+{{   range $m := $.Members}}
+{{-    if      IsConstructor $m}}  {{$.Name}}();
+{{     else if IsMember      $m}}  {{template "DictionaryMember" $m}}
+{{     end}}
+{{-  end -}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+  static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+  static Napi::Value ToJS(Napi::Env, {{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& desc);
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Namespace emits the C++ header declaration that defines the interop type for
+-- the given ast.Namespace
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Namespace"}}
+// namespace {{$.Name}}
+class {{$.Name}} {
+public:
+  virtual ~{{$.Name}}();
+  {{$.Name}}();
+{{-  range $c := ConstantsOf $}}
+{{-    template "Constant" $c}}
+{{-  end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ header declaration that defines the interop type for
+-- the given ast.Interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+// interface {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+  static Interface<{{$.Name}}> Bind(Napi::Env, std::unique_ptr<{{$.Name}}>&&);
+  static {{$.Name}}* Unwrap(Napi::Object);
+
+  template<typename T, typename ... ARGS>
+  static inline Interface<{{$.Name}}> Create(Napi::Env env, ARGS&& ... args) {
+    return Bind(env, std::make_unique<T>(std::forward<ARGS>(args)...));
+  }
+
+  virtual ~{{$.Name}}();
+  {{$.Name}}();
+{{-  if $s := SetlikeOf $}}
+{{-    template "InterfaceSetlike" $s}}
+{{-  end}}
+{{-  range $m := MethodsOf $}}
+{{-    template "InterfaceMethod" $m}}
+{{-  end}}
+{{-  range $a := AttributesOf $}}
+{{-    template "InterfaceAttribute" $a}}
+{{-  end}}
+{{-  range $c := ConstantsOf  $}}
+{{-    template "Constant" $c}}
+{{-  end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Typedef emits the C++ header declaration that defines the interop type for
+-- the given ast.Interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Typedef"}}
+{{-   if HasAnnotation $ "EnforceRange"}}
+using {{$.Name}} = EnforceRangeInteger<{{template "Type" $.Type}}>;
+{{-   else}}
+using {{$.Name}} = {{template "Type" $.Type}};
+{{-   end}}
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ header declaration that defines the interop type for
+-- the given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+enum class {{$.Name}} {
+{{-  range $ := $.Values}}
+  {{EnumEntryName $.Value}},
+{{-  end}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+  static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+  static Napi::Value ToJS(Napi::Env, {{$.Name}});
+  static bool FromString(std::string, {{$.Name}}&);
+  static const char* ToString({{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}});
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMember emits the C++ declaration for a single dictionary ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMember"}}
+{{-   if $.Attribute}}{{template "AttributeType" $}} {{$.Name}}
+{{-     if $.Init}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}}{{end}};
+{{-   else          }}{{template "Type" $.Type}} {{$.Name}}({{template "Parameters" $.Parameters}});
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceSetlike emits the C++ methods for a setlike interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceSetlike"}}
+  virtual bool has(Napi::Env, {{template "Type" $.Elem}}) = 0;
+  virtual std::vector<{{template "Type" $.Elem}}> keys(Napi::Env) = 0;
+{{- /* TODO(crbug.com/dawn/1143):
+       entries, forEach, size, values
+       read-write: add, clear, or delete
+*/}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceMethod emits the C++ declaration for a single interface ast.Member
+-- method
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceMethod"}}
+{{-   range $o := $.Overloads}}
+  virtual {{template "Type" $o.Type}} {{$.Name}}(Napi::Env{{template "ParametersWithLeadingComma" $o.Parameters}}) = 0;
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceAttribute emits the C++ declaration for a single interface
+-- ast.Member attribute
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceAttribute"}}
+  virtual {{template "Type" $.Type}} get{{Title $.Name}}(Napi::Env) = 0;
+{{-   if not $.Readonly}}
+  virtual void set{{Title $.Name}}(Napi::Env, {{template "Type" $.Type}} value) = 0;
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Constant emits the C++ declaration for a single ast.Member constant
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Constant"}}
+  static constexpr {{template "Type" $.Type}} {{$.Name}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}};
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameters emits the C++ comma separated list of parameter declarations for
+-- the given []ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameters"}}
+{{-   range $i, $param := $ }}
+{{-     if $i }}, {{end}}
+{{-     template "Parameter" $param}}
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- ParametersWithLeadingComma emits the C++ comma separated list of parameter
+-- declarations for the given []ast.Parameter, starting with a leading comma
+-- for the first parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "ParametersWithLeadingComma"}}
+{{-   range $i, $param := $ }}, {{/*  */}}
+{{-     template "Parameter" $param}}
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameter emits the C++ parameter type and name for the given ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameter" -}}
+{{-   if      $.Init    }}{{template "Type" $.Type}} {{$.Name}}
+{{-   else if $.Optional}}std::optional<{{template "Type" $.Type}}> {{$.Name}}
+{{-   else              }}{{template "Type" $.Type}} {{$.Name}}
+{{-   end               }}
+{{- end}}
diff --git a/src/dawn/node/interop/WebGPUCommon.tmpl b/src/dawn/node/interop/WebGPUCommon.tmpl
new file mode 100644
index 0000000..bf11709
--- /dev/null
+++ b/src/dawn/node/interop/WebGPUCommon.tmpl
@@ -0,0 +1,147 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go.
+This file provides common template definitions and is included by WebGPU.h.tmpl
+and WebGPU.cpp.tmpl.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+  types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+  used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Type generates the C++ type for the given ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Type" -}}
+{{-        if IsUndefinedType $}}void
+{{-   else if IsTypeName      $}}
+{{-          if eq $.Name "boolean"             }}bool
+{{-     else if eq $.Name "short"               }}int16_t
+{{-     else if eq $.Name "unsigned short"      }}uint16_t
+{{-     else if eq $.Name "long"                }}int32_t
+{{-     else if eq $.Name "unsigned long"       }}uint32_t
+{{-     else if eq $.Name "long long"           }}int64_t
+{{-     else if eq $.Name "unsigned long long"  }}uint64_t
+{{-     else if eq $.Name "object"              }}Object
+{{-     else if eq $.Name "DOMString"           }}std::string
+{{-     else if eq $.Name "USVString"           }}std::string
+{{-     else if eq $.Name "ArrayBuffer"         }}ArrayBuffer
+{{-     else if IsInterface (Lookup $.Name)     }}Interface<{{$.Name}}>
+{{-     else                                    }}{{$.Name}}
+{{-     end                                     }}
+{{-   else if IsParametrizedType $}}{{$.Name}}<{{template "TypeList" $.Elems}}>
+{{-   else if IsNullableType     $}}std::optional<{{template "Type" $.Type}}>
+{{-   else if IsUnionType        $}}std::variant<{{template "VariantTypeList" $.Types}}>
+{{-   else if IsSequenceType     $}}std::vector<{{template "Type" $.Elem}}>
+{{-   else if IsRecordType       $}}std::unordered_map<{{template "Type" $.Key}}, {{template "Type" $.Elem}}>
+{{-   else                        }} /* Unhandled Type {{printf "%T" $}} */
+{{-   end -}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- AttributeType generates the C++ type for the given ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "AttributeType" -}}
+{{-   if      $.Required }}{{template "AttributeClampHelper" $}}
+{{-   else if $.Init     }}{{template "AttributeClampHelper" $}}
+{{-   else               }}std::optional<{{template "AttributeClampHelper" $}}>
+{{-   end}}
+{{- end }}
+
+
+{{- /*
+    A helper for AttributeType that wraps integer types if necessary for WebIDL attributes.
+    Note that [Clamp] and [EnforceRange] are supposed to be an annotation on the type and not
+    the attribute, but webidlparser doesn't parse this correctly.
+*/ -}}
+{{- define "AttributeClampHelper" -}}
+{{-   if HasAnnotation $ "Clamp" }}
+ClampedInteger<{{template "Type" $.Type}}>
+{{-   else if HasAnnotation $ "EnforceRange" }}
+EnforceRangeInteger<{{template "Type" $.Type}}>
+{{-   else}}
+{{template "Type" $.Type}}
+{{-   end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Literal generates a C++ literal value using the following arguments:
+--   Value - the ast.Literal
+--   Type  - the ast.Type of the literal
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Literal" -}}
+{{-   if      IsDefaultDictionaryLiteral $.Value}}{{template "Type" $.Type}}{}
+{{-   else if IsTypeName                 $.Type }}
+{{-     $ty := Lookup $.Type.Name}}
+{{-     if      IsTypedef      $ty     }}{{Eval "Literal" "Value" $.Value "Type" $ty.Type}}
+{{-     else if IsEnum         $ty     }}{{$.Type.Name}}::{{EnumEntryName $.Value.Value}}
+{{-     else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{-     else                           }}/* Unhandled Type {{printf "ty: %T $.Type.Name: %T $.Value: %T" $ty $.Type.Name $.Value}} */
+{{-     end                            }}
+{{-   else if IsSequenceType $.Type  }}{{template "Type" $.Type}}{} {{- /* TODO: Assumes the initialiser is empty */}}
+{{-   else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{-   else }} /* Unhandled Type {{printf "%T %T" $.Type $.Value}} */
+{{-   end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- TypeList generates a C++ comma separated list of types from the given
+-- []ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "TypeList" -}}
+{{-   range $i, $ty := $}}
+{{-     if $i }}, {{end}}
+{{-     template "Type" $ty}}
+{{-   end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- VariantTypeList generates a C++ comma separated list of types from the given
+-- []ast.Type, skipping any 'undefined' types
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "VariantTypeList" -}}
+{{-   range $i, $ty := $}}
+{{-     if $i }}, {{end}}
+{{-     if IsUndefinedType $ty -}}
+          UndefinedType
+{{-     else}}
+{{-       template "Type" $ty}}
+{{-     end}}
+{{-   end}}
+{{- end }}
+
diff --git a/src/dawn/node/tools/go.mod b/src/dawn/node/tools/go.mod
new file mode 100644
index 0000000..b5eb8df
--- /dev/null
+++ b/src/dawn/node/tools/go.mod
@@ -0,0 +1,9 @@
+module dawn.googlesource.com/dawn/src/dawn/node/tools
+
+go 1.16
+
+require (
+	github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
+	github.com/mattn/go-colorable v0.1.9
+	github.com/mattn/go-isatty v0.0.14 // indirect
+)
diff --git a/src/dawn/node/tools/go.sum b/src/dawn/node/tools/go.sum
new file mode 100644
index 0000000..42c0118
--- /dev/null
+++ b/src/dawn/node/tools/go.sum
@@ -0,0 +1,33 @@
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094 h1:CTVJdI6oUCRNucMEmoh3c2U88DesoPtefsxKhoZ1WuQ=
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094/go.mod h1:bV550SPlMos7UhMprxlm14XTBTpKHSUZ8Q4Id5qQuyw=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/src/dawn/node/tools/run-cts b/src/dawn/node/tools/run-cts
new file mode 100755
index 0000000..cf58452
--- /dev/null
+++ b/src/dawn/node/tools/run-cts
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Copyright 2021 The Tint Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e # Fail on any error.
+
+if [ ! -x "$(which go)" ] ; then
+    echo "error: go needs to be on \$PATH to use $0"
+    exit 1
+fi
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd )"
+ROOT_DIR="$( cd "${SCRIPT_DIR}/.." >/dev/null 2>&1 && pwd )"
+BINARY="${SCRIPT_DIR}/bin/run-cts"
+
+# Rebuild the binary.
+# Note, go caches build artifacts, so this is quick for repeat calls
+pushd "${SCRIPT_DIR}/src/cmd/run-cts" > /dev/null
+    go build -o "${BINARY}" main.go
+popd > /dev/null
+
+"${BINARY}" "$@"
diff --git a/src/dawn/node/tools/src/cmd/idlgen/main.go b/src/dawn/node/tools/src/cmd/idlgen/main.go
new file mode 100644
index 0000000..9985203
--- /dev/null
+++ b/src/dawn/node/tools/src/cmd/idlgen/main.go
@@ -0,0 +1,680 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// idlgen is a tool used to generate code from WebIDL files and a golang
+// template file
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"text/template"
+	"unicode"
+
+	"github.com/ben-clayton/webidlparser/ast"
+	"github.com/ben-clayton/webidlparser/parser"
+)
+
+func main() {
+	if err := run(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+func showUsage() {
+	fmt.Println(`
+idlgen is a tool used to generate code from WebIDL files and a golang
+template file
+
+Usage:
+  idlgen --template=<template-path> --output=<output-path> <idl-file> [<idl-file>...]`)
+	os.Exit(1)
+}
+
+func run() error {
+	var templatePath string
+	var outputPath string
+	flag.StringVar(&templatePath, "template", "", "the template file run with the parsed WebIDL files")
+	flag.StringVar(&outputPath, "output", "", "the output file")
+	flag.Parse()
+
+	idlFiles := flag.Args()
+
+	// Check all required arguments are provided
+	if templatePath == "" || outputPath == "" || len(idlFiles) == 0 {
+		showUsage()
+	}
+
+	// Open up the output file
+	out := os.Stdout
+	if outputPath != "" {
+		file, err := os.Create(outputPath)
+		if err != nil {
+			return fmt.Errorf("failed to open output file '%v'", outputPath)
+		}
+		out = file
+		defer file.Close()
+	}
+
+	// Read the template file
+	tmpl, err := ioutil.ReadFile(templatePath)
+	if err != nil {
+		return fmt.Errorf("failed to open template file '%v'", templatePath)
+	}
+
+	// idl is the combination of the parsed idlFiles
+	idl := &ast.File{}
+
+	// Parse each of the WebIDL files and add the declarations to idl
+	for _, path := range idlFiles {
+		content, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("failed to open file '%v'", path)
+		}
+		fileIDL := parser.Parse(string(content))
+		if numErrs := len(fileIDL.Errors); numErrs != 0 {
+			errs := make([]string, numErrs)
+			for i, e := range fileIDL.Errors {
+				errs[i] = e.Message
+			}
+			return fmt.Errorf("errors found while parsing %v:\n%v", path, strings.Join(errs, "\n"))
+		}
+		idl.Declarations = append(idl.Declarations, fileIDL.Declarations...)
+	}
+
+	// Initialize the generator
+	g := generator{t: template.New(templatePath)}
+	g.workingDir = filepath.Dir(templatePath)
+	g.funcs = map[string]interface{}{
+		// Functions exposed to the template
+		"AttributesOf":               attributesOf,
+		"ConstantsOf":                constantsOf,
+		"EnumEntryName":              enumEntryName,
+		"Eval":                       g.eval,
+		"HasAnnotation":              hasAnnotation,
+		"Include":                    g.include,
+		"IsBasicLiteral":             is(ast.BasicLiteral{}),
+		"IsConstructor":              isConstructor,
+		"IsDefaultDictionaryLiteral": is(ast.DefaultDictionaryLiteral{}),
+		"IsDictionary":               is(ast.Dictionary{}),
+		"IsEnum":                     is(ast.Enum{}),
+		"IsInterface":                is(ast.Interface{}),
+		"IsInterfaceOrNamespace":     is(ast.Interface{}, ast.Namespace{}),
+		"IsMember":                   is(ast.Member{}),
+		"IsNamespace":                is(ast.Namespace{}),
+		"IsNullableType":             is(ast.NullableType{}),
+		"IsParametrizedType":         is(ast.ParametrizedType{}),
+		"IsRecordType":               is(ast.RecordType{}),
+		"IsSequenceType":             is(ast.SequenceType{}),
+		"IsTypedef":                  is(ast.Typedef{}),
+		"IsTypeName":                 is(ast.TypeName{}),
+		"IsUndefinedType":            isUndefinedType,
+		"IsUnionType":                is(ast.UnionType{}),
+		"Lookup":                     g.lookup,
+		"MethodsOf":                  methodsOf,
+		"SetlikeOf":                  setlikeOf,
+		"Title":                      strings.Title,
+	}
+	t, err := g.t.
+		Option("missingkey=invalid").
+		Funcs(g.funcs).
+		Parse(string(tmpl))
+	if err != nil {
+		return fmt.Errorf("failed to parse template file '%v': %w", templatePath, err)
+	}
+
+	// simplify the definitions in the WebIDL before passing this to the template
+	idl, declarations := simplify(idl)
+
+	// Patch the IDL for the differences we need compared to the upstream IDL.
+	patch(idl, declarations)
+	g.declarations = declarations
+
+	// Write the file header
+	fmt.Fprintf(out, header, strings.Join(os.Args[1:], "\n//   "))
+
+	// Execute the template
+	return t.Execute(out, idl)
+}
+
+// declarations is a map of WebIDL declaration name to its AST node.
+type declarations map[string]ast.Decl
+
+// nameOf returns the name of the AST node n.
+// Returns an empty string if the node is not named.
+func nameOf(n ast.Node) string {
+	switch n := n.(type) {
+	case *ast.Namespace:
+		return n.Name
+	case *ast.Interface:
+		return n.Name
+	case *ast.Dictionary:
+		return n.Name
+	case *ast.Enum:
+		return n.Name
+	case *ast.Typedef:
+		return n.Name
+	case *ast.Mixin:
+		return n.Name
+	case *ast.Includes:
+		return ""
+	default:
+		panic(fmt.Errorf("unhandled AST declaration %T", n))
+	}
+}
+
+// simplify processes the AST 'in', returning a new AST that:
+// * Has all partial interfaces merged into a single interface.
+// * Has all mixins flattened into their place of use.
+// * Has all the declarations ordered in dependency order (leaf first)
+// simplify also returns the map of declarations in the AST.
+func simplify(in *ast.File) (*ast.File, declarations) {
+	s := simplifier{
+		declarations: declarations{},
+		registered:   map[string]bool{},
+		out:          &ast.File{},
+	}
+
+	// Walk the IDL declarations to merge together partial interfaces and embed
+	// mixins into their uses.
+	{
+		interfaces := map[string]*ast.Interface{}
+		mixins := map[string]*ast.Mixin{}
+		includes := []*ast.Includes{}
+		for _, d := range in.Declarations {
+			switch d := d.(type) {
+			case *ast.Interface:
+				if i, ok := interfaces[d.Name]; ok {
+					// Merge partial body into one interface
+					i.Members = append(i.Members, d.Members...)
+				} else {
+					clone := *d
+					d := &clone
+					interfaces[d.Name] = d
+					s.declarations[d.Name] = d
+				}
+			case *ast.Mixin:
+				mixins[d.Name] = d
+				s.declarations[d.Name] = d
+			case *ast.Includes:
+				includes = append(includes, d)
+			default:
+				if name := nameOf(d); name != "" {
+					s.declarations[nameOf(d)] = d
+				}
+			}
+		}
+
+		// Merge mixin into interface
+		for _, include := range includes {
+			i, ok := interfaces[include.Name]
+			if !ok {
+				panic(fmt.Errorf("%v includes %v, but %v is not an interface", include.Name, include.Source, include.Name))
+			}
+			m, ok := mixins[include.Source]
+			if !ok {
+				panic(fmt.Errorf("%v includes %v, but %v is not an mixin", include.Name, include.Source, include.Source))
+			}
+			// Merge mixin into the interface
+			for _, member := range m.Members {
+				if member, ok := member.(*ast.Member); ok {
+					i.Members = append(i.Members, member)
+				}
+			}
+		}
+	}
+
+	// Now traverse the declarations in to produce the dependency-ordered
+	// output `s.out`.
+	for _, d := range in.Declarations {
+		if name := nameOf(d); name != "" {
+			s.visit(s.declarations[nameOf(d)])
+		}
+	}
+
+	return s.out, s.declarations
+}
+
+// simplifier holds internal state for simplify()
+type simplifier struct {
+	// all AST declarations
+	declarations declarations
+	// set of visited declarations
+	registered map[string]bool
+	// the dependency-ordered output
+	out *ast.File
+}
+
+// visit traverses the AST declaration 'd' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visit(d ast.Decl) {
+	register := func(name string) bool {
+		if s.registered[name] {
+			return true
+		}
+		s.registered[name] = true
+		return false
+	}
+	switch d := d.(type) {
+	case *ast.Namespace:
+		if register(d.Name) {
+			return
+		}
+		for _, m := range d.Members {
+			if m, ok := m.(*ast.Member); ok {
+				s.visitType(m.Type)
+				for _, p := range m.Parameters {
+					s.visitType(p.Type)
+				}
+			}
+		}
+	case *ast.Interface:
+		if register(d.Name) {
+			return
+		}
+		if d, ok := s.declarations[d.Inherits]; ok {
+			s.visit(d)
+		}
+		for _, m := range d.Members {
+			if m, ok := m.(*ast.Member); ok {
+				s.visitType(m.Type)
+				for _, p := range m.Parameters {
+					s.visitType(p.Type)
+				}
+			}
+		}
+	case *ast.Dictionary:
+		if register(d.Name) {
+			return
+		}
+		if d, ok := s.declarations[d.Inherits]; ok {
+			s.visit(d)
+		}
+		for _, m := range d.Members {
+			s.visitType(m.Type)
+			for _, p := range m.Parameters {
+				s.visitType(p.Type)
+			}
+		}
+	case *ast.Typedef:
+		if register(d.Name) {
+			return
+		}
+		s.visitType(d.Type)
+	case *ast.Mixin:
+		if register(d.Name) {
+			return
+		}
+		for _, m := range d.Members {
+			if m, ok := m.(*ast.Member); ok {
+				s.visitType(m.Type)
+				for _, p := range m.Parameters {
+					s.visitType(p.Type)
+				}
+			}
+		}
+	case *ast.Enum:
+		if register(d.Name) {
+			return
+		}
+	case *ast.Includes:
+		if register(d.Name) {
+			return
+		}
+	default:
+		panic(fmt.Errorf("unhandled AST declaration %T", d))
+	}
+
+	s.out.Declarations = append(s.out.Declarations, d)
+}
+
+// visitType traverses the AST type 't' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visitType(t ast.Type) {
+	switch t := t.(type) {
+	case *ast.TypeName:
+		if d, ok := s.declarations[t.Name]; ok {
+			s.visit(d)
+		}
+	case *ast.UnionType:
+		for _, t := range t.Types {
+			s.visitType(t)
+		}
+	case *ast.ParametrizedType:
+		for _, t := range t.Elems {
+			s.visitType(t)
+		}
+	case *ast.NullableType:
+		s.visitType(t.Type)
+	case *ast.SequenceType:
+		s.visitType(t.Elem)
+	case *ast.RecordType:
+		s.visitType(t.Elem)
+	default:
+		panic(fmt.Errorf("unhandled AST type %T", t))
+	}
+}
+
+func patch(idl *ast.File, decl declarations) {
+	// Add [SameObject] to GPUDevice.lost
+	for _, member := range decl["GPUDevice"].(*ast.Interface).Members {
+		if m := member.(*ast.Member); m != nil && m.Name == "lost" {
+			annotation := &ast.Annotation{}
+			annotation.Name = "SameObject"
+			m.Annotations = append(m.Annotations, annotation)
+		}
+	}
+}
+
+// generator holds the template generator state
+type generator struct {
+	// the root template
+	t *template.Template
+	// the working directory
+	workingDir string
+	// map of function name to function exposed to the template executor
+	funcs map[string]interface{}
+	// dependency-sorted declarations
+	declarations declarations
+}
+
+// eval executes the sub-template with the given name and arguments, returning
+// the generated output
+// args can be a single argument:
+//   arg[0]
+// or a list of name-value pairs:
+//   (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
+func (g *generator) eval(template string, args ...interface{}) (string, error) {
+	target := g.t.Lookup(template)
+	if target == nil {
+		return "", fmt.Errorf("template '%v' not found", template)
+	}
+	sb := strings.Builder{}
+	var err error
+	if len(args) == 1 {
+		err = target.Execute(&sb, args[0])
+	} else {
+		m := newMap()
+		if len(args)%2 != 0 {
+			return "", fmt.Errorf("Eval expects a single argument or list name-value pairs")
+		}
+		for i := 0; i < len(args); i += 2 {
+			name, ok := args[i].(string)
+			if !ok {
+				return "", fmt.Errorf("Eval argument %v is not a string", i)
+			}
+			m.Put(name, args[i+1])
+		}
+		err = target.Execute(&sb, m)
+	}
+	if err != nil {
+		return "", fmt.Errorf("while evaluating '%v': %v", template, err)
+	}
+	return sb.String(), nil
+}
+
+// lookup returns the declaration with the given name, or nil if not found.
+func (g *generator) lookup(name string) ast.Decl {
+	return g.declarations[name]
+}
+
+// include loads the template with the given path, importing the declarations
+// into the scope of the current template.
+func (g *generator) include(path string) (string, error) {
+	t, err := g.t.
+		Option("missingkey=invalid").
+		Funcs(g.funcs).
+		ParseFiles(filepath.Join(g.workingDir, path))
+	if err != nil {
+		return "", err
+	}
+	g.t.AddParseTree(path, t.Tree)
+	return "", nil
+}
+
+// Map is a simple generic key-value map, which can be used in the template
+type Map map[interface{}]interface{}
+
+func newMap() Map { return Map{} }
+
+// Put adds the key-value pair into the map.
+// Put always returns an empty string so nothing is printed in the template.
+func (m Map) Put(key, value interface{}) string {
+	m[key] = value
+	return ""
+}
+
+// Get looks up and returns the value with the given key. If the map does not
+// contain the given key, then nil is returned.
+func (m Map) Get(key interface{}) interface{} {
+	return m[key]
+}
+
+// is returns a function that returns true if the value passed to the function
+// matches any of the types of the objects in 'prototypes'.
+func is(prototypes ...interface{}) func(interface{}) bool {
+	types := make([]reflect.Type, len(prototypes))
+	for i, p := range prototypes {
+		types[i] = reflect.TypeOf(p)
+	}
+	return func(v interface{}) bool {
+		ty := reflect.TypeOf(v)
+		for _, rty := range types {
+			if ty == rty || ty == reflect.PtrTo(rty) {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// isConstructor returns true if the object is a constructor ast.Member.
+func isConstructor(v interface{}) bool {
+	if member, ok := v.(*ast.Member); ok {
+		if ty, ok := member.Type.(*ast.TypeName); ok {
+			return ty.Name == "constructor"
+		}
+	}
+	return false
+}
+
+// isUndefinedType returns true if the type is 'undefined'
+func isUndefinedType(ty ast.Type) bool {
+	if ty, ok := ty.(*ast.TypeName); ok {
+		return ty.Name == "undefined"
+	}
+	return false
+}
+
+// enumEntryName formats the enum entry name 's' for use in a C++ enum.
+func enumEntryName(s string) string {
+	return "k" + strings.ReplaceAll(pascalCase(strings.Trim(s, `"`)), "-", "")
+}
+
+func findAnnotation(list []*ast.Annotation, name string) *ast.Annotation {
+	for _, annotation := range list {
+		if annotation.Name == name {
+			return annotation
+		}
+	}
+	return nil
+}
+
+func hasAnnotation(obj interface{}, name string) bool {
+	switch obj := obj.(type) {
+	case *ast.Interface:
+		return findAnnotation(obj.Annotations, name) != nil
+	case *ast.Member:
+		return findAnnotation(obj.Annotations, name) != nil
+	case *ast.Namespace:
+		return findAnnotation(obj.Annotations, name) != nil
+	case *ast.Parameter:
+		return findAnnotation(obj.Annotations, name) != nil
+	case *ast.Typedef:
+		return findAnnotation(obj.Annotations, name) != nil || findAnnotation(obj.TypeAnnotations, name) != nil
+	}
+	panic("Unhandled AST node type in hasAnnotation")
+}
+
+// Method describes a WebIDL interface method
+type Method struct {
+	// Name of the method
+	Name string
+	// The list of overloads of the method
+	Overloads []*ast.Member
+}
+
+// methodsOf returns all the methods of the given WebIDL interface.
+func methodsOf(obj interface{}) []*Method {
+	iface, ok := obj.(*ast.Interface)
+	if !ok {
+		return nil
+	}
+	byName := map[string]*Method{}
+	out := []*Method{}
+	for _, member := range iface.Members {
+		member := member.(*ast.Member)
+		if !member.Const && !member.Attribute && !isConstructor(member) {
+			if method, ok := byName[member.Name]; ok {
+				method.Overloads = append(method.Overloads, member)
+			} else {
+				method = &Method{
+					Name:      member.Name,
+					Overloads: []*ast.Member{member},
+				}
+				byName[member.Name] = method
+				out = append(out, method)
+			}
+		}
+	}
+	return out
+}
+
+// attributesOf returns all the attributes of the given WebIDL interface or
+// namespace.
+func attributesOf(obj interface{}) []*ast.Member {
+	out := []*ast.Member{}
+	add := func(m interface{}) {
+		if m := m.(*ast.Member); m.Attribute {
+			out = append(out, m)
+		}
+	}
+	switch obj := obj.(type) {
+	case *ast.Interface:
+		for _, m := range obj.Members {
+			add(m)
+		}
+	case *ast.Namespace:
+		for _, m := range obj.Members {
+			add(m)
+		}
+	default:
+		return nil
+	}
+	return out
+}
+
+// constantsOf returns all the constant values of the given WebIDL interface or
+// namespace.
+func constantsOf(obj interface{}) []*ast.Member {
+	out := []*ast.Member{}
+	add := func(m interface{}) {
+		if m := m.(*ast.Member); m.Const {
+			out = append(out, m)
+		}
+	}
+	switch obj := obj.(type) {
+	case *ast.Interface:
+		for _, m := range obj.Members {
+			add(m)
+		}
+	case *ast.Namespace:
+		for _, m := range obj.Members {
+			add(m)
+		}
+	default:
+		return nil
+	}
+	return out
+}
+
+// setlikeOf returns the setlike ast.Pattern, if obj is a setlike interface.
+func setlikeOf(obj interface{}) *ast.Pattern {
+	iface, ok := obj.(*ast.Interface)
+	if !ok {
+		return nil
+	}
+	for _, pattern := range iface.Patterns {
+		if pattern.Type == ast.Setlike {
+			return pattern
+		}
+	}
+	return nil
+}
+
+// pascalCase returns the snake-case string s transformed into 'PascalCase',
+// Rules:
+// * The first letter of the string is capitalized
+// * Characters following an underscore, hyphen or number are capitalized
+// * Underscores are removed from the returned string
+// See: https://en.wikipedia.org/wiki/Camel_case
+func pascalCase(s string) string {
+	b := strings.Builder{}
+	upper := true
+	for _, r := range s {
+		if r == '_' || r == '-' {
+			upper = true
+			continue
+		}
+		if upper {
+			b.WriteRune(unicode.ToUpper(r))
+			upper = false
+		} else {
+			b.WriteRune(r)
+		}
+		if unicode.IsNumber(r) {
+			upper = true
+		}
+	}
+	return b.String()
+}
+
+const header = `// Copyright 2021 The Dawn Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/cmd/idlgen.go, with the arguments:
+//   %v
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+`
diff --git a/src/dawn/node/tools/src/cmd/run-cts/main.go b/src/dawn/node/tools/src/cmd/run-cts/main.go
new file mode 100644
index 0000000..d20294b
--- /dev/null
+++ b/src/dawn/node/tools/src/cmd/run-cts/main.go
@@ -0,0 +1,1166 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"os"
+	"os/exec"
+	"os/signal"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unicode/utf8"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+const (
+	testTimeout = time.Minute
+)
+
+func main() {
+	if err := run(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+func showUsage() {
+	fmt.Println(`
+run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+
+Usage:
+  run-cts --dawn-node=<path to dawn.node> --cts=<path to WebGPU CTS> [test-query]`)
+	os.Exit(1)
+}
+
+var (
+	colors  bool
+	stdout  io.Writer
+	mainCtx context.Context
+)
+
+// ANSI escape sequences
+const (
+	escape       = "\u001B["
+	positionLeft = escape + "0G"
+	ansiReset    = escape + "0m"
+
+	bold = escape + "1m"
+
+	red     = escape + "31m"
+	green   = escape + "32m"
+	yellow  = escape + "33m"
+	blue    = escape + "34m"
+	magenta = escape + "35m"
+	cyan    = escape + "36m"
+	white   = escape + "37m"
+)
+
+type dawnNodeFlags []string
+
+func (f *dawnNodeFlags) String() string {
+	return fmt.Sprint(strings.Join(*f, ""))
+}
+
+func (f *dawnNodeFlags) Set(value string) error {
+	// Multiple flags must be passed in individually:
+	// -flag=a=b -dawn_node_flag=c=d
+	*f = append(*f, value)
+	return nil
+}
+
+func makeMainCtx() context.Context {
+	ctx, cancel := context.WithCancel(context.Background())
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+	go func() {
+		sig := <-sigs
+		fmt.Printf("Signal received: %v\n", sig)
+		cancel()
+	}()
+	return ctx
+}
+
+func run() error {
+	mainCtx = makeMainCtx()
+
+	colors = os.Getenv("TERM") != "dumb" ||
+		isatty.IsTerminal(os.Stdout.Fd()) ||
+		isatty.IsCygwinTerminal(os.Stdout.Fd())
+	if colors {
+		if _, disable := os.LookupEnv("NO_COLOR"); disable {
+			colors = false
+		}
+	}
+
+	backendDefault := "default"
+	if vkIcdFilenames := os.Getenv("VK_ICD_FILENAMES"); vkIcdFilenames != "" {
+		backendDefault = "vulkan"
+	}
+
+	var dawnNode, cts, node, npx, resultsPath, expectationsPath, logFilename, backend string
+	var verbose, isolated, build bool
+	var numRunners int
+	var flags dawnNodeFlags
+	flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
+	flag.StringVar(&cts, "cts", defaultCtsPath(), "root directory of WebGPU CTS")
+	flag.StringVar(&node, "node", defaultNodePath(), "path to node executable")
+	flag.StringVar(&npx, "npx", "", "path to npx executable")
+	flag.StringVar(&resultsPath, "output", "", "path to write test results file")
+	flag.StringVar(&expectationsPath, "expect", "", "path to expectations file")
+	flag.BoolVar(&verbose, "verbose", false, "print extra information while testing")
+	flag.BoolVar(&build, "build", true, "attempt to build the CTS before running")
+	flag.BoolVar(&isolated, "isolate", false, "run each test in an isolated process")
+	flag.BoolVar(&colors, "colors", colors, "enable / disable colors")
+	flag.IntVar(&numRunners, "j", runtime.NumCPU()/2, "number of concurrent runners. 0 runs serially")
+	flag.StringVar(&logFilename, "log", "", "path to log file of tests run and result")
+	flag.Var(&flags, "flag", "flag to pass to dawn-node as flag=value. multiple flags must be passed in individually")
+	flag.StringVar(&backend, "backend", backendDefault, "backend to use: default|null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles."+
+		" set to 'vulkan' if VK_ICD_FILENAMES environment variable is set, 'default' otherwise")
+	flag.Parse()
+
+	if colors {
+		stdout = colorable.NewColorableStdout()
+	} else {
+		stdout = colorable.NewNonColorable(os.Stdout)
+	}
+
+	// Check mandatory arguments
+	if dawnNode == "" || cts == "" {
+		showUsage()
+	}
+	if !isFile(dawnNode) {
+		return fmt.Errorf("'%v' is not a file", dawnNode)
+	}
+	if !isDir(cts) {
+		return fmt.Errorf("'%v' is not a directory", cts)
+	}
+
+	// Make paths absolute
+	for _, path := range []*string{&dawnNode, &cts} {
+		abs, err := filepath.Abs(*path)
+		if err != nil {
+			return fmt.Errorf("unable to get absolute path for '%v'", *path)
+		}
+		*path = abs
+	}
+
+	// The test query is the optional unnamed argument
+	query := "webgpu:*"
+	switch len(flag.Args()) {
+	case 0:
+	case 1:
+		query = flag.Args()[0]
+	default:
+		return fmt.Errorf("only a single query can be provided")
+	}
+
+	// Find node
+	if node == "" {
+		return fmt.Errorf("cannot find path to node. Specify with --node")
+	}
+	// Find npx
+	if npx == "" {
+		var err error
+		npx, err = exec.LookPath("npx")
+		if err != nil {
+			npx = ""
+		}
+	}
+
+	// Forward the backend to use, if specified.
+	if backend != "default" {
+		fmt.Println("Forcing backend to", backend)
+		flags = append(flags, fmt.Sprint("dawn-backend=", backend))
+	}
+
+	// While running the CTS, always allow unsafe APIs so they can be tested.
+	disableDawnFeaturesFound := false
+	for i, flag := range flags {
+		if strings.HasPrefix(flag, "disable-dawn-features=") {
+			flags[i] = flag + ",disallow_unsafe_apis"
+			disableDawnFeaturesFound = true
+		}
+	}
+	if !disableDawnFeaturesFound {
+		flags = append(flags, "disable-dawn-features=disallow_unsafe_apis")
+	}
+
+	r := runner{
+		numRunners: numRunners,
+		verbose:    verbose,
+		node:       node,
+		npx:        npx,
+		dawnNode:   dawnNode,
+		cts:        cts,
+		flags:      flags,
+		results:    testcaseStatuses{},
+		evalScript: func(main string) string {
+			return fmt.Sprintf(`require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/%v.ts');`, main)
+		},
+	}
+
+	if logFilename != "" {
+		writer, err := os.Create(logFilename)
+		if err != nil {
+			return fmt.Errorf("failed to open log '%v': %w", logFilename, err)
+		}
+		defer writer.Close()
+		r.log = newLogger(writer)
+	}
+
+	cache := cache{}
+	cachePath := dawnNode + ".runcts.cache"
+	if err := cache.load(cachePath); err != nil && verbose {
+		fmt.Println("failed to load cache from", cachePath, err)
+	}
+	defer cache.save(cachePath)
+
+	// Scan the CTS source to determine the most recent change to the CTS source
+	mostRecentSourceChange, err := r.scanSourceTimestamps(verbose)
+	if err != nil {
+		return fmt.Errorf("failed to scan source files for modified timestamps: %w", err)
+	}
+
+	ctsNeedsRebuild := mostRecentSourceChange.After(cache.BuildTimestamp) ||
+		!isDir(filepath.Join(r.cts, "out-node"))
+	if build {
+		if verbose {
+			fmt.Println("CTS needs rebuild:", ctsNeedsRebuild)
+		}
+
+		if npx != "" {
+			if ctsNeedsRebuild {
+				if err := r.buildCTS(verbose); err != nil {
+					return fmt.Errorf("failed to build CTS: %w", err)
+				}
+				cache.BuildTimestamp = mostRecentSourceChange
+			}
+			// Use the prebuilt CTS (instead of using the `setup-ts-in-node` transpiler)
+			r.evalScript = func(main string) string {
+				return fmt.Sprintf(`require('./out-node/common/runtime/%v.js');`, main)
+			}
+		} else {
+			fmt.Println("npx not found on PATH. Using runtime TypeScript transpilation (slow)")
+		}
+	}
+
+	// If an expectations file was specified, load it.
+	if expectationsPath != "" {
+		if ex, err := loadExpectations(expectationsPath); err == nil {
+			r.expectations = ex
+		} else {
+			return err
+		}
+	}
+
+	if numRunners > 0 {
+		// Find all the test cases that match the given queries.
+		if err := r.gatherTestCases(query, verbose); err != nil {
+			return fmt.Errorf("failed to gather test cases: %w", err)
+		}
+
+		if isolated {
+			fmt.Println("Running in parallel isolated...")
+			fmt.Printf("Testing %d test cases...\n", len(r.testcases))
+			if err := r.runParallelIsolated(); err != nil {
+				return err
+			}
+		} else {
+			fmt.Println("Running in parallel with server...")
+			fmt.Printf("Testing %d test cases...\n", len(r.testcases))
+			if err := r.runParallelWithServer(); err != nil {
+				return err
+			}
+		}
+	} else {
+		fmt.Println("Running serially...")
+		if err := r.runSerially(query); err != nil {
+			return err
+		}
+	}
+
+	if resultsPath != "" {
+		if err := saveExpectations(resultsPath, r.results); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type logger struct {
+	writer        io.Writer
+	idx           int
+	resultByIndex map[int]result
+}
+
+// newLogger creates a new logger instance.
+func newLogger(writer io.Writer) logger {
+	return logger{writer, 0, map[int]result{}}
+}
+
+// logResult writes the test results to the log file in sequential order.
+// logResult should be called whenever a new test result becomes available.
+func (l *logger) logResults(res result) {
+	if l.writer == nil {
+		return
+	}
+	l.resultByIndex[res.index] = res
+	for {
+		logRes, ok := l.resultByIndex[l.idx]
+		if !ok {
+			break
+		}
+		fmt.Fprintf(l.writer, "%v [%v]\n", logRes.testcase, logRes.status)
+		l.idx++
+	}
+}
+
+// Cache holds cached information between runs to optimize runs
+type cache struct {
+	BuildTimestamp time.Time
+}
+
+// load loads the cache information from the JSON file at path
+func (c *cache) load(path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return json.NewDecoder(f).Decode(c)
+}
+
+// save saves the cache information to the JSON file at path
+func (c *cache) save(path string) error {
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return json.NewEncoder(f).Encode(c)
+}
+
+type runner struct {
+	numRunners               int
+	verbose                  bool
+	node, npx, dawnNode, cts string
+	flags                    dawnNodeFlags
+	evalScript               func(string) string
+	testcases                []string
+	expectations             testcaseStatuses
+	results                  testcaseStatuses
+	log                      logger
+}
+
+// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
+// r.cts, and returns the file with the most recent timestamp.
+func (r *runner) scanSourceTimestamps(verbose bool) (time.Time, error) {
+	if verbose {
+		start := time.Now()
+		fmt.Println("Scanning .js / .ts files for changes...")
+		defer func() {
+			fmt.Println("completed in", time.Since(start))
+		}()
+	}
+
+	dir := filepath.Join(r.cts, "src")
+
+	mostRecentChange := time.Time{}
+	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		switch filepath.Ext(path) {
+		case ".ts", ".js":
+			if info.ModTime().After(mostRecentChange) {
+				mostRecentChange = info.ModTime()
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return time.Time{}, err
+	}
+	return mostRecentChange, nil
+}
+
+// buildCTS calls `npx grunt run:build-out-node` in the CTS directory to compile
+// the TypeScript files down to JavaScript. Doing this once ahead of time can be
+// much faster than dynamically transpiling when there are many tests to run.
+func (r *runner) buildCTS(verbose bool) error {
+	if verbose {
+		start := time.Now()
+		fmt.Println("Building CTS...")
+		defer func() {
+			fmt.Println("completed in", time.Since(start))
+		}()
+	}
+
+	cmd := exec.Command(r.npx, "grunt", "run:build-out-node")
+	cmd.Dir = r.cts
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%w: %v", err, string(out))
+	}
+	return nil
+}
+
+// gatherTestCases() queries the CTS for all test cases that match the given
+// query. On success, gatherTestCases() populates r.testcases.
+func (r *runner) gatherTestCases(query string, verbose bool) error {
+	if verbose {
+		start := time.Now()
+		fmt.Println("Gathering test cases...")
+		defer func() {
+			fmt.Println("completed in", time.Since(start))
+		}()
+	}
+
+	args := append([]string{
+		"-e", r.evalScript("cmdline"),
+		"--", // Start of arguments
+		// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+		// and slices away the first two arguments. When running with '-e', args
+		// start at 1, so just inject a dummy argument.
+		"dummy-arg",
+		"--list",
+	}, query)
+
+	cmd := exec.Command(r.node, args...)
+	cmd.Dir = r.cts
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%w\n%v", err, string(out))
+	}
+
+	tests := filterTestcases(strings.Split(string(out), "\n"))
+	r.testcases = tests
+	return nil
+}
+
+type portListener struct {
+	buffer strings.Builder
+	port   chan int
+}
+
+func newPortListener() portListener {
+	return portListener{strings.Builder{}, make(chan int)}
+}
+
+var portRE = regexp.MustCompile(`\[\[(\d+)\]\]`)
+
+func (p *portListener) Write(data []byte) (n int, err error) {
+	if p.port != nil {
+		p.buffer.Write(data)
+		match := portRE.FindStringSubmatch(p.buffer.String())
+		if len(match) == 2 {
+			port, err := strconv.Atoi(match[1])
+			if err != nil {
+				return 0, err
+			}
+			p.port <- port
+			close(p.port)
+			p.port = nil
+		}
+	}
+	return len(data), nil
+}
+
+// runParallelWithServer() starts r.numRunners instances of the CTS server test
+// runner, and issues test run requests to those servers, concurrently.
+func (r *runner) runParallelWithServer() error {
+	// Create a chan of test indices.
+	// This will be read by the test runner goroutines.
+	caseIndices := make(chan int, len(r.testcases))
+	for i := range r.testcases {
+		caseIndices <- i
+	}
+	close(caseIndices)
+
+	// Create a chan for the test results.
+	// This will be written to by the test runner goroutines.
+	results := make(chan result, len(r.testcases))
+
+	// Spin up the test runner goroutines
+	wg := &sync.WaitGroup{}
+	for i := 0; i < r.numRunners; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := r.runServer(caseIndices, results); err != nil {
+				results <- result{
+					status: fail,
+					error:  fmt.Errorf("Test server error: %w", err),
+				}
+			}
+		}()
+	}
+
+	r.streamResults(wg, results)
+	return nil
+}
+
+type redirectingWriter struct {
+	io.Writer
+}
+
+// runServer starts a test runner server instance, takes case indices from
+// caseIndices, and requests the server run the test with the given index.
+// The result of the test run is written to the results chan.
+// Once the caseIndices chan has been closed, the server is stopped and
+// runServer returns.
+func (r *runner) runServer(caseIndices <-chan int, results chan<- result) error {
+	var port int
+	var rw redirectingWriter
+
+	stopServer := func() {}
+	startServer := func() error {
+		args := []string{
+			"-e", r.evalScript("server"), // Evaluate 'eval'.
+			"--",
+			// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+			// and slices away the first two arguments. When running with '-e', args
+			// start at 1, so just inject a dummy argument.
+			"dummy-arg",
+			// Actual arguments begin here
+			"--gpu-provider", r.dawnNode,
+		}
+		for _, f := range r.flags {
+			args = append(args, "--gpu-provider-flag", f)
+		}
+
+		ctx := mainCtx
+		cmd := exec.CommandContext(ctx, r.node, args...)
+
+		serverLog := &bytes.Buffer{}
+
+		pl := newPortListener()
+
+		cmd.Dir = r.cts
+		cmd.Stdout = io.MultiWriter(&rw, serverLog, &pl)
+		cmd.Stderr = io.MultiWriter(&rw, serverLog)
+
+		err := cmd.Start()
+		if err != nil {
+			return fmt.Errorf("failed to start test runner server: %v", err)
+		}
+
+		select {
+		case port = <-pl.port:
+		case <-time.After(time.Second * 10):
+			return fmt.Errorf("timeout waiting for server port:\n%v", serverLog.String())
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		return nil
+	}
+	stopServer = func() {
+		if port > 0 {
+			go http.Post(fmt.Sprintf("http://localhost:%v/terminate", port), "", &bytes.Buffer{})
+			time.Sleep(time.Millisecond * 100)
+			port = 0
+		}
+	}
+
+	for idx := range caseIndices {
+		// Redirect the server log per test case
+		caseServerLog := &bytes.Buffer{}
+		rw.Writer = caseServerLog
+
+		if port == 0 {
+			if err := startServer(); err != nil {
+				return err
+			}
+		}
+
+		res := result{index: idx, testcase: r.testcases[idx]}
+
+		type Response struct {
+			Status  string
+			Message string
+		}
+		postResp, err := http.Post(fmt.Sprintf("http://localhost:%v/run?%v", port, r.testcases[idx]), "", &bytes.Buffer{})
+		if err != nil {
+			res.error = fmt.Errorf("server POST failure. Restarting server... This can happen when there is a crash. Try running with --isolate.")
+			res.status = fail
+			results <- res
+			stopServer()
+			continue
+		}
+
+		if postResp.StatusCode == http.StatusOK {
+			var resp Response
+			if err := json.NewDecoder(postResp.Body).Decode(&resp); err != nil {
+				res.error = fmt.Errorf("server response decode failure")
+				res.status = fail
+				results <- res
+				continue
+			}
+
+			switch resp.Status {
+			case "pass":
+				res.status = pass
+				res.message = resp.Message + caseServerLog.String()
+			case "warn":
+				res.status = warn
+				res.message = resp.Message + caseServerLog.String()
+			case "fail":
+				res.status = fail
+				res.message = resp.Message + caseServerLog.String()
+			case "skip":
+				res.status = skip
+				res.message = resp.Message + caseServerLog.String()
+			default:
+				res.status = fail
+				res.error = fmt.Errorf("unknown status: '%v'", resp.Status)
+			}
+		} else {
+			msg, err := ioutil.ReadAll(postResp.Body)
+			if err != nil {
+				msg = []byte(err.Error())
+			}
+			res.status = fail
+			res.error = fmt.Errorf("server error: %v", string(msg))
+		}
+		results <- res
+	}
+
+	stopServer()
+	return nil
+}
+
+// runParallelIsolated() calls the CTS command-line test runner to run each
+// testcase in a separate process. This reduces possibility of state leakage
+// between tests.
+// Up to r.numRunners tests will be run concurrently.
+func (r *runner) runParallelIsolated() error {
+	// Create a chan of test indices.
+	// This will be read by the test runner goroutines.
+	caseIndices := make(chan int, len(r.testcases))
+	for i := range r.testcases {
+		caseIndices <- i
+	}
+	close(caseIndices)
+
+	// Create a chan for the test results.
+	// This will be written to by the test runner goroutines.
+	results := make(chan result, len(r.testcases))
+
+	// Spin up the test runner goroutines
+	wg := &sync.WaitGroup{}
+	for i := 0; i < r.numRunners; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for idx := range caseIndices {
+				res := r.runTestcase(r.testcases[idx])
+				res.index = idx
+				results <- res
+			}
+		}()
+	}
+
+	r.streamResults(wg, results)
+	return nil
+}
+
+// streamResults reads from the chan 'results', printing the results in test-id
+// sequential order. Once the WaitGroup 'wg' is complete, streamResults() will
+// automatically close the 'results' chan.
+// Once all the results have been printed, a summary will be printed and the
+// function will return.
+func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
+	// Create another goroutine to close the results chan when all the runner
+	// goroutines have finished.
+	start := time.Now()
+	var timeTaken time.Duration
+	go func() {
+		wg.Wait()
+		timeTaken = time.Since(start)
+		close(results)
+	}()
+
+	// Total number of tests, test counts binned by status
+	numTests, numByExpectedStatus := len(r.testcases), map[expectedStatus]int{}
+
+	// Helper function for printing a progress bar.
+	lastStatusUpdate, animFrame := time.Now(), 0
+	updateProgress := func() {
+		printANSIProgressBar(animFrame, numTests, numByExpectedStatus)
+		animFrame++
+		lastStatusUpdate = time.Now()
+	}
+
+	// Pull test results as they become available.
+	// Update the status counts, and print any failures (or all test results if --verbose)
+	progressUpdateRate := time.Millisecond * 10
+	if !colors {
+		// No colors == no cursor control. Reduce progress updates so that
+		// we're not printing endless progress bars.
+		progressUpdateRate = time.Second
+	}
+
+	for res := range results {
+		r.log.logResults(res)
+		r.results[res.testcase] = res.status
+		expected := r.expectations[res.testcase]
+		exStatus := expectedStatus{
+			status:   res.status,
+			expected: expected == res.status,
+		}
+		numByExpectedStatus[exStatus] = numByExpectedStatus[exStatus] + 1
+		name := res.testcase
+		if r.verbose ||
+			res.error != nil ||
+			(exStatus.status != pass && exStatus.status != skip && !exStatus.expected) {
+			fmt.Printf("%v - %v: %v", name, res.status, res.message)
+			if expected != "" {
+				fmt.Printf(" [%v -> %v]", expected, res.status)
+			}
+			fmt.Println()
+			if res.error != nil {
+				fmt.Println(res.error)
+			}
+			updateProgress()
+		}
+		if time.Since(lastStatusUpdate) > progressUpdateRate {
+			updateProgress()
+		}
+	}
+	printANSIProgressBar(animFrame, numTests, numByExpectedStatus)
+
+	// All done. Print final stats.
+	fmt.Printf("\nCompleted in %v\n", timeTaken)
+
+	var numExpectedByStatus map[status]int
+	if r.expectations != nil {
+		// The status of each testcase that was run
+		numExpectedByStatus = map[status]int{}
+		for t, s := range r.expectations {
+			if _, wasTested := r.results[t]; wasTested {
+				numExpectedByStatus[s] = numExpectedByStatus[s] + 1
+			}
+		}
+	}
+
+	for _, s := range statuses {
+		// number of tests, just run, that resulted in the given status
+		numByStatus := numByExpectedStatus[expectedStatus{s, true}] +
+			numByExpectedStatus[expectedStatus{s, false}]
+		// difference in number of tests that had the given status from the
+		// expected number (taken from the expectations file)
+		diffFromExpected := 0
+		if numExpectedByStatus != nil {
+			diffFromExpected = numByStatus - numExpectedByStatus[s]
+		}
+		if numByStatus == 0 && diffFromExpected == 0 {
+			continue
+		}
+
+		fmt.Print(bold, statusColor[s])
+		fmt.Print(alignRight(strings.ToUpper(string(s))+": ", 10))
+		fmt.Print(ansiReset)
+		if numByStatus > 0 {
+			fmt.Print(bold)
+		}
+		fmt.Print(alignLeft(numByStatus, 10))
+		fmt.Print(ansiReset)
+		fmt.Print(alignRight("("+percentage(numByStatus, numTests)+")", 6))
+
+		if diffFromExpected != 0 {
+			fmt.Print(bold, " [")
+			fmt.Printf("%+d", diffFromExpected)
+			fmt.Print(ansiReset, "]")
+		}
+		fmt.Println()
+	}
+
+}
+
+// runSerially() calls the CTS test runner to run the test query in a single
+// process.
+// TODO(bclayton): Support comparing against r.expectations
+func (r *runner) runSerially(query string) error {
+	start := time.Now()
+	result := r.runTestcase(query)
+	timeTaken := time.Since(start)
+
+	if r.verbose {
+		fmt.Println(result)
+	}
+	fmt.Println("Status:", result.status)
+	fmt.Println("Completed in", timeTaken)
+	return nil
+}
+
+// status is an enumerator of test result status
+type status string
+
+const (
+	pass    status = "pass"
+	warn    status = "warn"
+	fail    status = "fail"
+	skip    status = "skip"
+	timeout status = "timeout"
+)
+
+// All the status types
+var statuses = []status{pass, warn, fail, skip, timeout}
+
+var statusColor = map[status]string{
+	pass:    green,
+	warn:    yellow,
+	skip:    blue,
+	timeout: yellow,
+	fail:    red,
+}
+
+// expectedStatus is a test status, along with a boolean to indicate whether the
+// status matches the test expectations
+type expectedStatus struct {
+	status   status
+	expected bool
+}
+
+// result holds the information about a completed test
+type result struct {
+	index    int
+	testcase string
+	status   status
+	message  string
+	error    error
+}
+
+// runTestcase() runs the CTS testcase with the given query, returning the test
+// result.
+func (r *runner) runTestcase(query string) result {
+	ctx, cancel := context.WithTimeout(mainCtx, testTimeout)
+	defer cancel()
+
+	args := []string{
+		"-e", r.evalScript("cmdline"), // Evaluate 'eval'.
+		"--",
+		// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+		// and slices away the first two arguments. When running with '-e', args
+		// start at 1, so just inject a dummy argument.
+		"dummy-arg",
+		// Actual arguments begin here
+		"--gpu-provider", r.dawnNode,
+		"--verbose",
+	}
+	for _, f := range r.flags {
+		args = append(args, "--gpu-provider-flag", f)
+	}
+	args = append(args, query)
+
+	cmd := exec.CommandContext(ctx, r.node, args...)
+	cmd.Dir = r.cts
+
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	cmd.Stderr = &buf
+
+	err := cmd.Run()
+	msg := buf.String()
+	switch {
+	case errors.Is(err, context.DeadlineExceeded):
+		return result{testcase: query, status: timeout, message: msg}
+	case strings.Contains(msg, "[fail]"):
+		return result{testcase: query, status: fail, message: msg}
+	case strings.Contains(msg, "[warn]"):
+		return result{testcase: query, status: warn, message: msg}
+	case strings.Contains(msg, "[skip]"):
+		return result{testcase: query, status: skip, message: msg}
+	case strings.Contains(msg, "[pass]"), err == nil:
+		return result{testcase: query, status: pass, message: msg}
+	}
+	return result{testcase: query, status: fail, message: fmt.Sprint(msg, err), error: err}
+}
+
+// filterTestcases returns in with empty strings removed
+func filterTestcases(in []string) []string {
+	out := make([]string, 0, len(in))
+	for _, c := range in {
+		if c != "" {
+			out = append(out, c)
+		}
+	}
+	return out
+}
+
+// percentage returns the percentage of n out of total as a string
+func percentage(n, total int) string {
+	if total == 0 {
+		return "-"
+	}
+	f := float64(n) / float64(total)
+	return fmt.Sprintf("%.1f%c", f*100.0, '%')
+}
+
+// isDir returns true if the path resolves to a directory
+func isDir(path string) bool {
+	s, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return s.IsDir()
+}
+
+// isFile returns true if the path resolves to a file
+func isFile(path string) bool {
+	s, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return !s.IsDir()
+}
+
+// alignLeft returns the string of 'val' padded so that it is aligned left in
+// a column of the given width
+func alignLeft(val interface{}, width int) string {
+	s := fmt.Sprint(val)
+	padding := width - utf8.RuneCountInString(s)
+	if padding < 0 {
+		return s
+	}
+	return s + strings.Repeat(" ", padding)
+}
+
+// alignRight returns the string of 'val' padded so that it is aligned right in
+// a column of the given width
+func alignRight(val interface{}, width int) string {
+	s := fmt.Sprint(val)
+	padding := width - utf8.RuneCountInString(s)
+	if padding < 0 {
+		return s
+	}
+	return strings.Repeat(" ", padding) + s
+}
+
+// printANSIProgressBar prints a colored progress bar, providing realtime
+// information about the status of the CTS run.
+// Note: We'll want to skip this if !isatty or if we're running on windows.
+func printANSIProgressBar(animFrame int, numTests int, numByExpectedStatus map[expectedStatus]int) {
+	const barWidth = 50
+
+	animSymbols := []rune{'⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'}
+	blockSymbols := []rune{'▏', '▎', '▍', '▌', '▋', '▊', '▉'}
+
+	numBlocksPrinted := 0
+
+	fmt.Fprint(stdout, string(animSymbols[animFrame%len(animSymbols)]), " [")
+	animFrame++
+
+	numFinished := 0
+
+	for _, status := range statuses {
+		for _, expected := range []bool{true, false} {
+			color := statusColor[status]
+			if expected {
+				color += bold
+			}
+
+			num := numByExpectedStatus[expectedStatus{status, expected}]
+			numFinished += num
+			statusFrac := float64(num) / float64(numTests)
+			fNumBlocks := barWidth * statusFrac
+			fmt.Fprint(stdout, color)
+			numBlocks := int(math.Ceil(fNumBlocks))
+			if expected {
+				if numBlocks > 1 {
+					fmt.Print(strings.Repeat(string("░"), numBlocks))
+				}
+			} else {
+				if numBlocks > 1 {
+					fmt.Print(strings.Repeat(string("▉"), numBlocks))
+				}
+				if numBlocks > 0 {
+					frac := fNumBlocks - math.Floor(fNumBlocks)
+					symbol := blockSymbols[int(math.Round(frac*float64(len(blockSymbols)-1)))]
+					fmt.Print(string(symbol))
+				}
+			}
+			numBlocksPrinted += numBlocks
+		}
+	}
+
+	if barWidth > numBlocksPrinted {
+		fmt.Print(strings.Repeat(string(" "), barWidth-numBlocksPrinted))
+	}
+	fmt.Fprint(stdout, ansiReset)
+	fmt.Print("] ", percentage(numFinished, numTests))
+
+	if colors {
+		// move cursor to start of line so the bar is overridden
+		fmt.Fprint(stdout, positionLeft)
+	} else {
+		// cannot move cursor, so newline
+		fmt.Println()
+	}
+}
+
+// testcaseStatus is a pair of testcase name and result status
+// Intended to be serialized for expectations files.
+type testcaseStatus struct {
+	Testcase string
+	Status   status
+}
+
+// testcaseStatuses is a map of testcase to test status
+type testcaseStatuses map[string]status
+
+// loadExpectations loads the test expectations from path
+func loadExpectations(path string) (testcaseStatuses, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open expectations file: %w", err)
+	}
+	defer f.Close()
+
+	statuses := []testcaseStatus{}
+	if err := json.NewDecoder(f).Decode(&statuses); err != nil {
+		return nil, fmt.Errorf("failed to read expectations file: %w", err)
+	}
+
+	out := make(testcaseStatuses, len(statuses))
+	for _, s := range statuses {
+		out[s.Testcase] = s.Status
+	}
+	return out, nil
+}
+
+// saveExpectations saves the test results 'ex' as an expectations file to path
+func saveExpectations(path string, ex testcaseStatuses) error {
+	f, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to create expectations file: %w", err)
+	}
+	defer f.Close()
+
+	statuses := make([]testcaseStatus, 0, len(ex))
+	for testcase, status := range ex {
+		statuses = append(statuses, testcaseStatus{testcase, status})
+	}
+	sort.Slice(statuses, func(i, j int) bool { return statuses[i].Testcase < statuses[j].Testcase })
+
+	e := json.NewEncoder(f)
+	e.SetIndent("", "  ")
+	if err := e.Encode(&statuses); err != nil {
+		return fmt.Errorf("failed to save expectations file: %w", err)
+	}
+
+	return nil
+}
+
+// defaultNodePath looks for the node binary, first in dawn's third_party
+// directory, falling back to PATH. This is used as the default for the --node
+// command line flag.
+func defaultNodePath() string {
+	if dir := thisDir(); dir != "" {
+		if dawnRoot := getDawnRoot(); dawnRoot != "" {
+			node := filepath.Join(dawnRoot, "third_party/node")
+			if info, err := os.Stat(node); err == nil && info.IsDir() {
+				path := ""
+				switch fmt.Sprintf("%v/%v", runtime.GOOS, runtime.GOARCH) { // See `go tool dist list`
+				case "darwin/amd64":
+					path = filepath.Join(node, "node-darwin-x64/bin/node")
+				case "darwin/arm64":
+					path = filepath.Join(node, "node-darwin-arm64/bin/node")
+				case "linux/amd64":
+					path = filepath.Join(node, "node-linux-x64/bin/node")
+				case "windows/amd64":
+					path = filepath.Join(node, "node.exe")
+				}
+				if _, err := os.Stat(path); err == nil {
+					return path
+				}
+			}
+		}
+	}
+
+	if path, err := exec.LookPath("node"); err == nil {
+		return path
+	}
+
+	return ""
+}
+
+// defaultCtsPath looks for the webgpu-cts directory in dawn's third_party
+// directory. This is used as the default for the --cts command line flag.
+func defaultCtsPath() string {
+	if dir := thisDir(); dir != "" {
+		if dawnRoot := getDawnRoot(); dawnRoot != "" {
+			cts := filepath.Join(dawnRoot, "third_party/webgpu-cts")
+			if info, err := os.Stat(cts); err == nil && info.IsDir() {
+				return cts
+			}
+		}
+	}
+
+	return ""
+}
+
+// getDawnRoot returns the path to the dawn project's root directory or empty
+// string if not found.
+func getDawnRoot() string {
+	return getPathOfFileInParentDirs(thisDir(), "DEPS")
+}
+
+// getPathOfFileInParentDirs looks for file with `name` in paths starting from
+// `path`, and up into parent directories, returning the clean path in which the
+// file is found, or empty string if not found.
+func getPathOfFileInParentDirs(path string, name string) string {
+	sep := string(filepath.Separator)
+	path, _ = filepath.Abs(path)
+	numDirs := strings.Count(path, sep) + 1
+	for i := 0; i < numDirs; i++ {
+		test := filepath.Join(path, name)
+		if _, err := os.Stat(test); err == nil {
+			return filepath.Clean(path)
+		}
+
+		path = path + sep + ".."
+	}
+	return ""
+}
+
+// thisDir returns the path to the directory that holds the .go file of the
+// caller function
+func thisDir() string {
+	_, file, _, ok := runtime.Caller(1)
+	if !ok {
+		return ""
+	}
+	return filepath.Dir(file)
+}
diff --git a/src/dawn/node/utils/Debug.h b/src/dawn/node/utils/Debug.h
new file mode 100644
index 0000000..cf7c7d8
--- /dev/null
+++ b/src/dawn/node/utils/Debug.h
@@ -0,0 +1,146 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNODE_UTILS_DEBUG_H_
+#define DAWNNODE_UTILS_DEBUG_H_
+
+#include <iostream>
+#include <optional>
+#include <sstream>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "dawn/webgpu_cpp_print.h"
+
+namespace wgpu::utils {
+
+    // Write() is a helper for printing container types to the std::ostream.
+    // Write() is used by the LOG() macro below.
+
+    // Forward declarations
+    inline std::ostream& Write(std::ostream& out) {
+        return out;
+    }
+    template <typename T>
+    inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
+    template <typename T>
+    inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
+    template <typename K, typename V>
+    inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
+    template <typename... TYS>
+    inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
+    template <typename VALUE>
+    std::ostream& Write(std::ostream& out, VALUE&& value);
+
+    // Write() implementations
+    template <typename T>
+    std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
+        if (value.has_value()) {
+            return Write(out, value.value());
+        }
+        return out << "<undefined>";
+    }
+
+    template <typename T>
+    std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
+        out << "[";
+        bool first = true;
+        for (const auto& el : value) {
+            if (!first) {
+                out << ", ";
+            }
+            first = false;
+            Write(out, el);
+        }
+        return out << "]";
+    }
+
+    template <typename K, typename V>
+    std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
+        out << "{";
+        bool first = true;
+        for (auto& [key, value] : value) {
+            if (!first) {
+                out << ", ";
+            }
+            first = false;
+            Write(out, key);
+            out << ": ";
+            Write(out, value);
+        }
+        return out << "}";
+    }
+
+    template <typename... TYS>
+    std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
+        std::visit([&](auto&& v) { Write(out, v); }, value);
+        return out;
+    }
+
+    template <typename VALUE>
+    std::ostream& Write(std::ostream& out, VALUE&& value) {
+        return out << std::forward<VALUE>(value);
+    }
+
+    template <typename FIRST, typename... REST>
+    inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
+        Write(out, std::forward<FIRST>(first));
+        Write(out, std::forward<REST>(rest)...);
+        return out;
+    }
+
+    // Fatal() prints a message to stdout with the given file, line, function and optional message,
+    // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
+    // UNIMPLEMENTED() macro below.
+    template <typename... MSG_ARGS>
+    [[noreturn]] inline void Fatal(const char* reason,
+                                   const char* file,
+                                   int line,
+                                   const char* function,
+                                   MSG_ARGS&&... msg_args) {
+        std::stringstream msg;
+        msg << file << ":" << line << ": " << reason << ": " << function << "()";
+        if constexpr (sizeof...(msg_args) > 0) {
+            msg << " ";
+            Write(msg, std::forward<MSG_ARGS>(msg_args)...);
+        }
+        std::cout << msg.str() << std::endl;
+        abort();
+    }
+
+// LOG() prints the current file, line and function to stdout, followed by a
+// string representation of all the variadic arguments.
+#define LOG(...)                                                                                  \
+    ::wgpu::utils::Write(std::cout << __FILE__ << ":" << __LINE__ << " " << __FUNCTION__ << ": ", \
+                         ##__VA_ARGS__)                                                           \
+        << std::endl
+
+// UNIMPLEMENTED() prints 'UNIMPLEMENTED' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to stub code that has not yet been implemented.
+#define UNIMPLEMENTED(...) \
+    ::wgpu::utils::Fatal("UNIMPLEMENTED", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+// UNREACHABLE() prints 'UNREACHABLE' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to stub code that has not yet been implemented.
+#define UNREACHABLE(...) \
+    ::wgpu::utils::Fatal("UNREACHABLE", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+}  // namespace wgpu::utils
+
+#endif  // DAWNNODE_UTILS_DEBUG_H_
diff --git a/src/dawn/platform/BUILD.gn b/src/dawn/platform/BUILD.gn
new file mode 100644
index 0000000..2d1cb00
--- /dev/null
+++ b/src/dawn/platform/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_component.gni")
+
+dawn_component("platform") {
+  DEFINE_PREFIX = "DAWN_PLATFORM"
+
+  configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+  sources = [
+    "${dawn_root}/include/dawn/platform/DawnPlatform.h",
+    "${dawn_root}/include/dawn/platform/dawn_platform_export.h",
+    "DawnPlatform.cpp",
+    "WorkerThread.cpp",
+    "WorkerThread.h",
+    "tracing/EventTracer.cpp",
+    "tracing/EventTracer.h",
+    "tracing/TraceEvent.h",
+  ]
+
+  deps = [ "${dawn_root}/src/dawn/common" ]
+
+  public_deps = [
+    # DawnPlatform.h has #include <dawn/webgpu.h>
+    "${dawn_root}/include/dawn:headers",
+  ]
+}
diff --git a/src/dawn/platform/CMakeLists.txt b/src/dawn/platform/CMakeLists.txt
new file mode 100644
index 0000000..4a74b23
--- /dev/null
+++ b/src/dawn/platform/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_platform ${DAWN_DUMMY_FILE})
+
+target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_platform PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/platform/DawnPlatform.h"
+    "${DAWN_INCLUDE_DIR}/dawn/platform/dawn_platform_export.h"
+    "DawnPlatform.cpp"
+    "WorkerThread.cpp"
+    "WorkerThread.h"
+    "tracing/EventTracer.cpp"
+    "tracing/EventTracer.h"
+    "tracing/TraceEvent.h"
+)
+target_link_libraries(dawn_platform PUBLIC dawn_headers PRIVATE dawn_internal_config dawn_common)
diff --git a/src/dawn/platform/DawnPlatform.cpp b/src/dawn/platform/DawnPlatform.cpp
new file mode 100644
index 0000000..2706316
--- /dev/null
+++ b/src/dawn/platform/DawnPlatform.cpp
@@ -0,0 +1,63 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/WorkerThread.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::platform {
+
+    CachingInterface::CachingInterface() = default;
+
+    CachingInterface::~CachingInterface() = default;
+
+    Platform::Platform() = default;
+
+    Platform::~Platform() = default;
+
+    const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
+        static unsigned char disabled = 0;
+        return &disabled;
+    }
+
+    double Platform::MonotonicallyIncreasingTime() {
+        return 0;
+    }
+
+    uint64_t Platform::AddTraceEvent(char phase,
+                                     const unsigned char* categoryGroupEnabled,
+                                     const char* name,
+                                     uint64_t id,
+                                     double timestamp,
+                                     int numArgs,
+                                     const char** argNames,
+                                     const unsigned char* argTypes,
+                                     const uint64_t* argValues,
+                                     unsigned char flags) {
+        // AddTraceEvent cannot be called if events are disabled.
+        ASSERT(false);
+        return 0;
+    }
+
+    dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
+                                                                    size_t fingerprintSize) {
+        return nullptr;
+    }
+
+    std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
+        return std::make_unique<AsyncWorkerThreadPool>();
+    }
+
+}  // namespace dawn::platform
diff --git a/src/dawn/platform/WorkerThread.cpp b/src/dawn/platform/WorkerThread.cpp
new file mode 100644
index 0000000..8ecbc58
--- /dev/null
+++ b/src/dawn/platform/WorkerThread.cpp
@@ -0,0 +1,97 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/WorkerThread.h"
+
+#include <condition_variable>
+#include <functional>
+#include <thread>
+
+#include "dawn/common/Assert.h"
+
+namespace {
+
+    class AsyncWaitableEventImpl {
+      public:
+        AsyncWaitableEventImpl() : mIsComplete(false) {
+        }
+
+        void Wait() {
+            std::unique_lock<std::mutex> lock(mMutex);
+            mCondition.wait(lock, [this] { return mIsComplete; });
+        }
+
+        bool IsComplete() {
+            std::lock_guard<std::mutex> lock(mMutex);
+            return mIsComplete;
+        }
+
+        void MarkAsComplete() {
+            {
+                std::lock_guard<std::mutex> lock(mMutex);
+                mIsComplete = true;
+            }
+            mCondition.notify_all();
+        }
+
+      private:
+        std::mutex mMutex;
+        std::condition_variable mCondition;
+        bool mIsComplete;
+    };
+
+    class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
+      public:
+        explicit AsyncWaitableEvent()
+            : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {
+        }
+
+        void Wait() override {
+            mWaitableEventImpl->Wait();
+        }
+
+        bool IsComplete() override {
+            return mWaitableEventImpl->IsComplete();
+        }
+
+        std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
+            return mWaitableEventImpl;
+        }
+
+      private:
+        std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
+    };
+
+}  // anonymous namespace
+
+namespace dawn::platform {
+
+    std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
+        dawn::platform::PostWorkerTaskCallback callback,
+        void* userdata) {
+        std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
+
+        std::function<void()> doTask =
+            [callback, userdata, waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
+                callback(userdata);
+                waitableEventImpl->MarkAsComplete();
+            };
+
+        std::thread thread(doTask);
+        thread.detach();
+
+        return waitableEvent;
+    }
+
+}  // namespace dawn::platform
diff --git a/src/dawn/platform/WorkerThread.h b/src/dawn/platform/WorkerThread.h
new file mode 100644
index 0000000..9079689
--- /dev/null
+++ b/src/dawn/platform/WorkerThread.h
@@ -0,0 +1,32 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_WORKERTHREAD_H_
+#define COMMON_WORKERTHREAD_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::platform {
+
+    class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
+      public:
+        std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
+            dawn::platform::PostWorkerTaskCallback callback,
+            void* userdata) override;
+    };
+
+}  // namespace dawn::platform
+
+#endif
diff --git a/src/dawn/platform/tracing/EventTracer.cpp b/src/dawn/platform/tracing/EventTracer.cpp
new file mode 100644
index 0000000..7445d98
--- /dev/null
+++ b/src/dawn/platform/tracing/EventTracer.cpp
@@ -0,0 +1,58 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/tracing/EventTracer.h"
+#include "dawn/common/Assert.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::platform::tracing {
+
+    const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
+        static unsigned char disabled = 0;
+        if (platform == nullptr) {
+            return &disabled;
+        }
+
+        const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
+        if (categoryEnabledFlag != nullptr) {
+            return categoryEnabledFlag;
+        }
+
+        return &disabled;
+    }
+
+    TraceEventHandle AddTraceEvent(Platform* platform,
+                                   char phase,
+                                   const unsigned char* categoryGroupEnabled,
+                                   const char* name,
+                                   uint64_t id,
+                                   int numArgs,
+                                   const char** argNames,
+                                   const unsigned char* argTypes,
+                                   const uint64_t* argValues,
+                                   unsigned char flags) {
+        ASSERT(platform != nullptr);
+
+        double timestamp = platform->MonotonicallyIncreasingTime();
+        if (timestamp != 0) {
+            TraceEventHandle handle =
+                platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
+                                        argNames, argTypes, argValues, flags);
+            return handle;
+        }
+
+        return static_cast<TraceEventHandle>(0);
+    }
+
+}  // namespace dawn::platform::tracing
diff --git a/src/dawn/platform/tracing/EventTracer.h b/src/dawn/platform/tracing/EventTracer.h
new file mode 100644
index 0000000..0200ec5
--- /dev/null
+++ b/src/dawn/platform/tracing/EventTracer.h
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNPLATFORM_TRACING_EVENTTRACER_H_
+#define DAWNPLATFORM_TRACING_EVENTTRACER_H_
+
+#include "dawn/platform/dawn_platform_export.h"
+
+#include <cstdint>
+
+namespace dawn::platform {
+
+    class Platform;
+    enum class TraceCategory;
+
+    namespace tracing {
+
+        using TraceEventHandle = uint64_t;
+
+        DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(
+            Platform* platform,
+            TraceCategory category);
+
+        // TODO(enga): Simplify this API.
+        DAWN_PLATFORM_EXPORT TraceEventHandle
+        AddTraceEvent(Platform* platform,
+                      char phase,
+                      const unsigned char* categoryGroupEnabled,
+                      const char* name,
+                      uint64_t id,
+                      int numArgs,
+                      const char** argNames,
+                      const unsigned char* argTypes,
+                      const uint64_t* argValues,
+                      unsigned char flags);
+
+    }  // namespace tracing
+}  // namespace dawn::platform
+
+#endif  // DAWNPLATFORM_TRACING_EVENTTRACER_H_
diff --git a/src/dawn/platform/tracing/TraceEvent.h b/src/dawn/platform/tracing/TraceEvent.h
new file mode 100644
index 0000000..e120e08
--- /dev/null
+++ b/src/dawn/platform/tracing/TraceEvent.h
@@ -0,0 +1,991 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+//    Begin and end of function calls
+//    Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+//   TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent")
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+//   TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+//   doSomethingCostly()
+//   TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you need them
+// to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+//   void doSomethingCostly() {
+//     TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+//     ...
+//   }
+//
+// Additional parameters can be associated with an event:
+//   void doSomethingCostly2(int howMuch) {
+//     TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+//         "howMuch", howMuch);
+//     ...
+//   }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use ASYNC_BEGIN and
+// ASYNC_END:
+//   [single threaded sender code]
+//     static int send_count = 0;
+//     ++send_count;
+//     TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+//     Send(new MyMessage(send_count));
+//   [receive code]
+//     void OnMyMessage(send_count) {
+//       TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+//     }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process. Pointers can
+// be used for the ID parameter, and they will be mangled internally so that
+// the same pointer on two different processes will not match. For example:
+//   class MyTracedClass {
+//    public:
+//     MyTracedClass() {
+//       TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+//     }
+//     ~MyTracedClass() {
+//       TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+//     }
+//   }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+//   TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+//       "bytesPinned", g_myCounterValue[0],
+//       "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disembiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category, name, and arg_names. Thus, the following code will
+// cause problems:
+//     char* str = strdup("impprtantName");
+//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str);  // BAD!
+//     free(str);                   // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+//        The |arg_values|, when used, are always deep copied with the _COPY
+//        macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", std::string("string will be copied"));
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_enabled flag is checked. This is a unsigned char, and
+// not intended to be multithread safe. It optimizes access to addTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling addTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because addTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of aquiring a lock
+// and resolving the category.
+
+#ifndef DAWNPLATFORM_TRACING_TRACEEVENT_H_
+#define DAWNPLATFORM_TRACING_TRACEEVENT_H_
+
+#include <string>
+
+#include "dawn/platform/tracing/EventTracer.h"
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT0(platform, category, name) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0)
+#define TRACE_EVENT1(platform, category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val)
+#define TRACE_EVENT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val)         \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val, arg2_name, \
+                                    arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(platform, category, name)                            \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_INSTANT1(platform, category, name, arg1_name, arg1_val)       \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name,                \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(platform, category, name)                       \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_INSTANT1(platform, category, name, arg1_name, arg1_val)  \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, \
+                                  arg2_val)                                                 \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name,           \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(platform, category, name)                            \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_BEGIN1(platform, category, name, arg1_name, arg1_val)       \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name,                \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(platform, category, name)                       \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_BEGIN1(platform, category, name, arg1_name, arg1_val)  \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, \
+                                arg2_val)                                                 \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name,           \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_END0(platform, category, name)                            \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_END1(platform, category, name, arg1_name, arg1_val)       \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name,                \
+                             TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(platform, category, name) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(platform, category, name, arg1_name, arg1_val)  \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name,                     \
+                             TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER1(platform, category, name, value)                           \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+                             TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(platform, category, name, value)                      \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+                             TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER2(platform, category, name, value1_name, value1_val, value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name,                  \
+                             TRACE_EVENT_FLAG_NONE, 0, value1_name, static_cast<int>(value1_val),  \
+                             value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(platform, category, name, value1_name, value1_val, value2_name,       \
+                            value2_val)                                                           \
+    INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name,                 \
+                             TRACE_EVENT_FLAG_COPY, 0, value1_name, static_cast<int>(value1_val), \
+                             value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID1(platform, category, name, id, value)                                \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(platform, category, name, id, value)                           \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
+                          value2_val)                                                         \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                                         \
+        platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_NONE, 0,    \
+        value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
+                               value2_val)                                                         \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                                              \
+        platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_COPY, 0,         \
+        value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
+
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+//   events are considered to match if their category, name and id values all
+//   match. |id| must either be a pointer or an integer value up to 64 bits. If
+//   it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_BEGIN macros. When the operation completes, call ASYNC_END.
+// An async operation can span threads and processes, but all events in that
+// operation must use the same |name| and |id|. Each event can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(platform, category, name, id)                                    \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val)               \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val, arg2_name,    \
+                                 arg2_val)                                                        \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name,    \
+                                     arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(platform, category, name, id)                               \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val)          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val,          \
+                                      arg2_name, arg2_val)                                        \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name,    \
+                                     arg2_val)
+
+// Records a single ASYNC_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// ASYNC_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_ASYNC_STEP0(platform, category, name, id, step)                              \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, "step", step)
+#define TRACE_EVENT_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val)         \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_STEP0(platform, category, name, id, step)                         \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, "step", step)
+#define TRACE_EVENT_COPY_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val)    \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(platform, category, name, id)                                    \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val)               \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name,    \
+                               arg2_val)                                                        \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+                                     TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name,  \
+                                     arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(platform, category, name, id)                               \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val)          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+                                     TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
+                                    arg2_val)                                                     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id,   \
+                                     TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name,    \
+                                     arg2_val)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+//   considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+//   NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+//   be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(platform, category_group, name, id)          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,             \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,           \
+                                     arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
+                                          arg2_name, arg2_val)                                     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,             \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,           \
+                                     arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(platform, category_group, name, id)          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(platform, category_group, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,             \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,         \
+                                     arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(platform, category_group, name, id, arg1_name, arg1_val, \
+                                        arg2_name, arg2_val)                                     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,             \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,         \
+                                     arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with none, one or two associated argument. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(platform, category_group, name, id)          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(platform, category_group, name, id, arg1_name, \
+                                            arg1_val)                                      \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT,   \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,   \
+                                     arg1_name, arg1_val)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(platform, category_group, name, id, arg1_name, \
+                                            arg1_val, arg2_name, arg2_val)                 \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT,   \
+                                     category_group, name, id, TRACE_EVENT_FLAG_NONE, 0,   \
+                                     arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(platform, category_group, name, id,       \
+                                                        arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,            \
+                                     category_group, name, id,                                    \
+                                     TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0,       \
+                                     arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(platform, category_group, name, id,       \
+                                                      arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,            \
+                                     category_group, name, id,                                  \
+                                     TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0,     \
+                                     arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
+                                                         timestamp)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                      \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,          \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
+                                                       timestamp)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                    \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,          \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(platform, category_group, name, id, \
+                                                       timestamp, arg1_name, arg1_val)     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                    \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,          \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0, arg1_name, \
+        arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(platform, category_group, name, id, \
+                                                           timestamp)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                        \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id,          \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
+                                                              timestamp)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                           \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,               \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
+                                                            timestamp)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                                         \
+        platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,               \
+        TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+//   events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(platform, category_group, name, id)                                \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val)           \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val,           \
+                                arg2_name, arg2_val)                                               \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+                                     arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(platform, category_group, name, id)                           \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val)      \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val,      \
+                                     arg2_name, arg2_val)                                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+                                     arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(platform, category_group, name, id, step)                          \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val)     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name,       \
+                                     arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(platform, category_group, name, id, step)                     \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name,  \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name,        \
+                                     arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(platform, category_group, name, id)                                \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(platform, category_group, name, id)              \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_BIND_TO_ENCLOSING, 0)
+#define TRACE_EVENT_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val)           \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val, arg2_name,  \
+                              arg2_val)                                                            \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name,   \
+                                     id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+                                     arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(platform, category_group, name, id)                           \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val)      \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val,        \
+                                   arg2_name, arg2_val)                                            \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name,   \
+                                     id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+                                     arg2_val)
+
+// Creates a scope of a sampling state with the given category and name (both must
+// be constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+//
+// {  // The sampling state is set within this scope.
+//    TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+//    ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
+    TraceEvent::SamplingStateScope<bucket_number> traceEventSamplingScope(category "\0" name);
+
+// Returns a current sampling state of the given bucket.
+// The format of the returned string is "category\0name".
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+    TraceEvent::SamplingStateScope<bucket_number>::current()
+
+// Sets a current sampling state of the given bucket.
+// |category| and |name| have to be constant strings.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
+    TraceEvent::SamplingStateScope<bucket_number>::set(category "\0" name)
+
+// Sets a current sampling state of the given bucket.
+// |categoryAndName| doesn't need to be a constant string.
+// The format of the string is "category\0name".
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(bucket_number, categoryAndName) \
+    TraceEvent::SamplingStateScope<bucket_number>::set(categoryAndName)
+
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+    TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+    TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category name. The returned
+// pointer can be held permanently in a local static for example. If the
+// unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+//     TRACE_EVENT_API_GET_CATEGORY_ENABLED(const char* category_name)
+#define TRACE_EVENT_API_GET_CATEGORY_ENABLED dawn::platform::tracing::GetTraceCategoryEnabledFlag
+
+// Add a trace event to the platform tracing system.
+// void TRACE_EVENT_API_ADD_TRACE_EVENT(
+//                    char phase,
+//                    const unsigned char* category_enabled,
+//                    const char* name,
+//                    unsigned long long id,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT dawn::platform::tracing::AddTraceEvent
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collissions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNALTRACEEVENTUID(name_prefix) INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category)    \
+    static const unsigned char* INTERNALTRACEEVENTUID(catstatic) = 0; \
+    if (!INTERNALTRACEEVENTUID(catstatic))                            \
+        INTERNALTRACEEVENTUID(catstatic) = TRACE_EVENT_API_GET_CATEGORY_ENABLED(platform, category);
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(platformObj, phase, category, name, flags, ...)          \
+    do {                                                                                  \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj,                               \
+                                               ::dawn::platform::TraceCategory::category) \
+        if (*INTERNALTRACEEVENTUID(catstatic)) {                                          \
+            dawn::platform::TraceEvent::addTraceEvent(                                    \
+                platformObj, phase, INTERNALTRACEEVENTUID(catstatic), name,               \
+                dawn::platform::TraceEvent::noEventId, flags, __VA_ARGS__);               \
+        }                                                                                 \
+    } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(platformObj, category, name, ...)                          \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj, ::dawn::platform::TraceCategory::category) \
+    dawn::platform::TraceEvent::TraceEndOnScopeClose INTERNALTRACEEVENTUID(profileScope);          \
+    do {                                                                                           \
+        if (*INTERNALTRACEEVENTUID(catstatic)) {                                                   \
+            dawn::platform::TraceEvent::addTraceEvent(                                             \
+                platformObj, TRACE_EVENT_PHASE_BEGIN, INTERNALTRACEEVENTUID(catstatic), name,      \
+                dawn::platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, __VA_ARGS__);        \
+            INTERNALTRACEEVENTUID(profileScope)                                                    \
+                .initialize(platformObj, INTERNALTRACEEVENTUID(catstatic), name);                  \
+        }                                                                                          \
+    } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(platformObj, phase, category, name, id, flags, ...) \
+    do {                                                                                     \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj,                                  \
+                                               ::dawn::platform::TraceCategory::category)    \
+        if (*INTERNALTRACEEVENTUID(catstatic)) {                                             \
+            unsigned char traceEventFlags = flags | TRACE_EVENT_FLAG_HAS_ID;                 \
+            dawn::platform::TraceEvent::TraceID traceEventTraceID(id, &traceEventFlags);     \
+            dawn::platform::TraceEvent::addTraceEvent(                                       \
+                platformObj, phase, INTERNALTRACEEVENTUID(catstatic), name,                  \
+                traceEventTraceID.data(), traceEventFlags, __VA_ARGS__);                     \
+        }                                                                                    \
+    } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP ('T')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned char>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned char>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned char>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned char>(1 << 2))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+
+namespace dawn::platform::TraceEvent {
+
+    // Specify these values when the corresponding argument of addTraceEvent is not
+    // used.
+    const int zeroNumArgs = 0;
+    const unsigned long long noEventId = 0;
+
+    // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+    // are mangled with the Process ID so that they are unlikely to collide when the
+    // same pointer is used on different processes.
+    class TraceID {
+      public:
+        explicit TraceID(const void* id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(reinterpret_cast<uintptr_t>(id))) {
+            *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+        }
+        explicit TraceID(unsigned long long id, unsigned char* flags) : m_data(id) {
+            (void)flags;
+        }
+        explicit TraceID(unsigned long id, unsigned char* flags) : m_data(id) {
+            (void)flags;
+        }
+        explicit TraceID(unsigned int id, unsigned char* flags) : m_data(id) {
+            (void)flags;
+        }
+        explicit TraceID(unsigned short id, unsigned char* flags) : m_data(id) {
+            (void)flags;
+        }
+        explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) {
+            (void)flags;
+        }
+        explicit TraceID(long long id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(id)) {
+            (void)flags;
+        }
+        explicit TraceID(long id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(id)) {
+            (void)flags;
+        }
+        explicit TraceID(int id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(id)) {
+            (void)flags;
+        }
+        explicit TraceID(short id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(id)) {
+            (void)flags;
+        }
+        explicit TraceID(signed char id, unsigned char* flags)
+            : m_data(static_cast<unsigned long long>(id)) {
+            (void)flags;
+        }
+
+        unsigned long long data() const {
+            return m_data;
+        }
+
+      private:
+        unsigned long long m_data;
+    };
+
+    // Simple union to store various types as unsigned long long.
+    union TraceValueUnion {
+        bool m_bool;
+        unsigned long long m_uint;
+        long long m_int;
+        double m_double;
+        const void* m_pointer;
+        const char* m_string;
+    };
+
+    // Simple container for const char* that should be copied instead of retained.
+    class TraceStringWithCopy {
+      public:
+        explicit TraceStringWithCopy(const char* str) : m_str(str) {
+        }
+        operator const char*() const {
+            return m_str;
+        }
+
+      private:
+        const char* m_str;
+    };
+
+// Define setTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, value_type_id)            \
+    static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
+        TraceValueUnion typeValue;                                                            \
+        typeValue.union_member = arg;                                                         \
+        *type = value_type_id;                                                                \
+        *value = typeValue.m_uint;                                                            \
+    }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id)                      \
+    static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
+        *type = value_type_id;                                                                \
+        *value = static_cast<unsigned long long>(arg);                                        \
+    }
+
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+    INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
+    INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
+    INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
+    INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
+    INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&,
+                                     m_string,
+                                     TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+    static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
+        TraceValueUnion typeValue;
+        typeValue.m_string = arg.data();
+        *type = TRACE_VALUE_TYPE_COPY_STRING;
+        *value = typeValue.m_uint;
+    }
+
+    // These addTraceEvent template functions are defined here instead of in the
+    // macro, because the arg values could be temporary string objects. In order to
+    // store pointers to the internal c_str and pass through to the tracing API, the
+    // arg values must live throughout these procedures.
+
+    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+        dawn::platform::Platform* platform,
+        char phase,
+        const unsigned char* categoryEnabled,
+        const char* name,
+        unsigned long long id,
+        unsigned char flags,
+        int /*unused, helps avoid empty __VA_ARGS__*/) {
+        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
+                                               zeroNumArgs, 0, 0, 0, flags);
+    }
+
+    template <class ARG1_TYPE>
+    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+        dawn::platform::Platform* platform,
+        char phase,
+        const unsigned char* categoryEnabled,
+        const char* name,
+        unsigned long long id,
+        unsigned char flags,
+        int /*unused, helps avoid empty __VA_ARGS__*/,
+        const char* arg1Name,
+        const ARG1_TYPE& arg1Val) {
+        const int numArgs = 1;
+        unsigned char argTypes[1];
+        uint64_t argValues[1];
+        setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+                                               &arg1Name, argTypes, argValues, flags);
+    }
+
+    template <class ARG1_TYPE, class ARG2_TYPE>
+    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+        dawn::platform::Platform* platform,
+        char phase,
+        const unsigned char* categoryEnabled,
+        const char* name,
+        unsigned long long id,
+        unsigned char flags,
+        int /*unused, helps avoid empty __VA_ARGS__*/,
+        const char* arg1Name,
+        const ARG1_TYPE& arg1Val,
+        const char* arg2Name,
+        const ARG2_TYPE& arg2Val) {
+        const int numArgs = 2;
+        const char* argNames[2] = {arg1Name, arg2Name};
+        unsigned char argTypes[2];
+        uint64_t argValues[2];
+        setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+        setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
+        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+                                               argNames, argTypes, argValues, flags);
+    }
+
+    // Used by TRACE_EVENTx macro. Do not use directly.
+    class TraceEndOnScopeClose {
+      public:
+        // Note: members of m_data intentionally left uninitialized. See initialize.
+        TraceEndOnScopeClose() : m_pdata(0) {
+        }
+        ~TraceEndOnScopeClose() {
+            if (m_pdata)
+                addEventIfEnabled();
+        }
+
+        void initialize(dawn::platform::Platform* platform,
+                        const unsigned char* categoryEnabled,
+                        const char* name) {
+            m_data.platform = platform;
+            m_data.categoryEnabled = categoryEnabled;
+            m_data.name = name;
+            m_pdata = &m_data;
+        }
+
+      private:
+        // Add the end event if the category is still enabled.
+        void addEventIfEnabled() {
+            // Only called when m_pdata is non-null.
+            if (*m_pdata->categoryEnabled) {
+                TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
+                                                m_pdata->categoryEnabled, m_pdata->name, noEventId,
+                                                zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
+            }
+        }
+
+        // This Data struct workaround is to avoid initializing all the members
+        // in Data during construction of this object, since this object is always
+        // constructed, even when tracing is disabled. If the members of Data were
+        // members of this class instead, compiler warnings occur about potential
+        // uninitialized accesses.
+        struct Data {
+            dawn::platform::Platform* platform;
+            const unsigned char* categoryEnabled;
+            const char* name;
+        };
+        Data* m_pdata;
+        Data m_data;
+    };
+
+}  // namespace dawn::platform::TraceEvent
+
+#endif  // DAWNPLATFORM_TRACING_TRACEEVENT_H_
diff --git a/src/dawn/tests/BUILD.gn b/src/dawn/tests/BUILD.gn
new file mode 100644
index 0000000..27b0b7e
--- /dev/null
+++ b/src/dawn/tests/BUILD.gn
@@ -0,0 +1,653 @@
+# Copyright 2012 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//testing/test.gni")
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+group("tests") {
+  testonly = true
+  deps = [
+    ":dawn_end2end_tests",
+    ":dawn_perf_tests",
+    ":dawn_unittests",
+  ]
+}
+
+###############################################################################
+# Gtest Gmock - Handle building inside and outside of Chromium.
+###############################################################################
+
+# When building outside of Chromium we need to define our own targets for GTest
+# and GMock. However when compiling inside of Chromium we need to reuse the
+# existing targets, both because Chromium has a special harness for swarming
+# and because otherwise the "gn check" fails.
+
+if (!build_with_chromium) {
+  # When we aren't in Chromium we define out own targets based on the location
+  # of the googletest repo.
+  googletest_dir = dawn_googletest_dir
+
+  config("gtest_config") {
+    include_dirs = [
+      "${googletest_dir}/googletest",
+      "${googletest_dir}/googletest/include",
+    ]
+  }
+
+  static_library("gtest") {
+    testonly = true
+    sources = [ "${googletest_dir}/googletest/src/gtest-all.cc" ]
+    public_configs = [ ":gtest_config" ]
+  }
+
+  config("gmock_config") {
+    include_dirs = [
+      "${googletest_dir}/googlemock",
+      "${googletest_dir}/googlemock/include",
+      "${googletest_dir}/googletest/include",
+    ]
+  }
+
+  static_library("gmock") {
+    testonly = true
+    sources = [ "${googletest_dir}/googlemock/src/gmock-all.cc" ]
+    public_configs = [ ":gmock_config" ]
+  }
+
+  group("gmock_and_gtest") {
+    testonly = true
+    public_deps = [
+      ":gmock",
+      ":gtest",
+    ]
+  }
+} else {
+  # When we are in Chromium we reuse its targets, and also add some deps that
+  # are needed to launch the test in swarming mode.
+  group("gmock_and_gtest") {
+    testonly = true
+    public_deps = [
+      "//base",
+      "//base/test:test_support",
+      "//testing/gmock",
+      "//testing/gtest",
+    ]
+  }
+}
+
+###############################################################################
+# Wrapping of Chromium targets
+###############################################################################
+
+# These targets are separated because they are Chromium sources files that
+# can't use the dawn_internal config, otherwise Dawn's warning flags get
+# applied while compiling a bunch of Chromium's //base (via header inclusion)
+if (build_with_chromium) {
+  source_set("unittests_main") {
+    testonly = true
+    deps = [ ":gmock_and_gtest" ]
+    sources = [ "//gpu/dawn_unittests_main.cc" ]
+  }
+  source_set("end2end_tests_main") {
+    testonly = true
+    deps = [ ":gmock_and_gtest" ]
+    sources = [ "//gpu/dawn_end2end_tests_main.cc" ]
+  }
+  source_set("perf_tests_main") {
+    testonly = true
+    deps = [ ":gmock_and_gtest" ]
+    sources = [ "//gpu/dawn_perf_tests_main.cc" ]
+  }
+}
+
+###############################################################################
+# Dawn test template
+###############################################################################
+template("dawn_test") {
+  test(target_name) {
+    # Copy all variables except "configs", which has a default value
+    forward_variables_from(invoker, "*", [ "configs" ])
+    if (defined(invoker.configs)) {
+      configs += invoker.configs
+    }
+
+    configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+  }
+}
+
+###############################################################################
+# Dawn unittests
+###############################################################################
+
+dawn_json_generator("mock_webgpu_gen") {
+  target = "mock_api"
+  outputs = [
+    "src/dawn/mock_webgpu.h",
+    "src/dawn/mock_webgpu.cpp",
+  ]
+}
+
+# Source code for mocks used for unit testing are separated from the rest of
+# sources so that they aren't included in non-test builds.
+source_set("native_mocks_sources") {
+  testonly = true
+
+  deps = [
+    ":gmock_and_gtest",
+    "${dawn_root}/src/dawn/native:sources",
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+  ]
+
+  # Add internal dawn native config for internal unittests.
+  configs += [ "${dawn_root}/src/dawn/native:internal" ]
+
+  sources = [
+    "unittests/native/mocks/BindGroupLayoutMock.h",
+    "unittests/native/mocks/BindGroupMock.h",
+    "unittests/native/mocks/CommandBufferMock.h",
+    "unittests/native/mocks/ComputePipelineMock.h",
+    "unittests/native/mocks/DeviceMock.h",
+    "unittests/native/mocks/ExternalTextureMock.h",
+    "unittests/native/mocks/PipelineLayoutMock.h",
+    "unittests/native/mocks/QuerySetMock.h",
+    "unittests/native/mocks/RenderPipelineMock.h",
+    "unittests/native/mocks/SamplerMock.h",
+    "unittests/native/mocks/ShaderModuleMock.cpp",
+    "unittests/native/mocks/ShaderModuleMock.h",
+    "unittests/native/mocks/SwapChainMock.h",
+    "unittests/native/mocks/TextureMock.h",
+  ]
+}
+
+dawn_test("dawn_unittests") {
+  deps = [
+    ":gmock_and_gtest",
+    ":mock_webgpu_gen",
+    ":native_mocks_sources",
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native:sources",
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire",
+  ]
+
+  # Add internal dawn native config for internal unittests.
+  configs = [ "${dawn_root}/src/dawn/native:internal" ]
+
+  sources = get_target_outputs(":mock_webgpu_gen")
+  sources += [
+    "${dawn_root}/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp",
+    "${dawn_root}/src/dawn/wire/client/ClientMemoryTransferService_mock.h",
+    "${dawn_root}/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp",
+    "${dawn_root}/src/dawn/wire/server/ServerMemoryTransferService_mock.h",
+    "DawnNativeTest.cpp",
+    "DawnNativeTest.h",
+    "MockCallback.h",
+    "ToggleParser.cpp",
+    "ToggleParser.h",
+    "unittests/AsyncTaskTests.cpp",
+    "unittests/BitSetIteratorTests.cpp",
+    "unittests/BuddyAllocatorTests.cpp",
+    "unittests/BuddyMemoryAllocatorTests.cpp",
+    "unittests/ChainUtilsTests.cpp",
+    "unittests/CommandAllocatorTests.cpp",
+    "unittests/ConcurrentCacheTests.cpp",
+    "unittests/EnumClassBitmasksTests.cpp",
+    "unittests/EnumMaskIteratorTests.cpp",
+    "unittests/ErrorTests.cpp",
+    "unittests/FeatureTests.cpp",
+    "unittests/GPUInfoTests.cpp",
+    "unittests/GetProcAddressTests.cpp",
+    "unittests/ITypArrayTests.cpp",
+    "unittests/ITypBitsetTests.cpp",
+    "unittests/ITypSpanTests.cpp",
+    "unittests/ITypVectorTests.cpp",
+    "unittests/LimitsTests.cpp",
+    "unittests/LinkedListTests.cpp",
+    "unittests/MathTests.cpp",
+    "unittests/ObjectBaseTests.cpp",
+    "unittests/PerStageTests.cpp",
+    "unittests/PerThreadProcTests.cpp",
+    "unittests/PlacementAllocatedTests.cpp",
+    "unittests/RefBaseTests.cpp",
+    "unittests/RefCountedTests.cpp",
+    "unittests/ResultTests.cpp",
+    "unittests/RingBufferAllocatorTests.cpp",
+    "unittests/SerialMapTests.cpp",
+    "unittests/SerialQueueTests.cpp",
+    "unittests/SlabAllocatorTests.cpp",
+    "unittests/StackContainerTests.cpp",
+    "unittests/SubresourceStorageTests.cpp",
+    "unittests/SystemUtilsTests.cpp",
+    "unittests/ToBackendTests.cpp",
+    "unittests/TypedIntegerTests.cpp",
+    "unittests/VersionTests.cpp",
+    "unittests/native/CacheKeyTests.cpp",
+    "unittests/native/CommandBufferEncodingTests.cpp",
+    "unittests/native/CreatePipelineAsyncTaskTests.cpp",
+    "unittests/native/DestroyObjectTests.cpp",
+    "unittests/native/DeviceCreationTests.cpp",
+    "unittests/validation/BindGroupValidationTests.cpp",
+    "unittests/validation/BufferValidationTests.cpp",
+    "unittests/validation/CommandBufferValidationTests.cpp",
+    "unittests/validation/ComputeIndirectValidationTests.cpp",
+    "unittests/validation/ComputeValidationTests.cpp",
+    "unittests/validation/CopyCommandsValidationTests.cpp",
+    "unittests/validation/CopyTextureForBrowserTests.cpp",
+    "unittests/validation/DebugMarkerValidationTests.cpp",
+    "unittests/validation/DeviceValidationTests.cpp",
+    "unittests/validation/DrawIndirectValidationTests.cpp",
+    "unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp",
+    "unittests/validation/DynamicStateCommandValidationTests.cpp",
+    "unittests/validation/ErrorScopeValidationTests.cpp",
+    "unittests/validation/ExternalTextureTests.cpp",
+    "unittests/validation/GetBindGroupLayoutValidationTests.cpp",
+    "unittests/validation/IndexBufferValidationTests.cpp",
+    "unittests/validation/InternalUsageValidationTests.cpp",
+    "unittests/validation/LabelTests.cpp",
+    "unittests/validation/MinimumBufferSizeValidationTests.cpp",
+    "unittests/validation/MultipleDeviceTests.cpp",
+    "unittests/validation/OverridableConstantsValidationTests.cpp",
+    "unittests/validation/PipelineAndPassCompatibilityTests.cpp",
+    "unittests/validation/QueryValidationTests.cpp",
+    "unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp",
+    "unittests/validation/QueueSubmitValidationTests.cpp",
+    "unittests/validation/QueueWriteBufferValidationTests.cpp",
+    "unittests/validation/QueueWriteTextureValidationTests.cpp",
+    "unittests/validation/RenderBundleValidationTests.cpp",
+    "unittests/validation/RenderPassDescriptorValidationTests.cpp",
+    "unittests/validation/RenderPipelineValidationTests.cpp",
+    "unittests/validation/ResourceUsageTrackingTests.cpp",
+    "unittests/validation/SamplerValidationTests.cpp",
+    "unittests/validation/ShaderModuleValidationTests.cpp",
+    "unittests/validation/StorageTextureValidationTests.cpp",
+    "unittests/validation/TextureSubresourceTests.cpp",
+    "unittests/validation/TextureValidationTests.cpp",
+    "unittests/validation/TextureViewValidationTests.cpp",
+    "unittests/validation/ToggleValidationTests.cpp",
+    "unittests/validation/UnsafeAPIValidationTests.cpp",
+    "unittests/validation/ValidationTest.cpp",
+    "unittests/validation/ValidationTest.h",
+    "unittests/validation/VertexBufferValidationTests.cpp",
+    "unittests/validation/VertexStateValidationTests.cpp",
+    "unittests/validation/VideoViewsValidationTests.cpp",
+    "unittests/validation/WriteBufferTests.cpp",
+    "unittests/wire/WireAdapterTests.cpp",
+    "unittests/wire/WireArgumentTests.cpp",
+    "unittests/wire/WireBasicTests.cpp",
+    "unittests/wire/WireBufferMappingTests.cpp",
+    "unittests/wire/WireCreatePipelineAsyncTests.cpp",
+    "unittests/wire/WireDestroyObjectTests.cpp",
+    "unittests/wire/WireDisconnectTests.cpp",
+    "unittests/wire/WireErrorCallbackTests.cpp",
+    "unittests/wire/WireExtensionTests.cpp",
+    "unittests/wire/WireInjectDeviceTests.cpp",
+    "unittests/wire/WireInjectInstanceTests.cpp",
+    "unittests/wire/WireInjectSwapChainTests.cpp",
+    "unittests/wire/WireInjectTextureTests.cpp",
+    "unittests/wire/WireInstanceTests.cpp",
+    "unittests/wire/WireMemoryTransferServiceTests.cpp",
+    "unittests/wire/WireOptionalTests.cpp",
+    "unittests/wire/WireQueueTests.cpp",
+    "unittests/wire/WireShaderModuleTests.cpp",
+    "unittests/wire/WireTest.cpp",
+    "unittests/wire/WireTest.h",
+    "unittests/wire/WireWGPUDevicePropertiesTests.cpp",
+  ]
+
+  if (is_win) {
+    sources += [ "unittests/WindowsUtilsTests.cpp" ]
+  }
+
+  if (dawn_enable_d3d12) {
+    sources += [ "unittests/d3d12/CopySplitTests.cpp" ]
+  }
+
+  # When building inside Chromium, use their gtest main function because it is
+  # needed to run in swarming correctly.
+  if (build_with_chromium) {
+    deps += [ ":unittests_main" ]
+  } else {
+    sources += [ "UnittestsMain.cpp" ]
+  }
+}
+
+###############################################################################
+# Dawn end2end tests targets
+###############################################################################
+
+source_set("end2end_tests_sources") {
+  configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+  testonly = true
+
+  deps = [
+    ":gmock_and_gtest",
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+
+    # Statically linked because the end2end white_box tests use Dawn internals.
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire",
+  ]
+
+  sources = [
+    "DawnTest.h",
+    "MockCallback.h",
+    "ParamGenerator.h",
+    "ToggleParser.cpp",
+    "ToggleParser.h",
+    "end2end/AdapterDiscoveryTests.cpp",
+    "end2end/BasicTests.cpp",
+    "end2end/BindGroupTests.cpp",
+    "end2end/BufferTests.cpp",
+    "end2end/BufferZeroInitTests.cpp",
+    "end2end/ClipSpaceTests.cpp",
+    "end2end/ColorStateTests.cpp",
+    "end2end/CommandEncoderTests.cpp",
+    "end2end/CompressedTextureFormatTests.cpp",
+    "end2end/ComputeCopyStorageBufferTests.cpp",
+    "end2end/ComputeDispatchTests.cpp",
+    "end2end/ComputeLayoutMemoryBufferTests.cpp",
+    "end2end/ComputeSharedMemoryTests.cpp",
+    "end2end/ComputeStorageBufferBarrierTests.cpp",
+    "end2end/CopyTests.cpp",
+    "end2end/CopyTextureForBrowserTests.cpp",
+    "end2end/CreatePipelineAsyncTests.cpp",
+    "end2end/CullingTests.cpp",
+    "end2end/DebugMarkerTests.cpp",
+    "end2end/DeprecatedAPITests.cpp",
+    "end2end/DepthBiasTests.cpp",
+    "end2end/DepthStencilCopyTests.cpp",
+    "end2end/DepthStencilLoadOpTests.cpp",
+    "end2end/DepthStencilSamplingTests.cpp",
+    "end2end/DepthStencilStateTests.cpp",
+    "end2end/DestroyTests.cpp",
+    "end2end/DeviceInitializationTests.cpp",
+    "end2end/DeviceLostTests.cpp",
+    "end2end/DrawIndexedIndirectTests.cpp",
+    "end2end/DrawIndexedTests.cpp",
+    "end2end/DrawIndirectTests.cpp",
+    "end2end/DrawTests.cpp",
+    "end2end/DynamicBufferOffsetTests.cpp",
+    "end2end/EntryPointTests.cpp",
+    "end2end/ExternalTextureTests.cpp",
+    "end2end/FirstIndexOffsetTests.cpp",
+    "end2end/GpuMemorySynchronizationTests.cpp",
+    "end2end/IndexFormatTests.cpp",
+    "end2end/MaxLimitTests.cpp",
+    "end2end/MemoryAllocationStressTests.cpp",
+    "end2end/MultisampledRenderingTests.cpp",
+    "end2end/MultisampledSamplingTests.cpp",
+    "end2end/NonzeroBufferCreationTests.cpp",
+    "end2end/NonzeroTextureCreationTests.cpp",
+    "end2end/ObjectCachingTests.cpp",
+    "end2end/OpArrayLengthTests.cpp",
+    "end2end/PipelineLayoutTests.cpp",
+    "end2end/PrimitiveStateTests.cpp",
+    "end2end/PrimitiveTopologyTests.cpp",
+    "end2end/QueryTests.cpp",
+    "end2end/QueueTests.cpp",
+    "end2end/QueueTimelineTests.cpp",
+    "end2end/ReadOnlyDepthStencilAttachmentTests.cpp",
+    "end2end/RenderAttachmentTests.cpp",
+    "end2end/RenderBundleTests.cpp",
+    "end2end/RenderPassLoadOpTests.cpp",
+    "end2end/RenderPassTests.cpp",
+    "end2end/RequiredBufferSizeInCopyTests.cpp",
+    "end2end/SamplerFilterAnisotropicTests.cpp",
+    "end2end/SamplerTests.cpp",
+    "end2end/ScissorTests.cpp",
+    "end2end/ShaderFloat16Tests.cpp",
+    "end2end/ShaderTests.cpp",
+    "end2end/StorageTextureTests.cpp",
+    "end2end/SubresourceRenderAttachmentTests.cpp",
+    "end2end/Texture3DTests.cpp",
+    "end2end/TextureFormatTests.cpp",
+    "end2end/TextureSubresourceTests.cpp",
+    "end2end/TextureViewTests.cpp",
+    "end2end/TextureZeroInitTests.cpp",
+    "end2end/VertexFormatTests.cpp",
+    "end2end/VertexOnlyRenderPipelineTests.cpp",
+    "end2end/VertexStateTests.cpp",
+    "end2end/ViewportOrientationTests.cpp",
+    "end2end/ViewportTests.cpp",
+  ]
+
+  # Validation tests that need OS windows live in end2end tests.
+  sources += [
+    "unittests/validation/ValidationTest.cpp",
+    "unittests/validation/ValidationTest.h",
+  ]
+
+  libs = []
+
+  if (dawn_enable_d3d12) {
+    sources += [
+      "end2end/D3D12CachingTests.cpp",
+      "end2end/D3D12ResourceWrappingTests.cpp",
+      "end2end/VideoViewsTests_win.cpp",
+    ]
+    libs += [
+      "d3d11.lib",
+      "dxgi.lib",
+    ]
+  }
+
+  if (dawn_enable_metal) {
+    sources += [
+      "end2end/IOSurfaceWrappingTests.cpp",
+      "end2end/VideoViewsTests_mac.cpp",
+    ]
+    frameworks = [ "IOSurface.framework" ]
+  }
+
+  if (dawn_enable_opengl) {
+    assert(dawn_supports_glfw_for_windowing)
+  }
+
+  if (dawn_supports_glfw_for_windowing) {
+    sources += [
+      "end2end/SwapChainTests.cpp",
+      "end2end/SwapChainValidationTests.cpp",
+      "end2end/WindowSurfaceTests.cpp",
+    ]
+    deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+  }
+
+  if (dawn_enable_d3d12 || (dawn_enable_vulkan && is_chromeos) ||
+      dawn_enable_metal) {
+    sources += [
+      "end2end/VideoViewsTests.cpp",
+      "end2end/VideoViewsTests.h",
+    ]
+  }
+
+  if (dawn_enable_vulkan && is_chromeos) {
+    sources += [ "end2end/VideoViewsTests_gbm.cpp" ]
+  }
+}
+
+source_set("white_box_tests_sources") {
+  configs += [ "${dawn_root}/src/dawn/native:internal" ]
+  testonly = true
+
+  deps = [
+    ":gmock_and_gtest",
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native:sources",
+
+    # Statically linked because the end2end white_box tests use Dawn internals.
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire",
+  ]
+
+  sources = [
+    "DawnTest.h",
+    "ParamGenerator.h",
+    "ToggleParser.h",
+  ]
+
+  if (dawn_enable_vulkan) {
+    deps += [ "${dawn_vulkan_headers_dir}:vulkan_headers" ]
+
+    if (is_chromeos) {
+      sources += [
+        "white_box/VulkanImageWrappingTests.cpp",
+        "white_box/VulkanImageWrappingTests.h",
+        "white_box/VulkanImageWrappingTests_DmaBuf.cpp",
+      ]
+    } else if (is_linux) {
+      sources += [
+        "white_box/VulkanImageWrappingTests.cpp",
+        "white_box/VulkanImageWrappingTests.h",
+        "white_box/VulkanImageWrappingTests_OpaqueFD.cpp",
+      ]
+    }
+
+    if (dawn_enable_error_injection) {
+      sources += [ "white_box/VulkanErrorInjectorTests.cpp" ]
+    }
+  }
+
+  sources += [
+    "white_box/BufferAllocatedSizeTests.cpp",
+    "white_box/InternalResourceUsageTests.cpp",
+    "white_box/InternalStorageBufferBindingTests.cpp",
+    "white_box/QueryInternalShaderTests.cpp",
+  ]
+
+  if (dawn_enable_d3d12) {
+    sources += [
+      "white_box/D3D12DescriptorHeapTests.cpp",
+      "white_box/D3D12GPUTimestampCalibrationTests.cpp",
+      "white_box/D3D12ResidencyTests.cpp",
+      "white_box/D3D12ResourceHeapTests.cpp",
+    ]
+  }
+
+  if (dawn_enable_metal) {
+    sources += [ "white_box/MetalAutoreleasePoolTests.mm" ]
+  }
+
+  if (dawn_enable_opengl) {
+    deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+  }
+
+  if (dawn_enable_opengles) {
+    sources += [ "white_box/EGLImageWrappingTests.cpp" ]
+    deps += [ "//third_party/angle:libEGL" ]
+  }
+
+  libs = []
+}
+
+dawn_test("dawn_end2end_tests") {
+  deps = [
+    ":end2end_tests_sources",
+    ":gmock_and_gtest",
+    ":white_box_tests_sources",
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native:static",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire",
+  ]
+
+  sources = [
+    "DawnTest.cpp",
+    "DawnTest.h",
+  ]
+
+  libs = []
+
+  # When building inside Chromium, use their gtest main function because it is
+  # needed to run in swarming correctly.
+  if (build_with_chromium) {
+    deps += [ ":end2end_tests_main" ]
+  } else {
+    sources += [ "End2EndTestsMain.cpp" ]
+  }
+
+  if (dawn_enable_opengl) {
+    deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+  }
+
+  if (is_chromeos) {
+    libs += [ "gbm" ]
+  }
+}
+
+###############################################################################
+# Dawn perf tests
+###############################################################################
+
+dawn_test("dawn_perf_tests") {
+  deps = [
+    ":gmock_and_gtest",
+    "${dawn_root}/src/dawn:cpp",
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native",
+    "${dawn_root}/src/dawn/platform",
+    "${dawn_root}/src/dawn/utils",
+    "${dawn_root}/src/dawn/wire",
+  ]
+
+  sources = [
+    "DawnTest.cpp",
+    "DawnTest.h",
+    "ParamGenerator.h",
+    "ToggleParser.cpp",
+    "ToggleParser.h",
+    "perf_tests/BufferUploadPerf.cpp",
+    "perf_tests/DawnPerfTest.cpp",
+    "perf_tests/DawnPerfTest.h",
+    "perf_tests/DawnPerfTestPlatform.cpp",
+    "perf_tests/DawnPerfTestPlatform.h",
+    "perf_tests/DrawCallPerf.cpp",
+    "perf_tests/ShaderRobustnessPerf.cpp",
+    "perf_tests/SubresourceTrackingPerf.cpp",
+  ]
+
+  libs = []
+
+  # When building inside Chromium, use their gtest main function and the
+  # other perf test scaffolding in order to run in swarming correctly.
+  if (build_with_chromium) {
+    deps += [ ":perf_tests_main" ]
+    data_deps = [ "//testing:run_perf_test" ]
+  } else {
+    sources += [ "PerfTestsMain.cpp" ]
+  }
+
+  if (dawn_enable_metal) {
+    frameworks = [ "IOSurface.framework" ]
+  }
+
+  if (dawn_enable_opengl) {
+    deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+  }
+}
diff --git a/src/dawn/tests/DawnNativeTest.cpp b/src/dawn/tests/DawnNativeTest.cpp
new file mode 100644
index 0000000..fcf74f7
--- /dev/null
+++ b/src/dawn/tests/DawnNativeTest.cpp
@@ -0,0 +1,88 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnNativeTest.h"
+
+#include "absl/strings/str_cat.h"
+#include "dawn/common/Assert.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/ErrorData.h"
+
+namespace dawn::native {
+
+    void AddFatalDawnFailure(const char* expression, const ErrorData* error) {
+        const auto& backtrace = error->GetBacktrace();
+        GTEST_MESSAGE_AT_(
+            backtrace.at(0).file, backtrace.at(0).line,
+            absl::StrCat(expression, " returned error: ", error->GetMessage()).c_str(),
+            ::testing::TestPartResult::kFatalFailure);
+    }
+
+}  // namespace dawn::native
+
+DawnNativeTest::DawnNativeTest() {
+    dawnProcSetProcs(&dawn::native::GetProcs());
+}
+
+DawnNativeTest::~DawnNativeTest() {
+    device = wgpu::Device();
+    dawnProcSetProcs(nullptr);
+}
+
+void DawnNativeTest::SetUp() {
+    instance = std::make_unique<dawn::native::Instance>();
+    instance->DiscoverDefaultAdapters();
+
+    std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+
+    // DawnNative unittests run against the null backend, find the corresponding adapter
+    bool foundNullAdapter = false;
+    for (auto& currentAdapter : adapters) {
+        wgpu::AdapterProperties adapterProperties;
+        currentAdapter.GetProperties(&adapterProperties);
+
+        if (adapterProperties.backendType == wgpu::BackendType::Null) {
+            adapter = currentAdapter;
+            foundNullAdapter = true;
+            break;
+        }
+    }
+
+    ASSERT(foundNullAdapter);
+
+    device = wgpu::Device(CreateTestDevice());
+    device.SetUncapturedErrorCallback(DawnNativeTest::OnDeviceError, nullptr);
+}
+
+void DawnNativeTest::TearDown() {
+}
+
+WGPUDevice DawnNativeTest::CreateTestDevice() {
+    // Disabled disallowing unsafe APIs so we can test them.
+    wgpu::DeviceDescriptor deviceDescriptor = {};
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
+    deviceDescriptor.nextInChain = &togglesDesc;
+
+    const char* toggle = "disallow_unsafe_apis";
+    togglesDesc.forceDisabledToggles = &toggle;
+    togglesDesc.forceDisabledTogglesCount = 1;
+
+    return adapter.CreateDevice(&deviceDescriptor);
+}
+
+// static
+void DawnNativeTest::OnDeviceError(WGPUErrorType type, const char* message, void* userdata) {
+    ASSERT(type != WGPUErrorType_NoError);
+    FAIL() << "Unexpected error: " << message;
+}
diff --git a/src/dawn/tests/DawnNativeTest.h b/src/dawn/tests/DawnNativeTest.h
new file mode 100644
index 0000000..9e8df19
--- /dev/null
+++ b/src/dawn/tests/DawnNativeTest.h
@@ -0,0 +1,53 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_DAWNNATIVETEST_H_
+#define TESTS_DAWNNATIVETEST_H_
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/webgpu_cpp.h"
+
+namespace dawn::native {
+
+    // This is similar to DAWN_TRY_ASSIGN but produces a fatal GTest error if EXPR is an error.
+#define DAWN_ASSERT_AND_ASSIGN(VAR, EXPR) \
+    DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {}, AddFatalDawnFailure(#EXPR, error.get()))
+
+    void AddFatalDawnFailure(const char* expression, const ErrorData* error);
+
+}  // namespace dawn::native
+
+class DawnNativeTest : public ::testing::Test {
+  public:
+    DawnNativeTest();
+    ~DawnNativeTest() override;
+
+    void SetUp() override;
+    void TearDown() override;
+
+    virtual WGPUDevice CreateTestDevice();
+
+  protected:
+    std::unique_ptr<dawn::native::Instance> instance;
+    dawn::native::Adapter adapter;
+    wgpu::Device device;
+
+  private:
+    static void OnDeviceError(WGPUErrorType type, const char* message, void* userdata);
+};
+
+#endif  // TESTS_DAWNNATIVETEST_H_
diff --git a/src/dawn/tests/DawnTest.cpp b/src/dawn/tests/DawnTest.cpp
new file mode 100644
index 0000000..30a4503
--- /dev/null
+++ b/src/dawn/tests/DawnTest.cpp
@@ -0,0 +1,1729 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/PlatformDebugLogger.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+#include "dawn/utils/WireHelper.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <fstream>
+#include <iomanip>
+#include <regex>
+#include <sstream>
+#include <unordered_map>
+#include <unordered_set>
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+#    include "GLFW/glfw3.h"
+#    include "dawn/native/OpenGLBackend.h"
+#endif  // DAWN_ENABLE_BACKEND_OPENGL
+
+namespace {
+
+    std::string ParamName(wgpu::BackendType type) {
+        switch (type) {
+            case wgpu::BackendType::D3D12:
+                return "D3D12";
+            case wgpu::BackendType::Metal:
+                return "Metal";
+            case wgpu::BackendType::Null:
+                return "Null";
+            case wgpu::BackendType::OpenGL:
+                return "OpenGL";
+            case wgpu::BackendType::OpenGLES:
+                return "OpenGLES";
+            case wgpu::BackendType::Vulkan:
+                return "Vulkan";
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    const char* AdapterTypeName(wgpu::AdapterType type) {
+        switch (type) {
+            case wgpu::AdapterType::DiscreteGPU:
+                return "Discrete GPU";
+            case wgpu::AdapterType::IntegratedGPU:
+                return "Integrated GPU";
+            case wgpu::AdapterType::CPU:
+                return "CPU";
+            case wgpu::AdapterType::Unknown:
+                return "Unknown";
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    struct MapReadUserdata {
+        DawnTestBase* test;
+        size_t slot;
+    };
+
+    DawnTestEnvironment* gTestEnv = nullptr;
+
+    template <typename T>
+    void printBuffer(testing::AssertionResult& result, const T* buffer, const size_t count) {
+        static constexpr unsigned int kBytes = sizeof(T);
+
+        for (size_t index = 0; index < count; ++index) {
+            auto byteView = reinterpret_cast<const uint8_t*>(buffer + index);
+            for (unsigned int b = 0; b < kBytes; ++b) {
+                char buf[4];
+                sprintf(buf, "%02X ", byteView[b]);
+                result << buf;
+            }
+        }
+        result << std::endl;
+    }
+
+}  // anonymous namespace
+
+const RGBA8 RGBA8::kZero = RGBA8(0, 0, 0, 0);
+const RGBA8 RGBA8::kBlack = RGBA8(0, 0, 0, 255);
+const RGBA8 RGBA8::kRed = RGBA8(255, 0, 0, 255);
+const RGBA8 RGBA8::kGreen = RGBA8(0, 255, 0, 255);
+const RGBA8 RGBA8::kBlue = RGBA8(0, 0, 255, 255);
+const RGBA8 RGBA8::kYellow = RGBA8(255, 255, 0, 255);
+const RGBA8 RGBA8::kWhite = RGBA8(255, 255, 255, 255);
+
+BackendTestConfig::BackendTestConfig(wgpu::BackendType backendType,
+                                     std::initializer_list<const char*> forceEnabledWorkarounds,
+                                     std::initializer_list<const char*> forceDisabledWorkarounds)
+    : backendType(backendType),
+      forceEnabledWorkarounds(forceEnabledWorkarounds),
+      forceDisabledWorkarounds(forceDisabledWorkarounds) {
+}
+
+BackendTestConfig D3D12Backend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                               std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::D3D12, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+BackendTestConfig MetalBackend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                               std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::Metal, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+BackendTestConfig NullBackend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                              std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::Null, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+BackendTestConfig OpenGLBackend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                                std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::OpenGL, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+BackendTestConfig OpenGLESBackend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                                  std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::OpenGLES, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+BackendTestConfig VulkanBackend(std::initializer_list<const char*> forceEnabledWorkarounds,
+                                std::initializer_list<const char*> forceDisabledWorkarounds) {
+    return BackendTestConfig(wgpu::BackendType::Vulkan, forceEnabledWorkarounds,
+                             forceDisabledWorkarounds);
+}
+
+TestAdapterProperties::TestAdapterProperties(const wgpu::AdapterProperties& properties,
+                                             bool selected)
+    : wgpu::AdapterProperties(properties), adapterName(properties.name), selected(selected) {
+}
+
+AdapterTestParam::AdapterTestParam(const BackendTestConfig& config,
+                                   const TestAdapterProperties& adapterProperties)
+    : adapterProperties(adapterProperties),
+      forceEnabledWorkarounds(config.forceEnabledWorkarounds),
+      forceDisabledWorkarounds(config.forceDisabledWorkarounds) {
+}
+
+std::ostream& operator<<(std::ostream& os, const AdapterTestParam& param) {
+    os << ParamName(param.adapterProperties.backendType) << " "
+       << param.adapterProperties.adapterName;
+
+    // In a Windows Remote Desktop session there are two adapters named "Microsoft Basic Render
+    // Driver" with different adapter types. We must differentiate them to avoid any tests using the
+    // same name.
+    if (param.adapterProperties.deviceID == 0x008C) {
+        std::string adapterType = AdapterTypeName(param.adapterProperties.adapterType);
+        os << " " << adapterType;
+    }
+
+    for (const char* forceEnabledWorkaround : param.forceEnabledWorkarounds) {
+        os << "; e:" << forceEnabledWorkaround;
+    }
+    for (const char* forceDisabledWorkaround : param.forceDisabledWorkarounds) {
+        os << "; d:" << forceDisabledWorkaround;
+    }
+    return os;
+}
+
+DawnTestBase::PrintToStringParamName::PrintToStringParamName(const char* test) : mTest(test) {
+}
+
+std::string DawnTestBase::PrintToStringParamName::SanitizeParamName(std::string paramName,
+                                                                    size_t index) const {
+    // Sanitize the adapter name for GoogleTest
+    std::string sanitizedName = std::regex_replace(paramName, std::regex("[^a-zA-Z0-9]+"), "_");
+
+    // Strip trailing underscores, if any.
+    while (sanitizedName.back() == '_') {
+        sanitizedName.resize(sanitizedName.length() - 1);
+    }
+
+    // We don't know the the test name at this point, but the format usually looks like
+    // this.
+    std::string prefix = mTest + ".TheTestNameUsuallyGoesHere/";
+    std::string testFormat = prefix + sanitizedName;
+    if (testFormat.length() > 220) {
+        // The bots don't support test names longer than 256. Shorten the name and append a unique
+        // index if we're close. The failure log will still print the full param name.
+        std::string suffix = std::string("__") + std::to_string(index);
+        size_t targetLength = sanitizedName.length();
+        targetLength -= testFormat.length() - 220;
+        targetLength -= suffix.length();
+        sanitizedName.resize(targetLength);
+        sanitizedName = sanitizedName + suffix;
+    }
+    return sanitizedName;
+}
+
+// Implementation of DawnTestEnvironment
+
+void InitDawnEnd2EndTestEnvironment(int argc, char** argv) {
+    gTestEnv = new DawnTestEnvironment(argc, argv);
+    testing::AddGlobalTestEnvironment(gTestEnv);
+}
+
+// static
+void DawnTestEnvironment::SetEnvironment(DawnTestEnvironment* env) {
+    gTestEnv = env;
+}
+
+DawnTestEnvironment::DawnTestEnvironment(int argc, char** argv) {
+    ParseArgs(argc, argv);
+
+    if (mBackendValidationLevel != dawn::native::BackendValidationLevel::Disabled) {
+        mPlatformDebugLogger =
+            std::unique_ptr<utils::PlatformDebugLogger>(utils::CreatePlatformDebugLogger());
+    }
+
+    // Create a temporary instance to select available and preferred adapters. This is done before
+    // test instantiation so GetAvailableAdapterTestParamsForBackends can generate test
+    // parameterizations all selected adapters. We drop the instance at the end of this function
+    // because the Vulkan validation layers use static global mutexes which behave badly when
+    // Chromium's test launcher forks the test process. The instance will be recreated on test
+    // environment setup.
+    std::unique_ptr<dawn::native::Instance> instance = CreateInstanceAndDiscoverAdapters();
+    ASSERT(instance);
+
+    SelectPreferredAdapterProperties(instance.get());
+    PrintTestConfigurationAndAdapterInfo(instance.get());
+}
+
+DawnTestEnvironment::~DawnTestEnvironment() = default;
+
+void DawnTestEnvironment::ParseArgs(int argc, char** argv) {
+    size_t argLen = 0;  // Set when parsing --arg=X arguments
+    for (int i = 1; i < argc; ++i) {
+        if (strcmp("-w", argv[i]) == 0 || strcmp("--use-wire", argv[i]) == 0) {
+            mUseWire = true;
+            continue;
+        }
+
+        if (strcmp("--run-suppressed-tests", argv[i]) == 0) {
+            mRunSuppressedTests = true;
+            continue;
+        }
+
+        constexpr const char kEnableBackendValidationSwitch[] = "--enable-backend-validation";
+        argLen = sizeof(kEnableBackendValidationSwitch) - 1;
+        if (strncmp(argv[i], kEnableBackendValidationSwitch, argLen) == 0) {
+            const char* level = argv[i] + argLen;
+            if (level[0] != '\0') {
+                if (strcmp(level, "=full") == 0) {
+                    mBackendValidationLevel = dawn::native::BackendValidationLevel::Full;
+                } else if (strcmp(level, "=partial") == 0) {
+                    mBackendValidationLevel = dawn::native::BackendValidationLevel::Partial;
+                } else if (strcmp(level, "=disabled") == 0) {
+                    mBackendValidationLevel = dawn::native::BackendValidationLevel::Disabled;
+                } else {
+                    dawn::ErrorLog() << "Invalid backend validation level" << level;
+                    UNREACHABLE();
+                }
+            } else {
+                mBackendValidationLevel = dawn::native::BackendValidationLevel::Partial;
+            }
+            continue;
+        }
+
+        if (strcmp("-c", argv[i]) == 0 || strcmp("--begin-capture-on-startup", argv[i]) == 0) {
+            mBeginCaptureOnStartup = true;
+            continue;
+        }
+
+        if (mToggleParser.ParseEnabledToggles(argv[i])) {
+            continue;
+        }
+
+        if (mToggleParser.ParseDisabledToggles(argv[i])) {
+            continue;
+        }
+
+        constexpr const char kVendorIdFilterArg[] = "--adapter-vendor-id=";
+        argLen = sizeof(kVendorIdFilterArg) - 1;
+        if (strncmp(argv[i], kVendorIdFilterArg, argLen) == 0) {
+            const char* vendorIdFilter = argv[i] + argLen;
+            if (vendorIdFilter[0] != '\0') {
+                mVendorIdFilter = strtoul(vendorIdFilter, nullptr, 16);
+                // Set filter flag if vendor id is non-zero.
+                mHasVendorIdFilter = mVendorIdFilter != 0;
+            }
+            continue;
+        }
+
+        constexpr const char kExclusiveDeviceTypePreferenceArg[] =
+            "--exclusive-device-type-preference=";
+        argLen = sizeof(kExclusiveDeviceTypePreferenceArg) - 1;
+        if (strncmp(argv[i], kExclusiveDeviceTypePreferenceArg, argLen) == 0) {
+            const char* preference = argv[i] + argLen;
+            if (preference[0] != '\0') {
+                std::istringstream ss(preference);
+                std::string type;
+                while (std::getline(ss, type, ',')) {
+                    if (strcmp(type.c_str(), "discrete") == 0) {
+                        mDevicePreferences.push_back(wgpu::AdapterType::DiscreteGPU);
+                    } else if (strcmp(type.c_str(), "integrated") == 0) {
+                        mDevicePreferences.push_back(wgpu::AdapterType::IntegratedGPU);
+                    } else if (strcmp(type.c_str(), "cpu") == 0) {
+                        mDevicePreferences.push_back(wgpu::AdapterType::CPU);
+                    } else {
+                        dawn::ErrorLog() << "Invalid device type preference: " << type;
+                        UNREACHABLE();
+                    }
+                }
+            }
+            continue;
+        }
+
+        constexpr const char kWireTraceDirArg[] = "--wire-trace-dir=";
+        argLen = sizeof(kWireTraceDirArg) - 1;
+        if (strncmp(argv[i], kWireTraceDirArg, argLen) == 0) {
+            mWireTraceDir = argv[i] + argLen;
+            continue;
+        }
+
+        constexpr const char kBackendArg[] = "--backend=";
+        argLen = sizeof(kBackendArg) - 1;
+        if (strncmp(argv[i], kBackendArg, argLen) == 0) {
+            const char* param = argv[i] + argLen;
+            if (strcmp("d3d12", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::D3D12;
+            } else if (strcmp("metal", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::Metal;
+            } else if (strcmp("null", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::Null;
+            } else if (strcmp("opengl", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::OpenGL;
+            } else if (strcmp("opengles", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::OpenGLES;
+            } else if (strcmp("vulkan", param) == 0) {
+                mBackendTypeFilter = wgpu::BackendType::Vulkan;
+            } else {
+                dawn::ErrorLog()
+                    << "Invalid backend \"" << param
+                    << "\". Valid backends are: d3d12, metal, null, opengl, opengles, vulkan.";
+                UNREACHABLE();
+            }
+            mHasBackendTypeFilter = true;
+            continue;
+        }
+        if (strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+            dawn::InfoLog()
+                << "\n\nUsage: " << argv[0]
+                << " [GTEST_FLAGS...] [-w] [-c]\n"
+                   "    [--enable-toggles=toggles] [--disable-toggles=toggles]\n"
+                   "    [--backend=x]\n"
+                   "    [--adapter-vendor-id=x] "
+                   "[--enable-backend-validation[=full,partial,disabled]]\n"
+                   "    [--exclusive-device-type-preference=integrated,cpu,discrete]\n\n"
+                   "  -w, --use-wire: Run the tests through the wire (defaults to no wire)\n"
+                   "  -c, --begin-capture-on-startup: Begin debug capture on startup "
+                   "(defaults to no capture)\n"
+                   "  --enable-backend-validation: Enables backend validation. Defaults to \n"
+                   "    'partial' to enable only minimum backend validation. Set to 'full' to\n"
+                   "    enable all available backend validation with less performance overhead.\n"
+                   "    Set to 'disabled' to run with no validation (same as no flag).\n"
+                   "  --enable-toggles: Comma-delimited list of Dawn toggles to enable.\n"
+                   "    ex.) skip_validation,disable_robustness,turn_off_vsync\n"
+                   "  --disable-toggles: Comma-delimited list of Dawn toggles to disable\n"
+                   "  --adapter-vendor-id: Select adapter by vendor id to run end2end tests"
+                   "on multi-GPU systems \n"
+                   "  --backend: Select adapter by backend type. Valid backends are: d3d12, metal, "
+                   "null, opengl, opengles, vulkan\n"
+                   "  --exclusive-device-type-preference: Comma-delimited list of preferred device "
+                   "types. For each backend, tests will run only on adapters that match the first "
+                   "available device type\n"
+                   "  --run-suppressed-tests: Run all the tests that will be skipped by the macro "
+                   "DAWN_SUPPRESS_TEST_IF()\n";
+            continue;
+        }
+
+        // Skip over args that look like they're for Googletest.
+        constexpr const char kGtestArgPrefix[] = "--gtest_";
+        if (strncmp(kGtestArgPrefix, argv[i], sizeof(kGtestArgPrefix) - 1) == 0) {
+            continue;
+        }
+
+        dawn::WarningLog() << " Unused argument: " << argv[i];
+    }
+}
+
+std::unique_ptr<dawn::native::Instance> DawnTestEnvironment::CreateInstanceAndDiscoverAdapters() {
+    auto instance = std::make_unique<dawn::native::Instance>();
+    instance->EnableBeginCaptureOnStartup(mBeginCaptureOnStartup);
+    instance->SetBackendValidationLevel(mBackendValidationLevel);
+    instance->DiscoverDefaultAdapters();
+
+#ifdef DAWN_ENABLE_BACKEND_DESKTOP_GL
+    if (!glfwInit()) {
+        return instance;
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    mOpenGLWindow = glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
+
+    glfwMakeContextCurrent(mOpenGLWindow);
+    dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
+    adapterOptions.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance->DiscoverAdapters(&adapterOptions);
+#endif  // DAWN_ENABLE_BACKEND_DESKTOP_GL
+
+#ifdef DAWN_ENABLE_BACKEND_OPENGLES
+
+    ScopedEnvironmentVar angleDefaultPlatform;
+    if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
+        angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
+    }
+
+    if (!glfwInit()) {
+        return instance;
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+    glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+    glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    mOpenGLESWindow = glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
+
+    glfwMakeContextCurrent(mOpenGLESWindow);
+    dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptionsES;
+    adapterOptionsES.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance->DiscoverAdapters(&adapterOptionsES);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+#endif  // DAWN_ENABLE_BACKEND_OPENGLES
+
+    return instance;
+}
+
+GLFWwindow* DawnTestEnvironment::GetOpenGLWindow() const {
+    return mOpenGLWindow;
+}
+
+GLFWwindow* DawnTestEnvironment::GetOpenGLESWindow() const {
+    return mOpenGLESWindow;
+}
+
+void DawnTestEnvironment::SelectPreferredAdapterProperties(const dawn::native::Instance* instance) {
+    // Get the first available preferred device type.
+    wgpu::AdapterType preferredDeviceType = static_cast<wgpu::AdapterType>(-1);
+    bool hasDevicePreference = false;
+    for (wgpu::AdapterType devicePreference : mDevicePreferences) {
+        for (const dawn::native::Adapter& adapter : instance->GetAdapters()) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            if (properties.adapterType == devicePreference) {
+                preferredDeviceType = devicePreference;
+                hasDevicePreference = true;
+                break;
+            }
+        }
+        if (hasDevicePreference) {
+            break;
+        }
+    }
+
+    std::set<std::pair<wgpu::BackendType, std::string>> adapterNameSet;
+    for (const dawn::native::Adapter& adapter : instance->GetAdapters()) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        // All adapters are selected by default.
+        bool selected = true;
+        // The adapter is deselected if:
+        if (mHasBackendTypeFilter) {
+            // It doesn't match the backend type, if present.
+            selected &= properties.backendType == mBackendTypeFilter;
+        }
+        if (mHasVendorIdFilter) {
+            // It doesn't match the vendor id, if present.
+            selected &= mVendorIdFilter == properties.vendorID;
+
+            if (!mDevicePreferences.empty()) {
+                dawn::WarningLog() << "Vendor ID filter provided. Ignoring device type preference.";
+            }
+        }
+        if (hasDevicePreference) {
+            // There is a device preference and:
+            selected &=
+                // The device type doesn't match the first available preferred type for that
+                // backend, if present.
+                (properties.adapterType == preferredDeviceType) ||
+                // Always select Unknown OpenGL adapters if we don't want a CPU adapter.
+                // OpenGL will usually be unknown because we can't query the device type.
+                // If we ever have Swiftshader GL (unlikely), we could set the DeviceType properly.
+                (preferredDeviceType != wgpu::AdapterType::CPU &&
+                 properties.adapterType == wgpu::AdapterType::Unknown &&
+                 (properties.backendType == wgpu::BackendType::OpenGL ||
+                  properties.backendType == wgpu::BackendType::OpenGLES)) ||
+                // Always select the Null backend. There are few tests on this backend, and they run
+                // quickly. This is temporary as to not lose coverage. We can group it with
+                // Swiftshader as a CPU adapter when we have Swiftshader tests.
+                (properties.backendType == wgpu::BackendType::Null);
+        }
+
+        // In Windows Remote Desktop sessions we may be able to discover multiple adapters that
+        // have the same name and backend type. We will just choose one adapter from them in our
+        // tests.
+        const auto adapterTypeAndName =
+            std::make_pair(properties.backendType, std::string(properties.name));
+        if (adapterNameSet.find(adapterTypeAndName) == adapterNameSet.end()) {
+            adapterNameSet.insert(adapterTypeAndName);
+            mAdapterProperties.emplace_back(properties, selected);
+        }
+    }
+}
+
+std::vector<AdapterTestParam> DawnTestEnvironment::GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams) {
+    std::vector<AdapterTestParam> testParams;
+    for (size_t i = 0; i < numParams; ++i) {
+        for (const auto& adapterProperties : mAdapterProperties) {
+            if (params[i].backendType == adapterProperties.backendType &&
+                adapterProperties.selected) {
+                testParams.push_back(AdapterTestParam(params[i], adapterProperties));
+            }
+        }
+    }
+    return testParams;
+}
+
+void DawnTestEnvironment::PrintTestConfigurationAndAdapterInfo(
+    dawn::native::Instance* instance) const {
+    dawn::LogMessage log = dawn::InfoLog();
+    log << "Testing configuration\n"
+           "---------------------\n"
+           "UseWire: "
+        << (mUseWire ? "true" : "false")
+        << "\n"
+           "Run suppressed tests: "
+        << (mRunSuppressedTests ? "true" : "false")
+        << "\n"
+           "BackendValidation: ";
+
+    switch (mBackendValidationLevel) {
+        case dawn::native::BackendValidationLevel::Full:
+            log << "full";
+            break;
+        case dawn::native::BackendValidationLevel::Partial:
+            log << "partial";
+            break;
+        case dawn::native::BackendValidationLevel::Disabled:
+            log << "disabled";
+            break;
+        default:
+            UNREACHABLE();
+    }
+
+    if (GetEnabledToggles().size() > 0) {
+        log << "\n"
+               "Enabled Toggles\n";
+        for (const std::string& toggle : GetEnabledToggles()) {
+            const dawn::native::ToggleInfo* info = instance->GetToggleInfo(toggle.c_str());
+            ASSERT(info != nullptr);
+            log << " - " << info->name << ": " << info->description << "\n";
+        }
+    }
+
+    if (GetDisabledToggles().size() > 0) {
+        log << "\n"
+               "Disabled Toggles\n";
+        for (const std::string& toggle : GetDisabledToggles()) {
+            const dawn::native::ToggleInfo* info = instance->GetToggleInfo(toggle.c_str());
+            ASSERT(info != nullptr);
+            log << " - " << info->name << ": " << info->description << "\n";
+        }
+    }
+
+    log << "\n"
+           "BeginCaptureOnStartup: "
+        << (mBeginCaptureOnStartup ? "true" : "false")
+        << "\n"
+           "\n"
+        << "System adapters: \n";
+
+    for (const TestAdapterProperties& properties : mAdapterProperties) {
+        std::ostringstream vendorId;
+        std::ostringstream deviceId;
+        vendorId << std::setfill('0') << std::uppercase << std::internal << std::hex << std::setw(4)
+                 << properties.vendorID;
+        deviceId << std::setfill('0') << std::uppercase << std::internal << std::hex << std::setw(4)
+                 << properties.deviceID;
+
+        // Preparing for outputting hex numbers
+        log << std::showbase << std::hex << std::setfill('0') << std::setw(4)
+
+            << " - \"" << properties.adapterName << "\" - \"" << properties.driverDescription
+            << "\"\n"
+            << "   type: " << AdapterTypeName(properties.adapterType)
+            << ", backend: " << ParamName(properties.backendType) << "\n"
+            << "   vendorId: 0x" << vendorId.str() << ", deviceId: 0x" << deviceId.str()
+            << (properties.selected ? " [Selected]" : "") << "\n";
+    }
+}
+
+void DawnTestEnvironment::SetUp() {
+    mInstance = CreateInstanceAndDiscoverAdapters();
+    ASSERT(mInstance);
+}
+
+void DawnTestEnvironment::TearDown() {
+    // When Vulkan validation layers are enabled, it's unsafe to call Vulkan APIs in the destructor
+    // of a static/global variable, so the instance must be manually released beforehand.
+    mInstance.reset();
+}
+
+bool DawnTestEnvironment::UsesWire() const {
+    return mUseWire;
+}
+
+bool DawnTestEnvironment::RunSuppressedTests() const {
+    return mRunSuppressedTests;
+}
+
+dawn::native::BackendValidationLevel DawnTestEnvironment::GetBackendValidationLevel() const {
+    return mBackendValidationLevel;
+}
+
+dawn::native::Instance* DawnTestEnvironment::GetInstance() const {
+    return mInstance.get();
+}
+
+bool DawnTestEnvironment::HasVendorIdFilter() const {
+    return mHasVendorIdFilter;
+}
+
+uint32_t DawnTestEnvironment::GetVendorIdFilter() const {
+    return mVendorIdFilter;
+}
+
+bool DawnTestEnvironment::HasBackendTypeFilter() const {
+    return mHasBackendTypeFilter;
+}
+
+wgpu::BackendType DawnTestEnvironment::GetBackendTypeFilter() const {
+    return mBackendTypeFilter;
+}
+
+const char* DawnTestEnvironment::GetWireTraceDir() const {
+    if (mWireTraceDir.length() == 0) {
+        return nullptr;
+    }
+    return mWireTraceDir.c_str();
+}
+
+const std::vector<std::string>& DawnTestEnvironment::GetEnabledToggles() const {
+    return mToggleParser.GetEnabledToggles();
+}
+
+const std::vector<std::string>& DawnTestEnvironment::GetDisabledToggles() const {
+    return mToggleParser.GetDisabledToggles();
+}
+
+// Implementation of DawnTest
+
+DawnTestBase::DawnTestBase(const AdapterTestParam& param)
+    : mParam(param),
+      mWireHelper(utils::CreateWireHelper(gTestEnv->UsesWire(), gTestEnv->GetWireTraceDir())) {
+}
+
+DawnTestBase::~DawnTestBase() {
+    // We need to destroy child objects before the Device
+    mReadbackSlots.clear();
+    queue = wgpu::Queue();
+    device = wgpu::Device();
+
+    // D3D12's GPU-based validation will accumulate objects over time if the backend device is not
+    // destroyed and recreated, so we reset it here.
+    if (IsD3D12() && IsBackendValidationEnabled()) {
+        mBackendAdapter.ResetInternalDeviceForTesting();
+    }
+    mWireHelper.reset();
+}
+
+bool DawnTestBase::IsD3D12() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::D3D12;
+}
+
+bool DawnTestBase::IsMetal() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::Metal;
+}
+
+bool DawnTestBase::IsNull() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::Null;
+}
+
+bool DawnTestBase::IsOpenGL() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::OpenGL;
+}
+
+bool DawnTestBase::IsOpenGLES() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::OpenGLES;
+}
+
+bool DawnTestBase::IsVulkan() const {
+    return mParam.adapterProperties.backendType == wgpu::BackendType::Vulkan;
+}
+
+bool DawnTestBase::IsAMD() const {
+    return gpu_info::IsAMD(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsARM() const {
+    return gpu_info::IsARM(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsImgTec() const {
+    return gpu_info::IsImgTec(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsIntel() const {
+    return gpu_info::IsIntel(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsNvidia() const {
+    return gpu_info::IsNvidia(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsQualcomm() const {
+    return gpu_info::IsQualcomm(mParam.adapterProperties.vendorID);
+}
+
+bool DawnTestBase::IsSwiftshader() const {
+    return gpu_info::IsSwiftshader(mParam.adapterProperties.vendorID,
+                                   mParam.adapterProperties.deviceID);
+}
+
+bool DawnTestBase::IsANGLE() const {
+    return !mParam.adapterProperties.adapterName.find("ANGLE");
+}
+
+bool DawnTestBase::IsWARP() const {
+    return gpu_info::IsWARP(mParam.adapterProperties.vendorID, mParam.adapterProperties.deviceID);
+}
+
+bool DawnTestBase::IsWindows() const {
+#ifdef DAWN_PLATFORM_WINDOWS
+    return true;
+#else
+    return false;
+#endif
+}
+
+bool DawnTestBase::IsLinux() const {
+#ifdef DAWN_PLATFORM_LINUX
+    return true;
+#else
+    return false;
+#endif
+}
+
+bool DawnTestBase::IsMacOS(int32_t majorVersion, int32_t minorVersion) const {
+#ifdef DAWN_PLATFORM_MACOS
+    if (majorVersion == -1 && minorVersion == -1) {
+        return true;
+    }
+    int32_t majorVersionOut, minorVersionOut = 0;
+    GetMacOSVersion(&majorVersionOut, &minorVersionOut);
+    return (majorVersion != -1 && majorVersion == majorVersionOut) &&
+           (minorVersion != -1 && minorVersion == minorVersionOut);
+#else
+    return false;
+#endif
+}
+
+bool DawnTestBase::UsesWire() const {
+    return gTestEnv->UsesWire();
+}
+
+bool DawnTestBase::IsBackendValidationEnabled() const {
+    return gTestEnv->GetBackendValidationLevel() != dawn::native::BackendValidationLevel::Disabled;
+}
+
+bool DawnTestBase::RunSuppressedTests() const {
+    return gTestEnv->RunSuppressedTests();
+}
+
+bool DawnTestBase::IsDXC() const {
+    return HasToggleEnabled("use_dxc");
+}
+
+bool DawnTestBase::IsAsan() const {
+#if defined(ADDRESS_SANITIZER)
+    return true;
+#else
+    return false;
+#endif
+}
+
+bool DawnTestBase::HasToggleEnabled(const char* toggle) const {
+    auto toggles = dawn::native::GetTogglesUsed(backendDevice);
+    return std::find_if(toggles.begin(), toggles.end(), [toggle](const char* name) {
+               return strcmp(toggle, name) == 0;
+           }) != toggles.end();
+}
+
+bool DawnTestBase::HasVendorIdFilter() const {
+    return gTestEnv->HasVendorIdFilter();
+}
+
+uint32_t DawnTestBase::GetVendorIdFilter() const {
+    return gTestEnv->GetVendorIdFilter();
+}
+
+bool DawnTestBase::HasBackendTypeFilter() const {
+    return gTestEnv->HasBackendTypeFilter();
+}
+
+wgpu::BackendType DawnTestBase::GetBackendTypeFilter() const {
+    return gTestEnv->GetBackendTypeFilter();
+}
+
+wgpu::Instance DawnTestBase::GetInstance() const {
+    return gTestEnv->GetInstance()->Get();
+}
+
+dawn::native::Adapter DawnTestBase::GetAdapter() const {
+    return mBackendAdapter;
+}
+
+std::vector<wgpu::FeatureName> DawnTestBase::GetRequiredFeatures() {
+    return {};
+}
+
+wgpu::RequiredLimits DawnTestBase::GetRequiredLimits(const wgpu::SupportedLimits&) {
+    return {};
+}
+
+const wgpu::AdapterProperties& DawnTestBase::GetAdapterProperties() const {
+    return mParam.adapterProperties;
+}
+
+wgpu::SupportedLimits DawnTestBase::GetSupportedLimits() {
+    WGPUSupportedLimits supportedLimits;
+    supportedLimits.nextInChain = nullptr;
+    dawn::native::GetProcs().deviceGetLimits(backendDevice, &supportedLimits);
+    return *reinterpret_cast<wgpu::SupportedLimits*>(&supportedLimits);
+}
+
+bool DawnTestBase::SupportsFeatures(const std::vector<wgpu::FeatureName>& features) {
+    ASSERT(mBackendAdapter);
+    std::vector<wgpu::FeatureName> supportedFeatures;
+    uint32_t count =
+        dawn::native::GetProcs().adapterEnumerateFeatures(mBackendAdapter.Get(), nullptr);
+    supportedFeatures.resize(count);
+    dawn::native::GetProcs().adapterEnumerateFeatures(
+        mBackendAdapter.Get(), reinterpret_cast<WGPUFeatureName*>(&supportedFeatures[0]));
+
+    std::unordered_set<wgpu::FeatureName> supportedSet;
+    for (wgpu::FeatureName f : supportedFeatures) {
+        supportedSet.insert(f);
+    }
+
+    for (wgpu::FeatureName f : features) {
+        if (supportedSet.count(f) == 0) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void DawnTestBase::SetUp() {
+    {
+        // Find the adapter that exactly matches our adapter properties.
+        const auto& adapters = gTestEnv->GetInstance()->GetAdapters();
+        const auto& it = std::find_if(
+            adapters.begin(), adapters.end(), [&](const dawn::native::Adapter& adapter) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                return (mParam.adapterProperties.selected &&
+                        properties.deviceID == mParam.adapterProperties.deviceID &&
+                        properties.vendorID == mParam.adapterProperties.vendorID &&
+                        properties.adapterType == mParam.adapterProperties.adapterType &&
+                        properties.backendType == mParam.adapterProperties.backendType &&
+                        strcmp(properties.name, mParam.adapterProperties.adapterName.c_str()) == 0);
+            });
+        ASSERT(it != adapters.end());
+        mBackendAdapter = *it;
+    }
+
+    // Setup the per-test platform. Tests can provide one by overloading CreateTestPlatform.
+    mTestPlatform = CreateTestPlatform();
+    gTestEnv->GetInstance()->SetPlatform(mTestPlatform.get());
+
+    // Create the device from the adapter
+    for (const char* forceEnabledWorkaround : mParam.forceEnabledWorkarounds) {
+        ASSERT(gTestEnv->GetInstance()->GetToggleInfo(forceEnabledWorkaround) != nullptr);
+    }
+    for (const char* forceDisabledWorkaround : mParam.forceDisabledWorkarounds) {
+        ASSERT(gTestEnv->GetInstance()->GetToggleInfo(forceDisabledWorkaround) != nullptr);
+    }
+
+    std::vector<const char*> forceEnabledToggles = mParam.forceEnabledWorkarounds;
+    std::vector<const char*> forceDisabledToggles = mParam.forceDisabledWorkarounds;
+
+    std::vector<wgpu::FeatureName> requiredFeatures = GetRequiredFeatures();
+
+    wgpu::SupportedLimits supportedLimits;
+    mBackendAdapter.GetLimits(reinterpret_cast<WGPUSupportedLimits*>(&supportedLimits));
+    wgpu::RequiredLimits requiredLimits = GetRequiredLimits(supportedLimits);
+
+    // Disabled disallowing unsafe APIs so we can test them.
+    forceDisabledToggles.push_back("disallow_unsafe_apis");
+
+    for (const std::string& toggle : gTestEnv->GetEnabledToggles()) {
+        const dawn::native::ToggleInfo* info =
+            gTestEnv->GetInstance()->GetToggleInfo(toggle.c_str());
+        ASSERT(info != nullptr);
+        forceEnabledToggles.push_back(info->name);
+    }
+
+    for (const std::string& toggle : gTestEnv->GetDisabledToggles()) {
+        const dawn::native::ToggleInfo* info =
+            gTestEnv->GetInstance()->GetToggleInfo(toggle.c_str());
+        ASSERT(info != nullptr);
+        forceDisabledToggles.push_back(info->name);
+    }
+
+    wgpu::DeviceDescriptor deviceDescriptor = {};
+    deviceDescriptor.requiredLimits = &requiredLimits;
+    deviceDescriptor.requiredFeatures = requiredFeatures.data();
+    deviceDescriptor.requiredFeaturesCount = requiredFeatures.size();
+
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
+    deviceDescriptor.nextInChain = &togglesDesc;
+    togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+    togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+    togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+    togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+
+    std::tie(device, backendDevice) =
+        mWireHelper->RegisterDevice(mBackendAdapter.CreateDevice(&deviceDescriptor));
+    ASSERT_NE(nullptr, backendDevice);
+
+    std::string traceName =
+        std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name();
+    mWireHelper->BeginWireTrace(traceName.c_str());
+
+    queue = device.GetQueue();
+
+    device.SetUncapturedErrorCallback(OnDeviceError, this);
+    device.SetDeviceLostCallback(OnDeviceLost, this);
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+    if (IsOpenGL()) {
+        glfwMakeContextCurrent(gTestEnv->GetOpenGLWindow());
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+    if (IsOpenGLES()) {
+        glfwMakeContextCurrent(gTestEnv->GetOpenGLESWindow());
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+    device.SetLoggingCallback(
+        [](WGPULoggingType type, char const* message, void*) {
+            switch (type) {
+                case WGPULoggingType_Verbose:
+                    dawn::DebugLog() << message;
+                    break;
+                case WGPULoggingType_Warning:
+                    dawn::WarningLog() << message;
+                    break;
+                case WGPULoggingType_Error:
+                    dawn::ErrorLog() << message;
+                    break;
+                default:
+                    dawn::InfoLog() << message;
+                    break;
+            }
+        },
+        nullptr);
+}
+
+void DawnTestBase::TearDown() {
+    FlushWire();
+
+    MapSlotsSynchronously();
+    ResolveExpectations();
+
+    for (size_t i = 0; i < mReadbackSlots.size(); ++i) {
+        mReadbackSlots[i].buffer.Unmap();
+    }
+
+    if (!UsesWire()) {
+        EXPECT_EQ(mLastWarningCount,
+                  dawn::native::GetDeprecationWarningCountForTesting(device.Get()));
+    }
+
+    // The device will be destroyed soon after, so we want to set the expectation.
+    ExpectDeviceDestruction();
+}
+
+void DawnTestBase::StartExpectDeviceError(testing::Matcher<std::string> errorMatcher) {
+    mExpectError = true;
+    mError = false;
+    mErrorMatcher = errorMatcher;
+}
+
+bool DawnTestBase::EndExpectDeviceError() {
+    mExpectError = false;
+    mErrorMatcher = testing::_;
+    return mError;
+}
+
+void DawnTestBase::ExpectDeviceDestruction() {
+    mExpectDestruction = true;
+}
+
+// static
+void DawnTestBase::OnDeviceError(WGPUErrorType type, const char* message, void* userdata) {
+    ASSERT(type != WGPUErrorType_NoError);
+    DawnTestBase* self = static_cast<DawnTestBase*>(userdata);
+
+    ASSERT_TRUE(self->mExpectError) << "Got unexpected device error: " << message;
+    ASSERT_FALSE(self->mError) << "Got two errors in expect block";
+    if (self->mExpectError) {
+        ASSERT_THAT(message, self->mErrorMatcher);
+    }
+    self->mError = true;
+}
+
+void DawnTestBase::OnDeviceLost(WGPUDeviceLostReason reason, const char* message, void* userdata) {
+    DawnTestBase* self = static_cast<DawnTestBase*>(userdata);
+    if (self->mExpectDestruction) {
+        EXPECT_EQ(reason, WGPUDeviceLostReason_Destroyed);
+        return;
+    }
+    // Using ADD_FAILURE + ASSERT instead of FAIL to prevent the current test from continuing with a
+    // corrupt state.
+    ADD_FAILURE() << "Device lost during test: " << message;
+    ASSERT(false);
+}
+
+std::ostringstream& DawnTestBase::AddBufferExpectation(const char* file,
+                                                       int line,
+                                                       const wgpu::Buffer& buffer,
+                                                       uint64_t offset,
+                                                       uint64_t size,
+                                                       detail::Expectation* expectation) {
+    auto readback = ReserveReadback(size);
+
+    // We need to enqueue the copy immediately because by the time we resolve the expectation,
+    // the buffer might have been modified.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(buffer, offset, readback.buffer, readback.offset, size);
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    DeferredExpectation deferred;
+    deferred.file = file;
+    deferred.line = line;
+    deferred.readbackSlot = readback.slot;
+    deferred.readbackOffset = readback.offset;
+    deferred.size = size;
+    deferred.rowBytes = size;
+    deferred.bytesPerRow = size;
+    deferred.expectation.reset(expectation);
+
+    mDeferredExpectations.push_back(std::move(deferred));
+    mDeferredExpectations.back().message = std::make_unique<std::ostringstream>();
+    return *(mDeferredExpectations.back().message.get());
+}
+
+std::ostringstream& DawnTestBase::AddTextureExpectationImpl(const char* file,
+                                                            int line,
+                                                            detail::Expectation* expectation,
+                                                            const wgpu::Texture& texture,
+                                                            wgpu::Origin3D origin,
+                                                            wgpu::Extent3D extent,
+                                                            uint32_t level,
+                                                            wgpu::TextureAspect aspect,
+                                                            uint32_t dataSize,
+                                                            uint32_t bytesPerRow) {
+    if (bytesPerRow == 0) {
+        bytesPerRow = Align(extent.width * dataSize, kTextureBytesPerRowAlignment);
+    } else {
+        ASSERT(bytesPerRow >= extent.width * dataSize);
+        ASSERT(bytesPerRow == Align(bytesPerRow, kTextureBytesPerRowAlignment));
+    }
+
+    uint32_t rowsPerImage = extent.height;
+    uint32_t size = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent.width,
+                                               extent.height, extent.depthOrArrayLayers, dataSize);
+
+    auto readback = ReserveReadback(Align(size, 4));
+
+    // We need to enqueue the copy immediately because by the time we resolve the expectation,
+    // the texture might have been modified.
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, level, origin, aspect);
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(readback.buffer, readback.offset, bytesPerRow, rowsPerImage);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &extent);
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    DeferredExpectation deferred;
+    deferred.file = file;
+    deferred.line = line;
+    deferred.readbackSlot = readback.slot;
+    deferred.readbackOffset = readback.offset;
+    deferred.size = size;
+    deferred.rowBytes = extent.width * dataSize;
+    deferred.bytesPerRow = bytesPerRow;
+    deferred.expectation.reset(expectation);
+
+    mDeferredExpectations.push_back(std::move(deferred));
+    mDeferredExpectations.back().message = std::make_unique<std::ostringstream>();
+    return *(mDeferredExpectations.back().message.get());
+}
+
+std::ostringstream& DawnTestBase::ExpectSampledFloatDataImpl(wgpu::TextureView textureView,
+                                                             const char* wgslTextureType,
+                                                             uint32_t width,
+                                                             uint32_t height,
+                                                             uint32_t componentCount,
+                                                             uint32_t sampleCount,
+                                                             detail::Expectation* expectation) {
+    std::ostringstream shaderSource;
+    shaderSource << "let width : u32 = " << width << "u;\n";
+    shaderSource << "@group(0) @binding(0) var tex : " << wgslTextureType << ";\n";
+    shaderSource << R"(
+        struct Result {
+            values : array<f32>
+        }
+        @group(0) @binding(1) var<storage, read_write> result : Result;
+    )";
+    shaderSource << "let componentCount : u32 = " << componentCount << "u;\n";
+    shaderSource << "let sampleCount : u32 = " << sampleCount << "u;\n";
+
+    shaderSource << "fn doTextureLoad(t: " << wgslTextureType
+                 << ", coord: vec2<i32>, sample: u32, component: u32) -> f32";
+    if (sampleCount > 1) {
+        shaderSource << R"({
+            return textureLoad(tex, coord, i32(sample))[component];
+        })";
+    } else {
+        if (strcmp(wgslTextureType, "texture_depth_2d") == 0) {
+            ASSERT(componentCount == 1);
+            shaderSource << R"({
+                return textureLoad(tex, coord, 0);
+            })";
+        } else {
+            shaderSource << R"({
+                return textureLoad(tex, coord, 0)[component];
+            })";
+        }
+    }
+    shaderSource << R"(
+        @stage(compute) @workgroup_size(1) fn main(
+            @builtin(global_invocation_id) GlobalInvocationId : vec3<u32>
+        ) {
+            let baseOutIndex = GlobalInvocationId.y * width + GlobalInvocationId.x;
+            for (var s = 0u; s < sampleCount; s = s + 1u) {
+                for (var c = 0u; c < componentCount; c = c + 1u) {
+                    result.values[
+                        baseOutIndex * sampleCount * componentCount +
+                        s * componentCount +
+                        c
+                    ] = doTextureLoad(tex, vec2<i32>(GlobalInvocationId.xy), s, c);
+                }
+            }
+        }
+    )";
+
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, shaderSource.str().c_str());
+
+    wgpu::ComputePipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.compute.module = csModule;
+    pipelineDescriptor.compute.entryPoint = "main";
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDescriptor);
+
+    // Create and initialize the slot buffer so that it won't unexpectedly affect the count of
+    // resources lazily cleared.
+    const std::vector<float> initialBufferData(width * height * componentCount * sampleCount, 0.f);
+    wgpu::Buffer readbackBuffer = utils::CreateBufferFromData(
+        device, initialBufferData.data(), sizeof(float) * initialBufferData.size(),
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {{0, textureView}, {1, readbackBuffer}});
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = commandEncoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Dispatch(width, height);
+    pass.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    return EXPECT_BUFFER(readbackBuffer, 0, initialBufferData.size() * sizeof(float), expectation);
+}
+
+std::ostringstream& DawnTestBase::ExpectSampledFloatData(wgpu::Texture texture,
+                                                         uint32_t width,
+                                                         uint32_t height,
+                                                         uint32_t componentCount,
+                                                         uint32_t arrayLayer,
+                                                         uint32_t mipLevel,
+                                                         detail::Expectation* expectation) {
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+    viewDesc.baseMipLevel = mipLevel;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = arrayLayer;
+    viewDesc.arrayLayerCount = 1;
+
+    return ExpectSampledFloatDataImpl(texture.CreateView(&viewDesc), "texture_2d<f32>", width,
+                                      height, componentCount, 1, expectation);
+}
+
+std::ostringstream& DawnTestBase::ExpectMultisampledFloatData(wgpu::Texture texture,
+                                                              uint32_t width,
+                                                              uint32_t height,
+                                                              uint32_t componentCount,
+                                                              uint32_t sampleCount,
+                                                              uint32_t arrayLayer,
+                                                              uint32_t mipLevel,
+                                                              detail::Expectation* expectation) {
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+    viewDesc.baseMipLevel = mipLevel;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = arrayLayer;
+    viewDesc.arrayLayerCount = 1;
+
+    return ExpectSampledFloatDataImpl(texture.CreateView(&viewDesc), "texture_multisampled_2d<f32>",
+                                      width, height, componentCount, sampleCount, expectation);
+}
+
+std::ostringstream& DawnTestBase::ExpectSampledDepthData(wgpu::Texture texture,
+                                                         uint32_t width,
+                                                         uint32_t height,
+                                                         uint32_t arrayLayer,
+                                                         uint32_t mipLevel,
+                                                         detail::Expectation* expectation) {
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+    viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+    viewDesc.baseMipLevel = mipLevel;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = arrayLayer;
+    viewDesc.arrayLayerCount = 1;
+
+    return ExpectSampledFloatDataImpl(texture.CreateView(&viewDesc), "texture_depth_2d", width,
+                                      height, 1, 1, expectation);
+}
+
+std::ostringstream& DawnTestBase::ExpectAttachmentDepthStencilTestData(
+    wgpu::Texture texture,
+    wgpu::TextureFormat format,
+    uint32_t width,
+    uint32_t height,
+    uint32_t arrayLayer,
+    uint32_t mipLevel,
+    std::vector<float> expectedDepth,
+    uint8_t* expectedStencil) {
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+    // Make the color attachment that we'll use to read back.
+    wgpu::TextureDescriptor colorTexDesc = {};
+    colorTexDesc.size = {width, height, 1};
+    colorTexDesc.format = wgpu::TextureFormat::R32Uint;
+    colorTexDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture colorTexture = device.CreateTexture(&colorTexDesc);
+
+    wgpu::Texture depthDataTexture = nullptr;
+    if (expectedDepth.size() > 0) {
+        // Make a sampleable texture to store the depth data. We'll sample this in the
+        // shader to output depth.
+        wgpu::TextureDescriptor depthDataDesc = {};
+        depthDataDesc.size = {width, height, 1};
+        depthDataDesc.format = wgpu::TextureFormat::R32Float;
+        depthDataDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst;
+        depthDataTexture = device.CreateTexture(&depthDataDesc);
+
+        // Upload the depth data.
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(depthDataTexture, 0, {0, 0, 0});
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(0, sizeof(float) * width);
+        wgpu::Extent3D copyExtent = {width, height, 1};
+
+        queue.WriteTexture(&imageCopyTexture, expectedDepth.data(),
+                           sizeof(float) * expectedDepth.size(), &textureDataLayout, &copyExtent);
+    }
+
+    // Pipeline for a full screen quad.
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+    pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, -1.0),
+                vec2<f32>( 3.0, -1.0),
+                vec2<f32>(-1.0,  3.0));
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    if (depthDataTexture) {
+        // Sample the input texture and write out depth. |result| will only be set to 1 if we
+        // pass the depth test.
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var texture0 : texture_2d<f32>;
+
+            struct FragmentOut {
+                @location(0) result : u32,
+                @builtin(frag_depth) fragDepth : f32,
+            }
+
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> FragmentOut {
+                var output : FragmentOut;
+                output.result = 1u;
+                output.fragDepth = textureLoad(texture0, vec2<i32>(FragCoord.xy), 0)[0];
+                return output;
+            })");
+    } else {
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment)
+            fn main() -> @location(0) u32 {
+                return 1u;
+            })");
+    }
+
+    wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
+    if (depthDataTexture) {
+        // Pass the depth test only if the depth is equal.
+        depthStencil->depthCompare = wgpu::CompareFunction::Equal;
+    }
+
+    if (expectedStencil != nullptr) {
+        // Pass the stencil test only if the stencil is equal.
+        depthStencil->stencilFront.compare = wgpu::CompareFunction::Equal;
+    }
+
+    pipelineDescriptor.cTargets[0].format = colorTexDesc.format;
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.baseMipLevel = mipLevel;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = arrayLayer;
+    viewDesc.arrayLayerCount = 1;
+
+    utils::ComboRenderPassDescriptor passDescriptor({colorTexture.CreateView()},
+                                                    texture.CreateView(&viewDesc));
+    passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+    passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+    switch (format) {
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth16Unorm:
+            passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            break;
+        case wgpu::TextureFormat::Stencil8:
+            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            break;
+        default:
+            break;
+    }
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+    if (expectedStencil != nullptr) {
+        pass.SetStencilReference(*expectedStencil);
+    }
+    pass.SetPipeline(pipeline);
+    if (depthDataTexture) {
+        // Bind the depth data texture.
+        pass.SetBindGroup(0, utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, depthDataTexture.CreateView()}}));
+    }
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> colorData(width * height, 1u);
+    return EXPECT_TEXTURE_EQ(colorData.data(), colorTexture, {0, 0}, {width, height});
+}
+
+void DawnTestBase::WaitABit() {
+    device.Tick();
+    FlushWire();
+
+    utils::USleep(100);
+}
+
+void DawnTestBase::FlushWire() {
+    if (gTestEnv->UsesWire()) {
+        bool C2SFlushed = mWireHelper->FlushClient();
+        bool S2CFlushed = mWireHelper->FlushServer();
+        ASSERT(C2SFlushed);
+        ASSERT(S2CFlushed);
+    }
+}
+
+void DawnTestBase::WaitForAllOperations() {
+    bool done = false;
+    device.GetQueue().OnSubmittedWorkDone(
+        0u, [](WGPUQueueWorkDoneStatus, void* userdata) { *static_cast<bool*>(userdata) = true; },
+        &done);
+    while (!done) {
+        WaitABit();
+    }
+}
+
+DawnTestBase::ReadbackReservation DawnTestBase::ReserveReadback(uint64_t readbackSize) {
+    ReadbackSlot slot;
+    slot.bufferSize = readbackSize;
+
+    // Create and initialize the slot buffer so that it won't unexpectedly affect the count of
+    // resource lazy clear in the tests.
+    const std::vector<uint8_t> initialBufferData(readbackSize, 0u);
+    slot.buffer =
+        utils::CreateBufferFromData(device, initialBufferData.data(), readbackSize,
+                                    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst);
+
+    ReadbackReservation reservation;
+    reservation.buffer = slot.buffer;
+    reservation.slot = mReadbackSlots.size();
+    reservation.offset = 0;
+
+    mReadbackSlots.push_back(std::move(slot));
+    return reservation;
+}
+
+void DawnTestBase::MapSlotsSynchronously() {
+    // Initialize numPendingMapOperations before mapping, just in case the callback is called
+    // immediately.
+    mNumPendingMapOperations = mReadbackSlots.size();
+
+    // Map all readback slots
+    for (size_t i = 0; i < mReadbackSlots.size(); ++i) {
+        MapReadUserdata* userdata = new MapReadUserdata{this, i};
+
+        const ReadbackSlot& slot = mReadbackSlots[i];
+        slot.buffer.MapAsync(wgpu::MapMode::Read, 0, wgpu::kWholeMapSize, SlotMapCallback,
+                             userdata);
+    }
+
+    // Busy wait until all map operations are done.
+    while (mNumPendingMapOperations != 0) {
+        WaitABit();
+    }
+}
+
+// static
+void DawnTestBase::SlotMapCallback(WGPUBufferMapAsyncStatus status, void* userdata_) {
+    DAWN_ASSERT(status == WGPUBufferMapAsyncStatus_Success);
+
+    std::unique_ptr<MapReadUserdata> userdata(static_cast<MapReadUserdata*>(userdata_));
+    DawnTestBase* test = userdata->test;
+    ReadbackSlot* slot = &test->mReadbackSlots[userdata->slot];
+
+    slot->mappedData = slot->buffer.GetConstMappedRange();
+    test->mNumPendingMapOperations--;
+}
+
+void DawnTestBase::ResolveExpectations() {
+    for (const auto& expectation : mDeferredExpectations) {
+        DAWN_ASSERT(mReadbackSlots[expectation.readbackSlot].mappedData != nullptr);
+
+        // Get a pointer to the mapped copy of the data for the expectation.
+        const char* data =
+            static_cast<const char*>(mReadbackSlots[expectation.readbackSlot].mappedData);
+        data += expectation.readbackOffset;
+
+        uint32_t size;
+        std::vector<char> packedData;
+        if (expectation.rowBytes != expectation.bytesPerRow) {
+            DAWN_ASSERT(expectation.bytesPerRow > expectation.rowBytes);
+            uint32_t rowCount =
+                (expectation.size + expectation.bytesPerRow - 1) / expectation.bytesPerRow;
+            uint32_t packedSize = rowCount * expectation.rowBytes;
+            packedData.resize(packedSize);
+            for (uint32_t r = 0; r < rowCount; ++r) {
+                for (uint32_t i = 0; i < expectation.rowBytes; ++i) {
+                    packedData[i + r * expectation.rowBytes] =
+                        data[i + r * expectation.bytesPerRow];
+                }
+            }
+            data = packedData.data();
+            size = packedSize;
+        } else {
+            size = expectation.size;
+        }
+
+        // Get the result for the expectation and add context to failures
+        testing::AssertionResult result = expectation.expectation->Check(data, size);
+        if (!result) {
+            result << " Expectation created at " << expectation.file << ":" << expectation.line
+                   << std::endl;
+            result << expectation.message->str();
+        }
+
+        EXPECT_TRUE(result);
+    }
+}
+
+std::unique_ptr<dawn::platform::Platform> DawnTestBase::CreateTestPlatform() {
+    return nullptr;
+}
+
+bool RGBA8::operator==(const RGBA8& other) const {
+    return r == other.r && g == other.g && b == other.b && a == other.a;
+}
+
+bool RGBA8::operator!=(const RGBA8& other) const {
+    return !(*this == other);
+}
+
+bool RGBA8::operator<=(const RGBA8& other) const {
+    return (r <= other.r && g <= other.g && b <= other.b && a <= other.a);
+}
+
+bool RGBA8::operator>=(const RGBA8& other) const {
+    return (r >= other.r && g >= other.g && b >= other.b && a >= other.a);
+}
+
+std::ostream& operator<<(std::ostream& stream, const RGBA8& color) {
+    return stream << "RGBA8(" << static_cast<int>(color.r) << ", " << static_cast<int>(color.g)
+                  << ", " << static_cast<int>(color.b) << ", " << static_cast<int>(color.a) << ")";
+}
+
+namespace detail {
+    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+        const BackendTestConfig* params,
+        size_t numParams) {
+        ASSERT(gTestEnv != nullptr);
+        return gTestEnv->GetAvailableAdapterTestParamsForBackends(params, numParams);
+    }
+
+    // Helper classes to set expectations
+
+    template <typename T, typename U>
+    ExpectEq<T, U>::ExpectEq(T singleValue, T tolerance) : mTolerance(tolerance) {
+        mExpected.push_back(singleValue);
+    }
+
+    template <typename T, typename U>
+    ExpectEq<T, U>::ExpectEq(const T* values, const unsigned int count, T tolerance)
+        : mTolerance(tolerance) {
+        mExpected.assign(values, values + count);
+    }
+
+    namespace {
+
+        template <typename T, typename U = T>
+        testing::AssertionResult CheckImpl(const T& expected, const U& actual, const T& tolerance) {
+            ASSERT(tolerance == T{});
+            if (expected != actual) {
+                return testing::AssertionFailure() << expected << ", actual " << actual;
+            }
+            return testing::AssertionSuccess();
+        }
+
+        template <>
+        testing::AssertionResult CheckImpl<float>(const float& expected,
+                                                  const float& actual,
+                                                  const float& tolerance) {
+            if (abs(expected - actual) > tolerance) {
+                return tolerance == 0.0
+                           ? testing::AssertionFailure() << expected << ", actual " << actual
+                           : testing::AssertionFailure() << "within " << tolerance << " of "
+                                                         << expected << ", actual " << actual;
+            }
+            return testing::AssertionSuccess();
+        }
+
+        // Interpret uint16_t as float16
+        // This is mostly for reading float16 output from textures
+        template <>
+        testing::AssertionResult CheckImpl<float, uint16_t>(const float& expected,
+                                                            const uint16_t& actual,
+                                                            const float& tolerance) {
+            float actualF32 = Float16ToFloat32(actual);
+            if (abs(expected - actualF32) > tolerance) {
+                return tolerance == 0.0
+                           ? testing::AssertionFailure() << expected << ", actual " << actualF32
+                           : testing::AssertionFailure() << "within " << tolerance << " of "
+                                                         << expected << ", actual " << actualF32;
+            }
+            return testing::AssertionSuccess();
+        }
+
+    }  // namespace
+
+    template <typename T, typename U>
+    testing::AssertionResult ExpectEq<T, U>::Check(const void* data, size_t size) {
+        DAWN_ASSERT(size == sizeof(U) * mExpected.size());
+        const U* actual = static_cast<const U*>(data);
+
+        for (size_t i = 0; i < mExpected.size(); ++i) {
+            testing::AssertionResult check = CheckImpl(mExpected[i], actual[i], mTolerance);
+            if (!check) {
+                testing::AssertionResult result = testing::AssertionFailure()
+                                                  << "Expected data[" << i << "] to be "
+                                                  << check.message() << std::endl;
+
+                if (mExpected.size() <= 1024) {
+                    result << "Expected:" << std::endl;
+                    printBuffer(result, mExpected.data(), mExpected.size());
+
+                    result << "Actual:" << std::endl;
+                    printBuffer(result, actual, mExpected.size());
+                }
+
+                return result;
+            }
+        }
+        return testing::AssertionSuccess();
+    }
+
+    template class ExpectEq<uint8_t>;
+    template class ExpectEq<uint16_t>;
+    template class ExpectEq<uint32_t>;
+    template class ExpectEq<uint64_t>;
+    template class ExpectEq<RGBA8>;
+    template class ExpectEq<float>;
+    template class ExpectEq<float, uint16_t>;
+
+    template <typename T>
+    ExpectBetweenColors<T>::ExpectBetweenColors(T value0, T value1) {
+        T l, h;
+        l.r = std::min(value0.r, value1.r);
+        l.g = std::min(value0.g, value1.g);
+        l.b = std::min(value0.b, value1.b);
+        l.a = std::min(value0.a, value1.a);
+
+        h.r = std::max(value0.r, value1.r);
+        h.g = std::max(value0.g, value1.g);
+        h.b = std::max(value0.b, value1.b);
+        h.a = std::max(value0.a, value1.a);
+
+        mLowerColorChannels.push_back(l);
+        mHigherColorChannels.push_back(h);
+
+        mValues0.push_back(value0);
+        mValues1.push_back(value1);
+    }
+
+    template <typename T>
+    testing::AssertionResult ExpectBetweenColors<T>::Check(const void* data, size_t size) {
+        DAWN_ASSERT(size == sizeof(T) * mLowerColorChannels.size());
+        DAWN_ASSERT(mHigherColorChannels.size() == mLowerColorChannels.size());
+        DAWN_ASSERT(mValues0.size() == mValues1.size());
+        DAWN_ASSERT(mValues0.size() == mLowerColorChannels.size());
+
+        const T* actual = static_cast<const T*>(data);
+
+        for (size_t i = 0; i < mLowerColorChannels.size(); ++i) {
+            if (!(actual[i] >= mLowerColorChannels[i] && actual[i] <= mHigherColorChannels[i])) {
+                testing::AssertionResult result = testing::AssertionFailure()
+                                                  << "Expected data[" << i << "] to be between "
+                                                  << mValues0[i] << " and " << mValues1[i]
+                                                  << ", actual " << actual[i] << std::endl;
+
+                if (mLowerColorChannels.size() <= 1024) {
+                    result << "Expected between:" << std::endl;
+                    printBuffer(result, mValues0.data(), mLowerColorChannels.size());
+                    result << "and" << std::endl;
+                    printBuffer(result, mValues1.data(), mLowerColorChannels.size());
+
+                    result << "Actual:" << std::endl;
+                    printBuffer(result, actual, mLowerColorChannels.size());
+                }
+
+                return result;
+            }
+        }
+
+        return testing::AssertionSuccess();
+    }
+
+    template class ExpectBetweenColors<RGBA8>;
+}  // namespace detail
diff --git a/src/dawn/tests/DawnTest.h b/src/dawn/tests/DawnTest.h
new file mode 100644
index 0000000..877b2fc
--- /dev/null
+++ b/src/dawn/tests/DawnTest.h
@@ -0,0 +1,798 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_DAWNTEST_H_
+#define TESTS_DAWNTEST_H_
+
+#include "dawn/common/Log.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/Preprocessor.h"
+#include "dawn/dawn_proc_table.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/tests/ParamGenerator.h"
+#include "dawn/tests/ToggleParser.h"
+#include "dawn/utils/ScopedAutoreleasePool.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/webgpu_cpp.h"
+#include "dawn/webgpu_cpp_print.h"
+
+#include <dawn/platform/DawnPlatform.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+// Getting data back from Dawn is done in an async manners so all expectations are "deferred"
+// until the end of the test. Also expectations use a copy to a MapRead buffer to get the data
+// so resources should have the CopySrc allowed usage bit if you want to add expectations on
+// them.
+
+// AddBufferExpectation is defined in DawnTestBase as protected function. This ensures the macro can
+// only be used in derivd class of DawnTestBase. Use "this" pointer to ensure the macro works with
+// CRTP.
+#define EXPECT_BUFFER(buffer, offset, size, expectation) \
+    this->AddBufferExpectation(__FILE__, __LINE__, buffer, offset, size, expectation)
+
+#define EXPECT_BUFFER_U8_EQ(expected, buffer, offset) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint8_t), new ::detail::ExpectEq<uint8_t>(expected))
+
+#define EXPECT_BUFFER_U8_RANGE_EQ(expected, buffer, offset, count) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint8_t) * (count),       \
+                  new ::detail::ExpectEq<uint8_t>(expected, count))
+
+#define EXPECT_BUFFER_U16_EQ(expected, buffer, offset) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint16_t), new ::detail::ExpectEq<uint16_t>(expected))
+
+#define EXPECT_BUFFER_U16_RANGE_EQ(expected, buffer, offset, count) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint16_t) * (count),       \
+                  new ::detail::ExpectEq<uint16_t>(expected, count))
+
+#define EXPECT_BUFFER_U32_EQ(expected, buffer, offset) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint32_t), new ::detail::ExpectEq<uint32_t>(expected))
+
+#define EXPECT_BUFFER_U32_RANGE_EQ(expected, buffer, offset, count) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint32_t) * (count),       \
+                  new ::detail::ExpectEq<uint32_t>(expected, count))
+
+#define EXPECT_BUFFER_U64_EQ(expected, buffer, offset) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint64_t), new ::detail::ExpectEq<uint64_t>(expected))
+
+#define EXPECT_BUFFER_U64_RANGE_EQ(expected, buffer, offset, count) \
+    EXPECT_BUFFER(buffer, offset, sizeof(uint64_t) * (count),       \
+                  new ::detail::ExpectEq<uint64_t>(expected, count))
+
+#define EXPECT_BUFFER_FLOAT_EQ(expected, buffer, offset) \
+    EXPECT_BUFFER(buffer, offset, sizeof(float), new ::detail::ExpectEq<float>(expected))
+
+#define EXPECT_BUFFER_FLOAT_RANGE_EQ(expected, buffer, offset, count) \
+    EXPECT_BUFFER(buffer, offset, sizeof(float) * (count),            \
+                  new ::detail::ExpectEq<float>(expected, count))
+
+// Test a pixel of the mip level 0 of a 2D texture.
+#define EXPECT_PIXEL_RGBA8_EQ(expected, texture, x, y) \
+    AddTextureExpectation(__FILE__, __LINE__, expected, texture, {x, y})
+
+#define EXPECT_PIXEL_FLOAT_EQ(expected, texture, x, y) \
+    AddTextureExpectation(__FILE__, __LINE__, expected, texture, {x, y})
+
+#define EXPECT_PIXEL_FLOAT16_EQ(expected, texture, x, y) \
+    AddTextureExpectation<float, uint16_t>(__FILE__, __LINE__, expected, texture, {x, y})
+
+#define EXPECT_PIXEL_RGBA8_BETWEEN(color0, color1, texture, x, y) \
+    AddTextureBetweenColorsExpectation(__FILE__, __LINE__, color0, color1, texture, x, y)
+
+#define EXPECT_TEXTURE_EQ(...) AddTextureExpectation(__FILE__, __LINE__, __VA_ARGS__)
+
+#define EXPECT_TEXTURE_FLOAT16_EQ(...) \
+    AddTextureExpectation<float, uint16_t>(__FILE__, __LINE__, __VA_ARGS__)
+
+#define ASSERT_DEVICE_ERROR_MSG(statement, matcher)             \
+    StartExpectDeviceError(matcher);                            \
+    statement;                                                  \
+    FlushWire();                                                \
+    if (!EndExpectDeviceError()) {                              \
+        FAIL() << "Expected device error in:\n " << #statement; \
+    }                                                           \
+    do {                                                        \
+    } while (0)
+
+#define ASSERT_DEVICE_ERROR(statement) ASSERT_DEVICE_ERROR_MSG(statement, testing::_)
+
+struct RGBA8 {
+    constexpr RGBA8() : RGBA8(0, 0, 0, 0) {
+    }
+    constexpr RGBA8(uint8_t r, uint8_t g, uint8_t b, uint8_t a) : r(r), g(g), b(b), a(a) {
+    }
+    bool operator==(const RGBA8& other) const;
+    bool operator!=(const RGBA8& other) const;
+    bool operator<=(const RGBA8& other) const;
+    bool operator>=(const RGBA8& other) const;
+
+    uint8_t r, g, b, a;
+
+    static const RGBA8 kZero;
+    static const RGBA8 kBlack;
+    static const RGBA8 kRed;
+    static const RGBA8 kGreen;
+    static const RGBA8 kBlue;
+    static const RGBA8 kYellow;
+    static const RGBA8 kWhite;
+};
+std::ostream& operator<<(std::ostream& stream, const RGBA8& color);
+
+struct BackendTestConfig {
+    BackendTestConfig(wgpu::BackendType backendType,
+                      std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                      std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+    wgpu::BackendType backendType;
+
+    std::vector<const char*> forceEnabledWorkarounds;
+    std::vector<const char*> forceDisabledWorkarounds;
+};
+
+struct TestAdapterProperties : wgpu::AdapterProperties {
+    TestAdapterProperties(const wgpu::AdapterProperties& properties, bool selected);
+    std::string adapterName;
+    bool selected;
+
+  private:
+    // This may be temporary, so it is copied into |adapterName| and made private.
+    using wgpu::AdapterProperties::name;
+};
+
+struct AdapterTestParam {
+    AdapterTestParam(const BackendTestConfig& config,
+                     const TestAdapterProperties& adapterProperties);
+
+    TestAdapterProperties adapterProperties;
+    std::vector<const char*> forceEnabledWorkarounds;
+    std::vector<const char*> forceDisabledWorkarounds;
+};
+
+std::ostream& operator<<(std::ostream& os, const AdapterTestParam& param);
+
+BackendTestConfig D3D12Backend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                               std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+BackendTestConfig MetalBackend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                               std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+BackendTestConfig NullBackend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                              std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+BackendTestConfig OpenGLBackend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                                std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+BackendTestConfig OpenGLESBackend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                                  std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+BackendTestConfig VulkanBackend(std::initializer_list<const char*> forceEnabledWorkarounds = {},
+                                std::initializer_list<const char*> forceDisabledWorkarounds = {});
+
+struct GLFWwindow;
+
+namespace utils {
+    class PlatformDebugLogger;
+    class TerribleCommandBuffer;
+    class WireHelper;
+}  // namespace utils
+
+namespace detail {
+    class Expectation;
+    class CustomTextureExpectation;
+
+    template <typename T, typename U = T>
+    class ExpectEq;
+    template <typename T>
+    class ExpectBetweenColors;
+}  // namespace detail
+
+namespace dawn::wire {
+    class CommandHandler;
+    class WireClient;
+    class WireServer;
+}  // namespace dawn::wire
+
+void InitDawnEnd2EndTestEnvironment(int argc, char** argv);
+
+class DawnTestEnvironment : public testing::Environment {
+  public:
+    DawnTestEnvironment(int argc, char** argv);
+    ~DawnTestEnvironment() override;
+
+    static void SetEnvironment(DawnTestEnvironment* env);
+
+    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+        const BackendTestConfig* params,
+        size_t numParams);
+
+    void SetUp() override;
+    void TearDown() override;
+
+    bool UsesWire() const;
+    dawn::native::BackendValidationLevel GetBackendValidationLevel() const;
+    dawn::native::Instance* GetInstance() const;
+    bool HasVendorIdFilter() const;
+    uint32_t GetVendorIdFilter() const;
+    bool HasBackendTypeFilter() const;
+    wgpu::BackendType GetBackendTypeFilter() const;
+    const char* GetWireTraceDir() const;
+    GLFWwindow* GetOpenGLWindow() const;
+    GLFWwindow* GetOpenGLESWindow() const;
+
+    const std::vector<std::string>& GetEnabledToggles() const;
+    const std::vector<std::string>& GetDisabledToggles() const;
+
+    bool RunSuppressedTests() const;
+
+  protected:
+    std::unique_ptr<dawn::native::Instance> mInstance;
+
+  private:
+    void ParseArgs(int argc, char** argv);
+    std::unique_ptr<dawn::native::Instance> CreateInstanceAndDiscoverAdapters();
+    void SelectPreferredAdapterProperties(const dawn::native::Instance* instance);
+    void PrintTestConfigurationAndAdapterInfo(dawn::native::Instance* instance) const;
+
+    bool mUseWire = false;
+    dawn::native::BackendValidationLevel mBackendValidationLevel =
+        dawn::native::BackendValidationLevel::Disabled;
+    bool mBeginCaptureOnStartup = false;
+    bool mHasVendorIdFilter = false;
+    uint32_t mVendorIdFilter = 0;
+    bool mHasBackendTypeFilter = false;
+    wgpu::BackendType mBackendTypeFilter;
+    std::string mWireTraceDir;
+    bool mRunSuppressedTests = false;
+
+    ToggleParser mToggleParser;
+
+    std::vector<wgpu::AdapterType> mDevicePreferences;
+    std::vector<TestAdapterProperties> mAdapterProperties;
+
+    std::unique_ptr<utils::PlatformDebugLogger> mPlatformDebugLogger;
+    GLFWwindow* mOpenGLWindow;
+    GLFWwindow* mOpenGLESWindow;
+};
+
+class DawnTestBase {
+    friend class DawnPerfTestBase;
+
+  public:
+    DawnTestBase(const AdapterTestParam& param);
+    virtual ~DawnTestBase();
+
+    void SetUp();
+    void TearDown();
+
+    bool IsD3D12() const;
+    bool IsMetal() const;
+    bool IsNull() const;
+    bool IsOpenGL() const;
+    bool IsOpenGLES() const;
+    bool IsVulkan() const;
+
+    bool IsAMD() const;
+    bool IsARM() const;
+    bool IsImgTec() const;
+    bool IsIntel() const;
+    bool IsNvidia() const;
+    bool IsQualcomm() const;
+    bool IsSwiftshader() const;
+    bool IsANGLE() const;
+    bool IsWARP() const;
+
+    bool IsWindows() const;
+    bool IsLinux() const;
+    bool IsMacOS(int32_t majorVersion = -1, int32_t minorVersion = -1) const;
+
+    bool UsesWire() const;
+    bool IsBackendValidationEnabled() const;
+    bool RunSuppressedTests() const;
+
+    bool IsDXC() const;
+
+    bool IsAsan() const;
+
+    bool HasToggleEnabled(const char* workaround) const;
+
+    void StartExpectDeviceError(testing::Matcher<std::string> errorMatcher = testing::_);
+    bool EndExpectDeviceError();
+
+    void ExpectDeviceDestruction();
+
+    bool HasVendorIdFilter() const;
+    uint32_t GetVendorIdFilter() const;
+
+    bool HasBackendTypeFilter() const;
+    wgpu::BackendType GetBackendTypeFilter() const;
+
+    wgpu::Instance GetInstance() const;
+    dawn::native::Adapter GetAdapter() const;
+
+    virtual std::unique_ptr<dawn::platform::Platform> CreateTestPlatform();
+
+    struct PrintToStringParamName {
+        PrintToStringParamName(const char* test);
+        std::string SanitizeParamName(std::string paramName, size_t index) const;
+
+        template <class ParamType>
+        std::string operator()(const ::testing::TestParamInfo<ParamType>& info) const {
+            return SanitizeParamName(::testing::PrintToStringParamName()(info), info.index);
+        }
+
+        std::string mTest;
+    };
+
+  protected:
+    wgpu::Device device;
+    wgpu::Queue queue;
+
+    DawnProcTable backendProcs = {};
+    WGPUDevice backendDevice = nullptr;
+
+    size_t mLastWarningCount = 0;
+
+    // Helper methods to implement the EXPECT_ macros
+    std::ostringstream& AddBufferExpectation(const char* file,
+                                             int line,
+                                             const wgpu::Buffer& buffer,
+                                             uint64_t offset,
+                                             uint64_t size,
+                                             detail::Expectation* expectation);
+
+    // T - expected value Type
+    // U - actual value Type (defaults = T)
+    template <typename T, typename U = T>
+    std::ostringstream& AddTextureExpectation(const char* file,
+                                              int line,
+                                              const T* expectedData,
+                                              const wgpu::Texture& texture,
+                                              wgpu::Origin3D origin,
+                                              wgpu::Extent3D extent,
+                                              wgpu::TextureFormat format,
+                                              T tolerance = 0,
+                                              uint32_t level = 0,
+                                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+                                              uint32_t bytesPerRow = 0) {
+        uint32_t texelBlockSize = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t texelComponentCount = utils::GetWGSLRenderableColorTextureComponentCount(format);
+
+        return AddTextureExpectationImpl(
+            file, line,
+            new detail::ExpectEq<T, U>(
+                expectedData,
+                texelComponentCount * extent.width * extent.height * extent.depthOrArrayLayers,
+                tolerance),
+            texture, origin, extent, level, aspect, texelBlockSize, bytesPerRow);
+    }
+
+    template <typename T, typename U = T>
+    std::ostringstream& AddTextureExpectation(const char* file,
+                                              int line,
+                                              const T* expectedData,
+                                              const wgpu::Texture& texture,
+                                              wgpu::Origin3D origin,
+                                              wgpu::Extent3D extent,
+                                              uint32_t level = 0,
+                                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+                                              uint32_t bytesPerRow = 0) {
+        return AddTextureExpectationImpl(
+            file, line,
+            new detail::ExpectEq<T, U>(expectedData,
+                                       extent.width * extent.height * extent.depthOrArrayLayers),
+            texture, origin, extent, level, aspect, sizeof(U), bytesPerRow);
+    }
+
+    template <typename T, typename U = T>
+    std::ostringstream& AddTextureExpectation(const char* file,
+                                              int line,
+                                              const T& expectedData,
+                                              const wgpu::Texture& texture,
+                                              wgpu::Origin3D origin,
+                                              uint32_t level = 0,
+                                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+                                              uint32_t bytesPerRow = 0) {
+        return AddTextureExpectationImpl(file, line, new detail::ExpectEq<T, U>(expectedData),
+                                         texture, origin, {1, 1}, level, aspect, sizeof(U),
+                                         bytesPerRow);
+    }
+
+    template <typename E,
+              typename = typename std::enable_if<
+                  std::is_base_of<detail::CustomTextureExpectation, E>::value>::type>
+    std::ostringstream& AddTextureExpectation(const char* file,
+                                              int line,
+                                              E* expectation,
+                                              const wgpu::Texture& texture,
+                                              wgpu::Origin3D origin,
+                                              wgpu::Extent3D extent,
+                                              uint32_t level = 0,
+                                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+                                              uint32_t bytesPerRow = 0) {
+        return AddTextureExpectationImpl(file, line, expectation, texture, origin, extent, level,
+                                         aspect, expectation->DataSize(), bytesPerRow);
+    }
+
+    template <typename T>
+    std::ostringstream& AddTextureBetweenColorsExpectation(
+        const char* file,
+        int line,
+        const T& color0,
+        const T& color1,
+        const wgpu::Texture& texture,
+        uint32_t x,
+        uint32_t y,
+        uint32_t level = 0,
+        wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+        uint32_t bytesPerRow = 0) {
+        return AddTextureExpectationImpl(
+            file, line, new detail::ExpectBetweenColors<T>(color0, color1), texture, {x, y}, {1, 1},
+            level, aspect, sizeof(T), bytesPerRow);
+    }
+
+    std::ostringstream& ExpectSampledFloatData(wgpu::Texture texture,
+                                               uint32_t width,
+                                               uint32_t height,
+                                               uint32_t componentCount,
+                                               uint32_t arrayLayer,
+                                               uint32_t mipLevel,
+                                               detail::Expectation* expectation);
+
+    std::ostringstream& ExpectMultisampledFloatData(wgpu::Texture texture,
+                                                    uint32_t width,
+                                                    uint32_t height,
+                                                    uint32_t componentCount,
+                                                    uint32_t sampleCount,
+                                                    uint32_t arrayLayer,
+                                                    uint32_t mipLevel,
+                                                    detail::Expectation* expectation);
+
+    std::ostringstream& ExpectSampledDepthData(wgpu::Texture depthTexture,
+                                               uint32_t width,
+                                               uint32_t height,
+                                               uint32_t arrayLayer,
+                                               uint32_t mipLevel,
+                                               detail::Expectation* expectation);
+
+    // Check depth by uploading expected data to a sampled texture, writing it out as a depth
+    // attachment, and then using the "equals" depth test to check the contents are the same.
+    // Check stencil by rendering a full screen quad and using the "equals" stencil test with
+    // a stencil reference value. Note that checking stencil checks that the entire stencil
+    // buffer is equal to the expected stencil value.
+    std::ostringstream& ExpectAttachmentDepthStencilTestData(wgpu::Texture texture,
+                                                             wgpu::TextureFormat format,
+                                                             uint32_t width,
+                                                             uint32_t height,
+                                                             uint32_t arrayLayer,
+                                                             uint32_t mipLevel,
+                                                             std::vector<float> expectedDepth,
+                                                             uint8_t* expectedStencil);
+
+    std::ostringstream& ExpectAttachmentDepthTestData(wgpu::Texture texture,
+                                                      wgpu::TextureFormat format,
+                                                      uint32_t width,
+                                                      uint32_t height,
+                                                      uint32_t arrayLayer,
+                                                      uint32_t mipLevel,
+                                                      std::vector<float> expectedDepth) {
+        return ExpectAttachmentDepthStencilTestData(texture, format, width, height, arrayLayer,
+                                                    mipLevel, std::move(expectedDepth), nullptr);
+    }
+
+    std::ostringstream& ExpectAttachmentStencilTestData(wgpu::Texture texture,
+                                                        wgpu::TextureFormat format,
+                                                        uint32_t width,
+                                                        uint32_t height,
+                                                        uint32_t arrayLayer,
+                                                        uint32_t mipLevel,
+                                                        uint8_t expectedStencil) {
+        return ExpectAttachmentDepthStencilTestData(texture, format, width, height, arrayLayer,
+                                                    mipLevel, {}, &expectedStencil);
+    }
+
+    void WaitABit();
+    void FlushWire();
+    void WaitForAllOperations();
+
+    bool SupportsFeatures(const std::vector<wgpu::FeatureName>& features);
+
+    // Called in SetUp() to get the features required to be enabled in the tests. The tests must
+    // check if the required features are supported by the adapter in this function and guarantee
+    // the returned features are all supported by the adapter. The tests may provide different
+    // code path to handle the situation when not all features are supported.
+    virtual std::vector<wgpu::FeatureName> GetRequiredFeatures();
+
+    virtual wgpu::RequiredLimits GetRequiredLimits(const wgpu::SupportedLimits&);
+
+    const wgpu::AdapterProperties& GetAdapterProperties() const;
+
+    // TODO(crbug.com/dawn/689): Use limits returned from the wire
+    // This is implemented here because tests need to always query
+    // the |backendDevice| since limits are not implemented in the wire.
+    wgpu::SupportedLimits GetSupportedLimits();
+
+  private:
+    utils::ScopedAutoreleasePool mObjCAutoreleasePool;
+    AdapterTestParam mParam;
+    std::unique_ptr<utils::WireHelper> mWireHelper;
+
+    // Tracking for validation errors
+    static void OnDeviceError(WGPUErrorType type, const char* message, void* userdata);
+    static void OnDeviceLost(WGPUDeviceLostReason reason, const char* message, void* userdata);
+    bool mExpectError = false;
+    bool mError = false;
+    testing::Matcher<std::string> mErrorMatcher;
+    bool mExpectDestruction = false;
+
+    std::ostringstream& AddTextureExpectationImpl(const char* file,
+                                                  int line,
+                                                  detail::Expectation* expectation,
+                                                  const wgpu::Texture& texture,
+                                                  wgpu::Origin3D origin,
+                                                  wgpu::Extent3D extent,
+                                                  uint32_t level,
+                                                  wgpu::TextureAspect aspect,
+                                                  uint32_t dataSize,
+                                                  uint32_t bytesPerRow);
+
+    std::ostringstream& ExpectSampledFloatDataImpl(wgpu::TextureView textureView,
+                                                   const char* wgslTextureType,
+                                                   uint32_t width,
+                                                   uint32_t height,
+                                                   uint32_t componentCount,
+                                                   uint32_t sampleCount,
+                                                   detail::Expectation* expectation);
+
+    // MapRead buffers used to get data for the expectations
+    struct ReadbackSlot {
+        wgpu::Buffer buffer;
+        uint64_t bufferSize;
+        const void* mappedData = nullptr;
+    };
+    std::vector<ReadbackSlot> mReadbackSlots;
+
+    // Maps all the buffers and fill ReadbackSlot::mappedData
+    void MapSlotsSynchronously();
+    static void SlotMapCallback(WGPUBufferMapAsyncStatus status, void* userdata);
+    size_t mNumPendingMapOperations = 0;
+
+    // Reserve space where the data for an expectation can be copied
+    struct ReadbackReservation {
+        wgpu::Buffer buffer;
+        size_t slot;
+        uint64_t offset;
+    };
+    ReadbackReservation ReserveReadback(uint64_t readbackSize);
+
+    struct DeferredExpectation {
+        const char* file;
+        int line;
+        size_t readbackSlot;
+        uint64_t readbackOffset;
+        uint64_t size;
+        uint32_t rowBytes;
+        uint32_t bytesPerRow;
+        std::unique_ptr<detail::Expectation> expectation;
+        // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54316
+        // Use unique_ptr because of missing move/copy constructors on std::basic_ostringstream
+        std::unique_ptr<std::ostringstream> message;
+    };
+    std::vector<DeferredExpectation> mDeferredExpectations;
+
+    // Assuming the data is mapped, checks all expectations
+    void ResolveExpectations();
+
+    dawn::native::Adapter mBackendAdapter;
+
+    std::unique_ptr<dawn::platform::Platform> mTestPlatform;
+};
+
+#define DAWN_SKIP_TEST_IF_BASE(condition, type, reason)   \
+    do {                                                  \
+        if (condition) {                                  \
+            dawn::InfoLog() << "Test " type ": " #reason; \
+            GTEST_SKIP();                                 \
+            return;                                       \
+        }                                                 \
+    } while (0)
+
+// Skip a test which requires a feature or a toggle to be present / not present or some WIP
+// features.
+#define DAWN_TEST_UNSUPPORTED_IF(condition) \
+    DAWN_SKIP_TEST_IF_BASE(condition, "unsupported", condition)
+
+// Skip a test when the test failing on a specific HW / backend / OS combination. We can disable
+// this macro with the command line parameter "--run-suppressed-tests".
+#define DAWN_SUPPRESS_TEST_IF(condition) \
+    DAWN_SKIP_TEST_IF_BASE(!RunSuppressedTests() && condition, "suppressed", condition)
+
+#define EXPECT_DEPRECATION_WARNINGS(statement, n)                                 \
+    do {                                                                          \
+        if (UsesWire()) {                                                         \
+            statement;                                                            \
+        } else {                                                                  \
+            size_t warningsBefore =                                               \
+                dawn::native::GetDeprecationWarningCountForTesting(device.Get()); \
+            statement;                                                            \
+            size_t warningsAfter =                                                \
+                dawn::native::GetDeprecationWarningCountForTesting(device.Get()); \
+            EXPECT_EQ(mLastWarningCount, warningsBefore);                         \
+            if (!HasToggleEnabled("skip_validation")) {                           \
+                EXPECT_EQ(warningsAfter, warningsBefore + n);                     \
+            }                                                                     \
+            mLastWarningCount = warningsAfter;                                    \
+        }                                                                         \
+    } while (0)
+#define EXPECT_DEPRECATION_WARNING(statement) EXPECT_DEPRECATION_WARNINGS(statement, 1)
+
+template <typename Params = AdapterTestParam>
+class DawnTestWithParams : public DawnTestBase, public ::testing::TestWithParam<Params> {
+  protected:
+    DawnTestWithParams();
+    ~DawnTestWithParams() override = default;
+
+    void SetUp() override {
+        DawnTestBase::SetUp();
+    }
+
+    void TearDown() override {
+        DawnTestBase::TearDown();
+    }
+};
+
+template <typename Params>
+DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {
+}
+
+using DawnTest = DawnTestWithParams<>;
+
+// Instantiate the test once for each backend provided after the first argument. Use it like this:
+//     DAWN_INSTANTIATE_TEST(MyTestFixture, MetalBackend, OpenGLBackend)
+#define DAWN_INSTANTIATE_TEST(testName, ...)                                            \
+    const decltype(DAWN_PP_GET_HEAD(__VA_ARGS__)) testName##params[] = {__VA_ARGS__};   \
+    INSTANTIATE_TEST_SUITE_P(                                                           \
+        , testName,                                                                     \
+        testing::ValuesIn(::detail::GetAvailableAdapterTestParamsForBackends(           \
+            testName##params, sizeof(testName##params) / sizeof(testName##params[0]))), \
+        DawnTestBase::PrintToStringParamName(#testName));                               \
+    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(testName)
+
+// Instantiate the test once for each backend provided in the first param list.
+// The test will be parameterized over the following param lists.
+// Use it like this:
+//     DAWN_INSTANTIATE_TEST_P(MyTestFixture, {MetalBackend(), OpenGLBackend()}, {A, B}, {1, 2})
+// MyTestFixture must extend DawnTestWithParams<Param> where Param is a struct that extends
+// AdapterTestParam, and whose constructor looks like:
+//     Param(AdapterTestParam, ABorC, 12or3, ..., otherParams... )
+//     You must also teach GTest how to print this struct.
+//     https://github.com/google/googletest/blob/master/docs/advanced.md#teaching-googletest-how-to-print-your-values
+// Macro DAWN_TEST_PARAM_STRUCT can help generate this struct.
+#define DAWN_INSTANTIATE_TEST_P(testName, ...)                                                 \
+    INSTANTIATE_TEST_SUITE_P(                                                                  \
+        , testName, ::testing::ValuesIn(MakeParamGenerator<testName::ParamType>(__VA_ARGS__)), \
+        DawnTestBase::PrintToStringParamName(#testName));                                      \
+    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(testName)
+
+// Implementation for DAWN_TEST_PARAM_STRUCT to declare/print struct fields.
+#define DAWN_TEST_PARAM_STRUCT_DECL_STRUCT_FIELD(Type) Type DAWN_PP_CONCATENATE(m, Type);
+#define DAWN_TEST_PARAM_STRUCT_PRINT_STRUCT_FIELD(Type) \
+    o << "; " << #Type << "=" << param.DAWN_PP_CONCATENATE(m, Type);
+
+// Usage: DAWN_TEST_PARAM_STRUCT(Foo, TypeA, TypeB, ...)
+// Generate a test param struct called Foo which extends AdapterTestParam and generated
+// struct _Dawn_Foo. _Dawn_Foo has members of types TypeA, TypeB, etc. which are named mTypeA,
+// mTypeB, etc. in the order they are placed in the macro argument list. Struct Foo should be
+// constructed with an AdapterTestParam as the first argument, followed by a list of values
+// to initialize the base _Dawn_Foo struct.
+// It is recommended to use alias declarations so that stringified types are more readable.
+// Example:
+//   using MyParam = unsigned int;
+//   DAWN_TEST_PARAM_STRUCT(FooParams, MyParam);
+#define DAWN_TEST_PARAM_STRUCT(StructName, ...)                                                    \
+    struct DAWN_PP_CONCATENATE(_Dawn_, StructName) {                                               \
+        DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH)(DAWN_TEST_PARAM_STRUCT_DECL_STRUCT_FIELD,  \
+                                                        __VA_ARGS__))                              \
+    };                                                                                             \
+    std::ostream& operator<<(std::ostream& o,                                                      \
+                             const DAWN_PP_CONCATENATE(_Dawn_, StructName) & param) {              \
+        DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH)(DAWN_TEST_PARAM_STRUCT_PRINT_STRUCT_FIELD, \
+                                                        __VA_ARGS__))                              \
+        return o;                                                                                  \
+    }                                                                                              \
+    struct StructName : AdapterTestParam, DAWN_PP_CONCATENATE(_Dawn_, StructName) {                \
+        template <typename... Args>                                                                \
+        StructName(const AdapterTestParam& param, Args&&... args)                                  \
+            : AdapterTestParam(param),                                                             \
+              DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {               \
+        }                                                                                          \
+    };                                                                                             \
+    std::ostream& operator<<(std::ostream& o, const StructName& param) {                           \
+        o << static_cast<const AdapterTestParam&>(param);                                          \
+        o << "; " << static_cast<const DAWN_PP_CONCATENATE(_Dawn_, StructName)&>(param);           \
+        return o;                                                                                  \
+    }                                                                                              \
+    static_assert(true, "require semicolon")
+
+namespace detail {
+    // Helper functions used for DAWN_INSTANTIATE_TEST
+    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+        const BackendTestConfig* params,
+        size_t numParams);
+
+    // All classes used to implement the deferred expectations should inherit from this.
+    class Expectation {
+      public:
+        virtual ~Expectation() = default;
+
+        // Will be called with the buffer or texture data the expectation should check.
+        virtual testing::AssertionResult Check(const void* data, size_t size) = 0;
+    };
+
+    // Expectation that checks the data is equal to some expected values.
+    // T - expected value Type
+    // U - actual value Type (defaults = T)
+    // This is expanded for float16 mostly where T=float, U=uint16_t
+    template <typename T, typename U>
+    class ExpectEq : public Expectation {
+      public:
+        ExpectEq(T singleValue, T tolerance = {});
+        ExpectEq(const T* values, const unsigned int count, T tolerance = {});
+
+        testing::AssertionResult Check(const void* data, size_t size) override;
+
+      private:
+        std::vector<T> mExpected;
+        T mTolerance;
+    };
+    extern template class ExpectEq<uint8_t>;
+    extern template class ExpectEq<int16_t>;
+    extern template class ExpectEq<uint32_t>;
+    extern template class ExpectEq<uint64_t>;
+    extern template class ExpectEq<RGBA8>;
+    extern template class ExpectEq<float>;
+    extern template class ExpectEq<float, uint16_t>;
+
+    template <typename T>
+    class ExpectBetweenColors : public Expectation {
+      public:
+        // Inclusive for now
+        ExpectBetweenColors(T value0, T value1);
+        testing::AssertionResult Check(const void* data, size_t size) override;
+
+      private:
+        std::vector<T> mLowerColorChannels;
+        std::vector<T> mHigherColorChannels;
+
+        // used for printing error
+        std::vector<T> mValues0;
+        std::vector<T> mValues1;
+    };
+    // A color is considered between color0 and color1 when all channel values are within range of
+    // each counterparts. It doesn't matter which value is higher or lower. Essentially color =
+    // lerp(color0, color1, t) where t is [0,1]. But I don't want to be too strict here.
+    extern template class ExpectBetweenColors<RGBA8>;
+
+    class CustomTextureExpectation : public Expectation {
+      public:
+        virtual ~CustomTextureExpectation() = default;
+        virtual uint32_t DataSize() = 0;
+    };
+
+}  // namespace detail
+
+#endif  // TESTS_DAWNTEST_H_
diff --git a/src/dawn/tests/End2EndTestsMain.cpp b/src/dawn/tests/End2EndTestsMain.cpp
new file mode 100644
index 0000000..1991922
--- /dev/null
+++ b/src/dawn/tests/End2EndTestsMain.cpp
@@ -0,0 +1,21 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+int main(int argc, char** argv) {
+    InitDawnEnd2EndTestEnvironment(argc, argv);
+    testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/src/dawn/tests/MockCallback.h b/src/dawn/tests/MockCallback.h
new file mode 100644
index 0000000..2b795ed
--- /dev/null
+++ b/src/dawn/tests/MockCallback.h
@@ -0,0 +1,101 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+
+#include "dawn/common/Assert.h"
+
+#include <memory>
+#include <set>
+
+namespace testing {
+
+    template <typename F>
+    class MockCallback;
+
+    // Helper class for mocking callbacks used for Dawn callbacks with |void* userdata|
+    // as the last callback argument.
+    //
+    // Example Usage:
+    //   MockCallback<WGPUDeviceLostCallback> mock;
+    //
+    //   void* foo = XYZ; // this is the callback userdata
+    //
+    //   wgpuDeviceSetDeviceLostCallback(device, mock.Callback(), mock.MakeUserdata(foo));
+    //   EXPECT_CALL(mock, Call(_, foo));
+    template <typename R, typename... Args>
+    class MockCallback<R (*)(Args...)> : public ::testing::MockFunction<R(Args...)> {
+        using CallbackType = R (*)(Args...);
+
+      public:
+        // Helper function makes it easier to get the callback using |foo.Callback()|
+        // unstead of MockCallback<CallbackType>::Callback.
+        static CallbackType Callback() {
+            return CallUnboundCallback;
+        }
+
+        void* MakeUserdata(void* userdata) {
+            auto mockAndUserdata =
+                std::unique_ptr<MockAndUserdata>(new MockAndUserdata{this, userdata});
+
+            // Add the userdata to a set of userdata for this mock. We never
+            // remove from this set even if a callback should only be called once so that
+            // repeated calls to the callback still forward the userdata correctly.
+            // Userdata will be destroyed when the mock is destroyed.
+            auto [result, inserted] = mUserdatas.insert(std::move(mockAndUserdata));
+            ASSERT(inserted);
+            return result->get();
+        }
+
+      private:
+        struct MockAndUserdata {
+            MockCallback* mock;
+            void* userdata;
+        };
+
+        static R CallUnboundCallback(Args... args) {
+            std::tuple<Args...> tuple = std::make_tuple(args...);
+
+            constexpr size_t ArgC = sizeof...(Args);
+            static_assert(ArgC >= 1, "Mock callback requires at least one argument (the userdata)");
+
+            // Get the userdata. It should be the last argument.
+            auto userdata = std::get<ArgC - 1>(tuple);
+            static_assert(std::is_same<decltype(userdata), void*>::value,
+                          "Last callback argument must be void* userdata");
+
+            // Extract the mock.
+            ASSERT(userdata != nullptr);
+            auto* mockAndUserdata = reinterpret_cast<MockAndUserdata*>(userdata);
+            MockCallback* mock = mockAndUserdata->mock;
+            ASSERT(mock != nullptr);
+
+            // Replace the userdata
+            std::get<ArgC - 1>(tuple) = mockAndUserdata->userdata;
+
+            // Forward the callback to the mock.
+            return mock->CallImpl(std::make_index_sequence<ArgC>{}, std::move(tuple));
+        }
+
+        // This helper cannot be inlined because we dependent on the templated index sequence
+        // to unpack the tuple arguments.
+        template <size_t... Is>
+        R CallImpl(const std::index_sequence<Is...>&, std::tuple<Args...> args) {
+            return this->Call(std::get<Is>(args)...);
+        }
+
+        std::set<std::unique_ptr<MockAndUserdata>> mUserdatas;
+    };
+
+}  // namespace testing
diff --git a/src/dawn/tests/OWNERS b/src/dawn/tests/OWNERS
new file mode 100644
index 0000000..b3edd60
--- /dev/null
+++ b/src/dawn/tests/OWNERS
@@ -0,0 +1,3 @@
+# Anybody can review tests, but please ask a top level owner for
+# harness / utility changes.
+*
diff --git a/src/dawn/tests/ParamGenerator.h b/src/dawn/tests/ParamGenerator.h
new file mode 100644
index 0000000..c90591d
--- /dev/null
+++ b/src/dawn/tests/ParamGenerator.h
@@ -0,0 +1,140 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_PARAMGENERATOR_H_
+#define TESTS_PARAMGENERATOR_H_
+
+#include <tuple>
+#include <vector>
+
+// ParamStruct is a custom struct which ParamStruct will yield when iterating.
+// The types Params... should be the same as the types passed to the constructor
+// of ParamStruct.
+template <typename ParamStruct, typename... Params>
+class ParamGenerator {
+    using ParamTuple = std::tuple<std::vector<Params>...>;
+    using Index = std::array<size_t, sizeof...(Params)>;
+
+    static constexpr auto s_indexSequence = std::make_index_sequence<sizeof...(Params)>{};
+
+    // Using an N-dimensional Index, extract params from ParamTuple and pass
+    // them to the constructor of ParamStruct.
+    template <size_t... Is>
+    static ParamStruct GetParam(const ParamTuple& params,
+                                const Index& index,
+                                std::index_sequence<Is...>) {
+        return ParamStruct(std::get<Is>(params)[std::get<Is>(index)]...);
+    }
+
+    // Get the last value index into a ParamTuple.
+    template <size_t... Is>
+    static Index GetLastIndex(const ParamTuple& params, std::index_sequence<Is...>) {
+        return Index{std::get<Is>(params).size() - 1 ...};
+    }
+
+  public:
+    using value_type = ParamStruct;
+
+    ParamGenerator(std::vector<Params>... params) : mParams(params...), mIsEmpty(false) {
+        for (bool isEmpty : {params.empty()...}) {
+            mIsEmpty |= isEmpty;
+        }
+    }
+
+    class Iterator : public std::iterator<std::forward_iterator_tag, ParamStruct, size_t> {
+      public:
+        Iterator& operator++() {
+            // Increment the Index by 1. If the i'th place reaches the maximum,
+            // reset it to 0 and continue with the i+1'th place.
+            for (int i = mIndex.size() - 1; i >= 0; --i) {
+                if (mIndex[i] >= mLastIndex[i]) {
+                    mIndex[i] = 0;
+                } else {
+                    mIndex[i]++;
+                    return *this;
+                }
+            }
+
+            // Set a marker that the iterator has reached the end.
+            mEnd = true;
+            return *this;
+        }
+
+        bool operator==(const Iterator& other) const {
+            return mEnd == other.mEnd && mIndex == other.mIndex;
+        }
+
+        bool operator!=(const Iterator& other) const {
+            return !(*this == other);
+        }
+
+        ParamStruct operator*() const {
+            return GetParam(mParams, mIndex, s_indexSequence);
+        }
+
+      private:
+        friend class ParamGenerator;
+
+        Iterator(ParamTuple params, Index index)
+            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {
+        }
+
+        ParamTuple mParams;
+        Index mIndex;
+        Index mLastIndex;
+        bool mEnd = false;
+    };
+
+    Iterator begin() const {
+        if (mIsEmpty) {
+            return end();
+        }
+        return Iterator(mParams, {});
+    }
+
+    Iterator end() const {
+        Iterator iter(mParams, GetLastIndex(mParams, s_indexSequence));
+        ++iter;
+        return iter;
+    }
+
+  private:
+    ParamTuple mParams;
+    bool mIsEmpty;
+};
+
+struct BackendTestConfig;
+struct AdapterTestParam;
+
+namespace detail {
+    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+        const BackendTestConfig* params,
+        size_t numParams);
+}
+
+template <typename Param, typename... Params>
+auto MakeParamGenerator(std::vector<BackendTestConfig>&& first,
+                        std::initializer_list<Params>&&... params) {
+    return ParamGenerator<Param, AdapterTestParam, Params...>(
+        ::detail::GetAvailableAdapterTestParamsForBackends(first.data(), first.size()),
+        std::forward<std::initializer_list<Params>&&>(params)...);
+}
+template <typename Param, typename... Params>
+auto MakeParamGenerator(std::vector<BackendTestConfig>&& first, std::vector<Params>&&... params) {
+    return ParamGenerator<Param, AdapterTestParam, Params...>(
+        ::detail::GetAvailableAdapterTestParamsForBackends(first.data(), first.size()),
+        std::forward<std::vector<Params>&&>(params)...);
+}
+
+#endif  // TESTS_PARAMGENERATOR_H_
diff --git a/src/dawn/tests/PerfTestsMain.cpp b/src/dawn/tests/PerfTestsMain.cpp
new file mode 100644
index 0000000..a927e71
--- /dev/null
+++ b/src/dawn/tests/PerfTestsMain.cpp
@@ -0,0 +1,21 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+int main(int argc, char** argv) {
+    InitDawnPerfTestEnvironment(argc, argv);
+    testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/src/dawn/tests/ToggleParser.cpp b/src/dawn/tests/ToggleParser.cpp
new file mode 100644
index 0000000..997a1d4
--- /dev/null
+++ b/src/dawn/tests/ToggleParser.cpp
@@ -0,0 +1,57 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/ToggleParser.h"
+
+#include <cstring>
+#include <sstream>
+
+ToggleParser::ToggleParser() = default;
+ToggleParser::~ToggleParser() = default;
+
+bool ToggleParser::ParseEnabledToggles(char* arg) {
+    constexpr const char kEnableTogglesSwitch[] = "--enable-toggles=";
+    size_t argLen = sizeof(kEnableTogglesSwitch) - 1;
+    if (strncmp(arg, kEnableTogglesSwitch, argLen) == 0) {
+        std::string toggle;
+        std::stringstream toggles(arg + argLen);
+        while (getline(toggles, toggle, ',')) {
+            mEnabledToggles.push_back(toggle);
+        }
+        return true;
+    }
+    return false;
+}
+
+bool ToggleParser::ParseDisabledToggles(char* arg) {
+    constexpr const char kDisableTogglesSwitch[] = "--disable-toggles=";
+    size_t argLDis = sizeof(kDisableTogglesSwitch) - 1;
+    if (strncmp(arg, kDisableTogglesSwitch, argLDis) == 0) {
+        std::string toggle;
+        std::stringstream toggles(arg + argLDis);
+        while (getline(toggles, toggle, ',')) {
+            mDisabledToggles.push_back(toggle);
+        }
+        return true;
+    }
+    return false;
+}
+
+const std::vector<std::string>& ToggleParser::GetEnabledToggles() const {
+    return mEnabledToggles;
+}
+
+const std::vector<std::string>& ToggleParser::GetDisabledToggles() const {
+    return mDisabledToggles;
+}
diff --git a/src/dawn/tests/ToggleParser.h b/src/dawn/tests/ToggleParser.h
new file mode 100644
index 0000000..d5ff90b
--- /dev/null
+++ b/src/dawn/tests/ToggleParser.h
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_TOGGLEPARSER_H_
+#define TESTS_TOGGLEPARSER_H_
+
+#include <string>
+#include <vector>
+
+class ToggleParser {
+  public:
+    ToggleParser();
+    ~ToggleParser();
+
+    bool ParseEnabledToggles(char* arg);
+    bool ParseDisabledToggles(char* arg);
+
+    const std::vector<std::string>& GetEnabledToggles() const;
+    const std::vector<std::string>& GetDisabledToggles() const;
+
+  private:
+    std::vector<std::string> mEnabledToggles;
+    std::vector<std::string> mDisabledToggles;
+};
+
+#endif  // TESTS_TOGGLEPARSER_H_
diff --git a/src/dawn/tests/UnittestsMain.cpp b/src/dawn/tests/UnittestsMain.cpp
new file mode 100644
index 0000000..b98a616
--- /dev/null
+++ b/src/dawn/tests/UnittestsMain.cpp
@@ -0,0 +1,23 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "unittests/validation/ValidationTest.h"
+
+#include <gtest/gtest.h>
+
+int main(int argc, char** argv) {
+    InitDawnValidationTestEnvironment(argc, argv);
+    testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
new file mode 100644
index 0000000..62295fc
--- /dev/null
+++ b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
@@ -0,0 +1,415 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/tests/MockCallback.h"
+#include "dawn/webgpu_cpp.h"
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+#    include "dawn/native/VulkanBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+#    include "dawn/native/D3D12Backend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+#    include "dawn/native/MetalBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+#    include "dawn/native/MetalBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
+#    include "GLFW/glfw3.h"
+#    include "dawn/native/OpenGLBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+#include <gtest/gtest.h>
+
+namespace {
+
+    using namespace testing;
+
+    class AdapterDiscoveryTests : public ::testing::Test {};
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+    // Test only discovering the SwiftShader adapter
+    TEST(AdapterDiscoveryTests, OnlySwiftShader) {
+        dawn::native::Instance instance;
+
+        dawn::native::vulkan::AdapterDiscoveryOptions options;
+        options.forceSwiftShader = true;
+        instance.DiscoverAdapters(&options);
+
+        const auto& adapters = instance.GetAdapters();
+        EXPECT_LE(adapters.size(), 1u);  // 0 or 1 SwiftShader adapters.
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
+            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
+        }
+    }
+
+    // Test discovering only Vulkan adapters
+    TEST(AdapterDiscoveryTests, OnlyVulkan) {
+        dawn::native::Instance instance;
+
+        dawn::native::vulkan::AdapterDiscoveryOptions options;
+        instance.DiscoverAdapters(&options);
+
+        const auto& adapters = instance.GetAdapters();
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
+        }
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+    // Test discovering only D3D12 adapters
+    TEST(AdapterDiscoveryTests, OnlyD3D12) {
+        dawn::native::Instance instance;
+
+        dawn::native::d3d12::AdapterDiscoveryOptions options;
+        instance.DiscoverAdapters(&options);
+
+        const auto& adapters = instance.GetAdapters();
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
+        }
+    }
+
+    // Test discovering a D3D12 adapter from a prexisting DXGI adapter
+    TEST(AdapterDiscoveryTests, MatchingDXGIAdapter) {
+        using Microsoft::WRL::ComPtr;
+
+        ComPtr<IDXGIFactory4> dxgiFactory;
+        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+        ASSERT_EQ(hr, S_OK);
+
+        for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+            ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+            if (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+                break;  // No more adapters to enumerate.
+            }
+
+            dawn::native::Instance instance;
+
+            dawn::native::d3d12::AdapterDiscoveryOptions options;
+            options.dxgiAdapter = std::move(dxgiAdapter);
+            instance.DiscoverAdapters(&options);
+
+            const auto& adapters = instance.GetAdapters();
+            for (const auto& adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
+            }
+        }
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+    // Test discovering only Metal adapters
+    TEST(AdapterDiscoveryTests, OnlyMetal) {
+        dawn::native::Instance instance;
+
+        dawn::native::metal::AdapterDiscoveryOptions options;
+        instance.DiscoverAdapters(&options);
+
+        const auto& adapters = instance.GetAdapters();
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::Metal);
+        }
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+    // Test discovering only desktop OpenGL adapters
+    TEST(AdapterDiscoveryTests, OnlyDesktopGL) {
+        if (!glfwInit()) {
+            GTEST_SKIP() << "glfwInit() failed";
+        }
+        glfwDefaultWindowHints();
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+        GLFWwindow* window =
+            glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
+        glfwMakeContextCurrent(window);
+
+        dawn::native::Instance instance;
+
+        dawn::native::opengl::AdapterDiscoveryOptions options;
+        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+        instance.DiscoverAdapters(&options);
+        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+        const auto& adapters = instance.GetAdapters();
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGL);
+        }
+
+        glfwDestroyWindow(window);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+    // Test discovering only OpenGLES adapters
+    TEST(AdapterDiscoveryTests, OnlyOpenGLES) {
+        ScopedEnvironmentVar angleDefaultPlatform;
+        if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
+            angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
+        }
+
+        if (!glfwInit()) {
+            GTEST_SKIP() << "glfwInit() failed";
+        }
+        glfwDefaultWindowHints();
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+        glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+        GLFWwindow* window =
+            glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
+        glfwMakeContextCurrent(window);
+
+        dawn::native::Instance instance;
+
+        dawn::native::opengl::AdapterDiscoveryOptionsES options;
+        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+        instance.DiscoverAdapters(&options);
+        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+        const auto& adapters = instance.GetAdapters();
+        for (const auto& adapter : adapters) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGLES);
+        }
+
+        glfwDestroyWindow(window);
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(DAWN_ENABLE_BACKEND_VULKAN)
+    // Test discovering the Metal backend, then the Vulkan backend
+    // does not duplicate adapters.
+    TEST(AdapterDiscoveryTests, OneBackendThenTheOther) {
+        dawn::native::Instance instance;
+        uint32_t metalAdapterCount = 0;
+        {
+            dawn::native::metal::AdapterDiscoveryOptions options;
+            instance.DiscoverAdapters(&options);
+
+            const auto& adapters = instance.GetAdapters();
+            metalAdapterCount = adapters.size();
+            for (const auto& adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                ASSERT_EQ(properties.backendType, wgpu::BackendType::Metal);
+            }
+        }
+        {
+            dawn::native::vulkan::AdapterDiscoveryOptions options;
+            instance.DiscoverAdapters(&options);
+
+            uint32_t metalAdapterCount2 = 0;
+            const auto& adapters = instance.GetAdapters();
+            for (const auto& adapter : adapters) {
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+
+                EXPECT_TRUE(properties.backendType == wgpu::BackendType::Metal ||
+                            properties.backendType == wgpu::BackendType::Vulkan);
+                if (properties.backendType == wgpu::BackendType::Metal) {
+                    metalAdapterCount2++;
+                }
+            }
+            EXPECT_EQ(metalAdapterCount, metalAdapterCount2);
+        }
+    }
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN) && defined(DAWN_ENABLE_BACKEND_METAL)
+
+    class AdapterCreationTest : public ::testing::Test {
+      protected:
+        void SetUp() override {
+            dawnProcSetProcs(&dawn_native::GetProcs());
+
+            {
+                auto nativeInstance = std::make_unique<dawn_native::Instance>();
+                nativeInstance->DiscoverDefaultAdapters();
+                for (dawn_native::Adapter& nativeAdapter : nativeInstance->GetAdapters()) {
+                    anyAdapterAvailable = true;
+
+                    wgpu::AdapterProperties properties;
+                    nativeAdapter.GetProperties(&properties);
+                    swiftShaderAvailable =
+                        swiftShaderAvailable ||
+                        gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID);
+                    discreteGPUAvailable = discreteGPUAvailable ||
+                                           properties.adapterType == wgpu::AdapterType::DiscreteGPU;
+                    integratedGPUAvailable =
+                        integratedGPUAvailable ||
+                        properties.adapterType == wgpu::AdapterType::IntegratedGPU;
+                }
+            }
+
+            instance = wgpu::CreateInstance();
+        }
+
+        void TearDown() override {
+            instance = nullptr;
+            dawnProcSetProcs(nullptr);
+        }
+
+        wgpu::Instance instance;
+        bool anyAdapterAvailable = false;
+        bool swiftShaderAvailable = false;
+        bool discreteGPUAvailable = false;
+        bool integratedGPUAvailable = false;
+    };
+
+    // Test that requesting the default adapter works
+    TEST_F(AdapterCreationTest, DefaultAdapter) {
+        wgpu::RequestAdapterOptions options = {};
+
+        MockCallback<WGPURequestAdapterCallback> cb;
+
+        WGPUAdapter cAdapter = nullptr;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+    }
+
+    // Test that passing nullptr for the options gets the default adapter
+    TEST_F(AdapterCreationTest, NullGivesDefaultAdapter) {
+        wgpu::RequestAdapterOptions options = {};
+
+        MockCallback<WGPURequestAdapterCallback> cb;
+
+        WGPUAdapter cAdapter = nullptr;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this + 1))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(nullptr, cb.Callback(), cb.MakeUserdata(this + 1));
+
+        wgpu::Adapter adapter2 = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter.Get(), adapter2.Get());
+    }
+
+    // Test that requesting the fallback adapter returns SwiftShader.
+    TEST_F(AdapterCreationTest, FallbackAdapter) {
+        wgpu::RequestAdapterOptions options = {};
+        options.forceFallbackAdapter = true;
+
+        MockCallback<WGPURequestAdapterCallback> cb;
+
+        WGPUAdapter cAdapter = nullptr;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter != nullptr, swiftShaderAvailable);
+        if (adapter != nullptr) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
+        }
+    }
+
+    // Test that requesting a high performance GPU works
+    TEST_F(AdapterCreationTest, PreferHighPerformance) {
+        wgpu::RequestAdapterOptions options = {};
+        options.powerPreference = wgpu::PowerPreference::HighPerformance;
+
+        MockCallback<WGPURequestAdapterCallback> cb;
+
+        WGPUAdapter cAdapter = nullptr;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+        if (discreteGPUAvailable) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::DiscreteGPU);
+        }
+    }
+
+    // Test that requesting a low power GPU works
+    TEST_F(AdapterCreationTest, PreferLowPower) {
+        wgpu::RequestAdapterOptions options = {};
+        options.powerPreference = wgpu::PowerPreference::LowPower;
+
+        MockCallback<WGPURequestAdapterCallback> cb;
+
+        WGPUAdapter cAdapter = nullptr;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
+        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+        if (integratedGPUAvailable) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::IntegratedGPU);
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/end2end/BasicTests.cpp b/src/dawn/tests/end2end/BasicTests.cpp
new file mode 100644
index 0000000..4193fc2
--- /dev/null
+++ b/src/dawn/tests/end2end/BasicTests.cpp
@@ -0,0 +1,66 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class BasicTests : public DawnTest {};
+
+// Test adapter filter by vendor id.
+TEST_P(BasicTests, VendorIdFilter) {
+    DAWN_TEST_UNSUPPORTED_IF(!HasVendorIdFilter());
+
+    ASSERT_EQ(GetAdapterProperties().vendorID, GetVendorIdFilter());
+}
+
+// Test adapter filter by backend type.
+TEST_P(BasicTests, BackendType) {
+    DAWN_TEST_UNSUPPORTED_IF(!HasBackendTypeFilter());
+
+    ASSERT_EQ(GetAdapterProperties().backendType, GetBackendTypeFilter());
+}
+
+// Test Queue::WriteBuffer changes the content of the buffer, but really this is the most
+// basic test possible, and tests the test harness
+TEST_P(BasicTests, QueueWriteBuffer) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint32_t value = 0x01020304;
+    queue.WriteBuffer(buffer, 0, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
+}
+
+// Test a validation error for Queue::WriteBuffer but really this is the most basic test possible
+// for ASSERT_DEVICE_ERROR
+TEST_P(BasicTests, QueueWriteBufferError) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint8_t value = 187;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 1000, &value, sizeof(value)));
+}
+
+DAWN_INSTANTIATE_TEST(BasicTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/BindGroupTests.cpp b/src/dawn/tests/end2end/BindGroupTests.cpp
new file mode 100644
index 0000000..3c576a9
--- /dev/null
+++ b/src/dawn/tests/end2end/BindGroupTests.cpp
@@ -0,0 +1,1656 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static uint32_t kRTSize = 8;
+
+class BindGroupTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        mMinUniformBufferOffsetAlignment =
+            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+    }
+    wgpu::CommandBuffer CreateSimpleComputeCommandBuffer(const wgpu::ComputePipeline& pipeline,
+                                                         const wgpu::BindGroup& bindGroup) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+        return encoder.Finish();
+    }
+
+    wgpu::PipelineLayout MakeBasicPipelineLayout(
+        std::vector<wgpu::BindGroupLayout> bindingInitializer) const {
+        wgpu::PipelineLayoutDescriptor descriptor;
+
+        descriptor.bindGroupLayoutCount = bindingInitializer.size();
+        descriptor.bindGroupLayouts = bindingInitializer.data();
+
+        return device.CreatePipelineLayout(&descriptor);
+    }
+
+    wgpu::ShaderModule MakeSimpleVSModule() const {
+        return utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+             var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+    }
+
+    wgpu::ShaderModule MakeFSModule(std::vector<wgpu::BufferBindingType> bindingTypes) const {
+        ASSERT(bindingTypes.size() <= kMaxBindGroups);
+
+        std::ostringstream fs;
+        for (size_t i = 0; i < bindingTypes.size(); ++i) {
+            fs << "struct Buffer" << i << R"( {
+                color : vec4<f32>
+            })";
+
+            switch (bindingTypes[i]) {
+                case wgpu::BufferBindingType::Uniform:
+                    fs << "\n@group(" << i << ") @binding(0) var<uniform> buffer" << i
+                       << " : Buffer" << i << ";";
+                    break;
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    fs << "\n@group(" << i << ") @binding(0) var<storage, read> buffer" << i
+                       << " : Buffer" << i << ";";
+                    break;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        fs << "\n@stage(fragment) fn main() -> @location(0) vec4<f32>{\n";
+        fs << "var fragColor : vec4<f32> = vec4<f32>();\n";
+        for (size_t i = 0; i < bindingTypes.size(); ++i) {
+            fs << "fragColor = fragColor + buffer" << i << ".color;\n";
+        }
+        fs << "return fragColor;\n";
+        fs << "}\n";
+        return utils::CreateShaderModule(device, fs.str().c_str());
+    }
+
+    wgpu::RenderPipeline MakeTestPipeline(const utils::BasicRenderPass& renderPass,
+                                          std::vector<wgpu::BufferBindingType> bindingTypes,
+                                          std::vector<wgpu::BindGroupLayout> bindGroupLayouts) {
+        wgpu::ShaderModule vsModule = MakeSimpleVSModule();
+        wgpu::ShaderModule fsModule = MakeFSModule(bindingTypes);
+
+        wgpu::PipelineLayout pipelineLayout = MakeBasicPipelineLayout(bindGroupLayouts);
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.layout = pipelineLayout;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+        wgpu::BlendState blend;
+        blend.color.operation = wgpu::BlendOperation::Add;
+        blend.color.srcFactor = wgpu::BlendFactor::One;
+        blend.color.dstFactor = wgpu::BlendFactor::One;
+        blend.alpha.operation = wgpu::BlendOperation::Add;
+        blend.alpha.srcFactor = wgpu::BlendFactor::One;
+        blend.alpha.dstFactor = wgpu::BlendFactor::One;
+
+        pipelineDescriptor.cTargets[0].blend = &blend;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    uint32_t mMinUniformBufferOffsetAlignment;
+};
+
+// Test a bindgroup reused in two command buffers in the same call to queue.Submit().
+// This test passes by not asserting or crashing.
+TEST_P(BindGroupTests, ReusedBindGroupSingleSubmit) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Contents {
+            f : f32
+        }
+        @group(0) @binding(0) var <uniform> contents: Contents;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+          var f : f32 = contents.f;
+        })");
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.compute.module = module;
+    cpDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = sizeof(float);
+    bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, cp.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandBuffer cb[2];
+    cb[0] = CreateSimpleComputeCommandBuffer(cp, bindGroup);
+    cb[1] = CreateSimpleComputeCommandBuffer(cp, bindGroup);
+    queue.Submit(2, cb);
+}
+
+// Test a bindgroup containing a UBO which is used in both the vertex and fragment shader.
+// It contains a transformation matrix for the VS and the fragment color for the FS.
+// These must result in different register offsets in the native APIs.
+TEST_P(BindGroupTests, ReusedUBO) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        // TODO(crbug.com/tint/369): Use a mat2x2 when Tint translates it correctly.
+        struct VertexUniformBuffer {
+            transform : vec4<f32>
+        }
+
+        @group(0) @binding(0) var <uniform> vertexUbo : VertexUniformBuffer;
+
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            var transform = mat2x2<f32>(vertexUbo.transform.xy, vertexUbo.transform.zw);
+            return vec4<f32>(transform * pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct FragmentUniformBuffer {
+            color : vec4<f32>
+        }
+        @group(0) @binding(1) var <uniform> fragmentUbo : FragmentUniformBuffer;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return fragmentUbo.color;
+        })");
+
+    utils::ComboRenderPipelineDescriptor textureDescriptor;
+    textureDescriptor.vertex.module = vsModule;
+    textureDescriptor.cFragment.module = fsModule;
+    textureDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&textureDescriptor);
+
+    struct Data {
+        float transform[8];
+        char padding[256 - 8 * sizeof(float)];
+        float color[4];
+    };
+    ASSERT(offsetof(Data, color) == 256);
+    Data data{
+        {1.f, 0.f, 0.f, 1.0f},
+        {0},
+        {0.f, 1.f, 0.f, 1.f},
+    };
+    wgpu::Buffer buffer =
+        utils::CreateBufferFromData(device, &data, sizeof(data), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0),
+        {{0, buffer, 0, sizeof(Data::transform)}, {1, buffer, 256, sizeof(Data::color)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Test a bindgroup containing a UBO in the vertex shader and a sampler and texture in the fragment
+// shader. In D3D12 for example, these different types of bindings end up in different namespaces,
+// but the register offsets used must match between the shader module and descriptor range.
+TEST_P(BindGroupTests, UBOSamplerAndTexture) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        // TODO(crbug.com/tint/369): Use a mat2x2 when Tint translates it correctly.
+        struct VertexUniformBuffer {
+            transform : vec4<f32>
+        }
+        @group(0) @binding(0) var <uniform> vertexUbo : VertexUniformBuffer;
+
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            var transform = mat2x2<f32>(vertexUbo.transform.xy, vertexUbo.transform.zw);
+            return vec4<f32>(transform * pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(1) var samp : sampler;
+        @group(0) @binding(2) var tex : texture_2d<f32>;
+
+        @stage(fragment)
+        fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+            return textureSample(tex, samp, FragCoord.xy);
+        })");
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.cFragment.module = fsModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
+    wgpu::Buffer buffer = utils::CreateBufferFromData(device, &transform, sizeof(transform),
+                                                      wgpu::BufferUsage::Uniform);
+
+    wgpu::SamplerDescriptor samplerDescriptor = {};
+    samplerDescriptor.minFilter = wgpu::FilterMode::Nearest;
+    samplerDescriptor.magFilter = wgpu::FilterMode::Nearest;
+    samplerDescriptor.mipmapFilter = wgpu::FilterMode::Nearest;
+    samplerDescriptor.addressModeU = wgpu::AddressMode::ClampToEdge;
+    samplerDescriptor.addressModeV = wgpu::AddressMode::ClampToEdge;
+    samplerDescriptor.addressModeW = wgpu::AddressMode::ClampToEdge;
+
+    wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = kRTSize;
+    descriptor.size.height = kRTSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    uint32_t width = kRTSize, height = kRTSize;
+    uint32_t widthInBytes = width * sizeof(RGBA8);
+    widthInBytes = (widthInBytes + 255) & ~255;
+    uint32_t sizeInBytes = widthInBytes * height;
+    uint32_t size = sizeInBytes / sizeof(RGBA8);
+    std::vector<RGBA8> data = std::vector<RGBA8>(size);
+    for (uint32_t i = 0; i < size; i++) {
+        data[i] = RGBA8(0, 255, 0, 255);
+    }
+    wgpu::Buffer stagingBuffer =
+        utils::CreateBufferFromData(device, data.data(), sizeInBytes, wgpu::BufferUsage::CopySrc);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                             {{0, buffer, 0, sizeof(transform)}, {1, sampler}, {2, textureView}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, widthInBytes);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {width, height, 1};
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+TEST_P(BindGroupTests, MultipleBindLayouts) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        // TODO(crbug.com/tint/369): Use a mat2x2 when Tint translates it correctly.
+        struct VertexUniformBuffer {
+            transform : vec4<f32>
+        }
+
+        @group(0) @binding(0) var <uniform> vertexUbo1 : VertexUniformBuffer;
+        @group(1) @binding(0) var <uniform> vertexUbo2 : VertexUniformBuffer;
+
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            return vec4<f32>(mat2x2<f32>(
+                vertexUbo1.transform.xy + vertexUbo2.transform.xy,
+                vertexUbo1.transform.zw + vertexUbo2.transform.zw
+            ) * pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct FragmentUniformBuffer {
+            color : vec4<f32>
+        }
+
+        @group(0) @binding(1) var <uniform> fragmentUbo1 : FragmentUniformBuffer;
+        @group(1) @binding(1) var <uniform> fragmentUbo2 : FragmentUniformBuffer;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return fragmentUbo1.color + fragmentUbo2.color;
+        })");
+
+    utils::ComboRenderPipelineDescriptor textureDescriptor;
+    textureDescriptor.vertex.module = vsModule;
+    textureDescriptor.cFragment.module = fsModule;
+    textureDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&textureDescriptor);
+
+    struct Data {
+        float transform[4];
+        char padding[256 - 4 * sizeof(float)];
+        float color[4];
+    };
+    ASSERT(offsetof(Data, color) == 256);
+
+    std::vector<Data> data;
+    std::vector<wgpu::Buffer> buffers;
+    std::vector<wgpu::BindGroup> bindGroups;
+
+    data.push_back({{1.0f, 0.0f, 0.0f, 0.0f}, {0}, {0.0f, 1.0f, 0.0f, 1.0f}});
+
+    data.push_back({{0.0f, 0.0f, 0.0f, 1.0f}, {0}, {1.0f, 0.0f, 0.0f, 1.0f}});
+
+    for (int i = 0; i < 2; i++) {
+        wgpu::Buffer buffer =
+            utils::CreateBufferFromData(device, &data[i], sizeof(Data), wgpu::BufferUsage::Uniform);
+        buffers.push_back(buffer);
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, buffers[i], 0, sizeof(Data::transform)},
+                                                   {1, buffers[i], 256, sizeof(Data::color)}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroups[0]);
+    pass.SetBindGroup(1, bindGroups[1]);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(255, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// This is a regression test for crbug.com/dawn/1170 that tests a module that contains multiple
+// entry points, using non-zero binding groups. This has the potential to cause problems when we
+// only remap bindings for one entry point, as the remaining unmapped binding numbers may be invalid
+// for certain backends.
+// This test passes by not asserting or crashing.
+TEST_P(BindGroupTests, MultipleEntryPointsWithMultipleNonZeroGroups) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Contents {
+            f : f32
+        }
+        @group(0) @binding(0) var <uniform> contents0: Contents;
+        @group(1) @binding(0) var <uniform> contents1: Contents;
+        @group(2) @binding(0) var <uniform> contents2: Contents;
+
+        @stage(compute) @workgroup_size(1) fn main0() {
+          var a : f32 = contents0.f;
+        }
+
+        @stage(compute) @workgroup_size(1) fn main1() {
+          var a : f32 = contents1.f;
+          var b : f32 = contents2.f;
+        }
+
+        @stage(compute) @workgroup_size(1) fn main2() {
+          var a : f32 = contents0.f;
+          var b : f32 = contents1.f;
+          var c : f32 = contents2.f;
+        })");
+
+    // main0: bind (0,0)
+    {
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.compute.module = module;
+        cpDesc.compute.entryPoint = "main0";
+        wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(float);
+        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+        wgpu::Buffer buffer0 = device.CreateBuffer(&bufferDesc);
+        wgpu::BindGroup bindGroup0 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(0), {{0, buffer0}});
+
+        wgpu::CommandBuffer cb;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bindGroup0);
+        pass.Dispatch(1);
+        pass.End();
+        cb = encoder.Finish();
+        queue.Submit(1, &cb);
+    }
+
+    // main1: bind (1,0) and (2,0)
+    {
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.compute.module = module;
+        cpDesc.compute.entryPoint = "main1";
+        wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(float);
+        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+        wgpu::Buffer buffer1 = device.CreateBuffer(&bufferDesc);
+        wgpu::Buffer buffer2 = device.CreateBuffer(&bufferDesc);
+        wgpu::BindGroup bindGroup0 = utils::MakeBindGroup(device, cp.GetBindGroupLayout(0), {});
+        wgpu::BindGroup bindGroup1 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(1), {{0, buffer1}});
+        wgpu::BindGroup bindGroup2 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(2), {{0, buffer2}});
+
+        wgpu::CommandBuffer cb;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bindGroup0);
+        pass.SetBindGroup(1, bindGroup1);
+        pass.SetBindGroup(2, bindGroup2);
+        pass.Dispatch(1);
+        pass.End();
+        cb = encoder.Finish();
+        queue.Submit(1, &cb);
+    }
+
+    // main2: bind (0,0), (1,0), and (2,0)
+    {
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.compute.module = module;
+        cpDesc.compute.entryPoint = "main2";
+        wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(float);
+        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+        wgpu::Buffer buffer0 = device.CreateBuffer(&bufferDesc);
+        wgpu::Buffer buffer1 = device.CreateBuffer(&bufferDesc);
+        wgpu::Buffer buffer2 = device.CreateBuffer(&bufferDesc);
+        wgpu::BindGroup bindGroup0 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(0), {{0, buffer0}});
+        wgpu::BindGroup bindGroup1 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(1), {{0, buffer1}});
+        wgpu::BindGroup bindGroup2 =
+            utils::MakeBindGroup(device, cp.GetBindGroupLayout(2), {{0, buffer2}});
+
+        wgpu::CommandBuffer cb;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bindGroup0);
+        pass.SetBindGroup(1, bindGroup1);
+        pass.SetBindGroup(2, bindGroup2);
+        pass.Dispatch(1);
+        pass.End();
+        cb = encoder.Finish();
+        queue.Submit(1, &cb);
+    }
+}
+
+// This test reproduces an out-of-bound bug on D3D12 backends when calling draw command twice with
+// one pipeline that has 4 bind group sets in one render pass.
+TEST_P(BindGroupTests, DrawTwiceInSamePipelineWithFourBindGroupSets) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(renderPass,
+                         {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform,
+                          wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform},
+                         {layout, layout, layout, layout});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    pass.SetPipeline(pipeline);
+
+    // The color will be added 8 times, so the value should be 0.125. But we choose 0.126
+    // because of precision issues on some devices (for example NVIDIA bots).
+    std::array<float, 4> color = {0.126, 0, 0, 0.126};
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, layout, {{0, uniformBuffer, 0, sizeof(color)}});
+
+    pass.SetBindGroup(0, bindGroup);
+    pass.SetBindGroup(1, bindGroup);
+    pass.SetBindGroup(2, bindGroup);
+    pass.SetBindGroup(3, bindGroup);
+    pass.Draw(3);
+
+    pass.SetPipeline(pipeline);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(255, 0, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Test that bind groups can be set before the pipeline.
+TEST_P(BindGroupTests, SetBindGroupBeforePipeline) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Create a bind group layout which uses a single uniform buffer.
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+
+    // Create a pipeline that uses the uniform bind group layout.
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(renderPass, {wgpu::BufferBindingType::Uniform}, {layout});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    // Create a bind group with a uniform buffer and fill it with RGBAunorm(1, 0, 0, 1).
+    std::array<float, 4> color = {1, 0, 0, 1};
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, layout, {{0, uniformBuffer, 0, sizeof(color)}});
+
+    // Set the bind group, then the pipeline, and draw.
+    pass.SetBindGroup(0, bindGroup);
+    pass.SetPipeline(pipeline);
+    pass.Draw(3);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The result should be red.
+    RGBA8 filled(255, 0, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Test that dynamic bind groups can be set before the pipeline.
+TEST_P(BindGroupTests, SetDynamicBindGroupBeforePipeline) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Create a bind group layout which uses a single dynamic uniform buffer.
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+
+    // Create a pipeline that uses the dynamic uniform bind group layout for two bind groups.
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(
+        renderPass, {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform},
+        {layout, layout});
+
+    // Prepare data RGBAunorm(1, 0, 0, 0.5) and RGBAunorm(0, 1, 0, 0.5). They will be added in the
+    // shader.
+    std::array<float, 4> color0 = {1, 0, 0, 0.501};
+    std::array<float, 4> color1 = {0, 1, 0, 0.501};
+
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
+
+    std::vector<uint8_t> data(color1Offset + sizeof(color1));
+    memcpy(data.data(), color0.data(), sizeof(color0));
+    memcpy(data.data() + color1Offset, color1.data(), sizeof(color1));
+
+    // Create a bind group and uniform buffer with the color data. It will be bound at the offset
+    // to each color.
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, layout, {{0, uniformBuffer, 0, 4 * sizeof(float)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    // Set the first dynamic bind group.
+    uint32_t dynamicOffset = 0;
+    pass.SetBindGroup(0, bindGroup, 1, &dynamicOffset);
+
+    // Set the second dynamic bind group.
+    dynamicOffset = color1Offset;
+    pass.SetBindGroup(1, bindGroup, 1, &dynamicOffset);
+
+    // Set the pipeline and draw.
+    pass.SetPipeline(pipeline);
+    pass.Draw(3);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The result should be RGBAunorm(1, 0, 0, 0.5) + RGBAunorm(0, 1, 0, 0.5)
+    RGBA8 filled(255, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Test that bind groups set for one pipeline are still set when the pipeline changes.
+TEST_P(BindGroupTests, BindGroupsPersistAfterPipelineChange) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Create a bind group layout which uses a single dynamic uniform buffer.
+    wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+
+    // Create a bind group layout which uses a single dynamic storage buffer.
+    wgpu::BindGroupLayout storageLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true}});
+
+    // Create a pipeline which uses the uniform buffer and storage buffer bind groups.
+    wgpu::RenderPipeline pipeline0 = MakeTestPipeline(
+        renderPass, {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::ReadOnlyStorage},
+        {uniformLayout, storageLayout});
+
+    // Create a pipeline which uses the uniform buffer bind group twice.
+    wgpu::RenderPipeline pipeline1 = MakeTestPipeline(
+        renderPass, {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform},
+        {uniformLayout, uniformLayout});
+
+    // Prepare data RGBAunorm(1, 0, 0, 0.5) and RGBAunorm(0, 1, 0, 0.5). They will be added in the
+    // shader.
+    std::array<float, 4> color0 = {1, 0, 0, 0.5};
+    std::array<float, 4> color1 = {0, 1, 0, 0.5};
+
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
+
+    std::vector<uint8_t> data(color1Offset + sizeof(color1));
+    memcpy(data.data(), color0.data(), sizeof(color0));
+    memcpy(data.data() + color1Offset, color1.data(), sizeof(color1));
+
+    // Create a bind group and uniform buffer with the color data. It will be bound at the offset
+    // to each color.
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, uniformLayout, {{0, uniformBuffer, 0, 4 * sizeof(float)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    // Set the first pipeline (uniform, storage).
+    pass.SetPipeline(pipeline0);
+
+    // Set the first bind group at a dynamic offset.
+    // This bind group matches the slot in the pipeline layout.
+    uint32_t dynamicOffset = 0;
+    pass.SetBindGroup(0, bindGroup, 1, &dynamicOffset);
+
+    // Set the second bind group at a dynamic offset.
+    // This bind group does not match the slot in the pipeline layout.
+    dynamicOffset = color1Offset;
+    pass.SetBindGroup(1, bindGroup, 1, &dynamicOffset);
+
+    // Set the second pipeline (uniform, uniform).
+    // Both bind groups match the pipeline.
+    // They should persist and not need to be bound again.
+    pass.SetPipeline(pipeline1);
+    pass.Draw(3);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The result should be RGBAunorm(1, 0, 0, 0.5) + RGBAunorm(0, 1, 0, 0.5)
+    RGBA8 filled(255, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Do a successful draw. Then, change the pipeline and one bind group.
+// Draw to check that the all bind groups are set.
+TEST_P(BindGroupTests, DrawThenChangePipelineAndBindGroup) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Create a bind group layout which uses a single dynamic uniform buffer.
+    wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+
+    // Create a bind group layout which uses a single dynamic storage buffer.
+    wgpu::BindGroupLayout storageLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true}});
+
+    // Create a pipeline with pipeline layout (uniform, uniform, storage).
+    wgpu::RenderPipeline pipeline0 =
+        MakeTestPipeline(renderPass,
+                         {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform,
+                          wgpu::BufferBindingType::ReadOnlyStorage},
+                         {uniformLayout, uniformLayout, storageLayout});
+
+    // Create a pipeline with pipeline layout (uniform, storage, storage).
+    wgpu::RenderPipeline pipeline1 = MakeTestPipeline(
+        renderPass,
+        {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::ReadOnlyStorage,
+         wgpu::BufferBindingType::ReadOnlyStorage},
+        {uniformLayout, storageLayout, storageLayout});
+
+    // Prepare color data.
+    // The first draw will use { color0, color1, color2 }.
+    // The second draw will use { color0, color3, color2 }.
+    // The pipeline uses additive color and alpha blending so the result of two draws should be
+    // { 2 * color0 + color1 + 2 * color2 + color3} = RGBAunorm(1, 1, 1, 1)
+    std::array<float, 4> color0 = {0.501, 0, 0, 0};
+    std::array<float, 4> color1 = {0, 1, 0, 0};
+    std::array<float, 4> color2 = {0, 0, 0, 0.501};
+    std::array<float, 4> color3 = {0, 0, 1, 0};
+
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
+    size_t color2Offset = Align(color1Offset + sizeof(color1), mMinUniformBufferOffsetAlignment);
+    size_t color3Offset = Align(color2Offset + sizeof(color2), mMinUniformBufferOffsetAlignment);
+
+    std::vector<uint8_t> data(color3Offset + sizeof(color3), 0);
+    memcpy(data.data(), color0.data(), sizeof(color0));
+    memcpy(data.data() + color1Offset, color1.data(), sizeof(color1));
+    memcpy(data.data() + color2Offset, color2.data(), sizeof(color2));
+    memcpy(data.data() + color3Offset, color3.data(), sizeof(color3));
+
+    // Create a uniform and storage buffer bind groups to bind the color data.
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::Uniform);
+
+    wgpu::Buffer storageBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroup uniformBindGroup =
+        utils::MakeBindGroup(device, uniformLayout, {{0, uniformBuffer, 0, 4 * sizeof(float)}});
+    wgpu::BindGroup storageBindGroup =
+        utils::MakeBindGroup(device, storageLayout, {{0, storageBuffer, 0, 4 * sizeof(float)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    // Set the pipeline to (uniform, uniform, storage)
+    pass.SetPipeline(pipeline0);
+
+    // Set the first bind group to color0 in the dynamic uniform buffer.
+    uint32_t dynamicOffset = 0;
+    pass.SetBindGroup(0, uniformBindGroup, 1, &dynamicOffset);
+
+    // Set the first bind group to color1 in the dynamic uniform buffer.
+    dynamicOffset = color1Offset;
+    pass.SetBindGroup(1, uniformBindGroup, 1, &dynamicOffset);
+
+    // Set the first bind group to color2 in the dynamic storage buffer.
+    dynamicOffset = color2Offset;
+    pass.SetBindGroup(2, storageBindGroup, 1, &dynamicOffset);
+
+    pass.Draw(3);
+
+    // Set the pipeline to (uniform, storage, storage)
+    //  - The first bind group should persist (inherited on some backends)
+    //  - The second bind group needs to be set again to pass validation.
+    //    It changed from uniform to storage.
+    //  - The third bind group should persist. It should be set again by the backend internally.
+    pass.SetPipeline(pipeline1);
+
+    // Set the second bind group to color3 in the dynamic storage buffer.
+    dynamicOffset = color3Offset;
+    pass.SetBindGroup(1, storageBindGroup, 1, &dynamicOffset);
+
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(255, 255, 255, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Test for crbug.com/dawn/1049, where setting a pipeline without drawing can prevent
+// bind groups from being applied later
+TEST_P(BindGroupTests, DrawThenChangePipelineTwiceAndBindGroup) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Create a bind group layout which uses a single dynamic uniform buffer.
+    wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+
+    // Create a pipeline with pipeline layout (uniform, uniform, uniform).
+    wgpu::RenderPipeline pipeline0 =
+        MakeTestPipeline(renderPass,
+                         {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform,
+                          wgpu::BufferBindingType::Uniform},
+                         {uniformLayout, uniformLayout, uniformLayout});
+
+    // Create a pipeline with pipeline layout (uniform).
+    wgpu::RenderPipeline pipeline1 = MakeTestPipeline(
+        renderPass, {wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform},
+        {uniformLayout, uniformLayout});
+
+    // Prepare color data.
+    // The first draw will use { color0, color1, color2 }.
+    // The second draw will use { color0, color1, color3 }.
+    // The pipeline uses additive color and alpha so the result of two draws should be
+    // { 2 * color0 + 2 * color1 + color2 + color3} = RGBAunorm(1, 1, 1, 1)
+    std::array<float, 4> color0 = {0.501, 0, 0, 0};
+    std::array<float, 4> color1 = {0, 0.501, 0, 0};
+    std::array<float, 4> color2 = {0, 0, 1, 0};
+    std::array<float, 4> color3 = {0, 0, 0, 1};
+
+    size_t color0Offset = 0;
+    size_t color1Offset = Align(color0Offset + sizeof(color0), mMinUniformBufferOffsetAlignment);
+    size_t color2Offset = Align(color1Offset + sizeof(color1), mMinUniformBufferOffsetAlignment);
+    size_t color3Offset = Align(color2Offset + sizeof(color2), mMinUniformBufferOffsetAlignment);
+
+    std::vector<uint8_t> data(color3Offset + sizeof(color3), 0);
+    memcpy(data.data(), color0.data(), sizeof(color0));
+    memcpy(data.data() + color1Offset, color1.data(), sizeof(color1));
+    memcpy(data.data() + color2Offset, color2.data(), sizeof(color2));
+    memcpy(data.data() + color3Offset, color3.data(), sizeof(color3));
+
+    // Create a uniform and storage buffer bind groups to bind the color data.
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::Uniform);
+
+    wgpu::BindGroup uniformBindGroup =
+        utils::MakeBindGroup(device, uniformLayout, {{0, uniformBuffer, 0, 4 * sizeof(float)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    // Set the pipeline to (uniform, uniform, uniform)
+    pass.SetPipeline(pipeline0);
+
+    // Set the first bind group to color0 in the dynamic uniform buffer.
+    uint32_t dynamicOffset = color0Offset;
+    pass.SetBindGroup(0, uniformBindGroup, 1, &dynamicOffset);
+
+    // Set the first bind group to color1 in the dynamic uniform buffer.
+    dynamicOffset = color1Offset;
+    pass.SetBindGroup(1, uniformBindGroup, 1, &dynamicOffset);
+
+    // Set the first bind group to color2 in the dynamic uniform buffer.
+    dynamicOffset = color2Offset;
+    pass.SetBindGroup(2, uniformBindGroup, 1, &dynamicOffset);
+
+    // This draw will internally apply bind groups for pipeline 0.
+    pass.Draw(3);
+
+    // When we set pipeline 1, which has no bind group at index 2 in its layout, it
+    // should not prevent bind group 2 from being used after reverting to pipeline 0.
+    // More specifically, internally the pipeline 1 layout should not be saved,
+    // because we never applied the bind groups via a Draw or Dispatch.
+    pass.SetPipeline(pipeline1);
+
+    // Set the second bind group to color3 in the dynamic uniform buffer.
+    dynamicOffset = color3Offset;
+    pass.SetBindGroup(2, uniformBindGroup, 1, &dynamicOffset);
+
+    // Revert to pipeline 0
+    pass.SetPipeline(pipeline0);
+
+    // Internally this should re-apply bind group 2. Because we already
+    // drew with this pipeline, and setting pipeline 1 did not dirty the bind groups,
+    // bind groups 0 and 1 should still be valid.
+    pass.Draw(3);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 filled(255, 255, 255, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, max, max);
+}
+
+// Regression test for crbug.com/dawn/408 where dynamic offsets were applied in the wrong order.
+// Dynamic offsets should be applied in increasing order of binding number.
+TEST_P(BindGroupTests, DynamicOffsetOrder) {
+    // We will put the following values and the respective offsets into a buffer.
+    // The test will ensure that the correct dynamic offset is applied to each buffer by reading the
+    // value from an offset binding.
+    std::array<uint32_t, 3> offsets = {3 * mMinUniformBufferOffsetAlignment,
+                                       1 * mMinUniformBufferOffsetAlignment,
+                                       2 * mMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 3> values = {21, 67, 32};
+
+    // Create three buffers large enough to by offset by the largest offset.
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 3 * mMinUniformBufferOffsetAlignment + sizeof(uint32_t);
+    bufferDescriptor.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer buffer0 = device.CreateBuffer(&bufferDescriptor);
+    wgpu::Buffer buffer3 = device.CreateBuffer(&bufferDescriptor);
+
+    // This test uses both storage and uniform buffers to ensure buffer bindings are sorted first by
+    // binding number before type.
+    bufferDescriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer2 = device.CreateBuffer(&bufferDescriptor);
+
+    // Populate the values
+    queue.WriteBuffer(buffer0, offsets[0], &values[0], sizeof(uint32_t));
+    queue.WriteBuffer(buffer2, offsets[1], &values[1], sizeof(uint32_t));
+    queue.WriteBuffer(buffer3, offsets[2], &values[2], sizeof(uint32_t));
+
+    wgpu::Buffer outputBuffer = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage, {0, 0, 0});
+
+    // Create the bind group and bind group layout.
+    // Note: The order of the binding numbers are intentionally different and not in increasing
+    // order.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {
+                    {3, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true},
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true},
+                    {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true},
+                    {4, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                });
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl,
+                                                     {
+                                                         {0, buffer0, 0, sizeof(uint32_t)},
+                                                         {3, buffer3, 0, sizeof(uint32_t)},
+                                                         {2, buffer2, 0, sizeof(uint32_t)},
+                                                         {4, outputBuffer, 0, 3 * sizeof(uint32_t)},
+                                                     });
+
+    wgpu::ComputePipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.compute.module = utils::CreateShaderModule(device, R"(
+        struct Buffer {
+            value : u32
+        }
+
+        struct OutputBuffer {
+            value : vec3<u32>
+        }
+
+        @group(0) @binding(2) var<uniform> buffer2 : Buffer;
+        @group(0) @binding(3) var<storage, read> buffer3 : Buffer;
+        @group(0) @binding(0) var<storage, read> buffer0 : Buffer;
+        @group(0) @binding(4) var<storage, read_write> outputBuffer : OutputBuffer;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            outputBuffer.value = vec3<u32>(buffer0.value, buffer2.value, buffer3.value);
+        })");
+    pipelineDescriptor.compute.entryPoint = "main";
+    pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDescriptor);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(values.data(), outputBuffer, 0, values.size());
+}
+
+// Test that ensures that backends do not remap bindings such that dynamic and non-dynamic bindings
+// conflict. This can happen if the backend treats dynamic bindings separately from non-dynamic
+// bindings.
+TEST_P(BindGroupTests, DynamicAndNonDynamicBindingsDoNotConflictAfterRemapping) {
+    // // TODO(crbug.com/dawn/1106): Test output is wrong on D3D12 using WARP.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    auto RunTestWith = [&](bool dynamicBufferFirst) {
+        uint32_t dynamicBufferBindingNumber = dynamicBufferFirst ? 0 : 1;
+        uint32_t bufferBindingNumber = dynamicBufferFirst ? 1 : 0;
+
+        std::array<uint32_t, 1> offsets{mMinUniformBufferOffsetAlignment};
+        std::array<uint32_t, 2> values = {21, 67};
+
+        // Create three buffers large enough to by offset by the largest offset.
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = 2 * mMinUniformBufferOffsetAlignment + sizeof(uint32_t);
+        bufferDescriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+
+        wgpu::Buffer dynamicBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+        // Populate the values
+        queue.WriteBuffer(dynamicBuffer, mMinUniformBufferOffsetAlignment,
+                          &values[dynamicBufferBindingNumber], sizeof(uint32_t));
+        queue.WriteBuffer(buffer, 0, &values[bufferBindingNumber], sizeof(uint32_t));
+
+        wgpu::Buffer outputBuffer = utils::CreateBufferFromData(
+            device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage, {0, 0});
+
+        // Create a bind group layout which uses a single dynamic uniform buffer.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {
+                {dynamicBufferBindingNumber, wgpu::ShaderStage::Compute,
+                 wgpu::BufferBindingType::Uniform, true},
+                {bufferBindingNumber, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+            });
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, bgl,
+            {
+                {dynamicBufferBindingNumber, dynamicBuffer, 0, sizeof(uint32_t)},
+                {bufferBindingNumber, buffer, 0, sizeof(uint32_t)},
+                {2, outputBuffer, 0, 2 * sizeof(uint32_t)},
+            });
+
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.compute.module = utils::CreateShaderModule(device, R"(
+        struct Buffer {
+            value : u32
+        }
+
+        struct OutputBuffer {
+            value : vec2<u32>
+        }
+
+        @group(0) @binding(0) var<uniform> buffer0 : Buffer;
+        @group(0) @binding(1) var<uniform> buffer1 : Buffer;
+        @group(0) @binding(2) var<storage, read_write> outputBuffer : OutputBuffer;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            outputBuffer.value = vec2<u32>(buffer0.value, buffer1.value);
+        })");
+        pipelineDescriptor.compute.entryPoint = "main";
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDescriptor);
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetPipeline(pipeline);
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER_U32_RANGE_EQ(values.data(), outputBuffer, 0, values.size());
+    };
+
+    // Run the test with the dynamic buffer in index 0 and with the non-dynamic buffer in index 1,
+    // and vice versa. This should cause a conflict at index 0, if the binding remapping is too
+    // aggressive.
+    RunTestWith(true);
+    RunTestWith(false);
+}
+
+// Test that visibility of bindings in BindGroupLayout can be none
+// This test passes by not asserting or crashing.
+TEST_P(BindGroupTests, BindGroupLayoutVisibilityCanBeNone) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::BindGroupLayoutEntry entry;
+    entry.binding = 0;
+    entry.visibility = wgpu::ShaderStage::None;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    wgpu::BindGroupLayout layout = device.CreateBindGroupLayout(&descriptor);
+
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(renderPass, {}, {layout});
+
+    std::array<float, 4> color = {1, 0, 0, 1};
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, layout, {{0, uniformBuffer, 0, sizeof(color)}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+// Regression test for crbug.com/dawn/448 that dynamic buffer bindings can have None visibility.
+TEST_P(BindGroupTests, DynamicBindingNoneVisibility) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::BindGroupLayoutEntry entry;
+    entry.binding = 0;
+    entry.visibility = wgpu::ShaderStage::None;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    entry.buffer.hasDynamicOffset = true;
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    wgpu::BindGroupLayout layout = device.CreateBindGroupLayout(&descriptor);
+
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(renderPass, {}, {layout});
+
+    std::array<float, 4> color = {1, 0, 0, 1};
+    wgpu::Buffer uniformBuffer =
+        utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, layout, {{0, uniformBuffer, 0, sizeof(color)}});
+
+    uint32_t dynamicOffset = 0;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup, 1, &dynamicOffset);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+// Test that bind group bindings may have unbounded and arbitrary binding numbers
+TEST_P(BindGroupTests, ArbitraryBindingNumbers) {
+    // TODO(crbug.com/dawn/736): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct Ubo {
+            color : vec4<f32>
+        }
+
+        @group(0) @binding(953) var <uniform> ubo1 : Ubo;
+        @group(0) @binding(47) var <uniform> ubo2 : Ubo;
+        @group(0) @binding(111) var <uniform> ubo3 : Ubo;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return ubo1.color + 2.0 * ubo2.color + 4.0 * ubo3.color;
+        })");
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.cFragment.module = fsModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    wgpu::Buffer black =
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {0.f, 0.f, 0.f, 0.f});
+    wgpu::Buffer red =
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {0.251f, 0.0f, 0.0f, 0.0f});
+    wgpu::Buffer green =
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {0.0f, 0.251f, 0.0f, 0.0f});
+    wgpu::Buffer blue =
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {0.0f, 0.0f, 0.251f, 0.0f});
+
+    auto DoTest = [&](wgpu::Buffer color1, wgpu::Buffer color2, wgpu::Buffer color3, RGBA8 filled) {
+        auto DoTestInner = [&](wgpu::BindGroup bindGroup) {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(3);
+            pass.End();
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+
+            EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 1);
+        };
+
+        utils::BindingInitializationHelper bindings[] = {
+            {953, color1, 0, 4 * sizeof(float)},  //
+            {47, color2, 0, 4 * sizeof(float)},   //
+            {111, color3, 0, 4 * sizeof(float)},  //
+        };
+
+        // Should work regardless of what order the bindings are specified in.
+        DoTestInner(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                         {bindings[0], bindings[1], bindings[2]}));
+        DoTestInner(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                         {bindings[1], bindings[0], bindings[2]}));
+        DoTestInner(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                         {bindings[2], bindings[0], bindings[1]}));
+    };
+
+    // first color is normal, second is 2x, third is 3x.
+    DoTest(black, black, black, RGBA8(0, 0, 0, 0));
+
+    // Check the first binding maps to the first slot. We know this because the colors are
+    // multiplied 1x.
+    DoTest(red, black, black, RGBA8(64, 0, 0, 0));
+    DoTest(green, black, black, RGBA8(0, 64, 0, 0));
+    DoTest(blue, black, black, RGBA8(0, 0, 64, 0));
+
+    // Use multiple bindings and check the second color maps to the second slot.
+    // We know this because the second slot is multiplied 2x.
+    DoTest(green, blue, black, RGBA8(0, 64, 128, 0));
+    DoTest(blue, green, black, RGBA8(0, 128, 64, 0));
+    DoTest(red, green, black, RGBA8(64, 128, 0, 0));
+
+    // Use multiple bindings and check the third color maps to the third slot.
+    // We know this because the third slot is multiplied 4x.
+    DoTest(black, blue, red, RGBA8(255, 0, 128, 0));
+    DoTest(blue, black, green, RGBA8(0, 255, 64, 0));
+    DoTest(red, black, blue, RGBA8(64, 0, 255, 0));
+}
+
+// This is a regression test for crbug.com/dawn/355 which tests that destruction of a bind group
+// that holds the last reference to its bind group layout does not result in a use-after-free. In
+// the bug, the destructor of BindGroupBase, when destroying member mLayout,
+// Ref<BindGroupLayoutBase> assigns to Ref::mPointee, AFTER calling Release(). After the BGL is
+// destroyed, the storage for |mPointee| has been freed.
+TEST_P(BindGroupTests, LastReferenceToBindGroupLayout) {
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = sizeof(float);
+    bufferDesc.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    wgpu::BindGroup bg;
+    {
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+        bg = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(float)}});
+    }
+}
+
+// Test that bind groups with an empty bind group layout may be created and used.
+TEST_P(BindGroupTests, EmptyLayout) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {});
+
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+    pipelineDesc.compute.entryPoint = "main";
+    pipelineDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bg);
+    pass.Dispatch(1);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+// Test creating a BGL with a storage buffer binding but declared readonly in the shader works.
+// This is a regression test for crbug.com/dawn/410 which tests that it can successfully compile and
+// execute the shader.
+TEST_P(BindGroupTests, ReadonlyStorage) {
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+    pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 3>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( 1.0, 1.0),
+                vec2<f32>(-1.0, -1.0));
+
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        struct Buffer0 {
+            color : vec4<f32>
+        }
+        @group(0) @binding(0) var<storage, read> buffer0 : Buffer0;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return buffer0.color;
+        })");
+
+    constexpr uint32_t kRTSize = 4;
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+
+    pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    std::array<float, 4> greenColor = {0, 1, 0, 1};
+    wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
+        device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Storage);
+
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, utils::MakeBindGroup(device, bgl, {{0, storageBuffer}}));
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+}
+
+// Test that creating a large bind group, with each binding type at the max count, works and can be
+// used correctly. The test loads a different value from each binding, and writes 1 to a storage
+// buffer if all values are correct.
+TEST_P(BindGroupTests, ReallyLargeBindGroup) {
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    std::ostringstream interface;
+    std::ostringstream body;
+    uint32_t binding = 0;
+    uint32_t expectedValue = 42;
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+    auto CreateTextureWithRedData = [&](wgpu::TextureFormat format, uint32_t value,
+                                        wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.usage = wgpu::TextureUsage::CopyDst | usage;
+        textureDesc.size = {1, 1, 1};
+        textureDesc.format = format;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        if (format == wgpu::TextureFormat::R8Unorm) {
+            ASSERT(expectedValue < 255u);
+        }
+        wgpu::Buffer textureData =
+            utils::CreateBufferFromData(device, wgpu::BufferUsage::CopySrc, {value});
+
+        wgpu::ImageCopyBuffer imageCopyBuffer = {};
+        imageCopyBuffer.buffer = textureData;
+        imageCopyBuffer.layout.bytesPerRow = 256;
+
+        wgpu::ImageCopyTexture imageCopyTexture = {};
+        imageCopyTexture.texture = texture;
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        commandEncoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+        return texture;
+    };
+
+    std::vector<wgpu::BindGroupEntry> bgEntries;
+    static_assert(kMaxSampledTexturesPerShaderStage == kMaxSamplersPerShaderStage,
+                  "Please update this test");
+    for (uint32_t i = 0; i < kMaxSampledTexturesPerShaderStage; ++i) {
+        wgpu::Texture texture = CreateTextureWithRedData(
+            wgpu::TextureFormat::R8Unorm, expectedValue, wgpu::TextureUsage::TextureBinding);
+        bgEntries.push_back({nullptr, binding, nullptr, 0, 0, nullptr, texture.CreateView()});
+
+        interface << "@group(0) @binding(" << binding++ << ") "
+                  << "var tex" << i << " : texture_2d<f32>;\n";
+
+        bgEntries.push_back({nullptr, binding, nullptr, 0, 0, device.CreateSampler(), nullptr});
+
+        interface << "@group(0) @binding(" << binding++ << ")"
+                  << "var samp" << i << " : sampler;\n";
+
+        body << "if (abs(textureSampleLevel(tex" << i << ", samp" << i
+             << ", vec2<f32>(0.5, 0.5), 0.0).r - " << expectedValue++
+             << ".0 / 255.0) > 0.0001) {\n";
+        body << "    return;\n";
+        body << "}\n";
+    }
+    for (uint32_t i = 0; i < kMaxStorageTexturesPerShaderStage; ++i) {
+        wgpu::Texture texture = CreateTextureWithRedData(
+            wgpu::TextureFormat::R32Uint, expectedValue, wgpu::TextureUsage::StorageBinding);
+        bgEntries.push_back({nullptr, binding, nullptr, 0, 0, nullptr, texture.CreateView()});
+
+        interface << "@group(0) @binding(" << binding++ << ") "
+                  << "var image" << i << " : texture_storage_2d<r32uint, write>;\n";
+
+        body << "_ = image" << i << ";";
+    }
+
+    for (uint32_t i = 0; i < kMaxUniformBuffersPerShaderStage; ++i) {
+        wgpu::Buffer buffer = utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::Uniform, {expectedValue, 0, 0, 0});
+        bgEntries.push_back({nullptr, binding, buffer, 0, 4 * sizeof(uint32_t), nullptr, nullptr});
+
+        interface << "struct UniformBuffer" << i << R"({
+                value : u32
+            }
+        )";
+        interface << "@group(0) @binding(" << binding++ << ") "
+                  << "var<uniform> ubuf" << i << " : UniformBuffer" << i << ";\n";
+
+        body << "if (ubuf" << i << ".value != " << expectedValue++ << "u) {\n";
+        body << "    return;\n";
+        body << "}\n";
+    }
+    // Save one storage buffer for writing the result
+    for (uint32_t i = 0; i < kMaxStorageBuffersPerShaderStage - 1; ++i) {
+        wgpu::Buffer buffer = utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::Storage, {expectedValue});
+        bgEntries.push_back({nullptr, binding, buffer, 0, sizeof(uint32_t), nullptr, nullptr});
+
+        interface << "struct ReadOnlyStorageBuffer" << i << R"({
+                value : u32
+            }
+        )";
+        interface << "@group(0) @binding(" << binding++ << ") "
+                  << "var<storage, read> sbuf" << i << " : ReadOnlyStorageBuffer" << i << ";\n";
+
+        body << "if (sbuf" << i << ".value != " << expectedValue++ << "u) {\n";
+        body << "    return;\n";
+        body << "}\n";
+    }
+
+    wgpu::Buffer result = utils::CreateBufferFromData<uint32_t>(
+        device, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc, {0});
+    bgEntries.push_back({nullptr, binding, result, 0, sizeof(uint32_t), nullptr, nullptr});
+
+    interface << R"(struct ReadWriteStorageBuffer{
+            value : u32
+        }
+    )";
+    interface << "@group(0) @binding(" << binding++ << ") "
+              << "var<storage, read_write> result : ReadWriteStorageBuffer;\n";
+
+    body << "result.value = 1u;\n";
+
+    std::string shader =
+        interface.str() + "@stage(compute) @workgroup_size(1) fn main() {\n" + body.str() + "}\n";
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.compute.module = utils::CreateShaderModule(device, shader.c_str());
+    cpDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::BindGroupDescriptor bgDesc = {};
+    bgDesc.layout = cp.GetBindGroupLayout(0);
+    bgDesc.entryCount = static_cast<uint32_t>(bgEntries.size());
+    bgDesc.entries = bgEntries.data();
+
+    wgpu::BindGroup bg = device.CreateBindGroup(&bgDesc);
+
+    wgpu::ComputePassEncoder pass = commandEncoder.BeginComputePass();
+    pass.SetPipeline(cp);
+    pass.SetBindGroup(0, bg);
+    pass.Dispatch(1, 1, 1);
+    pass.End();
+
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_EQ(1, result, 0);
+}
+
+// This is a regression test for crbug.com/dawn/319 where creating a bind group with a
+// destroyed resource would crash the backend.
+TEST_P(BindGroupTests, CreateWithDestroyedResource) {
+    auto doBufferTest = [&](wgpu::BufferBindingType bindingType, wgpu::BufferUsage usage) {
+        wgpu::BindGroupLayout bgl =
+            utils::MakeBindGroupLayout(device, {{0, wgpu::ShaderStage::Fragment, bindingType}});
+
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(float);
+        bufferDesc.usage = usage;
+        wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+        buffer.Destroy();
+
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(float)}});
+    };
+
+    // Test various usages and binding types since they take different backend code paths.
+    doBufferTest(wgpu::BufferBindingType::Uniform, wgpu::BufferUsage::Uniform);
+    doBufferTest(wgpu::BufferBindingType::Storage, wgpu::BufferUsage::Storage);
+    doBufferTest(wgpu::BufferBindingType::ReadOnlyStorage, wgpu::BufferUsage::Storage);
+
+    // Test a sampled texture.
+    {
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+        wgpu::TextureDescriptor textureDesc;
+        textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+        textureDesc.size = {1, 1, 1};
+        textureDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+
+        // Create view, then destroy.
+        {
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            wgpu::TextureView textureView = texture.CreateView();
+
+            texture.Destroy();
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, textureView}});
+        }
+        // Destroy, then create view.
+        {
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            texture.Destroy();
+            wgpu::TextureView textureView = texture.CreateView();
+
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, textureView}});
+        }
+    }
+
+    // Test a storage texture.
+    {
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly,
+                      wgpu::TextureFormat::R32Uint}});
+
+        wgpu::TextureDescriptor textureDesc;
+        textureDesc.usage = wgpu::TextureUsage::StorageBinding;
+        textureDesc.size = {1, 1, 1};
+        textureDesc.format = wgpu::TextureFormat::R32Uint;
+
+        // Create view, then destroy.
+        {
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            wgpu::TextureView textureView = texture.CreateView();
+
+            texture.Destroy();
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, textureView}});
+        }
+        // Destroy, then create view.
+        {
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            texture.Destroy();
+            wgpu::TextureView textureView = texture.CreateView();
+
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, textureView}});
+        }
+    }
+}
+
+DAWN_INSTANTIATE_TEST(BindGroupTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/BufferTests.cpp b/src/dawn/tests/end2end/BufferTests.cpp
new file mode 100644
index 0000000..0d41e22
--- /dev/null
+++ b/src/dawn/tests/end2end/BufferTests.cpp
@@ -0,0 +1,942 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <array>
+#include <cstring>
+
+class BufferMappingTests : public DawnTest {
+  protected:
+    void MapAsyncAndWait(const wgpu::Buffer& buffer,
+                         wgpu::MapMode mode,
+                         size_t offset,
+                         size_t size) {
+        bool done = false;
+        buffer.MapAsync(
+            mode, offset, size,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done);
+
+        while (!done) {
+            WaitABit();
+        }
+    }
+
+    wgpu::Buffer CreateMapReadBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Buffer CreateMapWriteBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+        return device.CreateBuffer(&descriptor);
+    }
+};
+
+void CheckMapping(const void* actual, const void* expected, size_t size) {
+    EXPECT_NE(actual, nullptr);
+    if (actual != nullptr) {
+        EXPECT_EQ(0, memcmp(actual, expected, size));
+    }
+}
+
+// Test that the simplest map read works
+TEST_P(BufferMappingTests, MapRead_Basic) {
+    wgpu::Buffer buffer = CreateMapReadBuffer(4);
+
+    uint32_t myData = 0x01020304;
+    constexpr size_t kSize = sizeof(myData);
+    queue.WriteBuffer(buffer, 0, &myData, kSize);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
+    CheckMapping(buffer.GetConstMappedRange(), &myData, kSize);
+    CheckMapping(buffer.GetConstMappedRange(0, kSize), &myData, kSize);
+    buffer.Unmap();
+}
+
+// Test map-reading a zero-sized buffer.
+TEST_P(BufferMappingTests, MapRead_ZeroSized) {
+    wgpu::Buffer buffer = CreateMapReadBuffer(0);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, wgpu::kWholeMapSize);
+    ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+    buffer.Unmap();
+}
+
+// Test map-reading with a non-zero offset
+TEST_P(BufferMappingTests, MapRead_NonZeroOffset) {
+    wgpu::Buffer buffer = CreateMapReadBuffer(12);
+
+    uint32_t myData[3] = {0x01020304, 0x05060708, 0x090A0B0C};
+    queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 8, 4);
+    ASSERT_EQ(myData[2], *static_cast<const uint32_t*>(buffer.GetConstMappedRange(8)));
+    buffer.Unmap();
+}
+
+// Map read and unmap twice. Test that both of these two iterations work.
+TEST_P(BufferMappingTests, MapRead_Twice) {
+    wgpu::Buffer buffer = CreateMapReadBuffer(4);
+
+    uint32_t myData = 0x01020304;
+    queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
+    ASSERT_EQ(myData, *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
+    buffer.Unmap();
+
+    myData = 0x05060708;
+    queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
+    ASSERT_EQ(myData, *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
+    buffer.Unmap();
+}
+
+// Map read and test multiple get mapped range data
+TEST_P(BufferMappingTests, MapRead_MultipleMappedRange) {
+    wgpu::Buffer buffer = CreateMapReadBuffer(12);
+
+    uint32_t myData[] = {0x00010203, 0x04050607, 0x08090a0b};
+    queue.WriteBuffer(buffer, 0, &myData, 12);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 12);
+    ASSERT_EQ(myData[0], *static_cast<const uint32_t*>(buffer.GetConstMappedRange(0)));
+    ASSERT_EQ(myData[1], *(static_cast<const uint32_t*>(buffer.GetConstMappedRange(0)) + 1));
+    ASSERT_EQ(myData[2], *(static_cast<const uint32_t*>(buffer.GetConstMappedRange(0)) + 2));
+    ASSERT_EQ(myData[2], *static_cast<const uint32_t*>(buffer.GetConstMappedRange(8)));
+    buffer.Unmap();
+}
+
+// Test map-reading a large buffer.
+TEST_P(BufferMappingTests, MapRead_Large) {
+    constexpr uint32_t kDataSize = 1000 * 1000;
+    constexpr size_t kByteSize = kDataSize * sizeof(uint32_t);
+    wgpu::Buffer buffer = CreateMapReadBuffer(kByteSize);
+
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+    queue.WriteBuffer(buffer, 0, myData.data(), kByteSize);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, kByteSize);
+    EXPECT_EQ(nullptr, buffer.GetConstMappedRange(0, kByteSize + 4));
+    EXPECT_EQ(0, memcmp(buffer.GetConstMappedRange(), myData.data(), kByteSize));
+    EXPECT_EQ(0, memcmp(buffer.GetConstMappedRange(8), myData.data() + 2, kByteSize - 8));
+    EXPECT_EQ(
+        0, memcmp(buffer.GetConstMappedRange(8, kByteSize - 8), myData.data() + 2, kByteSize - 8));
+    buffer.Unmap();
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Read, 16, kByteSize - 16);
+    // Size is too big.
+    EXPECT_EQ(nullptr, buffer.GetConstMappedRange(16, kByteSize - 12));
+    // Offset defaults to 0 which is less than 16
+    EXPECT_EQ(nullptr, buffer.GetConstMappedRange());
+    // Offset less than 8 is less than 16
+    EXPECT_EQ(nullptr, buffer.GetConstMappedRange(8));
+
+    // Test a couple values.
+    EXPECT_EQ(0, memcmp(buffer.GetConstMappedRange(16), myData.data() + 4, kByteSize - 16));
+    EXPECT_EQ(0, memcmp(buffer.GetConstMappedRange(24), myData.data() + 6, kByteSize - 24));
+
+    buffer.Unmap();
+}
+
+// Test that GetConstMappedRange works inside map-read callback
+TEST_P(BufferMappingTests, MapRead_InCallback) {
+    constexpr size_t kBufferSize = 12;
+    wgpu::Buffer buffer = CreateMapReadBuffer(kBufferSize);
+
+    uint32_t myData[3] = {0x01020304, 0x05060708, 0x090A0B0C};
+    static constexpr size_t kSize = sizeof(myData);
+    queue.WriteBuffer(buffer, 0, &myData, kSize);
+
+    struct UserData {
+        bool done;
+        wgpu::Buffer buffer;
+        void* expected;
+    };
+    UserData user{false, buffer, &myData};
+
+    buffer.MapAsync(
+        wgpu::MapMode::Read, 0, kBufferSize,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            UserData* user = static_cast<UserData*>(userdata);
+
+            EXPECT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            if (status == WGPUBufferMapAsyncStatus_Success) {
+                CheckMapping(user->buffer.GetConstMappedRange(), user->expected, kSize);
+                CheckMapping(user->buffer.GetConstMappedRange(0, kSize), user->expected, kSize);
+
+                CheckMapping(user->buffer.GetConstMappedRange(8, 4),
+                             static_cast<const uint32_t*>(user->expected) + 2, sizeof(uint32_t));
+
+                user->buffer.Unmap();
+            }
+            user->done = true;
+        },
+        &user);
+
+    while (!user.done) {
+        WaitABit();
+    }
+}
+
+// Test that the simplest map write works.
+TEST_P(BufferMappingTests, MapWrite_Basic) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+
+    uint32_t myData = 2934875;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
+    ASSERT_NE(nullptr, buffer.GetMappedRange());
+    ASSERT_NE(nullptr, buffer.GetConstMappedRange());
+    memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test that the simplest map write works with a range.
+TEST_P(BufferMappingTests, MapWrite_BasicRange) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+
+    uint32_t myData = 2934875;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
+    ASSERT_NE(nullptr, buffer.GetMappedRange(0, 4));
+    ASSERT_NE(nullptr, buffer.GetConstMappedRange(0, 4));
+    memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test map-writing a zero-sized buffer.
+TEST_P(BufferMappingTests, MapWrite_ZeroSized) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(0);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, wgpu::kWholeMapSize);
+    ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+    ASSERT_NE(buffer.GetMappedRange(), nullptr);
+    buffer.Unmap();
+}
+
+// Test map-writing with a non-zero offset.
+TEST_P(BufferMappingTests, MapWrite_NonZeroOffset) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+
+    uint32_t myData = 2934875;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 8, 4);
+    memcpy(buffer.GetMappedRange(8), &myData, sizeof(myData));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 8);
+}
+
+// Map, write and unmap twice. Test that both of these two iterations work.
+TEST_P(BufferMappingTests, MapWrite_Twice) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+
+    uint32_t myData = 2934875;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
+    memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+
+    myData = 9999999;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
+    memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Map write and unmap twice with different ranges and make sure the first write is preserved
+TEST_P(BufferMappingTests, MapWrite_TwicePreserve) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+
+    uint32_t data1 = 0x08090a0b;
+    size_t offset1 = 8;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, offset1, sizeof(data1));
+    memcpy(buffer.GetMappedRange(offset1), &data1, sizeof(data1));
+    buffer.Unmap();
+
+    uint32_t data2 = 0x00010203;
+    size_t offset2 = 0;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, offset2, sizeof(data2));
+    memcpy(buffer.GetMappedRange(offset2), &data2, sizeof(data2));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(data1, buffer, offset1);
+    EXPECT_BUFFER_U32_EQ(data2, buffer, offset2);
+}
+
+// Map write and unmap twice with overlapping ranges and make sure data is updated correctly
+TEST_P(BufferMappingTests, MapWrite_TwiceRangeOverlap) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(16);
+
+    uint32_t data1[] = {0x01234567, 0x89abcdef};
+    size_t offset1 = 8;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, offset1, 8);
+    memcpy(buffer.GetMappedRange(offset1), data1, 8);
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(0x00000000, buffer, 0);
+    EXPECT_BUFFER_U32_EQ(0x00000000, buffer, 4);
+    EXPECT_BUFFER_U32_EQ(0x01234567, buffer, 8);
+    EXPECT_BUFFER_U32_EQ(0x89abcdef, buffer, 12);
+
+    uint32_t data2[] = {0x01234567, 0x89abcdef, 0x55555555};
+    size_t offset2 = 0;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, offset2, 12);
+    memcpy(buffer.GetMappedRange(offset2), data2, 12);
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(0x01234567, buffer, 0);
+    EXPECT_BUFFER_U32_EQ(0x89abcdef, buffer, 4);
+    EXPECT_BUFFER_U32_EQ(0x55555555, buffer, 8);
+    EXPECT_BUFFER_U32_EQ(0x89abcdef, buffer, 12);
+}
+
+// Map write and test multiple mapped range data get updated correctly
+TEST_P(BufferMappingTests, MapWrite_MultipleMappedRange) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+
+    uint32_t data1 = 0x08090a0b;
+    size_t offset1 = 8;
+    uint32_t data2 = 0x00010203;
+    size_t offset2 = 0;
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 12);
+    memcpy(buffer.GetMappedRange(offset1), &data1, sizeof(data1));
+    memcpy(buffer.GetMappedRange(offset2), &data2, sizeof(data2));
+    buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(data1, buffer, offset1);
+    EXPECT_BUFFER_U32_EQ(data2, buffer, offset2);
+}
+
+// Test mapping a large buffer.
+TEST_P(BufferMappingTests, MapWrite_Large) {
+    constexpr uint32_t kDataSize = 1000 * 1000;
+    constexpr size_t kByteSize = kDataSize * sizeof(uint32_t);
+    wgpu::Buffer buffer = CreateMapWriteBuffer(kDataSize * sizeof(uint32_t));
+
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 16, kByteSize - 20);
+    EXPECT_EQ(nullptr, buffer.GetMappedRange());
+    EXPECT_EQ(nullptr, buffer.GetMappedRange(0));
+    EXPECT_EQ(nullptr, buffer.GetMappedRange(8));
+    EXPECT_EQ(nullptr, buffer.GetMappedRange(16, kByteSize - 8));
+    memcpy(buffer.GetMappedRange(16), myData.data(), kByteSize - 20);
+    buffer.Unmap();
+    EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 16, kDataSize - 5);
+}
+
+// Stress test mapping many buffers.
+TEST_P(BufferMappingTests, MapWrite_ManySimultaneous) {
+    constexpr uint32_t kDataSize = 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    constexpr uint32_t kBuffers = 100;
+    std::array<wgpu::Buffer, kBuffers> buffers;
+    uint32_t mapCompletedCount = 0;
+
+    // Create buffers and request mapping them.
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = static_cast<uint32_t>(kDataSize * sizeof(uint32_t));
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    for (uint32_t i = 0; i < kBuffers; ++i) {
+        buffers[i] = device.CreateBuffer(&descriptor);
+
+        buffers[i].MapAsync(
+            wgpu::MapMode::Write, 0, descriptor.size,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+                (*static_cast<uint32_t*>(userdata))++;
+            },
+            &mapCompletedCount);
+    }
+
+    // Wait for all mappings to complete
+    while (mapCompletedCount != kBuffers) {
+        WaitABit();
+    }
+
+    // All buffers are mapped, write into them and unmap them all.
+    for (uint32_t i = 0; i < kBuffers; ++i) {
+        memcpy(buffers[i].GetMappedRange(0, descriptor.size), myData.data(), descriptor.size);
+        buffers[i].Unmap();
+    }
+
+    // Check the content of the buffers.
+    for (uint32_t i = 0; i < kBuffers; ++i) {
+        EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffers[i], 0, kDataSize);
+    }
+}
+
+// Test that the map offset isn't updated when the call is an error.
+TEST_P(BufferMappingTests, OffsetNotUpdatedOnError) {
+    uint32_t data[3] = {0xCA7, 0xB0A7, 0xBA7};
+    wgpu::Buffer buffer = CreateMapReadBuffer(sizeof(data));
+    queue.WriteBuffer(buffer, 0, data, sizeof(data));
+
+    // Map the buffer but do not wait on the result yet.
+    bool done1 = false;
+    bool done2 = false;
+    buffer.MapAsync(
+        wgpu::MapMode::Read, 8, 4,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            *static_cast<bool*>(userdata) = true;
+        },
+        &done1);
+
+    // Call MapAsync another time, it is an error because the buffer is already being mapped so
+    // mMapOffset is not updated.
+    ASSERT_DEVICE_ERROR(buffer.MapAsync(
+        wgpu::MapMode::Read, 0, 4,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            *static_cast<bool*>(userdata) = true;
+        },
+        &done2));
+
+    while (!done1 || !done2) {
+        WaitABit();
+    }
+
+    // mMapOffset has not been updated so it should still be 4, which is data[1]
+    ASSERT_EQ(0, memcmp(buffer.GetConstMappedRange(8), &data[2], sizeof(uint32_t)));
+}
+
+// Test that Get(Const)MappedRange work inside map-write callback.
+TEST_P(BufferMappingTests, MapWrite_InCallbackDefault) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+
+    static constexpr uint32_t myData = 2934875;
+    static constexpr size_t kSize = sizeof(myData);
+
+    struct UserData {
+        bool done;
+        wgpu::Buffer buffer;
+    };
+    UserData user{false, buffer};
+
+    buffer.MapAsync(
+        wgpu::MapMode::Write, 0, kSize,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            UserData* user = static_cast<UserData*>(userdata);
+
+            EXPECT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            if (status == WGPUBufferMapAsyncStatus_Success) {
+                EXPECT_NE(nullptr, user->buffer.GetConstMappedRange());
+                void* ptr = user->buffer.GetMappedRange();
+                EXPECT_NE(nullptr, ptr);
+                if (ptr != nullptr) {
+                    uint32_t data = myData;
+                    memcpy(ptr, &data, kSize);
+                }
+
+                user->buffer.Unmap();
+            }
+            user->done = true;
+        },
+        &user);
+
+    while (!user.done) {
+        WaitABit();
+    }
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test that Get(Const)MappedRange with range work inside map-write callback.
+TEST_P(BufferMappingTests, MapWrite_InCallbackRange) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+
+    static constexpr uint32_t myData = 2934875;
+    static constexpr size_t kSize = sizeof(myData);
+
+    struct UserData {
+        bool done;
+        wgpu::Buffer buffer;
+    };
+    UserData user{false, buffer};
+
+    buffer.MapAsync(
+        wgpu::MapMode::Write, 0, kSize,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            UserData* user = static_cast<UserData*>(userdata);
+
+            EXPECT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            if (status == WGPUBufferMapAsyncStatus_Success) {
+                EXPECT_NE(nullptr, user->buffer.GetConstMappedRange(0, kSize));
+                void* ptr = user->buffer.GetMappedRange(0, kSize);
+                EXPECT_NE(nullptr, ptr);
+                if (ptr != nullptr) {
+                    uint32_t data = myData;
+                    memcpy(ptr, &data, kSize);
+                }
+
+                user->buffer.Unmap();
+            }
+            user->done = true;
+        },
+        &user);
+
+    while (!user.done) {
+        WaitABit();
+    }
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Regression test for crbug.com/dawn/969 where this test
+// produced invalid barriers.
+TEST_P(BufferMappingTests, MapWrite_ZeroSizedTwice) {
+    wgpu::Buffer buffer = CreateMapWriteBuffer(0);
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, wgpu::kWholeMapSize);
+    buffer.Unmap();
+
+    MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, wgpu::kWholeMapSize);
+}
+
+DAWN_INSTANTIATE_TEST(BufferMappingTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class BufferMappedAtCreationTests : public DawnTest {
+  protected:
+    static void MapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+        EXPECT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+        *static_cast<bool*>(userdata) = true;
+    }
+
+    const void* MapAsyncAndWait(const wgpu::Buffer& buffer, wgpu::MapMode mode, size_t size) {
+        bool done = false;
+        buffer.MapAsync(mode, 0, size, MapCallback, &done);
+
+        while (!done) {
+            WaitABit();
+        }
+
+        return buffer.GetConstMappedRange(0, size);
+    }
+
+    void UnmapBuffer(const wgpu::Buffer& buffer) {
+        buffer.Unmap();
+    }
+
+    wgpu::Buffer BufferMappedAtCreation(wgpu::BufferUsage usage, uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+        descriptor.mappedAtCreation = true;
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Buffer BufferMappedAtCreationWithData(wgpu::BufferUsage usage,
+                                                const std::vector<uint32_t>& data) {
+        size_t byteLength = data.size() * sizeof(uint32_t);
+        wgpu::Buffer buffer = BufferMappedAtCreation(usage, byteLength);
+        memcpy(buffer.GetMappedRange(), data.data(), byteLength);
+        return buffer;
+    }
+};
+
+// Test that the simplest mappedAtCreation works for MapWrite buffers.
+TEST_P(BufferMappedAtCreationTests, MapWriteUsageSmall) {
+    uint32_t myData = 230502;
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+    UnmapBuffer(buffer);
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test that the simplest mappedAtCreation works for MapRead buffers.
+TEST_P(BufferMappedAtCreationTests, MapReadUsageSmall) {
+    uint32_t myData = 230502;
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::MapRead, {myData});
+    UnmapBuffer(buffer);
+
+    const void* mappedData = MapAsyncAndWait(buffer, wgpu::MapMode::Read, 4);
+    ASSERT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData));
+    UnmapBuffer(buffer);
+}
+
+// Test that the simplest mappedAtCreation works for non-mappable buffers.
+TEST_P(BufferMappedAtCreationTests, NonMappableUsageSmall) {
+    uint32_t myData = 4239;
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::CopySrc, {myData});
+    UnmapBuffer(buffer);
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test mappedAtCreation for a large MapWrite buffer
+TEST_P(BufferMappedAtCreationTests, MapWriteUsageLarge) {
+    constexpr uint64_t kDataSize = 1000 * 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+    UnmapBuffer(buffer);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 0, kDataSize);
+}
+
+// Test mappedAtCreation for a large MapRead buffer
+TEST_P(BufferMappedAtCreationTests, MapReadUsageLarge) {
+    constexpr uint64_t kDataSize = 1000 * 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::MapRead, myData);
+    UnmapBuffer(buffer);
+
+    const void* mappedData =
+        MapAsyncAndWait(buffer, wgpu::MapMode::Read, kDataSize * sizeof(uint32_t));
+    ASSERT_EQ(0, memcmp(mappedData, myData.data(), kDataSize * sizeof(uint32_t)));
+    UnmapBuffer(buffer);
+}
+
+// Test mappedAtCreation for a large non-mappable buffer
+TEST_P(BufferMappedAtCreationTests, NonMappableUsageLarge) {
+    constexpr uint64_t kDataSize = 1000 * 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::CopySrc, {myData});
+    UnmapBuffer(buffer);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 0, kDataSize);
+}
+
+// Test destroying a non-mappable buffer mapped at creation.
+// This is a regression test for an issue where the D3D12 backend thought the buffer was actually
+// mapped and tried to unlock the heap residency (when actually the buffer was using a staging
+// buffer)
+TEST_P(BufferMappedAtCreationTests, DestroyNonMappableWhileMappedForCreation) {
+    wgpu::Buffer buffer = BufferMappedAtCreation(wgpu::BufferUsage::CopySrc, 4);
+    buffer.Destroy();
+}
+
+// Test destroying a mappable buffer mapped at creation.
+TEST_P(BufferMappedAtCreationTests, DestroyMappableWhileMappedForCreation) {
+    wgpu::Buffer buffer = BufferMappedAtCreation(wgpu::BufferUsage::MapRead, 4);
+    buffer.Destroy();
+}
+
+// Test that mapping a buffer is valid after mappedAtCreation and Unmap
+TEST_P(BufferMappedAtCreationTests, CreateThenMapSuccess) {
+    static uint32_t myData = 230502;
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+    UnmapBuffer(buffer);
+
+    EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+
+    bool done = false;
+    buffer.MapAsync(
+        wgpu::MapMode::Write, 0, 4,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            *static_cast<bool*>(userdata) = true;
+        },
+        &done);
+
+    while (!done) {
+        WaitABit();
+    }
+
+    UnmapBuffer(buffer);
+}
+
+// Test that is is invalid to map a buffer twice when using mappedAtCreation
+TEST_P(BufferMappedAtCreationTests, CreateThenMapBeforeUnmapFailure) {
+    uint32_t myData = 230502;
+    wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+
+    ASSERT_DEVICE_ERROR([&]() {
+        bool done = false;
+        buffer.MapAsync(
+            wgpu::MapMode::Write, 0, 4,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                ASSERT_EQ(WGPUBufferMapAsyncStatus_Error, status);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done);
+
+        while (!done) {
+            WaitABit();
+        }
+    }());
+
+    // mappedAtCreation is unaffected by the MapWrite error.
+    UnmapBuffer(buffer);
+}
+
+// Test that creating a zero-sized buffer mapped is allowed.
+TEST_P(BufferMappedAtCreationTests, ZeroSized) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 0;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    ASSERT_NE(nullptr, buffer.GetMappedRange());
+
+    // Check that unmapping the buffer works too.
+    UnmapBuffer(buffer);
+}
+
+// Test that creating a zero-sized mapppable buffer mapped. (it is a different code path)
+TEST_P(BufferMappedAtCreationTests, ZeroSizedMappableBuffer) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 0;
+    descriptor.usage = wgpu::BufferUsage::MapWrite;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    ASSERT_NE(nullptr, buffer.GetMappedRange());
+
+    // Check that unmapping the buffer works too.
+    UnmapBuffer(buffer);
+}
+
+// Test that creating a zero-sized error buffer mapped. (it is a different code path)
+TEST_P(BufferMappedAtCreationTests, ZeroSizedErrorBuffer) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 0;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::Storage;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer;
+    ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&descriptor));
+
+    ASSERT_NE(nullptr, buffer.GetMappedRange());
+}
+
+// Test the result of GetMappedRange when mapped at creation.
+TEST_P(BufferMappedAtCreationTests, GetMappedRange) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    ASSERT_EQ(buffer.GetMappedRange(), buffer.GetConstMappedRange());
+    ASSERT_NE(buffer.GetMappedRange(), nullptr);
+    buffer.Unmap();
+}
+
+// Test the result of GetMappedRange when mapped at creation for a zero-sized buffer.
+TEST_P(BufferMappedAtCreationTests, GetMappedRangeZeroSized) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 0;
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    ASSERT_EQ(buffer.GetMappedRange(), buffer.GetConstMappedRange());
+    ASSERT_NE(buffer.GetMappedRange(), nullptr);
+    buffer.Unmap();
+}
+
+DAWN_INSTANTIATE_TEST(BufferMappedAtCreationTests,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_resource_heap_tier2"}),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class BufferTests : public DawnTest {};
+
+// Test that creating a zero-buffer is allowed.
+TEST_P(BufferTests, ZeroSizedBuffer) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 0;
+    desc.usage = wgpu::BufferUsage::CopyDst;
+    device.CreateBuffer(&desc);
+}
+
+// Test that creating a very large buffers fails gracefully.
+TEST_P(BufferTests, CreateBufferOOM) {
+    // TODO(http://crbug.com/dawn/749): Missing support.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    DAWN_TEST_UNSUPPORTED_IF(IsAsan());
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+
+    descriptor.size = std::numeric_limits<uint64_t>::max();
+    ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+
+    // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+    descriptor.size = 1ull << 50;
+    ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+}
+
+// Test that a very large buffer mappedAtCreation fails gracefully.
+TEST_P(BufferTests, BufferMappedAtCreationOOM) {
+    // TODO(http://crbug.com/dawn/749): Missing support.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    DAWN_TEST_UNSUPPORTED_IF(IsAsan());
+
+    // Test non-mappable buffer
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::CopyDst;
+        descriptor.mappedAtCreation = true;
+
+        // Control: test a small buffer works.
+        device.CreateBuffer(&descriptor);
+
+        // Test an enormous buffer fails
+        descriptor.size = std::numeric_limits<uint64_t>::max();
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+
+        // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+        descriptor.size = 1ull << 50;
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+    }
+
+    // Test mappable buffer
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+        descriptor.mappedAtCreation = true;
+
+        // Control: test a small buffer works.
+        device.CreateBuffer(&descriptor);
+
+        // Test an enormous buffer fails
+        descriptor.size = std::numeric_limits<uint64_t>::max();
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+
+        // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+        descriptor.size = 1ull << 50;
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+    }
+}
+
+// Test that mapping an OOM buffer fails gracefully
+TEST_P(BufferTests, CreateBufferOOMMapAsync) {
+    // TODO(http://crbug.com/dawn/749): Missing support.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    DAWN_TEST_UNSUPPORTED_IF(IsAsan());
+
+    auto RunTest = [this](const wgpu::BufferDescriptor& descriptor) {
+        wgpu::Buffer buffer;
+        ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&descriptor));
+
+        bool done = false;
+        ASSERT_DEVICE_ERROR(buffer.MapAsync(
+            wgpu::MapMode::Write, 0, 4,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                EXPECT_EQ(status, WGPUBufferMapAsyncStatus_Error);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done));
+
+        while (!done) {
+            WaitABit();
+        }
+    };
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+
+    // Test an enormous buffer
+    descriptor.size = std::numeric_limits<uint64_t>::max();
+    RunTest(descriptor);
+
+    // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+    descriptor.size = 1ull << 50;
+    RunTest(descriptor);
+}
+
+DAWN_INSTANTIATE_TEST(BufferTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class BufferNoSuballocationTests : public DawnTest {};
+
+// Regression test for crbug.com/1313172
+// This tests a buffer. It then performs writeBuffer and immediately destroys
+// it. Though writeBuffer references a destroyed buffer, it should not crash.
+TEST_P(BufferNoSuballocationTests, WriteBufferThenDestroy) {
+    uint32_t myData = 0x01020304;
+
+    wgpu::BufferDescriptor desc;
+    desc.size = 1024;
+    desc.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    // Enqueue a pending write into the buffer.
+    constexpr size_t kSize = sizeof(myData);
+    queue.WriteBuffer(buffer, 0, &myData, kSize);
+
+    // Destroy the buffer.
+    buffer.Destroy();
+
+    // Flush and wait for all commands.
+    queue.Submit(0, nullptr);
+    WaitForAllOperations();
+}
+
+DAWN_INSTANTIATE_TEST(BufferNoSuballocationTests,
+                      D3D12Backend({"disable_resource_suballocation"}),
+                      MetalBackend({"disable_resource_suballocation"}),
+                      OpenGLBackend({"disable_resource_suballocation"}),
+                      OpenGLESBackend({"disable_resource_suballocation"}),
+                      VulkanBackend({"disable_resource_suballocation"}));
diff --git a/src/dawn/tests/end2end/BufferZeroInitTests.cpp b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
new file mode 100644
index 0000000..515e187
--- /dev/null
+++ b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
@@ -0,0 +1,1387 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#define EXPECT_LAZY_CLEAR(N, statement)                                                        \
+    do {                                                                                       \
+        if (UsesWire()) {                                                                      \
+            statement;                                                                         \
+        } else {                                                                               \
+            size_t lazyClearsBefore = dawn::native::GetLazyClearCountForTesting(device.Get()); \
+            statement;                                                                         \
+            size_t lazyClearsAfter = dawn::native::GetLazyClearCountForTesting(device.Get());  \
+            EXPECT_EQ(N, lazyClearsAfter - lazyClearsBefore);                                  \
+        }                                                                                      \
+    } while (0)
+
+namespace {
+
+    struct BufferZeroInitInCopyT2BSpec {
+        wgpu::Extent3D textureSize;
+        uint64_t bufferOffset;
+        uint64_t extraBytes;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+        uint32_t lazyClearCount;
+    };
+
+}  // anonymous namespace
+
+class BufferZeroInitTest : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
+        }
+        return requiredFeatures;
+    }
+
+  public:
+    wgpu::Buffer CreateBuffer(uint64_t size,
+                              wgpu::BufferUsage usage,
+                              bool mappedAtCreation = false) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+        descriptor.mappedAtCreation = mappedAtCreation;
+        return device.CreateBuffer(&descriptor);
+    }
+
+    void MapAsyncAndWait(wgpu::Buffer buffer,
+                         wgpu::MapMode mapMode,
+                         uint64_t offset,
+                         uint64_t size) {
+        ASSERT(mapMode == wgpu::MapMode::Read || mapMode == wgpu::MapMode::Write);
+
+        bool done = false;
+        buffer.MapAsync(
+            mapMode, offset, size,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done);
+
+        while (!done) {
+            WaitABit();
+        }
+    }
+
+    wgpu::Texture CreateAndInitializeTexture(const wgpu::Extent3D& size,
+                                             wgpu::TextureFormat format,
+                                             wgpu::Color color = {0.f, 0.f, 0.f, 0.f}) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.format = format;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc |
+                           wgpu::TextureUsage::RenderAttachment |
+                           wgpu::TextureUsage::StorageBinding;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        for (uint32_t arrayLayer = 0; arrayLayer < size.depthOrArrayLayers; ++arrayLayer) {
+            wgpu::TextureViewDescriptor viewDescriptor;
+            viewDescriptor.format = format;
+            viewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+            viewDescriptor.baseArrayLayer = arrayLayer;
+            viewDescriptor.arrayLayerCount = 1u;
+
+            utils::ComboRenderPassDescriptor renderPassDescriptor(
+                {texture.CreateView(&viewDescriptor)});
+            renderPassDescriptor.cColorAttachments[0].clearValue = color;
+            wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+            renderPass.End();
+        }
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        return texture;
+    }
+
+    void TestBufferZeroInitInCopyTextureToBuffer(const BufferZeroInitInCopyT2BSpec& spec) {
+        constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Float;
+        ASSERT(utils::GetTexelBlockSizeInBytes(kTextureFormat) * spec.textureSize.width %
+                   kTextureBytesPerRowAlignment ==
+               0);
+
+        constexpr wgpu::Color kClearColor = {0.5f, 0.5f, 0.5f, 0.5f};
+        wgpu::Texture texture =
+            CreateAndInitializeTexture(spec.textureSize, kTextureFormat, kClearColor);
+
+        const wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+
+        const uint64_t bufferSize = spec.bufferOffset + spec.extraBytes +
+                                    utils::RequiredBytesInCopy(spec.bytesPerRow, spec.rowsPerImage,
+                                                               spec.textureSize, kTextureFormat);
+        wgpu::Buffer buffer =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+        const wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            buffer, spec.bufferOffset, spec.bytesPerRow, spec.rowsPerImage);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &spec.textureSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(spec.lazyClearCount, queue.Submit(1, &commandBuffer));
+
+        const uint64_t expectedValueCount = bufferSize / sizeof(float);
+        std::vector<float> expectedValues(expectedValueCount, 0.f);
+
+        for (uint32_t slice = 0; slice < spec.textureSize.depthOrArrayLayers; ++slice) {
+            const uint64_t baseOffsetBytesPerSlice =
+                spec.bufferOffset + spec.bytesPerRow * spec.rowsPerImage * slice;
+            for (uint32_t y = 0; y < spec.textureSize.height; ++y) {
+                const uint64_t baseOffsetBytesPerRow =
+                    baseOffsetBytesPerSlice + spec.bytesPerRow * y;
+                const uint64_t baseOffsetFloatCountPerRow = baseOffsetBytesPerRow / sizeof(float);
+                for (uint32_t x = 0; x < spec.textureSize.width; ++x) {
+                    expectedValues[baseOffsetFloatCountPerRow + x] = 0.5f;
+                }
+            }
+        }
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedValues.data(), buffer, 0,
+                                                           expectedValues.size()));
+    }
+
+    void TestBufferZeroInitInBindGroup(wgpu::ShaderModule module,
+                                       uint64_t bufferOffset,
+                                       uint64_t boundBufferSize,
+                                       const std::vector<uint32_t>& expectedBufferData) {
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.layout = nullptr;
+        pipelineDescriptor.compute.module = module;
+        pipelineDescriptor.compute.entryPoint = "main";
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDescriptor);
+
+        const uint64_t bufferSize = expectedBufferData.size() * sizeof(uint32_t);
+        wgpu::Buffer buffer =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc |
+                                         wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
+        wgpu::Texture outputTexture =
+            CreateAndInitializeTexture({1u, 1u, 1u}, wgpu::TextureFormat::RGBA8Unorm);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, pipeline.GetBindGroupLayout(0),
+            {{0, buffer, bufferOffset, boundBufferSize}, {1u, outputTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
+        computePass.SetBindGroup(0, bindGroup);
+        computePass.SetPipeline(pipeline);
+        computePass.Dispatch(1u);
+        computePass.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(expectedBufferData.data(), buffer, 0,
+                                                         expectedBufferData.size()));
+
+        constexpr RGBA8 kExpectedColor = {0, 255, 0, 255};
+        EXPECT_PIXEL_RGBA8_EQ(kExpectedColor, outputTexture, 0u, 0u);
+    }
+
+    wgpu::RenderPipeline CreateRenderPipelineForTest(
+        const char* vertexShader,
+        uint32_t vertexBufferCount = 1u,
+        wgpu::VertexFormat vertexFormat = wgpu::VertexFormat::Float32x4) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader);
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment)
+            fn main(@location(0) i_color : vec4<f32>) -> @location(0) vec4<f32> {
+                return i_color;
+            })");
+
+        ASSERT(vertexBufferCount <= 1u);
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        descriptor.vertex.bufferCount = vertexBufferCount;
+        descriptor.cBuffers[0].arrayStride = Align(utils::VertexFormatSize(vertexFormat), 4);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = vertexFormat;
+        descriptor.cTargets[0].format = kColorAttachmentFormat;
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    void ExpectLazyClearSubmitAndCheckOutputs(wgpu::CommandEncoder encoder,
+                                              wgpu::Buffer buffer,
+                                              uint64_t bufferSize,
+                                              wgpu::Texture colorAttachment) {
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        // Although we just bind a part of the buffer, we still expect the whole buffer to be
+        // lazily initialized to 0.
+        const std::vector<uint32_t> expectedBufferData(bufferSize / sizeof(uint32_t), 0);
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(expectedBufferData.data(), buffer, 0,
+                                                         expectedBufferData.size()));
+
+        const RGBA8 kExpectedPixelValue = {0, 255, 0, 255};
+        EXPECT_PIXEL_RGBA8_EQ(kExpectedPixelValue, colorAttachment, 0, 0);
+    }
+
+    void TestBufferZeroInitAsVertexBuffer(uint64_t vertexBufferOffset) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest(R"(
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex) fn main(@location(0) pos : vec4<f32>) -> VertexOut {
+                var output : VertexOut;
+                if (all(pos == vec4<f32>(0.0, 0.0, 0.0, 0.0))) {
+                    output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+                } else {
+                    output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+                }
+                output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return output;
+            })");
+
+        constexpr uint64_t kVertexAttributeSize = sizeof(float) * 4;
+        const uint64_t vertexBufferSize = kVertexAttributeSize + vertexBufferOffset;
+        wgpu::Buffer vertexBuffer =
+            CreateBuffer(vertexBufferSize, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopySrc |
+                                               wgpu::BufferUsage::CopyDst);
+        wgpu::Texture colorAttachment =
+            CreateAndInitializeTexture({1, 1, 1}, kColorAttachmentFormat);
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorAttachment.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+
+        // Bind the buffer with offset == vertexBufferOffset and size kVertexAttributeSize as the
+        // vertex buffer.
+        renderPass.SetVertexBuffer(0, vertexBuffer, vertexBufferOffset, kVertexAttributeSize);
+        renderPass.SetPipeline(renderPipeline);
+        renderPass.Draw(1);
+        renderPass.End();
+
+        ExpectLazyClearSubmitAndCheckOutputs(encoder, vertexBuffer, vertexBufferSize,
+                                             colorAttachment);
+    }
+
+    void TestBufferZeroInitAsIndexBuffer(uint64_t indexBufferOffset) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::RenderPipeline renderPipeline =
+            CreateRenderPipelineForTest(R"(
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+                var output : VertexOut;
+                if (VertexIndex == 0u) {
+                    output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+                } else {
+                    output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+                }
+                output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return output;
+            })",
+                                        0 /* vertexBufferCount */);
+
+        // The buffer size cannot be less than 4
+        const uint64_t indexBufferSize = sizeof(uint32_t) + indexBufferOffset;
+        wgpu::Buffer indexBuffer =
+            CreateBuffer(indexBufferSize, wgpu::BufferUsage::Index | wgpu::BufferUsage::CopySrc |
+                                              wgpu::BufferUsage::CopyDst);
+
+        wgpu::Texture colorAttachment =
+            CreateAndInitializeTexture({1, 1, 1}, kColorAttachmentFormat);
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorAttachment.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(renderPipeline);
+
+        // Bind the buffer with offset == indexBufferOffset and size sizeof(uint32_t) as the index
+        // buffer.
+        renderPass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, indexBufferOffset,
+                                  sizeof(uint32_t));
+        renderPass.DrawIndexed(1);
+        renderPass.End();
+
+        ExpectLazyClearSubmitAndCheckOutputs(encoder, indexBuffer, indexBufferSize,
+                                             colorAttachment);
+    }
+
+    void TestBufferZeroInitAsIndirectBufferForDrawIndirect(uint64_t indirectBufferOffset) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::Color kClearColorGreen = {0.f, 1.f, 0.f, 1.f};
+
+        // As long as the vertex shader is executed once, the output color will be red.
+        wgpu::RenderPipeline renderPipeline =
+            CreateRenderPipelineForTest(R"(
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex) fn main() -> VertexOut {
+                var output : VertexOut;
+                output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+                output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return output;
+            })",
+                                        0 /* vertexBufferCount */);
+
+        // Clear the color attachment to green.
+        wgpu::Texture colorAttachment =
+            CreateAndInitializeTexture({1, 1, 1}, kColorAttachmentFormat, kClearColorGreen);
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorAttachment.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+
+        const uint64_t bufferSize = kDrawIndirectSize + indirectBufferOffset;
+        wgpu::Buffer indirectBuffer =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Indirect);
+
+        // The indirect buffer should be lazily cleared to 0, so we actually draw nothing and the
+        // color attachment will keep its original color (green) after we end the render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(renderPipeline);
+        renderPass.DrawIndirect(indirectBuffer, indirectBufferOffset);
+        renderPass.End();
+
+        ExpectLazyClearSubmitAndCheckOutputs(encoder, indirectBuffer, bufferSize, colorAttachment);
+    }
+
+    void TestBufferZeroInitAsIndirectBufferForDrawIndexedIndirect(uint64_t indirectBufferOffset) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::Color kClearColorGreen = {0.f, 1.f, 0.f, 1.f};
+
+        // As long as the vertex shader is executed once, the output color will be red.
+        wgpu::RenderPipeline renderPipeline =
+            CreateRenderPipelineForTest(R"(
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex) fn main() -> VertexOut {
+                var output : VertexOut;
+                output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+                output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return output;
+            })",
+                                        0 /* vertexBufferCount */);
+        wgpu::Buffer indexBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0});
+
+        // Clear the color attachment to green.
+        wgpu::Texture colorAttachment =
+            CreateAndInitializeTexture({1, 1, 1}, kColorAttachmentFormat, kClearColorGreen);
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorAttachment.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+
+        const uint64_t bufferSize = kDrawIndexedIndirectSize + indirectBufferOffset;
+        wgpu::Buffer indirectBuffer =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Indirect);
+
+        // The indirect buffer should be lazily cleared to 0, so we actually draw nothing and the
+        // color attachment will keep its original color (green) after we end the render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(renderPipeline);
+        renderPass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        renderPass.DrawIndexedIndirect(indirectBuffer, indirectBufferOffset);
+        renderPass.End();
+
+        ExpectLazyClearSubmitAndCheckOutputs(encoder, indirectBuffer, bufferSize, colorAttachment);
+    }
+
+    void TestBufferZeroInitAsIndirectBufferForDispatchIndirect(uint64_t indirectBufferOffset) {
+        constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::Color kClearColorGreen = {0.f, 1.f, 0.f, 1.f};
+
+        // As long as the comptue shader is executed once, the pixel color of outImage will be set
+        // to red.
+        const char* computeShader = R"(
+            @group(0) @binding(0) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+            @stage(compute) @workgroup_size(1) fn main() {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+            })";
+
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.layout = nullptr;
+        pipelineDescriptor.compute.module = utils::CreateShaderModule(device, computeShader);
+        pipelineDescriptor.compute.entryPoint = "main";
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDescriptor);
+
+        // Clear the color of outputTexture to green.
+        wgpu::Texture outputTexture =
+            CreateAndInitializeTexture({1u, 1u, 1u}, kColorAttachmentFormat, kClearColorGreen);
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, outputTexture.CreateView()}});
+
+        const uint64_t bufferSize = kDispatchIndirectSize + indirectBufferOffset;
+        wgpu::Buffer indirectBuffer =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Indirect);
+
+        // The indirect buffer should be lazily cleared to 0, so we actually don't execute the
+        // compute shader and the output texture should keep its original color (green).
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
+        computePass.SetBindGroup(0, bindGroup);
+        computePass.SetPipeline(pipeline);
+        computePass.DispatchIndirect(indirectBuffer, indirectBufferOffset);
+        computePass.End();
+
+        ExpectLazyClearSubmitAndCheckOutputs(encoder, indirectBuffer, bufferSize, outputTexture);
+    }
+};
+
+// Test that calling writeBuffer to overwrite the entire buffer doesn't need to lazily initialize
+// the destination buffer.
+TEST_P(BufferZeroInitTest, WriteBufferToEntireBuffer) {
+    constexpr uint32_t kBufferSize = 8u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+
+    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {
+        {0x02020202u, 0x02020202u}};
+    EXPECT_LAZY_CLEAR(0u, queue.WriteBuffer(buffer, 0, kExpectedData.data(), kBufferSize));
+
+    EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), buffer, 0,
+                                                     kBufferSize / sizeof(uint32_t)));
+}
+
+// Test that calling writeBuffer to overwrite a part of buffer needs to lazily initialize the
+// destination buffer.
+TEST_P(BufferZeroInitTest, WriteBufferToSubBuffer) {
+    constexpr uint32_t kBufferSize = 8u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+    constexpr uint32_t kCopyValue = 0x02020202u;
+
+    // offset == 0
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+
+        constexpr uint32_t kCopyOffset = 0u;
+        EXPECT_LAZY_CLEAR(1u,
+                          queue.WriteBuffer(buffer, kCopyOffset, &kCopyValue, sizeof(kCopyValue)));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_EQ(kCopyValue, buffer, kCopyOffset));
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_EQ(0, buffer, kBufferSize - sizeof(kCopyValue)));
+    }
+
+    // offset > 0
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+
+        constexpr uint32_t kCopyOffset = 4u;
+        EXPECT_LAZY_CLEAR(1u,
+                          queue.WriteBuffer(buffer, kCopyOffset, &kCopyValue, sizeof(kCopyValue)));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_EQ(0, buffer, 0));
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_EQ(kCopyValue, buffer, kCopyOffset));
+    }
+}
+
+// Test that the code path of CopyBufferToBuffer clears the source buffer correctly when it is the
+// first use of the source buffer.
+TEST_P(BufferZeroInitTest, CopyBufferToBufferSource) {
+    constexpr uint64_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kBufferSize;
+    bufferDescriptor.usage = kBufferUsage;
+
+    constexpr std::array<uint8_t, kBufferSize> kInitialData = {
+        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};
+
+    wgpu::Buffer dstBuffer =
+        utils::CreateBufferFromData(device, kInitialData.data(), kBufferSize, kBufferUsage);
+
+    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {{0, 0, 0, 0}};
+
+    // Full copy from the source buffer
+    {
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, kBufferSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                                         kBufferSize / sizeof(uint32_t)));
+    }
+
+    // Partial copy from the source buffer
+    // srcOffset == 0
+    {
+        constexpr uint64_t kSrcOffset = 0;
+        constexpr uint64_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                                         kBufferSize / sizeof(uint32_t)));
+    }
+
+    // srcOffset > 0 and srcOffset + copySize == srcBufferSize
+    {
+        constexpr uint64_t kSrcOffset = kBufferSize / 2;
+        constexpr uint64_t kCopySize = kBufferSize - kSrcOffset;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                                         kBufferSize / sizeof(uint32_t)));
+    }
+
+    // srcOffset > 0 and srcOffset + copySize < srcBufferSize
+    {
+        constexpr uint64_t kSrcOffset = kBufferSize / 4;
+        constexpr uint64_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                                         kBufferSize / sizeof(uint32_t)));
+    }
+}
+
+// Test that the code path of CopyBufferToBuffer clears the destination buffer correctly when it is
+// the first use of the destination buffer.
+TEST_P(BufferZeroInitTest, CopyBufferToBufferDestination) {
+    constexpr uint64_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kBufferSize;
+    bufferDescriptor.usage = kBufferUsage;
+
+    const std::array<uint8_t, kBufferSize> kInitialData = {
+        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};
+    wgpu::Buffer srcBuffer =
+        utils::CreateBufferFromData(device, kInitialData.data(), kBufferSize, kBufferUsage);
+
+    // Full copy from the source buffer doesn't need lazy initialization at all.
+    {
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, kBufferSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(kInitialData.data()),
+                                           dstBuffer, 0, kBufferSize / sizeof(uint32_t)));
+    }
+
+    // Partial copy from the source buffer needs lazy initialization.
+    // offset == 0
+    {
+        constexpr uint32_t kDstOffset = 0;
+        constexpr uint32_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()),
+                                           dstBuffer, 0, kBufferSize / sizeof(uint32_t)));
+    }
+
+    // offset > 0 and dstOffset + CopySize == kBufferSize
+    {
+        constexpr uint32_t kDstOffset = kBufferSize / 2;
+        constexpr uint32_t kCopySize = kBufferSize - kDstOffset;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()),
+                                           dstBuffer, 0, kBufferSize / sizeof(uint32_t)));
+    }
+
+    // offset > 0 and dstOffset + CopySize < kBufferSize
+    {
+        constexpr uint32_t kDstOffset = kBufferSize / 4;
+        constexpr uint32_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()),
+                                           dstBuffer, 0, kBufferSize / sizeof(uint32_t)));
+    }
+}
+
+// Test that the code path of readable buffer mapping clears the buffer correctly when it is the
+// first use of the buffer.
+TEST_P(BufferZeroInitTest, MapAsync_Read) {
+    constexpr uint32_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+    constexpr wgpu::MapMode kMapMode = wgpu::MapMode::Read;
+
+    // Map the whole buffer
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+        EXPECT_LAZY_CLEAR(1u, MapAsyncAndWait(buffer, kMapMode, 0, kBufferSize));
+
+        const uint32_t* mappedDataUint = static_cast<const uint32_t*>(buffer.GetConstMappedRange());
+        for (uint32_t i = 0; i < kBufferSize / sizeof(uint32_t); ++i) {
+            EXPECT_EQ(0u, mappedDataUint[i]);
+        }
+        buffer.Unmap();
+    }
+
+    // Map a range of a buffer
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+
+        constexpr uint64_t kOffset = 8u;
+        constexpr uint64_t kSize = 8u;
+        EXPECT_LAZY_CLEAR(1u, MapAsyncAndWait(buffer, kMapMode, kOffset, kSize));
+
+        const uint32_t* mappedDataUint =
+            static_cast<const uint32_t*>(buffer.GetConstMappedRange(kOffset));
+        for (uint32_t i = 0; i < kSize / sizeof(uint32_t); ++i) {
+            EXPECT_EQ(0u, mappedDataUint[i]);
+        }
+        buffer.Unmap();
+
+        EXPECT_LAZY_CLEAR(0u, MapAsyncAndWait(buffer, kMapMode, 0, kBufferSize));
+        mappedDataUint = static_cast<const uint32_t*>(buffer.GetConstMappedRange());
+        for (uint32_t i = 0; i < kBufferSize / sizeof(uint32_t); ++i) {
+            EXPECT_EQ(0u, mappedDataUint[i]);
+        }
+        buffer.Unmap();
+    }
+}
+
+// Test that the code path of writable buffer mapping clears the buffer correctly when it is the
+// first use of the buffer.
+TEST_P(BufferZeroInitTest, MapAsync_Write) {
+    constexpr uint32_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+
+    constexpr wgpu::MapMode kMapMode = wgpu::MapMode::Write;
+
+    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {{0, 0, 0, 0}};
+
+    // Map the whole buffer
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+        EXPECT_LAZY_CLEAR(1u, MapAsyncAndWait(buffer, kMapMode, 0, kBufferSize));
+        buffer.Unmap();
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(kExpectedData.data()),
+                                           buffer, 0, kExpectedData.size()));
+    }
+
+    // Map a range of a buffer
+    {
+        wgpu::Buffer buffer = CreateBuffer(kBufferSize, kBufferUsage);
+
+        constexpr uint64_t kOffset = 8u;
+        constexpr uint64_t kSize = 8u;
+        EXPECT_LAZY_CLEAR(1u, MapAsyncAndWait(buffer, kMapMode, kOffset, kSize));
+        buffer.Unmap();
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(kExpectedData.data()),
+                                           buffer, 0, kExpectedData.size()));
+    }
+}
+
+// Test that the code path of creating a buffer with BufferDescriptor.mappedAtCreation == true
+// clears the buffer correctly at the creation of the buffer.
+TEST_P(BufferZeroInitTest, MappedAtCreation) {
+    constexpr uint32_t kBufferSize = 16u;
+
+    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {{0, 0, 0, 0}};
+
+    // Buffer with MapRead usage
+    {
+        constexpr wgpu::BufferUsage kBufferUsage = wgpu::BufferUsage::MapRead;
+
+        wgpu::Buffer buffer;
+        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
+        buffer.Unmap();
+
+        MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, kBufferSize);
+        mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
+        buffer.Unmap();
+    }
+
+    // Buffer with MapRead usage and upload the buffer (from CPU and GPU)
+    {
+        constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedFinalData = {
+            {10, 20, 30, 40}};
+
+        constexpr wgpu::BufferUsage kBufferUsage =
+            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+        wgpu::Buffer buffer;
+        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
+
+        // Update data from the CPU side.
+        uint32_t* mappedData = static_cast<uint32_t*>(buffer.GetMappedRange());
+        mappedData[2] = kExpectedFinalData[2];
+        mappedData[3] = kExpectedFinalData[3];
+        buffer.Unmap();
+
+        // Update data from the GPU side.
+        wgpu::Buffer uploadBuffer = utils::CreateBufferFromData(
+            device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst,
+            {kExpectedFinalData[0], kExpectedFinalData[1]});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(uploadBuffer, 0, buffer, 0, 2 * sizeof(uint32_t));
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        // Check the content of the buffer on the CPU side
+        MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, kBufferSize);
+        const uint32_t* constMappedData =
+            static_cast<const uint32_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(kExpectedFinalData.data(), constMappedData, kBufferSize));
+    }
+
+    // Buffer with MapWrite usage
+    {
+        constexpr wgpu::BufferUsage kBufferUsage =
+            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+
+        wgpu::Buffer buffer;
+        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
+
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
+        buffer.Unmap();
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), buffer, 0, kExpectedData.size()));
+    }
+
+    // Buffer with neither MapRead nor MapWrite usage
+    {
+        constexpr wgpu::BufferUsage kBufferUsage = wgpu::BufferUsage::CopySrc;
+
+        wgpu::Buffer buffer;
+        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
+
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
+        buffer.Unmap();
+
+        EXPECT_LAZY_CLEAR(
+            0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), buffer, 0, kExpectedData.size()));
+    }
+}
+
+// Test that the code path of CopyBufferToTexture clears the source buffer correctly when it is the
+// first use of the buffer.
+TEST_P(BufferZeroInitTest, CopyBufferToTexture) {
+    constexpr wgpu::Extent3D kTextureSize = {16u, 16u, 1u};
+
+    constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Uint;
+
+    wgpu::Texture texture = CreateAndInitializeTexture(kTextureSize, kTextureFormat);
+    const wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+
+    const uint32_t rowsPerImage = kTextureSize.height;
+    const uint32_t requiredBufferSizeForCopy = utils::RequiredBytesInCopy(
+        kTextureBytesPerRowAlignment, rowsPerImage, kTextureSize, kTextureFormat);
+
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+    // bufferOffset == 0
+    {
+        constexpr uint64_t kOffset = 0;
+        const uint32_t totalBufferSize = requiredBufferSizeForCopy + kOffset;
+        wgpu::Buffer buffer = CreateBuffer(totalBufferSize, kBufferUsage);
+        const wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            buffer, kOffset, kTextureBytesPerRowAlignment, kTextureSize.height);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &kTextureSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        const std::vector<uint32_t> expectedValues(totalBufferSize / sizeof(uint32_t), 0);
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(expectedValues.data(), buffer, 0,
+                                                         totalBufferSize / sizeof(uint32_t)));
+    }
+
+    // bufferOffset > 0
+    {
+        constexpr uint64_t kOffset = 8u;
+        const uint32_t totalBufferSize = requiredBufferSizeForCopy + kOffset;
+        wgpu::Buffer buffer = CreateBuffer(totalBufferSize, kBufferUsage);
+        const wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            buffer, kOffset, kTextureBytesPerRowAlignment, kTextureSize.height);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &kTextureSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        const std::vector<uint32_t> expectedValues(totalBufferSize / sizeof(uint32_t), 0);
+        EXPECT_LAZY_CLEAR(0u, EXPECT_BUFFER_U32_RANGE_EQ(expectedValues.data(), buffer, 0,
+                                                         totalBufferSize / sizeof(uint32_t)));
+    }
+}
+
+// Test that the code path of CopyTextureToBuffer clears the destination buffer correctly when it is
+// the first use of the buffer and the texture is a 2D non-array texture.
+TEST_P(BufferZeroInitTest, Copy2DTextureToBuffer) {
+    constexpr wgpu::Extent3D kTextureSize = {64u, 8u, 1u};
+
+    // bytesPerRow == texelBlockSizeInBytes * copySize.width && bytesPerRow * copySize.height ==
+    // buffer.size
+    {
+        TestBufferZeroInitInCopyTextureToBuffer(
+            {kTextureSize, 0u, 0u, kTextureBytesPerRowAlignment, kTextureSize.height, 0u});
+    }
+
+    // bytesPerRow > texelBlockSizeInBytes * copySize.width
+    {
+        constexpr uint64_t kBytesPerRow = kTextureBytesPerRowAlignment * 2;
+        TestBufferZeroInitInCopyTextureToBuffer(
+            {kTextureSize, 0u, 0u, kBytesPerRow, kTextureSize.height, 1u});
+    }
+
+    // bufferOffset > 0
+    {
+        constexpr uint64_t kBufferOffset = 16u;
+        TestBufferZeroInitInCopyTextureToBuffer({kTextureSize, kBufferOffset, 0u,
+                                                 kTextureBytesPerRowAlignment, kTextureSize.height,
+                                                 1u});
+    }
+
+    // bytesPerRow * copySize.height < buffer.size
+    {
+        constexpr uint64_t kExtraBufferSize = 16u;
+        TestBufferZeroInitInCopyTextureToBuffer({kTextureSize, 0u, kExtraBufferSize,
+                                                 kTextureBytesPerRowAlignment, kTextureSize.height,
+                                                 1u});
+    }
+}
+
+// Test that the code path of CopyTextureToBuffer clears the destination buffer correctly when it is
+// the first use of the buffer and the texture is a 2D array texture.
+TEST_P(BufferZeroInitTest, Copy2DArrayTextureToBuffer) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    constexpr wgpu::Extent3D kTextureSize = {64u, 4u, 3u};
+
+    // bytesPerRow == texelBlockSizeInBytes * copySize.width && rowsPerImage == copySize.height &&
+    // bytesPerRow * (rowsPerImage * (copySize.depthOrArrayLayers - 1) + copySize.height) ==
+    // buffer.size
+    {
+        TestBufferZeroInitInCopyTextureToBuffer(
+            {kTextureSize, 0u, 0u, kTextureBytesPerRowAlignment, kTextureSize.height, 0u});
+    }
+
+    // rowsPerImage > copySize.height
+    {
+        constexpr uint64_t kRowsPerImage = kTextureSize.height + 1u;
+        TestBufferZeroInitInCopyTextureToBuffer(
+            {kTextureSize, 0u, 0u, kTextureBytesPerRowAlignment, kRowsPerImage, 1u});
+    }
+
+    // bytesPerRow * rowsPerImage * copySize.depthOrArrayLayers < buffer.size
+    {
+        constexpr uint64_t kExtraBufferSize = 16u;
+        TestBufferZeroInitInCopyTextureToBuffer({kTextureSize, 0u, kExtraBufferSize,
+                                                 kTextureBytesPerRowAlignment, kTextureSize.height,
+                                                 1u});
+    }
+}
+
+// Test that the buffer will be lazy initialized correctly when its first use is to be bound as a
+// uniform buffer.
+TEST_P(BufferZeroInitTest, BoundAsUniformBuffer) {
+    // TODO(crbug.com/dawn/661): Diagnose and fix this backend validation failure on GLES.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES() && IsBackendValidationEnabled());
+
+    constexpr uint32_t kBoundBufferSize = 16u;
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct UBO {
+            value : vec4<u32>
+        }
+        @group(0) @binding(0) var<uniform> ubo : UBO;
+        @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            if (all(ubo.value == vec4<u32>(0u, 0u, 0u, 0u))) {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+            } else {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+            }
+        }
+    )");
+
+    // Bind the whole buffer
+    {
+        const std::vector<uint32_t> expected(kBoundBufferSize / sizeof(uint32_t), 0u);
+        TestBufferZeroInitInBindGroup(module, 0, kBoundBufferSize, expected);
+    }
+
+    // Bind a range of a buffer
+    {
+        constexpr uint32_t kOffset = 256u;
+        constexpr uint32_t kExtraBytes = 16u;
+        const std::vector<uint32_t> expected(
+            (kBoundBufferSize + kOffset + kExtraBytes) / sizeof(uint32_t), 0u);
+        TestBufferZeroInitInBindGroup(module, kOffset, kBoundBufferSize, expected);
+    }
+}
+
+// Test that the buffer will be lazy initialized correctly when its first use is to be bound as a
+// read-only storage buffer.
+TEST_P(BufferZeroInitTest, BoundAsReadonlyStorageBuffer) {
+    // TODO(crbug.com/dawn/661): Diagnose and fix this backend validation failure on GLES.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES() && IsBackendValidationEnabled());
+
+    constexpr uint32_t kBoundBufferSize = 16u;
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : vec4<u32>
+        }
+        @group(0) @binding(0) var<storage, read> ssbo : SSBO;
+        @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            if (all(ssbo.value == vec4<u32>(0u, 0u, 0u, 0u))) {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+            } else {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+            }
+        }
+    )");
+
+    // Bind the whole buffer
+    {
+        const std::vector<uint32_t> expected(kBoundBufferSize / sizeof(uint32_t), 0u);
+        TestBufferZeroInitInBindGroup(module, 0, kBoundBufferSize, expected);
+    }
+
+    // Bind a range of a buffer
+    {
+        constexpr uint32_t kOffset = 256u;
+        constexpr uint32_t kExtraBytes = 16u;
+        const std::vector<uint32_t> expected(
+            (kBoundBufferSize + kOffset + kExtraBytes) / sizeof(uint32_t), 0u);
+        TestBufferZeroInitInBindGroup(module, kOffset, kBoundBufferSize, expected);
+    }
+}
+
+// Test that the buffer will be lazy initialized correctly when its first use is to be bound as a
+// storage buffer.
+TEST_P(BufferZeroInitTest, BoundAsStorageBuffer) {
+    // TODO(crbug.com/dawn/661): Diagnose and fix this backend validation failure on GLES.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES() && IsBackendValidationEnabled());
+
+    constexpr uint32_t kBoundBufferSize = 32u;
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : array<vec4<u32>, 2>
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+        @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            if (all(ssbo.value[0] == vec4<u32>(0u, 0u, 0u, 0u)) &&
+                all(ssbo.value[1] == vec4<u32>(0u, 0u, 0u, 0u))) {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+            } else {
+                textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+            }
+
+            storageBarrier();
+
+            ssbo.value[0].x = 10u;
+            ssbo.value[1].y = 20u;
+        }
+    )");
+
+    // Bind the whole buffer
+    {
+        std::vector<uint32_t> expected(kBoundBufferSize / sizeof(uint32_t), 0u);
+        expected[0] = 10u;
+        expected[5] = 20u;
+        TestBufferZeroInitInBindGroup(module, 0, kBoundBufferSize, expected);
+    }
+
+    // Bind a range of a buffer
+    {
+        constexpr uint32_t kOffset = 256u;
+        constexpr uint32_t kExtraBytes = 16u;
+        std::vector<uint32_t> expected(
+            (kBoundBufferSize + kOffset + kExtraBytes) / sizeof(uint32_t), 0u);
+        expected[kOffset / sizeof(uint32_t)] = 10u;
+        expected[kOffset / sizeof(uint32_t) + 5u] = 20u;
+        TestBufferZeroInitInBindGroup(module, kOffset, kBoundBufferSize, expected);
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is in SetVertexBuffer.
+TEST_P(BufferZeroInitTest, SetVertexBuffer) {
+    // Bind the whole buffer as a vertex buffer.
+    {
+        constexpr uint64_t kVertexBufferOffset = 0u;
+        TestBufferZeroInitAsVertexBuffer(kVertexBufferOffset);
+    }
+
+    // Bind the buffer as a vertex buffer with a non-zero offset.
+    {
+        constexpr uint64_t kVertexBufferOffset = 16u;
+        TestBufferZeroInitAsVertexBuffer(kVertexBufferOffset);
+    }
+}
+
+// Test for crbug.com/dawn/837.
+// Test that the padding after a buffer allocation is initialized to 0.
+// This test makes an unaligned vertex buffer which should be padded in the backend
+// allocation. It then tries to index off the end of the vertex buffer in an indexed
+// draw call. A backend which implements robust buffer access via clamping should
+// still see zeros at the end of the buffer.
+TEST_P(BufferZeroInitTest, PaddingInitialized) {
+    DAWN_SUPPRESS_TEST_IF(IsANGLE());                              // TODO(crbug.com/dawn/1084).
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsVulkan() && IsNvidia());  // TODO(crbug.com/dawn/1214).
+
+    constexpr wgpu::TextureFormat kColorAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+    // A small sub-4-byte format means a single vertex can fit entirely within the padded buffer,
+    // touching some of the padding. Test a small format, as well as larger formats.
+    for (wgpu::VertexFormat vertexFormat :
+         {wgpu::VertexFormat::Unorm8x2, wgpu::VertexFormat::Float16x2,
+          wgpu::VertexFormat::Float32x2}) {
+        wgpu::RenderPipeline renderPipeline =
+            CreateRenderPipelineForTest(R"(
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex) fn main(@location(0) pos : vec2<f32>) -> VertexOut {
+                var output : VertexOut;
+                if (all(pos == vec2<f32>(0.0, 0.0))) {
+                    output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+                } else {
+                    output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+                }
+                output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return output;
+            })",
+                                        /* vertexBufferCount */ 1u, vertexFormat);
+
+        // Create an index buffer the indexes off the end of the vertex buffer.
+        wgpu::Buffer indexBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {1});
+
+        const uint32_t vertexFormatSize = utils::VertexFormatSize(vertexFormat);
+
+        // Create an 8-bit texture to use to initialize buffer contents.
+        wgpu::TextureDescriptor initTextureDesc = {};
+        initTextureDesc.size = {vertexFormatSize + 4, 1, 1};
+        initTextureDesc.format = wgpu::TextureFormat::R8Unorm;
+        initTextureDesc.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+        wgpu::ImageCopyTexture zeroTextureSrc =
+            utils::CreateImageCopyTexture(device.CreateTexture(&initTextureDesc), 0, {0, 0, 0});
+        {
+            wgpu::TextureDataLayout layout =
+                utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+            std::vector<uint8_t> data(initTextureDesc.size.width);
+            queue.WriteTexture(&zeroTextureSrc, data.data(), data.size(), &layout,
+                               &initTextureDesc.size);
+        }
+
+        for (uint32_t extraBytes : {0, 1, 2, 3, 4}) {
+            // Create a vertex buffer to hold a single vertex attribute.
+            // Uniform usage is added to force even more padding on D3D12.
+            // The buffer is internally padded and allocated as a larger buffer.
+            const uint32_t vertexBufferSize = vertexFormatSize + extraBytes;
+            for (uint32_t vertexBufferOffset = 0; vertexBufferOffset <= vertexBufferSize;
+                 vertexBufferOffset += 4u) {
+                wgpu::Buffer vertexBuffer = CreateBuffer(
+                    vertexBufferSize, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform |
+                                          wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+                // "Fully" initialize the buffer with a copy from an 8-bit texture, touching
+                // everything except the padding. From the point-of-view of the API, all
+                // |vertexBufferSize| bytes are initialized. Note: Uses CopyTextureToBuffer because
+                // it does not require 4-byte alignment.
+                {
+                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                    wgpu::ImageCopyBuffer dst =
+                        utils::CreateImageCopyBuffer(vertexBuffer, 0, wgpu::kCopyStrideUndefined);
+                    wgpu::Extent3D extent = {vertexBufferSize, 1, 1};
+                    encoder.CopyTextureToBuffer(&zeroTextureSrc, &dst, &extent);
+
+                    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+                    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+                }
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+                wgpu::Texture colorAttachment =
+                    CreateAndInitializeTexture({1, 1, 1}, kColorAttachmentFormat);
+                utils::ComboRenderPassDescriptor renderPassDescriptor(
+                    {colorAttachment.CreateView()});
+
+                wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+
+                renderPass.SetVertexBuffer(0, vertexBuffer, vertexBufferOffset);
+                renderPass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+
+                renderPass.SetPipeline(renderPipeline);
+                renderPass.DrawIndexed(1);
+                renderPass.End();
+
+                wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+                EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+                constexpr RGBA8 kExpectedPixelValue = {0, 255, 0, 255};
+                EXPECT_PIXEL_RGBA8_EQ(kExpectedPixelValue, colorAttachment, 0, 0);
+            }
+        }
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is in SetIndexBuffer.
+TEST_P(BufferZeroInitTest, SetIndexBuffer) {
+    // Bind the whole buffer as an index buffer.
+    {
+        constexpr uint64_t kIndexBufferOffset = 0u;
+        TestBufferZeroInitAsIndexBuffer(kIndexBufferOffset);
+    }
+
+    // Bind the buffer as an index buffer with a non-zero offset.
+    {
+        constexpr uint64_t kIndexBufferOffset = 16u;
+        TestBufferZeroInitAsIndexBuffer(kIndexBufferOffset);
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is an indirect buffer for
+// DrawIndirect.
+TEST_P(BufferZeroInitTest, IndirectBufferForDrawIndirect) {
+    // Bind the whole buffer as an indirect buffer.
+    {
+        constexpr uint64_t kOffset = 0u;
+        TestBufferZeroInitAsIndirectBufferForDrawIndirect(kOffset);
+    }
+
+    // Bind the buffer as an indirect buffer with a non-zero offset.
+    {
+        constexpr uint64_t kOffset = 8u;
+        TestBufferZeroInitAsIndirectBufferForDrawIndirect(kOffset);
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is an indirect buffer for
+// DrawIndexedIndirect.
+TEST_P(BufferZeroInitTest, IndirectBufferForDrawIndexedIndirect) {
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offset= that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL());
+
+    // Bind the whole buffer as an indirect buffer.
+    {
+        constexpr uint64_t kOffset = 0u;
+        TestBufferZeroInitAsIndirectBufferForDrawIndexedIndirect(kOffset);
+    }
+
+    // Bind the buffer as an indirect buffer with a non-zero offset.
+    {
+        constexpr uint64_t kOffset = 8u;
+        TestBufferZeroInitAsIndirectBufferForDrawIndexedIndirect(kOffset);
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is an indirect buffer for
+// DispatchIndirect.
+TEST_P(BufferZeroInitTest, IndirectBufferForDispatchIndirect) {
+    // TODO(crbug.com/dawn/661): Diagnose and fix this backend validation failure on GLES.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES() && IsBackendValidationEnabled());
+
+    // Bind the whole buffer as an indirect buffer.
+    {
+        constexpr uint64_t kOffset = 0u;
+        TestBufferZeroInitAsIndirectBufferForDispatchIndirect(kOffset);
+    }
+
+    // Bind the buffer as an indirect buffer with a non-zero offset.
+    {
+        constexpr uint64_t kOffset = 8u;
+        TestBufferZeroInitAsIndirectBufferForDispatchIndirect(kOffset);
+    }
+}
+
+// Test the buffer will be lazily initialized correctly when its first use is in resolveQuerySet
+TEST_P(BufferZeroInitTest, ResolveQuerySet) {
+    // Timestamp query is not supported on OpenGL
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+
+    // TODO(crbug.com/dawn/545): Crash occurs if we only call WriteTimestamp in a command encoder
+    // without any copy commands on Metal on AMD GPU.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsAMD());
+
+    // Skip if timestamp feature is not supported on device
+    DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}));
+
+    // crbug.com/dawn/940: Does not work on Mac 11.0+. Backend validation changed.
+    DAWN_TEST_UNSUPPORTED_IF(IsMacOS() && !IsMacOS(10));
+
+    constexpr uint64_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopyDst;
+
+    wgpu::QuerySetDescriptor descriptor;
+    descriptor.count = 2u;
+    descriptor.type = wgpu::QueryType::Timestamp;
+    wgpu::QuerySet querySet = device.CreateQuerySet(&descriptor);
+
+    // Resolve data to the whole buffer doesn't need lazy initialization.
+    {
+        constexpr uint32_t kQueryCount = 2u;
+        constexpr uint64_t kDestinationOffset = 0u;
+
+        wgpu::Buffer destination = CreateBuffer(kBufferSize, kBufferUsage);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, kDestinationOffset);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+    }
+
+    // Resolve data to partial of the buffer needs lazy initialization.
+    // destinationOffset == 0 and destinationOffset + 8 * queryCount < kBufferSize
+    {
+        constexpr uint32_t kQueryCount = 1u;
+        constexpr uint64_t kDestinationOffset = 0u;
+
+        wgpu::Buffer destination = CreateBuffer(kBufferSize, kBufferUsage);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, kDestinationOffset);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+    }
+
+    // destinationOffset > 0 and destinationOffset + 8 * queryCount <= kBufferSize
+    {
+        constexpr uint32_t kQueryCount = 1;
+        constexpr uint64_t kDestinationOffset = 256u;
+
+        wgpu::Buffer destination = CreateBuffer(kBufferSize + kDestinationOffset, kBufferUsage);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, kDestinationOffset);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+    }
+}
+
+DAWN_INSTANTIATE_TEST(BufferZeroInitTest,
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"}));
diff --git a/src/dawn/tests/end2end/ClipSpaceTests.cpp b/src/dawn/tests/end2end/ClipSpaceTests.cpp
new file mode 100644
index 0000000..098913a
--- /dev/null
+++ b/src/dawn/tests/end2end/ClipSpaceTests.cpp
@@ -0,0 +1,100 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ClipSpaceTest : public DawnTest {
+  protected:
+    wgpu::RenderPipeline CreatePipelineForTest() {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        // Draw two triangles:
+        // 1. The depth value of the top-left one is >= 0.5
+        // 2. The depth value of the bottom-right one is <= 0.5
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec3<f32>, 6>(
+                    vec3<f32>(-1.0,  1.0, 1.0),
+                    vec3<f32>(-1.0, -1.0, 0.5),
+                    vec3<f32>( 1.0,  1.0, 0.5),
+                    vec3<f32>( 1.0,  1.0, 0.5),
+                    vec3<f32>(-1.0, -1.0, 0.5),
+                    vec3<f32>( 1.0, -1.0, 0.0));
+                return vec4<f32>(pos[VertexIndex], 1.0);
+            })");
+
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+               return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil();
+        depthStencil->depthCompare = wgpu::CompareFunction::LessEqual;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::Texture Create2DTextureForTest(wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+        textureDescriptor.format = format;
+        textureDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        textureDescriptor.mipLevelCount = 1;
+        textureDescriptor.sampleCount = 1;
+        textureDescriptor.size = {kSize, kSize, 1};
+        return device.CreateTexture(&textureDescriptor);
+    }
+
+    static constexpr uint32_t kSize = 4;
+};
+
+// Test that the clip space is correctly configured.
+TEST_P(ClipSpaceTest, ClipSpace) {
+    wgpu::Texture colorTexture = Create2DTextureForTest(wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture depthStencilTexture =
+        Create2DTextureForTest(wgpu::TextureFormat::Depth24PlusStencil8);
+
+    utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()},
+                                                          depthStencilTexture.CreateView());
+    renderPassDescriptor.cColorAttachments[0].clearValue = {0.0, 1.0, 0.0, 1.0};
+    renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+
+    // Clear the depth stencil attachment to 0.5f, so only the bottom-right triangle should be
+    // drawn.
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.5f;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder renderPass = commandEncoder.BeginRenderPass(&renderPassDescriptor);
+    renderPass.SetPipeline(CreatePipelineForTest());
+    renderPass.Draw(6);
+    renderPass.End();
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, colorTexture, kSize - 1, kSize - 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, colorTexture, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(ClipSpaceTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ColorStateTests.cpp b/src/dawn/tests/end2end/ColorStateTests.cpp
new file mode 100644
index 0000000..1a9b1c6
--- /dev/null
+++ b/src/dawn/tests/end2end/ColorStateTests.cpp
@@ -0,0 +1,1166 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 64;
+
+class ColorStateTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+        pipelineLayout = utils::MakePipelineLayout(device, {bindGroupLayout});
+
+        // TODO(crbug.com/dawn/489): D3D12_Microsoft_Basic_Render_Driver_CPU
+        // produces invalid results for these tests.
+        DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+        vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex)
+                fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                    var pos = array<vec2<f32>, 3>(
+                        vec2<f32>(-1.0, -1.0),
+                        vec2<f32>(3.0, -1.0),
+                        vec2<f32>(-1.0, 3.0));
+                    return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+                }
+            )");
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    }
+
+    struct TriangleSpec {
+        RGBA8 color;
+        std::array<float, 4> blendFactor = {};
+    };
+
+    // Set up basePipeline and testPipeline. testPipeline has the given blend state on the first
+    // attachment. basePipeline has no blending
+    void SetupSingleSourcePipelines(wgpu::ColorTargetState colorTargetState) {
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                struct MyBlock {
+                    color : vec4<f32>
+                }
+
+                @group(0) @binding(0) var<uniform> myUbo : MyBlock;
+
+                @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                    return myUbo.color;
+                }
+            )");
+
+        utils::ComboRenderPipelineDescriptor baseDescriptor;
+        baseDescriptor.layout = pipelineLayout;
+        baseDescriptor.vertex.module = vsModule;
+        baseDescriptor.cFragment.module = fsModule;
+        baseDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+        basePipeline = device.CreateRenderPipeline(&baseDescriptor);
+
+        utils::ComboRenderPipelineDescriptor testDescriptor;
+        testDescriptor.layout = pipelineLayout;
+        testDescriptor.vertex.module = vsModule;
+        testDescriptor.cFragment.module = fsModule;
+        testDescriptor.cTargets[0] = colorTargetState;
+        testDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+        testPipeline = device.CreateRenderPipeline(&testDescriptor);
+    }
+
+    // Create a bind group to set the colors as a uniform buffer
+    template <size_t N>
+    wgpu::BindGroup MakeBindGroupForColors(std::array<RGBA8, N> colors) {
+        std::array<float, 4 * N> data;
+        for (unsigned int i = 0; i < N; ++i) {
+            data[4 * i + 0] = static_cast<float>(colors[i].r) / 255.f;
+            data[4 * i + 1] = static_cast<float>(colors[i].g) / 255.f;
+            data[4 * i + 2] = static_cast<float>(colors[i].b) / 255.f;
+            data[4 * i + 3] = static_cast<float>(colors[i].a) / 255.f;
+        }
+
+        uint32_t bufferSize = static_cast<uint32_t>(4 * N * sizeof(float));
+
+        wgpu::Buffer buffer =
+            utils::CreateBufferFromData(device, &data, bufferSize, wgpu::BufferUsage::Uniform);
+        return utils::MakeBindGroup(device, testPipeline.GetBindGroupLayout(0),
+                                    {{0, buffer, 0, bufferSize}});
+    }
+
+    // Test that after drawing a triangle with the base color, and then the given triangle spec, the
+    // color is as expected
+    void DoSingleSourceTest(RGBA8 base, const TriangleSpec& triangle, const RGBA8& expected) {
+        wgpu::Color blendConstant{triangle.blendFactor[0], triangle.blendFactor[1],
+                                  triangle.blendFactor[2], triangle.blendFactor[3]};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            // First use the base pipeline to draw a triangle with no blending
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(0, MakeBindGroupForColors(std::array<RGBA8, 1>({{base}})));
+            pass.Draw(3);
+
+            // Then use the test pipeline to draw the test triangle with blending
+            pass.SetPipeline(testPipeline);
+            pass.SetBindGroup(0, MakeBindGroupForColors(std::array<RGBA8, 1>({{triangle.color}})));
+            pass.SetBlendConstant(&blendConstant);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expected, renderPass.color, kRTSize / 2, kRTSize / 2);
+    }
+
+    // Given a vector of tests where each element is <testColor, expectedColor>, check that all
+    // expectations are true for the given blend operation
+    void CheckBlendOperation(RGBA8 base,
+                             wgpu::BlendOperation operation,
+                             std::vector<std::pair<RGBA8, RGBA8>> tests) {
+        wgpu::BlendComponent blendComponent;
+        blendComponent.operation = operation;
+        blendComponent.srcFactor = wgpu::BlendFactor::One;
+        blendComponent.dstFactor = wgpu::BlendFactor::One;
+
+        wgpu::BlendState blend;
+        blend.color = blendComponent;
+        blend.alpha = blendComponent;
+
+        wgpu::ColorTargetState descriptor;
+        descriptor.blend = &blend;
+        descriptor.writeMask = wgpu::ColorWriteMask::All;
+
+        SetupSingleSourcePipelines(descriptor);
+
+        for (const auto& [triangleColor, expectedColor] : tests) {
+            DoSingleSourceTest(base, {triangleColor}, expectedColor);
+        }
+    }
+
+    // Given a vector of tests where each element is <testSpec, expectedColor>, check that all
+    // expectations are true for the given blend factors
+    void CheckBlendFactor(RGBA8 base,
+                          wgpu::BlendFactor colorSrcFactor,
+                          wgpu::BlendFactor colorDstFactor,
+                          wgpu::BlendFactor alphaSrcFactor,
+                          wgpu::BlendFactor alphaDstFactor,
+                          std::vector<std::pair<TriangleSpec, RGBA8>> tests) {
+        wgpu::BlendComponent colorBlend;
+        colorBlend.operation = wgpu::BlendOperation::Add;
+        colorBlend.srcFactor = colorSrcFactor;
+        colorBlend.dstFactor = colorDstFactor;
+
+        wgpu::BlendComponent alphaBlend;
+        alphaBlend.operation = wgpu::BlendOperation::Add;
+        alphaBlend.srcFactor = alphaSrcFactor;
+        alphaBlend.dstFactor = alphaDstFactor;
+
+        wgpu::BlendState blend;
+        blend.color = colorBlend;
+        blend.alpha = alphaBlend;
+
+        wgpu::ColorTargetState descriptor;
+        descriptor.blend = &blend;
+        descriptor.writeMask = wgpu::ColorWriteMask::All;
+
+        SetupSingleSourcePipelines(descriptor);
+
+        for (const auto& [triangles, expectedColor] : tests) {
+            DoSingleSourceTest(base, triangles, expectedColor);
+        }
+    }
+
+    void CheckSrcBlendFactor(RGBA8 base,
+                             wgpu::BlendFactor colorFactor,
+                             wgpu::BlendFactor alphaFactor,
+                             std::vector<std::pair<TriangleSpec, RGBA8>> tests) {
+        CheckBlendFactor(base, colorFactor, wgpu::BlendFactor::One, alphaFactor,
+                         wgpu::BlendFactor::One, tests);
+    }
+
+    void CheckDstBlendFactor(RGBA8 base,
+                             wgpu::BlendFactor colorFactor,
+                             wgpu::BlendFactor alphaFactor,
+                             std::vector<std::pair<TriangleSpec, RGBA8>> tests) {
+        CheckBlendFactor(base, wgpu::BlendFactor::One, colorFactor, wgpu::BlendFactor::One,
+                         alphaFactor, tests);
+    }
+
+    wgpu::PipelineLayout pipelineLayout;
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline basePipeline;
+    wgpu::RenderPipeline testPipeline;
+    wgpu::ShaderModule vsModule;
+};
+
+namespace {
+    // Add two colors and clamp
+    constexpr RGBA8 operator+(const RGBA8& col1, const RGBA8& col2) {
+        int r = static_cast<int>(col1.r) + static_cast<int>(col2.r);
+        int g = static_cast<int>(col1.g) + static_cast<int>(col2.g);
+        int b = static_cast<int>(col1.b) + static_cast<int>(col2.b);
+        int a = static_cast<int>(col1.a) + static_cast<int>(col2.a);
+        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+
+        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                     static_cast<uint8_t>(a));
+    }
+
+    // Subtract two colors and clamp
+    constexpr RGBA8 operator-(const RGBA8& col1, const RGBA8& col2) {
+        int r = static_cast<int>(col1.r) - static_cast<int>(col2.r);
+        int g = static_cast<int>(col1.g) - static_cast<int>(col2.g);
+        int b = static_cast<int>(col1.b) - static_cast<int>(col2.b);
+        int a = static_cast<int>(col1.a) - static_cast<int>(col2.a);
+        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+
+        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                     static_cast<uint8_t>(a));
+    }
+
+    // Get the component-wise minimum of two colors
+    RGBA8 min(const RGBA8& col1, const RGBA8& col2) {
+        return RGBA8(std::min(col1.r, col2.r), std::min(col1.g, col2.g), std::min(col1.b, col2.b),
+                     std::min(col1.a, col2.a));
+    }
+
+    // Get the component-wise maximum of two colors
+    RGBA8 max(const RGBA8& col1, const RGBA8& col2) {
+        return RGBA8(std::max(col1.r, col2.r), std::max(col1.g, col2.g), std::max(col1.b, col2.b),
+                     std::max(col1.a, col2.a));
+    }
+
+    // Blend two RGBA8 color values parameterized by the provided factors in the range [0.f, 1.f]
+    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, std::array<float, 4> fac) {
+        float r = static_cast<float>(col1.r) * (1.f - fac[0]) + static_cast<float>(col2.r) * fac[0];
+        float g = static_cast<float>(col1.g) * (1.f - fac[1]) + static_cast<float>(col2.g) * fac[1];
+        float b = static_cast<float>(col1.b) * (1.f - fac[2]) + static_cast<float>(col2.b) * fac[2];
+        float a = static_cast<float>(col1.a) * (1.f - fac[3]) + static_cast<float>(col2.a) * fac[3];
+
+        return RGBA8({static_cast<uint8_t>(std::round(r)), static_cast<uint8_t>(std::round(g)),
+                      static_cast<uint8_t>(std::round(b)), static_cast<uint8_t>(std::round(a))});
+    }
+
+    // Blend two RGBA8 color values parameterized by the provided RGBA8 factor
+    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, const RGBA8& fac) {
+        std::array<float, 4> f = {{
+            static_cast<float>(fac.r) / 255.f,
+            static_cast<float>(fac.g) / 255.f,
+            static_cast<float>(fac.b) / 255.f,
+            static_cast<float>(fac.a) / 255.f,
+        }};
+        return mix(col1, col2, f);
+    }
+
+    constexpr std::array<RGBA8, 8> kColors = {{
+        // check operations over multiple channels
+        RGBA8(64, 0, 0, 0),
+        RGBA8(0, 64, 0, 0),
+        RGBA8(64, 0, 32, 0),
+        RGBA8(0, 64, 32, 0),
+        RGBA8(128, 0, 128, 128),
+        RGBA8(0, 128, 128, 128),
+
+        // check cases that may cause overflow
+        RGBA8(0, 0, 0, 0),
+        RGBA8(255, 255, 255, 255),
+    }};
+}  // namespace
+
+// Test compilation and usage of the fixture
+TEST_P(ColorStateTest, Basic) {
+    wgpu::BlendComponent blendComponent;
+    blendComponent.operation = wgpu::BlendOperation::Add;
+    blendComponent.srcFactor = wgpu::BlendFactor::One;
+    blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+
+    wgpu::BlendState blend;
+    blend.color = blendComponent;
+    blend.alpha = blendComponent;
+
+    wgpu::ColorTargetState descriptor;
+    descriptor.blend = &blend;
+    descriptor.writeMask = wgpu::ColorWriteMask::All;
+
+    SetupSingleSourcePipelines(descriptor);
+
+    DoSingleSourceTest(RGBA8(0, 0, 0, 0), {RGBA8(255, 0, 0, 0)}, RGBA8(255, 0, 0, 0));
+}
+
+// The following tests check test that the blend operation works
+TEST_P(ColorStateTest, BlendOperationAdd) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<RGBA8, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) { return std::make_pair(color, base + color); });
+    CheckBlendOperation(base, wgpu::BlendOperation::Add, tests);
+}
+
+TEST_P(ColorStateTest, BlendOperationSubtract) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<RGBA8, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) { return std::make_pair(color, color - base); });
+    CheckBlendOperation(base, wgpu::BlendOperation::Subtract, tests);
+}
+
+TEST_P(ColorStateTest, BlendOperationReverseSubtract) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<RGBA8, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) { return std::make_pair(color, base - color); });
+    CheckBlendOperation(base, wgpu::BlendOperation::ReverseSubtract, tests);
+}
+
+TEST_P(ColorStateTest, BlendOperationMin) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<RGBA8, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) { return std::make_pair(color, min(base, color)); });
+    CheckBlendOperation(base, wgpu::BlendOperation::Min, tests);
+}
+
+TEST_P(ColorStateTest, BlendOperationMax) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<RGBA8, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) { return std::make_pair(color, max(base, color)); });
+    CheckBlendOperation(base, wgpu::BlendOperation::Max, tests);
+}
+
+// The following tests check that the Source blend factor works
+TEST_P(ColorStateTest, SrcBlendFactorZero) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests),
+        [&](const RGBA8& color) { return std::make_pair(TriangleSpec({{color}}), base); });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::Zero, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOne) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests),
+        [&](const RGBA8& color) { return std::make_pair(TriangleSpec({{color}}), base + color); });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::One, wgpu::BlendFactor::One, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorSrc) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = color;
+                       fac.a = 0;
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::Src, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOneMinusSrc) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = RGBA8(255, 255, 255, 255) - color;
+                       fac.a = 0;
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::OneMinusSrc, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorSrcAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac(color.a, color.a, color.a, color.a);
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::SrcAlpha, wgpu::BlendFactor::SrcAlpha, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOneMinusSrcAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            RGBA8 fac = RGBA8(255, 255, 255, 255) - RGBA8(color.a, color.a, color.a, color.a);
+            RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+            return std::make_pair(TriangleSpec({{color}}), expected);
+        });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::OneMinusSrcAlpha,
+                        wgpu::BlendFactor::OneMinusSrcAlpha, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorDst) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = base;
+                       fac.a = 0;
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::Dst, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOneMinusDst) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = RGBA8(255, 255, 255, 255) - base;
+                       fac.a = 0;
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::OneMinusDst, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorDstAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac(base.a, base.a, base.a, base.a);
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::DstAlpha, wgpu::BlendFactor::DstAlpha, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOneMinusDstAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            RGBA8 fac = RGBA8(255, 255, 255, 255) - RGBA8(base.a, base.a, base.a, base.a);
+            RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+            return std::make_pair(TriangleSpec({{color}}), expected);
+        });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::OneMinusDstAlpha,
+                        wgpu::BlendFactor::OneMinusDstAlpha, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorSrcAlphaSaturated) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       uint8_t f = std::min(color.a, static_cast<uint8_t>(255 - base.a));
+                       RGBA8 fac(f, f, f, 255);
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::SrcAlphaSaturated,
+                        wgpu::BlendFactor::SrcAlphaSaturated, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorConstant) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            auto triangleSpec = TriangleSpec({{color}, {{0.2f, 0.4f, 0.6f, 0.8f}}});
+            RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, triangleSpec.blendFactor);
+            return std::make_pair(triangleSpec, expected);
+        });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::Constant, wgpu::BlendFactor::Constant, tests);
+}
+
+TEST_P(ColorStateTest, SrcBlendFactorOneMinusConstant) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       auto triangleSpec = TriangleSpec({{color}, {{0.2f, 0.4f, 0.6f, 0.8f}}});
+                       std::array<float, 4> f = {{0.8f, 0.6f, 0.4f, 0.2f}};
+                       RGBA8 expected = base + mix(RGBA8(0, 0, 0, 0), color, f);
+                       return std::make_pair(triangleSpec, expected);
+                   });
+    CheckSrcBlendFactor(base, wgpu::BlendFactor::OneMinusConstant,
+                        wgpu::BlendFactor::OneMinusConstant, tests);
+}
+
+// The following tests check that the Destination blend factor works
+TEST_P(ColorStateTest, DstBlendFactorZero) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests),
+        [&](const RGBA8& color) { return std::make_pair(TriangleSpec({{color}}), color); });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::Zero, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOne) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests),
+        [&](const RGBA8& color) { return std::make_pair(TriangleSpec({{color}}), base + color); });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::One, wgpu::BlendFactor::One, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorSrc) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = color;
+                       fac.a = 0;
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::Src, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOneMinusSrc) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = RGBA8(255, 255, 255, 255) - color;
+                       fac.a = 0;
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::OneMinusSrc, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorSrcAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac(color.a, color.a, color.a, color.a);
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::SrcAlpha, wgpu::BlendFactor::SrcAlpha, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOneMinusSrcAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            RGBA8 fac = RGBA8(255, 255, 255, 255) - RGBA8(color.a, color.a, color.a, color.a);
+            RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+            return std::make_pair(TriangleSpec({{color}}), expected);
+        });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::OneMinusSrcAlpha,
+                        wgpu::BlendFactor::OneMinusSrcAlpha, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorDst) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = base;
+                       fac.a = 0;
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::Dst, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOneMinusDst) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac = RGBA8(255, 255, 255, 255) - base;
+                       fac.a = 0;
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::OneMinusDst, wgpu::BlendFactor::Zero, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorDstAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       RGBA8 fac(base.a, base.a, base.a, base.a);
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::DstAlpha, wgpu::BlendFactor::DstAlpha, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOneMinusDstAlpha) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            RGBA8 fac = RGBA8(255, 255, 255, 255) - RGBA8(base.a, base.a, base.a, base.a);
+            RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+            return std::make_pair(TriangleSpec({{color}}), expected);
+        });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::OneMinusDstAlpha,
+                        wgpu::BlendFactor::OneMinusDstAlpha, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorSrcAlphaSaturated) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       uint8_t f = std::min(color.a, static_cast<uint8_t>(255 - base.a));
+                       RGBA8 fac(f, f, f, 255);
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, fac);
+                       return std::make_pair(TriangleSpec({{color}}), expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::SrcAlphaSaturated,
+                        wgpu::BlendFactor::SrcAlphaSaturated, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorConstant) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(
+        kColors.begin(), kColors.end(), std::back_inserter(tests), [&](const RGBA8& color) {
+            auto triangleSpec = TriangleSpec({{color}, {{0.2f, 0.4f, 0.6f, 0.8f}}});
+            RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, triangleSpec.blendFactor);
+            return std::make_pair(triangleSpec, expected);
+        });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::Constant, wgpu::BlendFactor::Constant, tests);
+}
+
+TEST_P(ColorStateTest, DstBlendFactorOneMinusConstant) {
+    RGBA8 base(32, 64, 128, 192);
+    std::vector<std::pair<TriangleSpec, RGBA8>> tests;
+    std::transform(kColors.begin(), kColors.end(), std::back_inserter(tests),
+                   [&](const RGBA8& color) {
+                       auto triangleSpec = TriangleSpec({{color}, {{0.2f, 0.4f, 0.6f, 0.8f}}});
+                       std::array<float, 4> f = {{0.8f, 0.6f, 0.4f, 0.2f}};
+                       RGBA8 expected = color + mix(RGBA8(0, 0, 0, 0), base, f);
+                       return std::make_pair(triangleSpec, expected);
+                   });
+    CheckDstBlendFactor(base, wgpu::BlendFactor::OneMinusConstant,
+                        wgpu::BlendFactor::OneMinusConstant, tests);
+}
+
+// Check that the color write mask works
+TEST_P(ColorStateTest, ColorWriteMask) {
+    wgpu::BlendComponent blendComponent;
+    blendComponent.operation = wgpu::BlendOperation::Add;
+    blendComponent.srcFactor = wgpu::BlendFactor::One;
+    blendComponent.dstFactor = wgpu::BlendFactor::One;
+
+    wgpu::BlendState blend;
+    blend.color = blendComponent;
+    blend.alpha = blendComponent;
+
+    wgpu::ColorTargetState descriptor;
+    descriptor.blend = &blend;
+    {
+        // Test single channel color write
+        descriptor.writeMask = wgpu::ColorWriteMask::Red;
+        SetupSingleSourcePipelines(descriptor);
+
+        RGBA8 base(32, 64, 128, 192);
+        for (auto& color : kColors) {
+            RGBA8 expected = base + RGBA8(color.r, 0, 0, 0);
+            DoSingleSourceTest(base, {color}, expected);
+        }
+    }
+
+    {
+        // Test multi channel color write
+        descriptor.writeMask = wgpu::ColorWriteMask::Green | wgpu::ColorWriteMask::Alpha;
+        SetupSingleSourcePipelines(descriptor);
+
+        RGBA8 base(32, 64, 128, 192);
+        for (auto& color : kColors) {
+            RGBA8 expected = base + RGBA8(0, color.g, 0, color.a);
+            DoSingleSourceTest(base, {color}, expected);
+        }
+    }
+
+    {
+        // Test no channel color write
+        descriptor.writeMask = wgpu::ColorWriteMask::None;
+        SetupSingleSourcePipelines(descriptor);
+
+        RGBA8 base(32, 64, 128, 192);
+        for (auto& color : kColors) {
+            DoSingleSourceTest(base, {color}, base);
+        }
+    }
+}
+
+// Check that the color write mask works when blending is disabled
+TEST_P(ColorStateTest, ColorWriteMaskBlendingDisabled) {
+    {
+        wgpu::BlendComponent blendComponent;
+        blendComponent.operation = wgpu::BlendOperation::Add;
+        blendComponent.srcFactor = wgpu::BlendFactor::One;
+        blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+
+        wgpu::BlendState blend;
+        blend.color = blendComponent;
+        blend.alpha = blendComponent;
+
+        wgpu::ColorTargetState descriptor;
+        descriptor.blend = &blend;
+        descriptor.writeMask = wgpu::ColorWriteMask::Red;
+        SetupSingleSourcePipelines(descriptor);
+
+        RGBA8 base(32, 64, 128, 192);
+        RGBA8 expected(32, 0, 0, 0);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(testPipeline);
+            pass.SetBindGroup(0, MakeBindGroupForColors(std::array<RGBA8, 1>({{base}})));
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+        EXPECT_PIXEL_RGBA8_EQ(expected, renderPass.color, kRTSize / 2, kRTSize / 2);
+    }
+}
+
+// Test that independent color states on render targets works
+TEST_P(ColorStateTest, IndependentColorState) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_indexed_draw_buffers"));
+
+    std::array<wgpu::Texture, 4> renderTargets;
+    std::array<wgpu::TextureView, 4> renderTargetViews;
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = kRTSize;
+    descriptor.size.height = kRTSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+    for (uint32_t i = 0; i < 4; ++i) {
+        renderTargets[i] = device.CreateTexture(&descriptor);
+        renderTargetViews[i] = renderTargets[i].CreateView();
+    }
+
+    utils::ComboRenderPassDescriptor renderPass(
+        {renderTargetViews[0], renderTargetViews[1], renderTargetViews[2], renderTargetViews[3]});
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct MyBlock {
+            color0 : vec4<f32>,
+            color1 : vec4<f32>,
+            color2 : vec4<f32>,
+            color3 : vec4<f32>,
+        }
+
+        @group(0) @binding(0) var<uniform> myUbo : MyBlock;
+
+        struct FragmentOut {
+            @location(0) fragColor0 : vec4<f32>,
+            @location(1) fragColor1 : vec4<f32>,
+            @location(2) fragColor2 : vec4<f32>,
+            @location(3) fragColor3 : vec4<f32>,
+        }
+
+        @stage(fragment) fn main() -> FragmentOut {
+            var output : FragmentOut;
+            output.fragColor0 = myUbo.color0;
+            output.fragColor1 = myUbo.color1;
+            output.fragColor2 = myUbo.color2;
+            output.fragColor3 = myUbo.color3;
+            return output;
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor baseDescriptor;
+    baseDescriptor.layout = pipelineLayout;
+    baseDescriptor.vertex.module = vsModule;
+    baseDescriptor.cFragment.module = fsModule;
+    baseDescriptor.cFragment.targetCount = 4;
+
+    basePipeline = device.CreateRenderPipeline(&baseDescriptor);
+
+    utils::ComboRenderPipelineDescriptor testDescriptor;
+    testDescriptor.layout = pipelineLayout;
+    testDescriptor.vertex.module = vsModule;
+    testDescriptor.cFragment.module = fsModule;
+    testDescriptor.cFragment.targetCount = 4;
+
+    // set color states
+    wgpu::BlendComponent blendComponent0;
+    blendComponent0.operation = wgpu::BlendOperation::Add;
+    blendComponent0.srcFactor = wgpu::BlendFactor::One;
+    blendComponent0.dstFactor = wgpu::BlendFactor::One;
+
+    wgpu::BlendState blend0;
+    blend0.color = blendComponent0;
+    blend0.alpha = blendComponent0;
+
+    wgpu::BlendComponent blendComponent1;
+    blendComponent1.operation = wgpu::BlendOperation::Subtract;
+    blendComponent1.srcFactor = wgpu::BlendFactor::One;
+    blendComponent1.dstFactor = wgpu::BlendFactor::One;
+
+    wgpu::BlendState blend1;
+    blend1.color = blendComponent1;
+    blend1.alpha = blendComponent1;
+
+    // Blend state intentionally omitted for target 2
+
+    wgpu::BlendComponent blendComponent3;
+    blendComponent3.operation = wgpu::BlendOperation::Min;
+    blendComponent3.srcFactor = wgpu::BlendFactor::One;
+    blendComponent3.dstFactor = wgpu::BlendFactor::One;
+
+    wgpu::BlendState blend3;
+    blend3.color = blendComponent3;
+    blend3.alpha = blendComponent3;
+
+    testDescriptor.cTargets[0].blend = &blend0;
+    testDescriptor.cTargets[1].blend = &blend1;
+    testDescriptor.cTargets[3].blend = &blend3;
+
+    testPipeline = device.CreateRenderPipeline(&testDescriptor);
+
+    for (unsigned int c = 0; c < kColors.size(); ++c) {
+        RGBA8 base = kColors[((c + 31) * 29) % kColors.size()];
+        RGBA8 color0 = kColors[((c + 19) * 13) % kColors.size()];
+        RGBA8 color1 = kColors[((c + 11) * 43) % kColors.size()];
+        RGBA8 color2 = kColors[((c + 7) * 3) % kColors.size()];
+        RGBA8 color3 = kColors[((c + 13) * 71) % kColors.size()];
+
+        RGBA8 expected0 = color0 + base;
+        RGBA8 expected1 = color1 - base;
+        RGBA8 expected2 = color2;
+        RGBA8 expected3 = min(color3, base);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(
+                0, MakeBindGroupForColors(std::array<RGBA8, 4>({{base, base, base, base}})));
+            pass.Draw(3);
+
+            pass.SetPipeline(testPipeline);
+            pass.SetBindGroup(0, MakeBindGroupForColors(
+                                     std::array<RGBA8, 4>({{color0, color1, color2, color3}})));
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expected0, renderTargets[0], kRTSize / 2, kRTSize / 2)
+            << "Attachment slot 0 should have been " << color0 << " + " << base << " = "
+            << expected0;
+        EXPECT_PIXEL_RGBA8_EQ(expected1, renderTargets[1], kRTSize / 2, kRTSize / 2)
+            << "Attachment slot 1 should have been " << color1 << " - " << base << " = "
+            << expected1;
+        EXPECT_PIXEL_RGBA8_EQ(expected2, renderTargets[2], kRTSize / 2, kRTSize / 2)
+            << "Attachment slot 2 should have been " << color2 << " = " << expected2
+            << "(no blending)";
+        EXPECT_PIXEL_RGBA8_EQ(expected3, renderTargets[3], kRTSize / 2, kRTSize / 2)
+            << "Attachment slot 3 should have been min(" << color3 << ", " << base
+            << ") = " << expected3;
+    }
+}
+
+// Test that the default blend color is correctly set at the beginning of every subpass
+TEST_P(ColorStateTest, DefaultBlendColor) {
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct MyBlock {
+            color : vec4<f32>
+        }
+
+        @group(0) @binding(0) var<uniform> myUbo : MyBlock;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return myUbo.color;
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor baseDescriptor;
+    baseDescriptor.layout = pipelineLayout;
+    baseDescriptor.vertex.module = vsModule;
+    baseDescriptor.cFragment.module = fsModule;
+    baseDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    basePipeline = device.CreateRenderPipeline(&baseDescriptor);
+
+    utils::ComboRenderPipelineDescriptor testDescriptor;
+    testDescriptor.layout = pipelineLayout;
+    testDescriptor.vertex.module = vsModule;
+    testDescriptor.cFragment.module = fsModule;
+    testDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::BlendComponent blendComponent;
+    blendComponent.operation = wgpu::BlendOperation::Add;
+    blendComponent.srcFactor = wgpu::BlendFactor::Constant;
+    blendComponent.dstFactor = wgpu::BlendFactor::One;
+
+    wgpu::BlendState blend;
+    blend.color = blendComponent;
+    blend.alpha = blendComponent;
+
+    testDescriptor.cTargets[0].blend = &blend;
+
+    testPipeline = device.CreateRenderPipeline(&testDescriptor);
+    constexpr wgpu::Color kWhite{1.0f, 1.0f, 1.0f, 1.0f};
+
+    // Check that the initial blend color is (0,0,0,0)
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(0,
+                              MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(0, 0, 0, 0)}})));
+            pass.Draw(3);
+            pass.SetPipeline(testPipeline);
+            pass.SetBindGroup(
+                0, MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(255, 255, 255, 255)}})));
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), renderPass.color, kRTSize / 2, kRTSize / 2);
+    }
+
+    // Check that setting the blend color works
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(0,
+                              MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(0, 0, 0, 0)}})));
+            pass.Draw(3);
+            pass.SetPipeline(testPipeline);
+            pass.SetBlendConstant(&kWhite);
+            pass.SetBindGroup(
+                0, MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(255, 255, 255, 255)}})));
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(255, 255, 255, 255), renderPass.color, kRTSize / 2,
+                              kRTSize / 2);
+    }
+
+    // Check that the blend color is not inherited between render passes
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(0,
+                              MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(0, 0, 0, 0)}})));
+            pass.Draw(3);
+            pass.SetPipeline(testPipeline);
+            pass.SetBlendConstant(&kWhite);
+            pass.SetBindGroup(
+                0, MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(255, 255, 255, 255)}})));
+            pass.Draw(3);
+            pass.End();
+        }
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(basePipeline);
+            pass.SetBindGroup(0,
+                              MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(0, 0, 0, 0)}})));
+            pass.Draw(3);
+            pass.SetPipeline(testPipeline);
+            pass.SetBindGroup(
+                0, MakeBindGroupForColors(std::array<RGBA8, 1>({{RGBA8(255, 255, 255, 255)}})));
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), renderPass.color, kRTSize / 2, kRTSize / 2);
+    }
+}
+
+// This tests a problem in the OpenGL backend where a previous color write mask
+// persisted and prevented a render pass loadOp from fully clearing the output
+// attachment.
+TEST_P(ColorStateTest, ColorWriteMaskDoesNotAffectRenderPassLoadOpClear) {
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct MyBlock {
+            color : vec4<f32>
+        }
+
+        @group(0) @binding(0) var<uniform> myUbo : MyBlock;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return myUbo.color;
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor baseDescriptor;
+    baseDescriptor.layout = pipelineLayout;
+    baseDescriptor.vertex.module = vsModule;
+    baseDescriptor.cFragment.module = fsModule;
+    baseDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    basePipeline = device.CreateRenderPipeline(&baseDescriptor);
+
+    utils::ComboRenderPipelineDescriptor testDescriptor;
+    testDescriptor.layout = pipelineLayout;
+    testDescriptor.vertex.module = vsModule;
+    testDescriptor.cFragment.module = fsModule;
+    testDescriptor.cTargets[0].format = renderPass.colorFormat;
+    testDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::Red;
+
+    testPipeline = device.CreateRenderPipeline(&testDescriptor);
+
+    RGBA8 base(32, 64, 128, 192);
+    RGBA8 expected(0, 0, 0, 0);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        // Clear the render attachment to |base|
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(basePipeline);
+        pass.SetBindGroup(0, MakeBindGroupForColors(std::array<RGBA8, 1>({{base}})));
+        pass.Draw(3);
+
+        // Set a pipeline that will dirty the color write mask
+        pass.SetPipeline(testPipeline);
+        pass.End();
+    }
+    {
+        // This renderpass' loadOp should clear all channels of the render attachment
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(expected, renderPass.color, kRTSize / 2, kRTSize / 2);
+}
+
+TEST_P(ColorStateTest, SparseAttachmentsDifferentColorMask) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_indexed_draw_buffers"));
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct Outputs {
+            @location(1) o1 : vec4<f32>,
+            @location(3) o3 : vec4<f32>,
+        }
+
+        @stage(fragment) fn main() -> Outputs {
+            return Outputs(vec4<f32>(1.0), vec4<f32>(0.0, 1.0, 1.0, 1.0));
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = vsModule;
+    pipelineDesc.cFragment.module = fsModule;
+    pipelineDesc.cFragment.targetCount = 4;
+    pipelineDesc.cTargets[0].format = wgpu::TextureFormat::Undefined;
+    pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    pipelineDesc.cTargets[1].format = wgpu::TextureFormat::RGBA8Unorm;
+    pipelineDesc.cTargets[2].format = wgpu::TextureFormat::Undefined;
+    pipelineDesc.cTargets[2].writeMask = wgpu::ColorWriteMask::None;
+    pipelineDesc.cTargets[3].format = wgpu::TextureFormat::RGBA8Unorm;
+    pipelineDesc.cTargets[3].writeMask = wgpu::ColorWriteMask::Green | wgpu::ColorWriteMask::Alpha;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    wgpu::TextureDescriptor texDesc;
+    texDesc.dimension = wgpu::TextureDimension::e2D;
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    texDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    texDesc.size = {1, 1};
+    wgpu::Texture attachment1 = device.CreateTexture(&texDesc);
+    wgpu::Texture attachment3 = device.CreateTexture(&texDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassColorAttachment colorAttachments[4]{};
+        colorAttachments[0].view = nullptr;
+        colorAttachments[1].view = attachment1.CreateView();
+        colorAttachments[1].loadOp = wgpu::LoadOp::Load;
+        colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+        colorAttachments[2].view = nullptr;
+        colorAttachments[3].view = attachment3.CreateView();
+        colorAttachments[3].loadOp = wgpu::LoadOp::Load;
+        colorAttachments[3].storeOp = wgpu::StoreOp::Store;
+
+        wgpu::RenderPassDescriptor rpDesc;
+        rpDesc.colorAttachmentCount = 4;
+        rpDesc.colorAttachments = colorAttachments;
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rpDesc);
+        pass.SetPipeline(pipeline);
+        pass.Draw(3);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kWhite, attachment1, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, attachment3, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(ColorStateTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/CommandEncoderTests.cpp b/src/dawn/tests/end2end/CommandEncoderTests.cpp
new file mode 100644
index 0000000..00d99c5
--- /dev/null
+++ b/src/dawn/tests/end2end/CommandEncoderTests.cpp
@@ -0,0 +1,53 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class CommandEncoderTests : public DawnTest {};
+
+// Tests WriteBuffer commands on CommandEncoder.
+TEST_P(CommandEncoderTests, WriteBuffer) {
+    wgpu::Buffer bufferA = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc, {0, 0, 0, 0});
+    wgpu::Buffer bufferB = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc, {0, 0, 0, 0});
+    wgpu::Buffer bufferC = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc, {0, 0, 0, 0});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    const uint32_t kData1 = 1;
+    encoder.WriteBuffer(bufferA, 0, reinterpret_cast<const uint8_t*>(&kData1), sizeof(kData1));
+    encoder.CopyBufferToBuffer(bufferA, 0, bufferB, sizeof(uint32_t), 3 * sizeof(uint32_t));
+
+    const uint32_t kData2 = 2;
+    encoder.WriteBuffer(bufferB, 0, reinterpret_cast<const uint8_t*>(&kData2), sizeof(kData2));
+    encoder.CopyBufferToBuffer(bufferB, 0, bufferC, sizeof(uint32_t), 3 * sizeof(uint32_t));
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_EQ(0, bufferC, 0);
+    EXPECT_BUFFER_U32_EQ(2, bufferC, sizeof(uint32_t));
+    EXPECT_BUFFER_U32_EQ(1, bufferC, 2 * sizeof(uint32_t));
+    EXPECT_BUFFER_U32_EQ(0, bufferC, 3 * sizeof(uint32_t));
+}
+
+DAWN_INSTANTIATE_TEST(CommandEncoderTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
new file mode 100644
index 0000000..a8a9983
--- /dev/null
+++ b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
@@ -0,0 +1,1334 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// The helper struct to configure the copies between buffers and textures.
+struct CopyConfig {
+    wgpu::TextureDescriptor textureDescriptor;
+    wgpu::Extent3D copyExtent3D;
+    wgpu::Origin3D copyOrigin3D = {0, 0, 0};
+    uint32_t viewMipmapLevel = 0;
+    uint32_t bufferOffset = 0;
+    uint32_t bytesPerRowAlignment = kTextureBytesPerRowAlignment;
+    uint32_t rowsPerImage = wgpu::kCopyStrideUndefined;
+};
+
+namespace {
+    using TextureFormat = wgpu::TextureFormat;
+    DAWN_TEST_PARAM_STRUCT(CompressedTextureFormatTestParams, TextureFormat);
+}  // namespace
+
+class CompressedTextureFormatTest : public DawnTestWithParams<CompressedTextureFormatTestParams> {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        const wgpu::TextureFormat format = GetParam().mTextureFormat;
+        if (utils::IsBCTextureFormat(format) &&
+            SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
+            mIsFormatSupported = true;
+            return {wgpu::FeatureName::TextureCompressionBC};
+        }
+        if (utils::IsETC2TextureFormat(format) &&
+            SupportsFeatures({wgpu::FeatureName::TextureCompressionETC2})) {
+            mIsFormatSupported = true;
+            return {wgpu::FeatureName::TextureCompressionETC2};
+        }
+        if (utils::IsASTCTextureFormat(format) &&
+            SupportsFeatures({wgpu::FeatureName::TextureCompressionASTC})) {
+            mIsFormatSupported = true;
+            return {wgpu::FeatureName::TextureCompressionASTC};
+        }
+        return {};
+    }
+
+    bool IsFormatSupported() const {
+        return mIsFormatSupported;
+    }
+
+    uint32_t BlockWidthInTexels() const {
+        ASSERT(IsFormatSupported());
+        return utils::GetTextureFormatBlockWidth(GetParam().mTextureFormat);
+    }
+    uint32_t BlockHeightInTexels() const {
+        ASSERT(IsFormatSupported());
+        return utils::GetTextureFormatBlockHeight(GetParam().mTextureFormat);
+    }
+
+    // Compute the upload data for the copyConfig.
+    std::vector<uint8_t> UploadData(const CopyConfig& copyConfig) {
+        uint32_t copyWidthInBlock = copyConfig.copyExtent3D.width / BlockWidthInTexels();
+        uint32_t copyHeightInBlock = copyConfig.copyExtent3D.height / BlockHeightInTexels();
+        uint32_t copyBytesPerRow = 0;
+        if (copyConfig.bytesPerRowAlignment != 0) {
+            copyBytesPerRow = copyConfig.bytesPerRowAlignment;
+        } else {
+            copyBytesPerRow = copyWidthInBlock *
+                              utils::GetTexelBlockSizeInBytes(copyConfig.textureDescriptor.format);
+        }
+        uint32_t copyRowsPerImage = copyConfig.rowsPerImage;
+        if (copyRowsPerImage == wgpu::kCopyStrideUndefined) {
+            copyRowsPerImage = copyHeightInBlock;
+        }
+        uint32_t copyBytesPerImage = copyBytesPerRow * copyRowsPerImage;
+        uint32_t uploadBufferSize = copyConfig.bufferOffset +
+                                    copyBytesPerImage * copyConfig.copyExtent3D.depthOrArrayLayers;
+
+        // Fill data with the pre-prepared one-block compressed texture data.
+        std::vector<uint8_t> data(uploadBufferSize, 0);
+        std::vector<uint8_t> oneBlockCompressedTextureData = GetOneBlockFormatTextureData();
+        for (uint32_t layer = 0; layer < copyConfig.copyExtent3D.depthOrArrayLayers; ++layer) {
+            for (uint32_t h = 0; h < copyHeightInBlock; ++h) {
+                for (uint32_t w = 0; w < copyWidthInBlock; ++w) {
+                    uint32_t uploadBufferOffset = copyConfig.bufferOffset +
+                                                  copyBytesPerImage * layer + copyBytesPerRow * h +
+                                                  oneBlockCompressedTextureData.size() * w;
+                    std::memcpy(&data[uploadBufferOffset], oneBlockCompressedTextureData.data(),
+                                oneBlockCompressedTextureData.size() * sizeof(uint8_t));
+                }
+            }
+        }
+
+        return data;
+    }
+
+    // Copy the compressed texture data into the destination texture as is specified in
+    // copyConfig.
+    void InitializeDataInCompressedTexture(wgpu::Texture compressedTexture,
+                                           const CopyConfig& copyConfig) {
+        ASSERT(IsFormatSupported());
+
+        std::vector<uint8_t> data = UploadData(copyConfig);
+
+        // Copy texture data from a staging buffer to the destination texture.
+        wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(device, data.data(), data.size(),
+                                                                 wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(stagingBuffer, copyConfig.bufferOffset,
+                                         copyConfig.bytesPerRowAlignment, copyConfig.rowsPerImage);
+
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+            compressedTexture, copyConfig.viewMipmapLevel, copyConfig.copyOrigin3D);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copyConfig.copyExtent3D);
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+    }
+
+    // Create the bind group that includes a texture and a sampler.
+    wgpu::BindGroup CreateBindGroupForTest(wgpu::BindGroupLayout bindGroupLayout,
+                                           wgpu::Texture compressedTexture,
+                                           uint32_t baseArrayLayer = 0,
+                                           uint32_t baseMipLevel = 0) {
+        ASSERT(IsFormatSupported());
+
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+        samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
+
+        wgpu::TextureViewDescriptor textureViewDescriptor;
+        textureViewDescriptor.format = GetParam().mTextureFormat;
+        textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+        textureViewDescriptor.baseMipLevel = baseMipLevel;
+        textureViewDescriptor.baseArrayLayer = baseArrayLayer;
+        textureViewDescriptor.arrayLayerCount = 1;
+        textureViewDescriptor.mipLevelCount = 1;
+        wgpu::TextureView textureView = compressedTexture.CreateView(&textureViewDescriptor);
+
+        return utils::MakeBindGroup(device, bindGroupLayout, {{0, sampler}, {1, textureView}});
+    }
+
+    // Create a render pipeline for sampling from a texture and rendering into the render target.
+    wgpu::RenderPipeline CreateRenderPipelineForTest() {
+        ASSERT(IsFormatSupported());
+
+        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            struct VertexOut {
+                @location(0) texCoord : vec2 <f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-3.0,  1.0),
+                    vec2<f32>( 3.0,  1.0),
+                    vec2<f32>( 0.0, -2.0)
+                );
+                var output : VertexOut;
+                output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+                output.texCoord = vec2<f32>(output.position.x / 2.0, -output.position.y / 2.0) + vec2<f32>(0.5, 0.5);
+                return output;
+            })");
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, texCoord);
+            })");
+        renderPipelineDescriptor.vertex.module = vsModule;
+        renderPipelineDescriptor.cFragment.module = fsModule;
+        renderPipelineDescriptor.cTargets[0].format = utils::BasicRenderPass::kDefaultColorFormat;
+
+        return device.CreateRenderPipeline(&renderPipelineDescriptor);
+    }
+
+    // Run the given render pipeline and bind group and verify the pixels in the render target.
+    void VerifyCompressedTexturePixelValues(wgpu::RenderPipeline renderPipeline,
+                                            wgpu::BindGroup bindGroup,
+                                            const wgpu::Extent3D& renderTargetSize,
+                                            const wgpu::Origin3D& expectedOrigin,
+                                            const wgpu::Extent3D& expectedExtent,
+                                            const std::vector<RGBA8>& expected) {
+        ASSERT(IsFormatSupported());
+
+        utils::BasicRenderPass renderPass =
+            utils::CreateBasicRenderPass(device, renderTargetSize.width, renderTargetSize.height);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(renderPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_TEXTURE_EQ(expected.data(), renderPass.color, {expectedOrigin.x, expectedOrigin.y},
+                          {expectedExtent.width, expectedExtent.height});
+    }
+
+    // Run the tests that copies pre-prepared format data into a texture and verifies if we can
+    // render correctly with the pixel values sampled from the texture.
+    void TestCopyRegionIntoFormatTextures(const CopyConfig& config) {
+        ASSERT(IsFormatSupported());
+
+        wgpu::Texture texture = CreateTextureWithCompressedData(config);
+
+        VerifyTexture(config, texture);
+    }
+
+    void VerifyTexture(const CopyConfig& config, wgpu::Texture texture) {
+        wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+
+        wgpu::Extent3D virtualSizeAtLevel = GetVirtualSizeAtLevel(config);
+
+        // The copy region may exceed the subresource size because of the required paddings, so we
+        // should limit the size of the expectedData to make it match the real size of the render
+        // target.
+        wgpu::Extent3D noPaddingExtent3D = config.copyExtent3D;
+        if (config.copyOrigin3D.x + config.copyExtent3D.width > virtualSizeAtLevel.width) {
+            noPaddingExtent3D.width = virtualSizeAtLevel.width - config.copyOrigin3D.x;
+        }
+        if (config.copyOrigin3D.y + config.copyExtent3D.height > virtualSizeAtLevel.height) {
+            noPaddingExtent3D.height = virtualSizeAtLevel.height - config.copyOrigin3D.y;
+        }
+        noPaddingExtent3D.depthOrArrayLayers = 1u;
+
+        std::vector<RGBA8> expectedData = GetExpectedData(noPaddingExtent3D);
+
+        wgpu::Origin3D firstLayerCopyOrigin = {config.copyOrigin3D.x, config.copyOrigin3D.y, 0};
+        for (uint32_t layer = config.copyOrigin3D.z;
+             layer < config.copyOrigin3D.z + config.copyExtent3D.depthOrArrayLayers; ++layer) {
+            wgpu::BindGroup bindGroup = CreateBindGroupForTest(
+                renderPipeline.GetBindGroupLayout(0), texture, layer, config.viewMipmapLevel);
+            VerifyCompressedTexturePixelValues(renderPipeline, bindGroup, virtualSizeAtLevel,
+                                               firstLayerCopyOrigin, noPaddingExtent3D,
+                                               expectedData);
+        }
+    }
+
+    // Create a texture and initialize it with the pre-prepared compressed texture data.
+    wgpu::Texture CreateTextureWithCompressedData(CopyConfig config) {
+        wgpu::Texture texture = device.CreateTexture(&config.textureDescriptor);
+        InitializeDataInCompressedTexture(texture, config);
+        return texture;
+    }
+
+    // Record a texture-to-texture copy command into command encoder without finishing the
+    // encoding.
+    void RecordTextureToTextureCopy(wgpu::CommandEncoder encoder,
+                                    wgpu::Texture srcTexture,
+                                    wgpu::Texture dstTexture,
+                                    CopyConfig srcConfig,
+                                    CopyConfig dstConfig) {
+        wgpu::ImageCopyTexture imageCopyTextureSrc = utils::CreateImageCopyTexture(
+            srcTexture, srcConfig.viewMipmapLevel, srcConfig.copyOrigin3D);
+        wgpu::ImageCopyTexture imageCopyTextureDst = utils::CreateImageCopyTexture(
+            dstTexture, dstConfig.viewMipmapLevel, dstConfig.copyOrigin3D);
+        encoder.CopyTextureToTexture(&imageCopyTextureSrc, &imageCopyTextureDst,
+                                     &dstConfig.copyExtent3D);
+    }
+
+    wgpu::Texture CreateTextureFromTexture(wgpu::Texture srcTexture,
+                                           CopyConfig srcConfig,
+                                           CopyConfig dstConfig) {
+        wgpu::Texture dstTexture = device.CreateTexture(&dstConfig.textureDescriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        RecordTextureToTextureCopy(encoder, srcTexture, dstTexture, srcConfig, dstConfig);
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+
+        return dstTexture;
+    }
+
+    // Return the pre-prepared one-block texture data.
+    std::vector<uint8_t> GetOneBlockFormatTextureData() {
+        switch (GetParam().mTextureFormat) {
+            // The expected data represents 4x4 pixel images with the left side dark red and the
+            // right side dark green. We specify the same compressed data in both sRGB and
+            // non-sRGB tests, but the rendering result should be different because for sRGB
+            // formats, the red, green, and blue components are converted from an sRGB color
+            // space to a linear color space as part of filtering.
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return {0x0, 0xC0, 0x60, 0x6, 0x50, 0x50, 0x50, 0x50};
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return {0x50, 0x18, 0xfc, 0xf, 0x0,  0x30, 0xe3, 0xe1,
+                        0xe1, 0xe1, 0xc1, 0xf, 0xfc, 0xc0, 0xf,  0xfc};
+
+            // The expected data represents 4x4 pixel images with the left side dark red and the
+            // right side dark green. The pixels in the left side of the block all have an alpha
+            // value equal to 0x88. We specify the same compressed data in both sRGB and
+            // non-sRGB tests, but the rendering result should be different because for sRGB
+            // formats, the red, green, and blue components are converted from an sRGB color
+            // space to a linear color space as part of filtering, and any alpha component is
+            // left unchanged.
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return {0x88, 0xFF, 0x88, 0xFF, 0x88, 0xFF, 0x88, 0xFF,
+                        0x0,  0xC0, 0x60, 0x6,  0x50, 0x50, 0x50, 0x50};
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return {0x88, 0xFF, 0x40, 0x2, 0x24, 0x40, 0x2,  0x24,
+                        0x0,  0xC0, 0x60, 0x6, 0x50, 0x50, 0x50, 0x50};
+
+            // The expected data represents 4x4 pixel images with the left side red and the
+            // right side black.
+            case wgpu::TextureFormat::BC4RSnorm:
+                return {0x7F, 0x0, 0x40, 0x2, 0x24, 0x40, 0x2, 0x24};
+            case wgpu::TextureFormat::BC4RUnorm:
+                return {0xFF, 0x0, 0x40, 0x2, 0x24, 0x40, 0x2, 0x24};
+
+            // The expected data represents 4x4 pixel images with the left side red and the
+            // right side green and was encoded with DirectXTex from Microsoft.
+            case wgpu::TextureFormat::BC5RGSnorm:
+                return {0x7f, 0x81, 0x40, 0x2,  0x24, 0x40, 0x2,  0x24,
+                        0x7f, 0x81, 0x9,  0x90, 0x0,  0x9,  0x90, 0x0};
+            case wgpu::TextureFormat::BC5RGUnorm:
+                return {0xff, 0x0, 0x40, 0x2,  0x24, 0x40, 0x2,  0x24,
+                        0xff, 0x0, 0x9,  0x90, 0x0,  0x9,  0x90, 0x0};
+            case wgpu::TextureFormat::BC6HRGBFloat:
+                return {0xe3, 0x1f, 0x0, 0x0,  0x0, 0xe0, 0x1f, 0x0,
+                        0x0,  0xff, 0x0, 0xff, 0x0, 0xff, 0x0,  0xff};
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return {0xe3, 0x3d, 0x0, 0x0,  0x0, 0xe0, 0x3d, 0x0,
+                        0x0,  0xff, 0x0, 0xff, 0x0, 0xff, 0x0,  0xff};
+
+            // The expected data represents 4x4 pixel images with the left side dark red and the
+            // right side dark green. We specify the same compressed data in both sRGB and
+            // non-sRGB tests, but the rendering result should be different because for sRGB
+            // formats, the red, green, and blue components are converted from an sRGB color
+            // space to a linear color space as part of filtering.
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                return {0x4, 0xc0, 0xc0, 0x2, 0x0, 0xff, 0x0, 0x0};
+
+            // The expected data represents 4x4 pixel images with the left side dark red and the
+            // right side dark green. The pixels in the left side of the block all have an alpha
+            // value equal to 0x88. We specify the same compressed data in both sRGB and
+            // non-sRGB tests, but the rendering result should be different because for sRGB
+            // formats, the red, green, and blue components are converted from an sRGB color
+            // space to a linear color space as part of filtering, and any alpha component is
+            // left unchanged.
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+                return {0xc0, 0x78, 0x49, 0x24, 0x92, 0xff, 0xff, 0xff,
+                        0x4,  0xc0, 0xc0, 0x2,  0x0,  0xff, 0x0,  0x0};
+
+            // The expected data represents 4x4 pixel image with the left side red and the right
+            // side black.
+            case wgpu::TextureFormat::EACR11Unorm:
+                return {0x84, 0x90, 0xff, 0xff, 0xff, 0x6d, 0xb6, 0xdb};
+            case wgpu::TextureFormat::EACR11Snorm:
+                return {0x2, 0x90, 0xff, 0xff, 0xff, 0x6d, 0xb6, 0xdb};
+
+            // The expected data represents 4x4 pixel image with the left side red and the right
+            // side green.
+            case wgpu::TextureFormat::EACRG11Unorm:
+                return {0x84, 0x90, 0xff, 0xff, 0xff, 0x6d, 0xb6, 0xdb,
+                        0x84, 0x90, 0x6d, 0xb6, 0xdb, 0xff, 0xff, 0xff};
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return {0x2, 0x90, 0xff, 0xff, 0xff, 0x6d, 0xb6, 0xdb,
+                        0x2, 0x90, 0x6d, 0xb6, 0xdb, 0xff, 0xff, 0xff};
+
+            // The expected data is a texel block of the corresponding size where the left width / 2
+            // pixels are dark red with an alpha of 0x80 and the remaining (width - width / 2)
+            // pixels are dark green. We specify the same compressed data in both sRGB and non-sRGB
+            // tests, but the rendering result should be different because for sRGB formats, the
+            // red, green, and blue components are converted from an sRGB color space to a linear
+            // color space as part of filtering, and any alpha component is left unchanged.
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                return {0x13, 0x80, 0xe9, 0x1, 0x0, 0xe8, 0x1,  0x0,
+                        0x0,  0xff, 0x1,  0x0, 0x0, 0x3f, 0xf0, 0x3};
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                return {0x83, 0x80, 0xe9, 0x1, 0x0,  0xe8, 0x1,  0x0,
+                        0x0,  0xff, 0x1,  0x0, 0x80, 0x14, 0x90, 0x2};
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                return {0x2, 0x81, 0xe9, 0x1, 0x0, 0xe8, 0x1,  0x0,
+                        0x0, 0xff, 0x1,  0x0, 0x0, 0x3f, 0xf0, 0x3};
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                return {0x6, 0x80, 0xe9, 0x1, 0x0,  0xe8, 0x1,  0x0,
+                        0x0, 0xff, 0x1,  0x0, 0xff, 0x0,  0xff, 0x0};
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                return {0x6, 0x81, 0xe9, 0x1,  0x0, 0xe8, 0x1,  0x0,
+                        0x0, 0xff, 0x1,  0xff, 0x3, 0xf0, 0x3f, 0x0};
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return {0x4, 0x80, 0xe9, 0x1, 0x0, 0xe8, 0x1,  0x0,
+                        0x0, 0xff, 0x1,  0x0, 0x0, 0x3f, 0xf0, 0x3};
+
+            default:
+                UNREACHABLE();
+                return {};
+        }
+    }
+
+    // Return the texture data that is decoded from the result of GetOneBlockFormatTextureData
+    // in RGBA8 formats. Since some compression methods may be lossy, we may use different colors
+    // to test different formats.
+    std::vector<RGBA8> GetExpectedData(const wgpu::Extent3D& testRegion) {
+        constexpr uint8_t kLeftAlpha = 0x88;
+        constexpr uint8_t kRightAlpha = 0xFF;
+
+        constexpr RGBA8 kBCDarkRed(198, 0, 0, 255);
+        constexpr RGBA8 kBCDarkGreen(0, 207, 0, 255);
+        constexpr RGBA8 kBCDarkRedSRGB(144, 0, 0, 255);
+        constexpr RGBA8 kBCDarkGreenSRGB(0, 159, 0, 255);
+
+        constexpr RGBA8 kETC2DarkRed(204, 0, 0, 255);
+        constexpr RGBA8 kETC2DarkGreen(0, 204, 0, 255);
+        constexpr RGBA8 kETC2DarkRedSRGB(154, 0, 0, 255);
+        constexpr RGBA8 kETC2DarkGreenSRGB(0, 154, 0, 255);
+
+        constexpr RGBA8 kASTCDarkRed(244, 0, 0, 128);
+        constexpr RGBA8 kASTCDarkGreen(0, 244, 0, 255);
+        constexpr RGBA8 kASTCDarkRedSRGB(231, 0, 0, 128);
+        constexpr RGBA8 kASTCDarkGreenSRGB(0, 231, 0, 255);
+
+        switch (GetParam().mTextureFormat) {
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return FillExpectedData(testRegion, kBCDarkRed, kBCDarkGreen);
+
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnorm: {
+                constexpr RGBA8 kLeftColor = RGBA8(kBCDarkRed.r, 0, 0, kLeftAlpha);
+                constexpr RGBA8 kRightColor = RGBA8(0, kBCDarkGreen.g, 0, kRightAlpha);
+                return FillExpectedData(testRegion, kLeftColor, kRightColor);
+            }
+
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return FillExpectedData(testRegion, kBCDarkRedSRGB, kBCDarkGreenSRGB);
+
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb: {
+                constexpr RGBA8 kLeftColor = RGBA8(kBCDarkRedSRGB.r, 0, 0, kLeftAlpha);
+                constexpr RGBA8 kRightColor = RGBA8(0, kBCDarkGreenSRGB.g, 0, kRightAlpha);
+                return FillExpectedData(testRegion, kLeftColor, kRightColor);
+            }
+
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC4RUnorm:
+                return FillExpectedData(testRegion, RGBA8::kRed, RGBA8::kBlack);
+
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return FillExpectedData(testRegion, RGBA8::kRed, RGBA8::kGreen);
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+                return FillExpectedData(testRegion, kETC2DarkRed, kETC2DarkGreen);
+
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                return FillExpectedData(testRegion, kETC2DarkRedSRGB, kETC2DarkGreenSRGB);
+
+            case wgpu::TextureFormat::ETC2RGBA8Unorm: {
+                constexpr RGBA8 kLeftColor = RGBA8(kETC2DarkRed.r, 0, 0, kLeftAlpha);
+                constexpr RGBA8 kRightColor = RGBA8(0, kETC2DarkGreen.g, 0, kRightAlpha);
+                return FillExpectedData(testRegion, kLeftColor, kRightColor);
+            }
+
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb: {
+                constexpr RGBA8 kLeftColor = RGBA8(kETC2DarkRedSRGB.r, 0, 0, kLeftAlpha);
+                constexpr RGBA8 kRightColor = RGBA8(0, kETC2DarkGreenSRGB.g, 0, kRightAlpha);
+                return FillExpectedData(testRegion, kLeftColor, kRightColor);
+            }
+
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+                return FillExpectedData(testRegion, RGBA8::kRed, RGBA8::kBlack);
+
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return FillExpectedData(testRegion, RGBA8::kRed, RGBA8::kGreen);
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+                return FillExpectedData(testRegion, kASTCDarkRed, kASTCDarkGreen);
+
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return FillExpectedData(testRegion, kASTCDarkRedSRGB, kASTCDarkGreenSRGB);
+
+            default:
+                UNREACHABLE();
+                return {};
+        }
+    }
+
+    std::vector<RGBA8> FillExpectedData(const wgpu::Extent3D& testRegion,
+                                        RGBA8 leftColorInBlock,
+                                        RGBA8 rightColorInBlock) {
+        ASSERT(testRegion.depthOrArrayLayers == 1);
+
+        std::vector<RGBA8> expectedData(testRegion.width * testRegion.height, leftColorInBlock);
+        for (uint32_t y = 0; y < testRegion.height; ++y) {
+            for (uint32_t x = 0; x < testRegion.width; ++x) {
+                if (x % BlockWidthInTexels() >= BlockWidthInTexels() / 2) {
+                    expectedData[testRegion.width * y + x] = rightColorInBlock;
+                }
+            }
+        }
+        return expectedData;
+    }
+
+    // Returns a texture size given the number of texel blocks that should be tiled. For example,
+    // if a texel block size is 5x4, then GetTextureSizeFromBlocks(2, 2) -> {10, 8, 1}.
+    wgpu::Extent3D GetTextureSizeWithNumBlocks(uint32_t numBlockWidth,
+                                               uint32_t numBlockHeight,
+                                               uint32_t depthOrArrayLayers = 1) const {
+        return {numBlockWidth * BlockWidthInTexels(), numBlockHeight * BlockHeightInTexels(),
+                depthOrArrayLayers};
+    }
+
+    CopyConfig GetDefaultFullConfig(uint32_t depthOrArrayLayers = 1) const {
+        ASSERT(IsFormatSupported());
+
+        CopyConfig config;
+        config.textureDescriptor.format = GetParam().mTextureFormat;
+        config.textureDescriptor.usage = kDefaultFormatTextureUsage;
+        config.textureDescriptor.size = GetTextureSizeWithNumBlocks(
+            kUnalignedBlockSize, kUnalignedBlockSize, depthOrArrayLayers);
+        config.textureDescriptor.mipLevelCount = kMipmapLevelCount;
+        config.viewMipmapLevel = kMipmapLevelCount - 1;
+
+        const wgpu::Extent3D virtualSize = GetVirtualSizeAtLevel(config);
+        ASSERT(virtualSize.width % BlockWidthInTexels() != 0u);
+        ASSERT(virtualSize.height % BlockHeightInTexels() != 0u);
+
+        return config;
+    }
+
+    CopyConfig GetDefaultSmallConfig(uint32_t depthOrArrayLayers = 1) const {
+        ASSERT(IsFormatSupported());
+
+        CopyConfig config;
+        config.textureDescriptor.format = GetParam().mTextureFormat;
+        config.textureDescriptor.usage = kDefaultFormatTextureUsage;
+        config.textureDescriptor.size = GetTextureSizeWithNumBlocks(2, 2, depthOrArrayLayers);
+        return config;
+    }
+
+    CopyConfig GetDefaultSubresourceConfig(uint32_t depthOrArrayLayers = 1) const {
+        ASSERT(IsFormatSupported());
+
+        CopyConfig config;
+        config.textureDescriptor.format = GetParam().mTextureFormat;
+        config.textureDescriptor.usage = kDefaultFormatTextureUsage;
+        config.textureDescriptor.size =
+            GetPhysicalSizeAtLevel(GetDefaultFullConfig(depthOrArrayLayers));
+        config.viewMipmapLevel = config.textureDescriptor.mipLevelCount - 1;
+        return config;
+    }
+
+    // Note: Compressed formats are only valid with 2D (array) textures.
+    static wgpu::Extent3D GetVirtualSizeAtLevel(const CopyConfig& config) {
+        return {config.textureDescriptor.size.width >> config.viewMipmapLevel,
+                config.textureDescriptor.size.height >> config.viewMipmapLevel,
+                config.textureDescriptor.size.depthOrArrayLayers};
+    }
+
+    wgpu::Extent3D GetPhysicalSizeAtLevel(const CopyConfig& config) const {
+        wgpu::Extent3D sizeAtLevel = GetVirtualSizeAtLevel(config);
+        sizeAtLevel.width = (sizeAtLevel.width + BlockWidthInTexels() - 1) / BlockWidthInTexels() *
+                            BlockWidthInTexels();
+        sizeAtLevel.height = (sizeAtLevel.height + BlockHeightInTexels() - 1) /
+                             BlockHeightInTexels() * BlockHeightInTexels();
+        return sizeAtLevel;
+    }
+
+    static constexpr wgpu::TextureUsage kDefaultFormatTextureUsage =
+        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst;
+
+    // We choose a prime that is greater than the current max texel dimension size as a multiplier
+    // to compute test texture sizes so that we can be certain that its level 2 mipmap (x4)
+    // cannot be a multiple of the dimension. This is useful for testing padding at the edges of
+    // the mipmaps.
+    static constexpr uint32_t kUnalignedBlockSize = 13;
+    static constexpr uint32_t kMipmapLevelCount = 3;
+
+    bool mIsFormatSupported = false;
+};
+
+// Test copying into the whole texture with 2x2 blocks and sampling from it.
+TEST_P(CompressedTextureFormatTest, Basic) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyExtent3D = config.textureDescriptor.size;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test copying into a sub-region of a texture works correctly.
+TEST_P(CompressedTextureFormatTest, CopyIntoSubRegion) {
+    // TODO(crbug.com/dawn/976): Failing on Linux Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyOrigin3D = {BlockWidthInTexels(), BlockHeightInTexels(), 0};
+    config.copyExtent3D = {BlockWidthInTexels(), BlockHeightInTexels(), 1};
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test copying into the non-zero layer of a 2D array texture works correctly.
+TEST_P(CompressedTextureFormatTest, CopyIntoNonZeroArrayLayer) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    constexpr uint32_t kArrayLayerCount = 3;
+
+    CopyConfig config = GetDefaultSmallConfig(kArrayLayerCount);
+    config.copyExtent3D = config.textureDescriptor.size;
+    config.copyExtent3D.depthOrArrayLayers = 1;
+    config.copyOrigin3D.z = kArrayLayerCount - 1;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test copying into a non-zero mipmap level of a texture.
+TEST_P(CompressedTextureFormatTest, CopyBufferIntoNonZeroMipmapLevel) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    CopyConfig config = GetDefaultFullConfig();
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    config.copyExtent3D = GetPhysicalSizeAtLevel(config);
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test texture-to-texture whole-size copies.
+TEST_P(CompressedTextureFormatTest, CopyWholeTextureSubResourceIntoNonZeroMipmapLevel) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // TODO(crbug.com/dawn/816): This consistently fails on with the 12th pixel being opaque
+    // black instead of opaque red on Win10 FYI Release (NVIDIA GeForce GTX 1660).
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsNvidia());
+
+    CopyConfig config = GetDefaultFullConfig();
+    // Add the usage bit for both source and destination textures so that we don't need to
+    // create two copy configs.
+    config.textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                     wgpu::TextureUsage::TextureBinding;
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    const wgpu::Extent3D kVirtualSize = GetVirtualSizeAtLevel(config);
+    config.copyExtent3D = GetPhysicalSizeAtLevel(config);
+
+    wgpu::Texture textureSrc = CreateTextureWithCompressedData(config);
+
+    // Create textureDst and copy from the content in textureSrc into it.
+    wgpu::Texture textureDst = CreateTextureFromTexture(textureSrc, config, config);
+
+    // Verify if we can use texture as sampled textures correctly.
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+    wgpu::BindGroup bindGroup =
+        CreateBindGroupForTest(renderPipeline.GetBindGroupLayout(0), textureDst,
+                               config.copyOrigin3D.z, config.viewMipmapLevel);
+
+    std::vector<RGBA8> expectedData = GetExpectedData(kVirtualSize);
+    VerifyCompressedTexturePixelValues(renderPipeline, bindGroup, kVirtualSize, config.copyOrigin3D,
+                                       kVirtualSize, expectedData);
+}
+
+// Test texture-to-texture partial copies where the physical size of the destination subresource is
+// different from its virtual size.
+TEST_P(CompressedTextureFormatTest, CopyIntoSubresourceWithPhysicalSizeNotEqualToVirtualSize) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // TODO(crbug.com/dawn/817): add workaround on the T2T copies where Extent3D fits in one
+    // subresource and does not fit in another one on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    CopyConfig srcConfig = GetDefaultSubresourceConfig();
+    srcConfig.textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+    CopyConfig dstConfig = GetDefaultFullConfig();
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    const wgpu::Extent3D kDstVirtualSize = GetVirtualSizeAtLevel(dstConfig);
+    const wgpu::Extent3D kDstPhysicalSize = GetPhysicalSizeAtLevel(dstConfig);
+    srcConfig.copyExtent3D = dstConfig.copyExtent3D = kDstPhysicalSize;
+
+    // Create textureSrc as the source texture and initialize it with pre-prepared compressed
+    // data.
+    wgpu::Texture textureSrc = CreateTextureWithCompressedData(srcConfig);
+    wgpu::ImageCopyTexture imageCopyTextureSrc = utils::CreateImageCopyTexture(
+        textureSrc, srcConfig.viewMipmapLevel, srcConfig.copyOrigin3D);
+
+    // Create textureDst and copy from the content in textureSrc into it.
+    wgpu::Texture textureDst = CreateTextureFromTexture(textureSrc, srcConfig, dstConfig);
+
+    // Verify if we can use texture as sampled textures correctly.
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+    wgpu::BindGroup bindGroup =
+        CreateBindGroupForTest(renderPipeline.GetBindGroupLayout(0), textureDst,
+                               dstConfig.copyOrigin3D.z, dstConfig.viewMipmapLevel);
+
+    std::vector<RGBA8> expectedData = GetExpectedData(kDstVirtualSize);
+    VerifyCompressedTexturePixelValues(renderPipeline, bindGroup, kDstVirtualSize,
+                                       dstConfig.copyOrigin3D, kDstVirtualSize, expectedData);
+}
+
+// Test texture-to-texture partial copies where the physical size of the source subresource is
+// different from its virtual size.
+TEST_P(CompressedTextureFormatTest, CopyFromSubresourceWithPhysicalSizeNotEqualToVirtualSize) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // TODO(crbug.com/dawn/817): add workaround on the T2T copies where Extent3D fits in one
+    // subresource and does not fit in another one on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    CopyConfig srcConfig = GetDefaultFullConfig();
+    srcConfig.textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+    CopyConfig dstConfig = GetDefaultSubresourceConfig();
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    const wgpu::Extent3D kSrcVirtualSize = GetVirtualSizeAtLevel(srcConfig);
+    const wgpu::Extent3D kDstVirtualSize = GetVirtualSizeAtLevel(dstConfig);
+    srcConfig.copyExtent3D = dstConfig.copyExtent3D = kDstVirtualSize;
+
+    ASSERT_GT(srcConfig.copyOrigin3D.x + srcConfig.copyExtent3D.width, kSrcVirtualSize.width);
+    ASSERT_GT(srcConfig.copyOrigin3D.y + srcConfig.copyExtent3D.height, kSrcVirtualSize.height);
+
+    // Create textureSrc as the source texture and initialize it with pre-prepared compressed
+    // data.
+    wgpu::Texture textureSrc = CreateTextureWithCompressedData(srcConfig);
+
+    // Create textureDst and copy from the content in textureSrc into it.
+    wgpu::Texture textureDst = CreateTextureFromTexture(textureSrc, srcConfig, dstConfig);
+
+    // Verify if we can use texture as sampled textures correctly.
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+    wgpu::BindGroup bindGroup =
+        CreateBindGroupForTest(renderPipeline.GetBindGroupLayout(0), textureDst,
+                               dstConfig.copyOrigin3D.z, dstConfig.viewMipmapLevel);
+
+    std::vector<RGBA8> expectedData = GetExpectedData(kDstVirtualSize);
+    VerifyCompressedTexturePixelValues(renderPipeline, bindGroup, kDstVirtualSize,
+                                       dstConfig.copyOrigin3D, kDstVirtualSize, expectedData);
+}
+
+// Test recording two texture-to-texture partial copies where the physical size of the source
+// subresource is different from its virtual size into one command buffer.
+TEST_P(CompressedTextureFormatTest, MultipleCopiesWithPhysicalSizeNotEqualToVirtualSize) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // TODO(crbug.com/dawn/817): add workaround on the T2T copies where Extent3D fits in one
+    // subresource and does not fit in another one on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    constexpr uint32_t kTotalCopyCount = 2;
+    std::array<CopyConfig, kTotalCopyCount> srcConfigs;
+    std::array<CopyConfig, kTotalCopyCount> dstConfigs;
+
+    srcConfigs[0] = GetDefaultFullConfig();
+    srcConfigs[0].textureDescriptor.usage =
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+    dstConfigs[0] = GetDefaultSubresourceConfig();
+    srcConfigs[0].copyExtent3D = dstConfigs[0].copyExtent3D = GetVirtualSizeAtLevel(dstConfigs[0]);
+
+    srcConfigs[1] = GetDefaultSubresourceConfig();
+    srcConfigs[1].textureDescriptor.usage =
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+    dstConfigs[1] = GetDefaultFullConfig();
+    srcConfigs[1].copyExtent3D = dstConfigs[1].copyExtent3D = GetVirtualSizeAtLevel(srcConfigs[1]);
+
+    std::array<wgpu::Extent3D, kTotalCopyCount> dstVirtualSizes;
+    for (uint32_t i = 0; i < kTotalCopyCount; ++i) {
+        dstVirtualSizes[i] = GetVirtualSizeAtLevel(dstConfigs[i]);
+    }
+
+    std::array<wgpu::Texture, kTotalCopyCount> srcTextures;
+    std::array<wgpu::Texture, kTotalCopyCount> dstTextures;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    for (uint32_t i = 0; i < kTotalCopyCount; ++i) {
+        // Create srcTextures as the source textures and initialize them with pre-prepared
+        // compressed data.
+        srcTextures[i] = CreateTextureWithCompressedData(srcConfigs[i]);
+        dstTextures[i] = device.CreateTexture(&dstConfigs[i].textureDescriptor);
+        RecordTextureToTextureCopy(encoder, srcTextures[i], dstTextures[i], srcConfigs[i],
+                                   dstConfigs[i]);
+    }
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+
+    for (uint32_t i = 0; i < kTotalCopyCount; ++i) {
+        // Verify if we can use dstTextures as sampled textures correctly.
+        wgpu::BindGroup bindGroup0 =
+            CreateBindGroupForTest(renderPipeline.GetBindGroupLayout(0), dstTextures[i],
+                                   dstConfigs[i].copyOrigin3D.z, dstConfigs[i].viewMipmapLevel);
+
+        std::vector<RGBA8> expectedData = GetExpectedData(dstVirtualSizes[i]);
+        VerifyCompressedTexturePixelValues(renderPipeline, bindGroup0, dstVirtualSizes[i],
+                                           dstConfigs[i].copyOrigin3D, dstVirtualSizes[i],
+                                           expectedData);
+    }
+}
+
+// A regression test for a bug for the toggle UseTemporaryBufferInCompressedTextureToTextureCopy on
+// Vulkan backend: test texture-to-texture partial copies with multiple array layers where the
+// physical size of the source subresource is different from its virtual size.
+TEST_P(CompressedTextureFormatTest, CopyWithMultipleLayerAndPhysicalSizeNotEqualToVirtualSize) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // TODO(crbug.com/dawn/817): add workaround on the T2T copies where Extent3D fits in one
+    // subresource and does not fit in another one on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    constexpr uint32_t kArrayLayerCount = 5;
+
+    CopyConfig srcConfig = GetDefaultFullConfig(kArrayLayerCount);
+    srcConfig.textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+    CopyConfig dstConfig = GetDefaultSubresourceConfig(kArrayLayerCount);
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    const wgpu::Extent3D kSrcVirtualSize = GetVirtualSizeAtLevel(srcConfig);
+    const wgpu::Extent3D kDstVirtualSize = GetVirtualSizeAtLevel(dstConfig);
+
+    srcConfig.copyExtent3D = dstConfig.copyExtent3D = kDstVirtualSize;
+    srcConfig.rowsPerImage = srcConfig.copyExtent3D.height / BlockHeightInTexels();
+    ASSERT_GT(srcConfig.copyOrigin3D.x + srcConfig.copyExtent3D.width, kSrcVirtualSize.width);
+    ASSERT_GT(srcConfig.copyOrigin3D.y + srcConfig.copyExtent3D.height, kSrcVirtualSize.height);
+
+    const wgpu::TextureFormat format = GetParam().mTextureFormat;
+    srcConfig.textureDescriptor.format = dstConfig.textureDescriptor.format = format;
+    srcConfig.bytesPerRowAlignment = Align(srcConfig.copyExtent3D.width / BlockWidthInTexels() *
+                                               utils::GetTexelBlockSizeInBytes(format),
+                                           kTextureBytesPerRowAlignment);
+    dstConfig.textureDescriptor.usage = kDefaultFormatTextureUsage;
+
+    // Create textureSrc as the source texture and initialize it with pre-prepared compressed
+    // data.
+    wgpu::Texture textureSrc = CreateTextureWithCompressedData(srcConfig);
+
+    // Create textureDst and copy from the content in textureSrc into it.
+    wgpu::Texture textureDst = CreateTextureFromTexture(textureSrc, srcConfig, dstConfig);
+
+    // We use the render pipeline to test if each layer can be correctly sampled with the
+    // expected data.
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipelineForTest();
+
+    const wgpu::Extent3D kExpectedDataRegionPerLayer = {kDstVirtualSize.width,
+                                                        kDstVirtualSize.height, 1u};
+    std::vector<RGBA8> kExpectedDataPerLayer = GetExpectedData(kExpectedDataRegionPerLayer);
+    const wgpu::Origin3D kCopyOriginPerLayer = {dstConfig.copyOrigin3D.x, dstConfig.copyOrigin3D.y,
+                                                0};
+    for (uint32_t copyLayer = 0; copyLayer < kArrayLayerCount; ++copyLayer) {
+        wgpu::BindGroup bindGroup =
+            CreateBindGroupForTest(renderPipeline.GetBindGroupLayout(0), textureDst,
+                                   dstConfig.copyOrigin3D.z + copyLayer, dstConfig.viewMipmapLevel);
+
+        VerifyCompressedTexturePixelValues(renderPipeline, bindGroup, kExpectedDataRegionPerLayer,
+                                           kCopyOriginPerLayer, kExpectedDataRegionPerLayer,
+                                           kExpectedDataPerLayer);
+    }
+}
+
+// Test the special case of the B2T copies on the D3D12 backend that the buffer offset and texture
+// extent exactly fit the RowPitch.
+TEST_P(CompressedTextureFormatTest, BufferOffsetAndExtentFitRowPitch) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyExtent3D = config.textureDescriptor.size;
+
+    const wgpu::TextureFormat format = GetParam().mTextureFormat;
+    const uint32_t blockCountPerRow = config.textureDescriptor.size.width / BlockWidthInTexels();
+    const uint32_t blockSizeInBytes = utils::GetTexelBlockSizeInBytes(format);
+    const uint32_t blockCountPerRowPitch = config.bytesPerRowAlignment / blockSizeInBytes;
+    config.bufferOffset = (blockCountPerRowPitch - blockCountPerRow) * blockSizeInBytes;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test the special case of the B2T copies on the D3D12 backend that the buffer offset exceeds the
+// slice pitch (slicePitch = bytesPerRow * (rowsPerImage / blockHeightInTexels)). On D3D12
+// backend the texelOffset.y will be greater than 0 after calcuting the texelOffset in the function
+// ComputeTexelOffsets().
+TEST_P(CompressedTextureFormatTest, BufferOffsetExceedsSlicePitch) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyExtent3D = config.textureDescriptor.size;
+
+    const wgpu::TextureFormat format = GetParam().mTextureFormat;
+    const wgpu::Extent3D textureSizeLevel = config.textureDescriptor.size;
+    const uint32_t blockCountPerRow = textureSizeLevel.width / BlockWidthInTexels();
+    const uint32_t slicePitchInBytes =
+        config.bytesPerRowAlignment * (textureSizeLevel.height / BlockHeightInTexels());
+    const uint32_t blockSizeInBytes = utils::GetTexelBlockSizeInBytes(format);
+    const uint32_t blockCountPerRowPitch = config.bytesPerRowAlignment / blockSizeInBytes;
+    config.bufferOffset = (blockCountPerRowPitch - blockCountPerRow) * blockSizeInBytes +
+                          config.bytesPerRowAlignment + slicePitchInBytes;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test the special case of the B2T copies on the D3D12 backend that the buffer offset and texture
+// extent exceed the RowPitch. On D3D12 backend two copies are required for this case.
+TEST_P(CompressedTextureFormatTest, CopyWithBufferOffsetAndExtentExceedRowPitch) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    constexpr uint32_t kExceedRowBlockCount = 1;
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyExtent3D = config.textureDescriptor.size;
+
+    const wgpu::TextureFormat format = GetParam().mTextureFormat;
+    const uint32_t blockCountPerRow = config.textureDescriptor.size.width / BlockWidthInTexels();
+    const uint32_t blockSizeInBytes = utils::GetTexelBlockSizeInBytes(format);
+    const uint32_t blockCountPerRowPitch = config.bytesPerRowAlignment / blockSizeInBytes;
+    config.bufferOffset =
+        (blockCountPerRowPitch - blockCountPerRow + kExceedRowBlockCount) * blockSizeInBytes;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test the special case of the B2T copies on the D3D12 backend that the slicePitch is equal to the
+// bytesPerRow. On D3D12 backend the texelOffset.z will be greater than 0 after calcuting the
+// texelOffset in the function ComputeTexelOffsets().
+TEST_P(CompressedTextureFormatTest, RowPitchEqualToSlicePitch) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.textureDescriptor.size = GetTextureSizeWithNumBlocks(2, 1);
+    config.copyExtent3D = config.textureDescriptor.size;
+
+    const wgpu::TextureFormat format = GetParam().mTextureFormat;
+    const uint32_t blockCountPerRow = config.textureDescriptor.size.width / BlockWidthInTexels();
+    const uint32_t slicePitchInBytes = config.bytesPerRowAlignment;
+    const uint32_t blockSizeInBytes = utils::GetTexelBlockSizeInBytes(format);
+    const uint32_t blockCountPerRowPitch = config.bytesPerRowAlignment / blockSizeInBytes;
+    config.bufferOffset =
+        (blockCountPerRowPitch - blockCountPerRow) * blockSizeInBytes + slicePitchInBytes;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test the workaround in the B2T copies when (bufferSize - bufferOffset < bytesPerImage *
+// copyExtent.depthOrArrayLayers) on Metal backends. As copyExtent.depthOrArrayLayers can only be 1
+// for compressed formats, on Metal backend we will use two copies to implement such copy.
+TEST_P(CompressedTextureFormatTest, LargeImageHeight) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    CopyConfig config = GetDefaultSmallConfig();
+    config.copyExtent3D = config.textureDescriptor.size;
+    config.rowsPerImage = config.textureDescriptor.size.height * 2 / BlockHeightInTexels();
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test the workaround in the B2T copies when (bufferSize - bufferOffset < bytesPerImage *
+// copyExtent.depthOrArrayLayers) and copyExtent needs to be clamped.
+TEST_P(CompressedTextureFormatTest, LargeImageHeightAndClampedCopyExtent) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    CopyConfig config = GetDefaultFullConfig();
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies.
+    const wgpu::Extent3D kPhysicalSize = GetPhysicalSizeAtLevel(config);
+    config.copyExtent3D = kPhysicalSize;
+    config.rowsPerImage = kPhysicalSize.height * 2 / BlockHeightInTexels();
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test copying a whole 2D array texture with array layer count > 1 in one copy command works with
+// compressed formats.
+TEST_P(CompressedTextureFormatTest, CopyWhole2DArrayTexture) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    constexpr uint32_t kArrayLayerCount = 3;
+
+    CopyConfig config = GetDefaultSmallConfig(kArrayLayerCount);
+    config.rowsPerImage = 8;
+    config.copyExtent3D = config.textureDescriptor.size;
+    config.copyExtent3D.depthOrArrayLayers = kArrayLayerCount;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+// Test copying a multiple 2D texture array layers in one copy command works.
+TEST_P(CompressedTextureFormatTest, CopyMultiple2DArrayLayers) {
+    // TODO(crbug.com/dawn/815): find out why this test fails on Windows Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsWindows());
+
+    DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+
+    // This test uses glTextureView() which is not supported in OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    constexpr uint32_t kArrayLayerCount = 3;
+
+    CopyConfig config = GetDefaultSmallConfig(kArrayLayerCount);
+    config.rowsPerImage = 8;
+
+    constexpr uint32_t kCopyBaseArrayLayer = 1;
+    constexpr uint32_t kCopyLayerCount = 2;
+    config.copyOrigin3D = {0, 0, kCopyBaseArrayLayer};
+    config.copyExtent3D = config.textureDescriptor.size;
+    config.copyExtent3D.depthOrArrayLayers = kCopyLayerCount;
+
+    TestCopyRegionIntoFormatTextures(config);
+}
+
+DAWN_INSTANTIATE_TEST_P(CompressedTextureFormatTest,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend(),
+                         VulkanBackend({"use_temporary_buffer_in_texture_to_texture_copy"})},
+                        std::vector<wgpu::TextureFormat>(utils::kCompressedFormats.begin(),
+                                                         utils::kCompressedFormats.end()));
+
+// Suite of regression tests that target specific compression types.
+class CompressedTextureFormatSpecificTest : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
+
+        std::vector<wgpu::FeatureName> features;
+        if (mIsBCFormatSupported) {
+            features.emplace_back(wgpu::FeatureName::TextureCompressionBC);
+        }
+        return features;
+    }
+
+    bool IsBCFormatSupported() const {
+        return mIsBCFormatSupported;
+    }
+
+    bool mIsBCFormatSupported = false;
+};
+
+// Testing a special code path: clearing a non-renderable texture when DynamicUploader
+// is unaligned doesn't throw validation errors.
+TEST_P(CompressedTextureFormatSpecificTest, BC1RGBAUnorm_UnalignedDynamicUploader) {
+    // CopyT2B for compressed texture formats is unimplemented on OpenGL.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
+
+    utils::UnalignDynamicUploader(device);
+
+    wgpu::TextureDescriptor textureDescriptor = {};
+    textureDescriptor.size = {4, 4, 1};
+    textureDescriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
+    textureDescriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 8;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(buffer, 0, 256);
+    wgpu::Extent3D copyExtent = {4, 4, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copyExtent);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+DAWN_INSTANTIATE_TEST(CompressedTextureFormatSpecificTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend(),
+                      VulkanBackend({"use_temporary_buffer_in_texture_to_texture_copy"}));
+
+class CompressedTextureWriteTextureTest : public CompressedTextureFormatTest {
+  protected:
+    void SetUp() override {
+        CompressedTextureFormatTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+    }
+
+    // Write the compressed texture data into the destination texture as is specified in
+    // copyConfig.
+    void WriteToCompressedTexture(wgpu::Texture compressedTexture, const CopyConfig& copyConfig) {
+        ASSERT(IsFormatSupported());
+
+        std::vector<uint8_t> data = UploadData(copyConfig);
+
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(
+            copyConfig.bufferOffset, copyConfig.bytesPerRowAlignment, copyConfig.rowsPerImage);
+
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+            compressedTexture, copyConfig.viewMipmapLevel, copyConfig.copyOrigin3D);
+
+        queue.WriteTexture(&imageCopyTexture, data.data(), data.size(), &textureDataLayout,
+                           &copyConfig.copyExtent3D);
+    }
+
+    // Run the tests that write pre-prepared format data into a texture and verifies if we can
+    // render correctly with the pixel values sampled from the texture.
+    void TestWriteRegionIntoFormatTextures(const CopyConfig& config) {
+        ASSERT(IsFormatSupported());
+
+        wgpu::Texture texture = device.CreateTexture(&config.textureDescriptor);
+        WriteToCompressedTexture(texture, config);
+
+        VerifyTexture(config, texture);
+    }
+};
+
+// Test WriteTexture to a 2D texture with all parameters non-defaults.
+TEST_P(CompressedTextureWriteTextureTest, Basic) {
+    // TODO(crbug.com/dawn/976): Failing on Linux Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    constexpr uint32_t kSizeWidthMultiplier = 5;
+    constexpr uint32_t kSizeHeightMultiplier = 6;
+    constexpr uint32_t kOriginWidthMultiplier = 1;
+    constexpr uint32_t kOriginHeightMultiplier = 2;
+    constexpr uint32_t kExtentWidthMultiplier = 3;
+    constexpr uint32_t kExtentHeightMultiplier = 4;
+
+    CopyConfig config;
+    config.textureDescriptor.usage = kDefaultFormatTextureUsage;
+    config.textureDescriptor.size = {BlockWidthInTexels() * kSizeWidthMultiplier,
+                                     BlockHeightInTexels() * kSizeHeightMultiplier, 1};
+    config.copyOrigin3D = {BlockWidthInTexels() * kOriginWidthMultiplier,
+                           BlockHeightInTexels() * kOriginHeightMultiplier, 0};
+    config.copyExtent3D = {BlockWidthInTexels() * kExtentWidthMultiplier,
+                           BlockHeightInTexels() * kExtentHeightMultiplier, 1};
+    config.bytesPerRowAlignment = 511;
+    config.rowsPerImage = 5;
+    config.textureDescriptor.format = GetParam().mTextureFormat;
+
+    TestWriteRegionIntoFormatTextures(config);
+}
+
+// Test writing to multiple 2D texture array layers.
+TEST_P(CompressedTextureWriteTextureTest, WriteMultiple2DArrayLayers) {
+    // TODO(crbug.com/dawn/976): Failing on Linux Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // TODO(b/198674734): Width multiplier set to 7 because 5 results in square size for ASTC6x5.
+    constexpr uint32_t kSizeWidthMultiplier = 7;
+    constexpr uint32_t kSizeHeightMultiplier = 6;
+    constexpr uint32_t kOriginWidthMultiplier = 1;
+    constexpr uint32_t kOriginHeightMultiplier = 2;
+    constexpr uint32_t kExtentWidthMultiplier = 3;
+    constexpr uint32_t kExtentHeightMultiplier = 4;
+
+    CopyConfig config;
+    config.textureDescriptor.usage = kDefaultFormatTextureUsage;
+    config.textureDescriptor.size = {BlockWidthInTexels() * kSizeWidthMultiplier,
+                                     BlockHeightInTexels() * kSizeHeightMultiplier, 9};
+    config.copyOrigin3D = {BlockWidthInTexels() * kOriginWidthMultiplier,
+                           BlockHeightInTexels() * kOriginHeightMultiplier, 3};
+    config.copyExtent3D = {BlockWidthInTexels() * kExtentWidthMultiplier,
+                           BlockHeightInTexels() * kExtentHeightMultiplier, 6};
+    config.bytesPerRowAlignment = 511;
+    config.rowsPerImage = 5;
+    config.textureDescriptor.format = GetParam().mTextureFormat;
+
+    TestWriteRegionIntoFormatTextures(config);
+}
+
+// Test writing textures where the physical size of the destination subresource is different from
+// its virtual size.
+TEST_P(CompressedTextureWriteTextureTest,
+       WriteIntoSubresourceWithPhysicalSizeNotEqualToVirtualSize) {
+    // TODO(crbug.com/dawn/976): Failing on Linux Intel OpenGL drivers.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    CopyConfig config = GetDefaultFullConfig();
+
+    // The virtual size of the texture at mipmap level == 2 is not a multiple of the texel
+    // dimensions so paddings are required in the copies. We then test against the expected
+    // physical size and a valid size smaller than the physical size for verification.
+    const wgpu::Extent3D kPhysicalSize = GetPhysicalSizeAtLevel(config);
+    for (unsigned int w : {kPhysicalSize.width - BlockWidthInTexels(), kPhysicalSize.width}) {
+        for (unsigned int h :
+             {kPhysicalSize.height - BlockHeightInTexels(), kPhysicalSize.height}) {
+            config.copyExtent3D = {w, h, 1};
+            TestWriteRegionIntoFormatTextures(config);
+        }
+    }
+}
+
+DAWN_INSTANTIATE_TEST_P(CompressedTextureWriteTextureTest,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kCompressedFormats.begin(),
+                                                         utils::kCompressedFormats.end()));
diff --git a/src/dawn/tests/end2end/ComputeCopyStorageBufferTests.cpp b/src/dawn/tests/end2end/ComputeCopyStorageBufferTests.cpp
new file mode 100644
index 0000000..0fbb821
--- /dev/null
+++ b/src/dawn/tests/end2end/ComputeCopyStorageBufferTests.cpp
@@ -0,0 +1,152 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+
+class ComputeCopyStorageBufferTests : public DawnTest {
+  public:
+    static constexpr int kInstances = 4;
+    static constexpr int kUintsPerInstance = 4;
+    static constexpr int kNumUints = kInstances * kUintsPerInstance;
+
+    void BasicTest(const char* shader);
+};
+
+void ComputeCopyStorageBufferTests::BasicTest(const char* shader) {
+    // Set up shader and pipeline
+    auto module = utils::CreateShaderModule(device, shader);
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Set up src storage buffer
+    wgpu::BufferDescriptor srcDesc;
+    srcDesc.size = kNumUints * sizeof(uint32_t);
+    srcDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer src = device.CreateBuffer(&srcDesc);
+
+    std::array<uint32_t, kNumUints> expected;
+    for (uint32_t i = 0; i < kNumUints; ++i) {
+        expected[i] = (i + 1u) * 0x11111111u;
+    }
+    queue.WriteBuffer(src, 0, expected.data(), sizeof(expected));
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), src, 0, kNumUints);
+
+    // Set up dst storage buffer
+    wgpu::BufferDescriptor dstDesc;
+    dstDesc.size = kNumUints * sizeof(uint32_t);
+    dstDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
+
+    std::array<uint32_t, kNumUints> zero{};
+    queue.WriteBuffer(dst, 0, zero.data(), sizeof(zero));
+
+    // Set up bind group and issue dispatch
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, src, 0, kNumUints * sizeof(uint32_t)},
+                                                         {1, dst, 0, kNumUints * sizeof(uint32_t)},
+                                                     });
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(kInstances);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), dst, 0, kNumUints);
+}
+
+// Test that a trivial compute-shader memcpy implementation works.
+TEST_P(ComputeCopyStorageBufferTests, SizedArrayOfBasic) {
+    BasicTest(R"(
+        struct Buf {
+            s : array<vec4<u32>, 4>
+        }
+
+        @group(0) @binding(0) var<storage, read_write> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            let index : u32 = GlobalInvocationID.x;
+            if (index >= 4u) { return; }
+            dst.s[index] = src.s[index];
+        })");
+}
+
+// Test that a slightly-less-trivial compute-shader memcpy implementation works.
+TEST_P(ComputeCopyStorageBufferTests, SizedArrayOfStruct) {
+    BasicTest(R"(
+        struct S {
+            a : vec2<u32>,
+            b : vec2<u32>,
+        }
+
+        struct Buf {
+            s : array<S, 4>
+        }
+
+        @group(0) @binding(0) var<storage, read_write> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            let index : u32 = GlobalInvocationID.x;
+            if (index >= 4u) { return; }
+            dst.s[index] = src.s[index];
+        })");
+}
+
+// Test that a trivial compute-shader memcpy implementation works.
+TEST_P(ComputeCopyStorageBufferTests, UnsizedArrayOfBasic) {
+    BasicTest(R"(
+        struct Buf {
+            s : array<vec4<u32>>
+        }
+
+        @group(0) @binding(0) var<storage, read_write> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            let index : u32 = GlobalInvocationID.x;
+            if (index >= 4u) { return; }
+            dst.s[index] = src.s[index];
+        })");
+}
+
+DAWN_INSTANTIATE_TEST(ComputeCopyStorageBufferTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ComputeDispatchTests.cpp b/src/dawn/tests/end2end/ComputeDispatchTests.cpp
new file mode 100644
index 0000000..2d56619
--- /dev/null
+++ b/src/dawn/tests/end2end/ComputeDispatchTests.cpp
@@ -0,0 +1,329 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <initializer_list>
+
+constexpr static std::initializer_list<uint32_t> kSentinelData{0, 0, 0};
+
+class ComputeDispatchTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // Write workgroup number into the output buffer if we saw the biggest dispatch
+        // To make sure the dispatch was not called, write maximum u32 value for 0 dispatches
+        wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+            struct OutputBuf {
+                workGroups : vec3<u32>
+            }
+
+            @group(0) @binding(0) var<storage, read_write> output : OutputBuf;
+
+            @stage(compute) @workgroup_size(1, 1, 1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>,
+                    @builtin(num_workgroups) dispatch : vec3<u32>) {
+                if (dispatch.x == 0u || dispatch.y == 0u || dispatch.z == 0u) {
+                    output.workGroups = vec3<u32>(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu);
+                    return;
+                }
+
+                if (all(GlobalInvocationID == dispatch - vec3<u32>(1u, 1u, 1u))) {
+                    output.workGroups = dispatch;
+                }
+            })");
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = module;
+        csDesc.compute.entryPoint = "main";
+        pipeline = device.CreateComputePipeline(&csDesc);
+
+        // Test the use of the compute pipelines without using @num_workgroups
+        wgpu::ShaderModule moduleWithoutNumWorkgroups = utils::CreateShaderModule(device, R"(
+            struct InputBuf {
+                expectedDispatch : vec3<u32>
+            }
+            struct OutputBuf {
+                workGroups : vec3<u32>
+            }
+
+            @group(0) @binding(0) var<uniform> input : InputBuf;
+            @group(0) @binding(1) var<storage, read_write> output : OutputBuf;
+
+            @stage(compute) @workgroup_size(1, 1, 1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+                let dispatch : vec3<u32> = input.expectedDispatch;
+
+                if (dispatch.x == 0u || dispatch.y == 0u || dispatch.z == 0u) {
+                    output.workGroups = vec3<u32>(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu);
+                    return;
+                }
+
+                if (all(GlobalInvocationID == dispatch - vec3<u32>(1u, 1u, 1u))) {
+                    output.workGroups = dispatch;
+                }
+            })");
+        csDesc.compute.module = moduleWithoutNumWorkgroups;
+        pipelineWithoutNumWorkgroups = device.CreateComputePipeline(&csDesc);
+    }
+
+    void DirectTest(uint32_t x, uint32_t y, uint32_t z) {
+        // Set up dst storage buffer to contain dispatch x, y, z
+        wgpu::Buffer dst = utils::CreateBufferFromData<uint32_t>(
+            device,
+            wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst,
+            kSentinelData);
+
+        // Set up bind group and issue dispatch
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {
+                                                             {0, dst, 0, 3 * sizeof(uint32_t)},
+                                                         });
+
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Dispatch(x, y, z);
+            pass.End();
+
+            commands = encoder.Finish();
+        }
+
+        queue.Submit(1, &commands);
+
+        std::vector<uint32_t> expected =
+            x == 0 || y == 0 || z == 0 ? kSentinelData : std::initializer_list<uint32_t>{x, y, z};
+
+        // Verify the dispatch got called if all group counts are not zero
+        EXPECT_BUFFER_U32_RANGE_EQ(&expected[0], dst, 0, 3);
+    }
+
+    void IndirectTest(std::vector<uint32_t> indirectBufferData,
+                      uint64_t indirectOffset,
+                      bool useNumWorkgroups = true) {
+        // Set up dst storage buffer to contain dispatch x, y, z
+        wgpu::Buffer dst = utils::CreateBufferFromData<uint32_t>(
+            device,
+            wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst,
+            kSentinelData);
+
+        wgpu::Buffer indirectBuffer = utils::CreateBufferFromData(
+            device, &indirectBufferData[0], indirectBufferData.size() * sizeof(uint32_t),
+            wgpu::BufferUsage::Indirect);
+
+        uint32_t indirectStart = indirectOffset / sizeof(uint32_t);
+
+        // Set up bind group and issue dispatch
+        wgpu::BindGroup bindGroup;
+        wgpu::ComputePipeline computePipelineForTest;
+
+        if (useNumWorkgroups) {
+            computePipelineForTest = pipeline;
+            bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                             {
+                                                 {0, dst, 0, 3 * sizeof(uint32_t)},
+                                             });
+        } else {
+            computePipelineForTest = pipelineWithoutNumWorkgroups;
+            wgpu::Buffer expectedBuffer =
+                utils::CreateBufferFromData(device, &indirectBufferData[indirectStart],
+                                            3 * sizeof(uint32_t), wgpu::BufferUsage::Uniform);
+            bindGroup =
+                utils::MakeBindGroup(device, pipelineWithoutNumWorkgroups.GetBindGroupLayout(0),
+                                     {
+                                         {0, expectedBuffer, 0, 3 * sizeof(uint32_t)},
+                                         {1, dst, 0, 3 * sizeof(uint32_t)},
+                                     });
+        }
+
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(computePipelineForTest);
+            pass.SetBindGroup(0, bindGroup);
+            pass.DispatchIndirect(indirectBuffer, indirectOffset);
+            pass.End();
+
+            commands = encoder.Finish();
+        }
+
+        queue.Submit(1, &commands);
+
+        std::vector<uint32_t> expected;
+
+        uint32_t maxComputeWorkgroupsPerDimension =
+            GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+        if (indirectBufferData[indirectStart] == 0 || indirectBufferData[indirectStart + 1] == 0 ||
+            indirectBufferData[indirectStart + 2] == 0 ||
+            indirectBufferData[indirectStart] > maxComputeWorkgroupsPerDimension ||
+            indirectBufferData[indirectStart + 1] > maxComputeWorkgroupsPerDimension ||
+            indirectBufferData[indirectStart + 2] > maxComputeWorkgroupsPerDimension) {
+            expected = kSentinelData;
+        } else {
+            expected.assign(indirectBufferData.begin() + indirectStart,
+                            indirectBufferData.begin() + indirectStart + 3);
+        }
+
+        // Verify the dispatch got called with group counts in indirect buffer if all group counts
+        // are not zero
+        EXPECT_BUFFER_U32_RANGE_EQ(&expected[0], dst, 0, 3);
+    }
+
+  private:
+    wgpu::ComputePipeline pipeline;
+    wgpu::ComputePipeline pipelineWithoutNumWorkgroups;
+};
+
+// Test basic direct
+TEST_P(ComputeDispatchTests, DirectBasic) {
+    DirectTest(2, 3, 4);
+}
+
+// Test no-op direct
+TEST_P(ComputeDispatchTests, DirectNoop) {
+    // All dimensions are 0s
+    DirectTest(0, 0, 0);
+
+    // Only x dimension is 0
+    DirectTest(0, 3, 4);
+
+    // Only y dimension is 0
+    DirectTest(2, 0, 4);
+
+    // Only z dimension is 0
+    DirectTest(2, 3, 0);
+}
+
+// Test basic indirect
+TEST_P(ComputeDispatchTests, IndirectBasic) {
+#ifdef DAWN_PLATFORM_32_BIT
+    // TODO(crbug.com/dawn/1196): Fails on Chromium's Quadro P400 bots
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+#endif
+    // TODO(crbug.com/dawn/1262): Fails with the full validation turned on.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    IndirectTest({2, 3, 4}, 0);
+}
+
+// Test basic indirect without using @num_workgroups
+TEST_P(ComputeDispatchTests, IndirectBasicWithoutNumWorkgroups) {
+    IndirectTest({2, 3, 4}, 0, false);
+}
+
+// Test no-op indirect
+TEST_P(ComputeDispatchTests, IndirectNoop) {
+    // All dimensions are 0s
+    IndirectTest({0, 0, 0}, 0);
+
+    // Only x dimension is 0
+    IndirectTest({0, 3, 4}, 0);
+
+    // Only y dimension is 0
+    IndirectTest({2, 0, 4}, 0);
+
+    // Only z dimension is 0
+    IndirectTest({2, 3, 0}, 0);
+}
+
+// Test indirect with buffer offset
+TEST_P(ComputeDispatchTests, IndirectOffset) {
+#ifdef DAWN_PLATFORM_32_BIT
+    // TODO(crbug.com/dawn/1196): Fails on Chromium's Quadro P400 bots
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+#endif
+    // TODO(crbug.com/dawn/1262): Fails with the full validation turned on.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    IndirectTest({0, 0, 0, 2, 3, 4}, 3 * sizeof(uint32_t));
+}
+
+// Test indirect with buffer offset without using @num_workgroups
+TEST_P(ComputeDispatchTests, IndirectOffsetWithoutNumWorkgroups) {
+    IndirectTest({0, 0, 0, 2, 3, 4}, 3 * sizeof(uint32_t), false);
+}
+
+// Test indirect dispatches at max limit.
+TEST_P(ComputeDispatchTests, MaxWorkgroups) {
+#ifdef DAWN_PLATFORM_32_BIT
+    // TODO(crbug.com/dawn/1196): Fails on Chromium's Quadro P400 bots
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+#endif
+    // TODO(crbug.com/dawn/1262): Fails with the full validation turned on.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    // TODO(crbug.com/dawn/1165): Fails with WARP
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+
+    // Test that the maximum works in each dimension.
+    // Note: Testing (max, max, max) is very slow.
+    IndirectTest({max, 3, 4}, 0);
+    IndirectTest({2, max, 4}, 0);
+    IndirectTest({2, 3, max}, 0);
+}
+
+// Test indirect dispatches exceeding the max limit are noop-ed.
+TEST_P(ComputeDispatchTests, ExceedsMaxWorkgroupsNoop) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    // TODO(crbug.com/dawn/839): Investigate why this test fails with WARP.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+
+    // All dimensions are above the max
+    IndirectTest({max + 1, max + 1, max + 1}, 0);
+
+    // Only x dimension is above the max
+    IndirectTest({max + 1, 3, 4}, 0);
+    IndirectTest({2 * max, 3, 4}, 0);
+
+    // Only y dimension is above the max
+    IndirectTest({2, max + 1, 4}, 0);
+    IndirectTest({2, 2 * max, 4}, 0);
+
+    // Only z dimension is above the max
+    IndirectTest({2, 3, max + 1}, 0);
+    IndirectTest({2, 3, 2 * max}, 0);
+}
+
+// Test indirect dispatches exceeding the max limit with an offset are noop-ed.
+TEST_P(ComputeDispatchTests, ExceedsMaxWorkgroupsWithOffsetNoop) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    // TODO(crbug.com/dawn/839): Investigate why this test fails with WARP.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+
+    IndirectTest({1, 2, 3, max + 1, 4, 5}, 1 * sizeof(uint32_t));
+    IndirectTest({1, 2, 3, max + 1, 4, 5}, 2 * sizeof(uint32_t));
+    IndirectTest({1, 2, 3, max + 1, 4, 5}, 3 * sizeof(uint32_t));
+}
+
+DAWN_INSTANTIATE_TEST(ComputeDispatchTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
new file mode 100644
index 0000000..cc2be7d
--- /dev/null
+++ b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
@@ -0,0 +1,506 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+#include <functional>
+
+namespace {
+
+    // Helper for replacing all occurrences of substr in str with replacement
+    std::string ReplaceAll(std::string str,
+                           const std::string& substr,
+                           const std::string& replacement) {
+        size_t pos = 0;
+        while ((pos = str.find(substr, pos)) != std::string::npos) {
+            str.replace(pos, substr.length(), replacement);
+            pos += replacement.length();
+        }
+        return str;
+    }
+
+    // DataMatcherCallback is the callback function by DataMatcher.
+    // It is called for each contiguous sequence of bytes that should be checked
+    // for equality.
+    // offset and size are in units of bytes.
+    using DataMatcherCallback = std::function<void(uint32_t offset, uint32_t size)>;
+
+    // DataMatcher is a function pointer to a data matching function.
+    // size is the total number of bytes being considered for matching.
+    // The callback may be called once or multiple times, and may only consider
+    // part of the interval [0, size)
+    using DataMatcher = void (*)(uint32_t size, DataMatcherCallback);
+
+    // FullDataMatcher is a DataMatcher that calls callback with the interval
+    // [0, size)
+    void FullDataMatcher(uint32_t size, DataMatcherCallback callback) {
+        callback(0, size);
+    }
+
+    // StridedDataMatcher is a DataMatcher that calls callback with the strided
+    // intervals of length BYTES_TO_MATCH, skipping BYTES_TO_SKIP.
+    // For example: StridedDataMatcher<2, 4>(18, callback) will call callback
+    // with the intervals: [0, 2), [6, 8), [12, 14)
+    template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
+    void StridedDataMatcher(uint32_t size, DataMatcherCallback callback) {
+        uint32_t offset = 0;
+        while (offset < size) {
+            callback(offset, BYTES_TO_MATCH);
+            offset += BYTES_TO_MATCH + BYTES_TO_SKIP;
+        }
+    }
+
+    // Align returns the WGSL decoration for an explicit structure field alignment
+    std::string AlignDeco(uint32_t value) {
+        return "@align(" + std::to_string(value) + ") ";
+    }
+
+}  // namespace
+
+// Field holds test parameters for ComputeLayoutMemoryBufferTests.Fields
+struct Field {
+    const char* type;  // Type of the field
+    uint32_t align;    // Alignment of the type in bytes
+    uint32_t size;     // Natural size of the type in bytes
+
+    uint32_t padded_size = 0;                // Decorated (extended) size of the type in bytes
+    DataMatcher matcher = &FullDataMatcher;  // The matching method
+    bool storage_buffer_only = false;        // This should only be used for storage buffer tests
+
+    // Sets the padded_size to value.
+    // Returns this Field so calls can be chained.
+    Field& PaddedSize(uint32_t value) {
+        padded_size = value;
+        return *this;
+    }
+
+    // Sets the matcher to a StridedDataMatcher<BYTES_TO_MATCH, BYTES_TO_SKIP>.
+    // Returns this Field so calls can be chained.
+    template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
+    Field& Strided() {
+        matcher = &StridedDataMatcher<BYTES_TO_MATCH, BYTES_TO_SKIP>;
+        return *this;
+    }
+
+    // Marks that this should only be used for storage buffer tests.
+    // Returns this Field so calls can be chained.
+    Field& StorageBufferOnly() {
+        storage_buffer_only = true;
+        return *this;
+    }
+};
+
+// StorageClass is an enumerator of storage classes used by ComputeLayoutMemoryBufferTests.Fields
+enum class StorageClass {
+    Uniform,
+    Storage,
+};
+
+std::ostream& operator<<(std::ostream& o, StorageClass storageClass) {
+    switch (storageClass) {
+        case StorageClass::Uniform:
+            o << "uniform";
+            break;
+        case StorageClass::Storage:
+            o << "storage";
+            break;
+    }
+    return o;
+}
+
+std::ostream& operator<<(std::ostream& o, Field field) {
+    o << "@align(" << field.align << ") @size("
+      << (field.padded_size > 0 ? field.padded_size : field.size) << ") " << field.type;
+    return o;
+}
+
+DAWN_TEST_PARAM_STRUCT(ComputeLayoutMemoryBufferTestParams, StorageClass, Field);
+
+class ComputeLayoutMemoryBufferTests
+    : public DawnTestWithParams<ComputeLayoutMemoryBufferTestParams> {
+    void SetUp() override {
+        DawnTestBase::SetUp();
+    }
+};
+
+TEST_P(ComputeLayoutMemoryBufferTests, Fields) {
+    // Sentinel value markers codes used to check that the start and end of
+    // structures are correctly aligned. Each of these codes are distinct and
+    // are not likely to be confused with data.
+    constexpr uint32_t kDataHeaderCode = 0xa0b0c0a0u;
+    constexpr uint32_t kDataFooterCode = 0x40302010u;
+    constexpr uint32_t kInputHeaderCode = 0x91827364u;
+    constexpr uint32_t kInputFooterCode = 0x19283764u;
+
+    // Byte codes used for field padding. The MSB is set for each of these.
+    // The field data has the MSB 0.
+    constexpr uint8_t kDataAlignPaddingCode = 0xfeu;
+    constexpr uint8_t kFieldAlignPaddingCode = 0xfdu;
+    constexpr uint8_t kFieldSizePaddingCode = 0xdcu;
+    constexpr uint8_t kDataSizePaddingCode = 0xdbu;
+    constexpr uint8_t kInputFooterAlignPaddingCode = 0xdau;
+    constexpr uint8_t kInputTailPaddingCode = 0xd9u;
+
+    // Status codes returned by the shader.
+    constexpr uint32_t kStatusBadInputHeader = 100u;
+    constexpr uint32_t kStatusBadInputFooter = 101u;
+    constexpr uint32_t kStatusBadDataHeader = 102u;
+    constexpr uint32_t kStatusBadDataFooter = 103u;
+    constexpr uint32_t kStatusOk = 200u;
+
+    const Field& field = GetParam().mField;
+
+    const bool isUniform = GetParam().mStorageClass == StorageClass::Uniform;
+
+    std::string shader = R"(
+struct Data {
+    header : u32,
+    @align({field_align}) @size({field_size}) field : {field_type},
+    footer : u32,
+}
+
+struct Input {
+    header : u32,
+    {data_align}data : Data,
+    {footer_align}footer : u32,
+}
+
+struct Output {
+    data : {field_type}
+}
+
+struct Status {
+    code : u32
+}
+
+@group(0) @binding(0) var<{input_qualifiers}> input : Input;
+@group(0) @binding(1) var<storage, read_write> output : Output;
+@group(0) @binding(2) var<storage, read_write> status : Status;
+
+@stage(compute) @workgroup_size(1,1,1)
+fn main() {
+    if (input.header != {input_header_code}u) {
+        status.code = {status_bad_input_header}u;
+    } else if (input.footer != {input_footer_code}u) {
+        status.code = {status_bad_input_footer}u;
+    } else if (input.data.header != {data_header_code}u) {
+        status.code = {status_bad_data_header}u;
+    } else if (input.data.footer != {data_footer_code}u) {
+        status.code = {status_bad_data_footer}u;
+    } else {
+        status.code = {status_ok}u;
+        output.data = input.data.field;
+    }
+})";
+
+    // https://www.w3.org/TR/WGSL/#alignment-and-size
+    // Structure size: roundUp(AlignOf(S), OffsetOf(S, L) + SizeOf(S, L))
+    // https://www.w3.org/TR/WGSL/#storage-class-constraints
+    // RequiredAlignOf(S, uniform): roundUp(16, max(AlignOf(T0), ..., AlignOf(TN)))
+    uint32_t dataAlign = isUniform ? std::max(16u, field.align) : field.align;
+
+    // https://www.w3.org/TR/WGSL/#structure-layout-rules
+    // Note: When underlying the target is a Vulkan device, we assume the device does not support
+    // the scalarBlockLayout feature. Therefore, a data value must not be placed in the padding at
+    // the end of a structure or matrix, nor in the padding at the last element of an array.
+    uint32_t footerAlign = isUniform ? 16 : 4;
+
+    shader = ReplaceAll(shader, "{data_align}", isUniform ? AlignDeco(dataAlign) : "");
+    shader = ReplaceAll(shader, "{field_align}", std::to_string(field.align));
+    shader = ReplaceAll(shader, "{footer_align}", isUniform ? AlignDeco(footerAlign) : "");
+    shader = ReplaceAll(shader, "{field_size}",
+                        std::to_string(field.padded_size > 0 ? field.padded_size : field.size));
+    shader = ReplaceAll(shader, "{field_type}", field.type);
+    shader = ReplaceAll(shader, "{input_header_code}", std::to_string(kInputHeaderCode));
+    shader = ReplaceAll(shader, "{input_footer_code}", std::to_string(kInputFooterCode));
+    shader = ReplaceAll(shader, "{data_header_code}", std::to_string(kDataHeaderCode));
+    shader = ReplaceAll(shader, "{data_footer_code}", std::to_string(kDataFooterCode));
+    shader = ReplaceAll(shader, "{status_bad_input_header}", std::to_string(kStatusBadInputHeader));
+    shader = ReplaceAll(shader, "{status_bad_input_footer}", std::to_string(kStatusBadInputFooter));
+    shader = ReplaceAll(shader, "{status_bad_data_header}", std::to_string(kStatusBadDataHeader));
+    shader = ReplaceAll(shader, "{status_bad_data_footer}", std::to_string(kStatusBadDataFooter));
+    shader = ReplaceAll(shader, "{status_ok}", std::to_string(kStatusOk));
+    shader = ReplaceAll(shader, "{input_qualifiers}",
+                        isUniform ? "uniform"  //
+                                  : "storage, read_write");
+
+    // Set up shader and pipeline
+    auto module = utils::CreateShaderModule(device, shader.c_str());
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Build the input and expected data.
+    std::vector<uint8_t> inputData;     // The whole SSBO data
+    std::vector<uint8_t> expectedData;  // The expected data to be copied by the shader
+    {
+        auto PushU32 = [&inputData](uint32_t u32) {
+            inputData.emplace_back((u32 >> 0) & 0xff);
+            inputData.emplace_back((u32 >> 8) & 0xff);
+            inputData.emplace_back((u32 >> 16) & 0xff);
+            inputData.emplace_back((u32 >> 24) & 0xff);
+        };
+        auto AlignTo = [&inputData](uint32_t alignment, uint8_t code) {
+            uint32_t target = Align(inputData.size(), alignment);
+            uint32_t bytes = target - inputData.size();
+            for (uint32_t i = 0; i < bytes; i++) {
+                inputData.emplace_back(code);
+            }
+        };
+        PushU32(kInputHeaderCode);                  // Input.header
+        AlignTo(dataAlign, kDataAlignPaddingCode);  // Input.data
+        {
+            PushU32(kDataHeaderCode);                      // Input.data.header
+            AlignTo(field.align, kFieldAlignPaddingCode);  // Input.data.field
+            for (uint32_t i = 0; i < field.size; i++) {
+                // The data has the MSB cleared to distinguish it from the
+                // padding codes.
+                uint8_t code = i & 0x7f;
+                inputData.emplace_back(code);  // Input.data.field
+                expectedData.emplace_back(code);
+            }
+            for (uint32_t i = field.size; i < field.padded_size; i++) {
+                inputData.emplace_back(kFieldSizePaddingCode);  // Input.data.field padding
+            }
+            PushU32(kDataFooterCode);                    // Input.data.footer
+            AlignTo(field.align, kDataSizePaddingCode);  // Input.data padding
+        }
+        AlignTo(footerAlign, kInputFooterAlignPaddingCode);  // Input.footer @align
+        PushU32(kInputFooterCode);                           // Input.footer
+        AlignTo(256, kInputTailPaddingCode);                 // Input padding
+    }
+
+    // Set up input storage buffer
+    wgpu::Buffer inputBuf = utils::CreateBufferFromData(
+        device, inputData.data(), inputData.size(),
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst |
+            (isUniform ? wgpu::BufferUsage::Uniform : wgpu::BufferUsage::Storage));
+
+    // Set up output storage buffer
+    wgpu::BufferDescriptor outputDesc;
+    outputDesc.size = field.size;
+    outputDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer outputBuf = device.CreateBuffer(&outputDesc);
+
+    // Set up status storage buffer
+    wgpu::BufferDescriptor statusDesc;
+    statusDesc.size = 4u;
+    statusDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer statusBuf = device.CreateBuffer(&statusDesc);
+
+    // Set up bind group and issue dispatch
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, inputBuf},
+                                                         {1, outputBuf},
+                                                         {2, statusBuf},
+                                                     });
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    // Check the status
+    EXPECT_BUFFER_U32_EQ(kStatusOk, statusBuf, 0) << "status code error" << std::endl
+                                                  << "Shader: " << shader;
+
+    // Check the data
+    field.matcher(field.size, [&](uint32_t offset, uint32_t size) {
+        EXPECT_BUFFER_U8_RANGE_EQ(expectedData.data() + offset, outputBuf, offset, size)
+            << "offset: " << offset;
+    });
+}
+
+namespace {
+
+    auto GenerateParams() {
+        auto params = MakeParamGenerator<ComputeLayoutMemoryBufferTestParams>(
+            {
+                D3D12Backend(), MetalBackend(), VulkanBackend(),
+                // TODO(crbug.com/dawn/942)
+                // There was a compiler error: Buffer block cannot be expressed as any of std430,
+                // std140, scalar, even with enhanced layouts. You can try flattening this block to
+                // support a more flexible layout.
+                // OpenGLBackend(),
+                // OpenGLESBackend(),
+            },
+            {StorageClass::Storage, StorageClass::Uniform},
+            {
+                // See https://www.w3.org/TR/WGSL/#alignment-and-size
+                // Scalar types with no custom alignment or size
+                Field{"i32", /* align */ 4, /* size */ 4},
+                Field{"u32", /* align */ 4, /* size */ 4},
+                Field{"f32", /* align */ 4, /* size */ 4},
+
+                // Scalar types with custom alignment
+                Field{"i32", /* align */ 16, /* size */ 4},
+                Field{"u32", /* align */ 16, /* size */ 4},
+                Field{"f32", /* align */ 16, /* size */ 4},
+
+                // Scalar types with custom size
+                Field{"i32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+                Field{"u32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+                Field{"f32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+
+                // Vector types with no custom alignment or size
+                Field{"vec2<i32>", /* align */ 8, /* size */ 8},
+                Field{"vec2<u32>", /* align */ 8, /* size */ 8},
+                Field{"vec2<f32>", /* align */ 8, /* size */ 8},
+                Field{"vec3<i32>", /* align */ 16, /* size */ 12},
+                Field{"vec3<u32>", /* align */ 16, /* size */ 12},
+                Field{"vec3<f32>", /* align */ 16, /* size */ 12},
+                Field{"vec4<i32>", /* align */ 16, /* size */ 16},
+                Field{"vec4<u32>", /* align */ 16, /* size */ 16},
+                Field{"vec4<f32>", /* align */ 16, /* size */ 16},
+
+                // Vector types with custom alignment
+                Field{"vec2<i32>", /* align */ 32, /* size */ 8},
+                Field{"vec2<u32>", /* align */ 32, /* size */ 8},
+                Field{"vec2<f32>", /* align */ 32, /* size */ 8},
+                Field{"vec3<i32>", /* align */ 32, /* size */ 12},
+                Field{"vec3<u32>", /* align */ 32, /* size */ 12},
+                Field{"vec3<f32>", /* align */ 32, /* size */ 12},
+                Field{"vec4<i32>", /* align */ 32, /* size */ 16},
+                Field{"vec4<u32>", /* align */ 32, /* size */ 16},
+                Field{"vec4<f32>", /* align */ 32, /* size */ 16},
+
+                // Vector types with custom size
+                Field{"vec2<i32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+                Field{"vec2<u32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+                Field{"vec2<f32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+                Field{"vec3<i32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+                Field{"vec3<u32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+                Field{"vec3<f32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+                Field{"vec4<i32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+                Field{"vec4<u32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+                Field{"vec4<f32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+
+                // Matrix types with no custom alignment or size
+                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16},
+                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24},
+                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32},
+                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.Strided<12, 4>(),
+                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.Strided<12, 4>(),
+                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32},
+                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48},
+                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64},
+
+                // Matrix types with custom alignment
+                Field{"mat2x2<f32>", /* align */ 32, /* size */ 16},
+                Field{"mat3x2<f32>", /* align */ 32, /* size */ 24},
+                Field{"mat4x2<f32>", /* align */ 32, /* size */ 32},
+                Field{"mat2x3<f32>", /* align */ 32, /* size */ 32}.Strided<12, 4>(),
+                Field{"mat3x3<f32>", /* align */ 32, /* size */ 48}.Strided<12, 4>(),
+                Field{"mat4x3<f32>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+                Field{"mat2x4<f32>", /* align */ 32, /* size */ 32},
+                Field{"mat3x4<f32>", /* align */ 32, /* size */ 48},
+                Field{"mat4x4<f32>", /* align */ 32, /* size */ 64},
+
+                // Matrix types with custom size
+                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16}.PaddedSize(128),
+                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24}.PaddedSize(128),
+                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32}.PaddedSize(128),
+                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}
+                    .PaddedSize(128)
+                    .Strided<12, 4>(),
+                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}
+                    .PaddedSize(128)
+                    .Strided<12, 4>(),
+                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}
+                    .PaddedSize(128)
+                    .Strided<12, 4>(),
+                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128),
+                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128),
+                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128),
+
+                // Array types with no custom alignment or size.
+                // Note: The use of StorageBufferOnly() is due to UBOs requiring 16 byte alignment
+                // of array elements. See https://www.w3.org/TR/WGSL/#storage-class-constraints
+                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.StorageBufferOnly(),
+                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.StorageBufferOnly(),
+                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}.StorageBufferOnly(),
+                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}.StorageBufferOnly(),
+                Field{"array<vec4<u32>, 1>", /* align */ 16, /* size */ 16},
+                Field{"array<vec4<u32>, 2>", /* align */ 16, /* size */ 32},
+                Field{"array<vec4<u32>, 3>", /* align */ 16, /* size */ 48},
+                Field{"array<vec4<u32>, 4>", /* align */ 16, /* size */ 64},
+                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+
+                // Array types with custom alignment
+                Field{"array<u32, 1>", /* align */ 32, /* size */ 4}.StorageBufferOnly(),
+                Field{"array<u32, 2>", /* align */ 32, /* size */ 8}.StorageBufferOnly(),
+                Field{"array<u32, 3>", /* align */ 32, /* size */ 12}.StorageBufferOnly(),
+                Field{"array<u32, 4>", /* align */ 32, /* size */ 16}.StorageBufferOnly(),
+                Field{"array<vec4<u32>, 1>", /* align */ 32, /* size */ 16},
+                Field{"array<vec4<u32>, 2>", /* align */ 32, /* size */ 32},
+                Field{"array<vec4<u32>, 3>", /* align */ 32, /* size */ 48},
+                Field{"array<vec4<u32>, 4>", /* align */ 32, /* size */ 64},
+                Field{"array<vec3<u32>, 4>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+
+                // Array types with custom size
+                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}
+                    .PaddedSize(128)
+                    .StorageBufferOnly(),
+                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}
+                    .PaddedSize(128)
+                    .StorageBufferOnly(),
+                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}
+                    .PaddedSize(128)
+                    .StorageBufferOnly(),
+                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}
+                    .PaddedSize(128)
+                    .StorageBufferOnly(),
+                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}
+                    .PaddedSize(128)
+                    .Strided<12, 4>(),
+            });
+
+        std::vector<ComputeLayoutMemoryBufferTestParams> filtered;
+        for (auto param : params) {
+            if (param.mStorageClass != StorageClass::Storage && param.mField.storage_buffer_only) {
+                continue;
+            }
+            filtered.emplace_back(param);
+        }
+        return filtered;
+    }
+
+    INSTANTIATE_TEST_SUITE_P(
+        ,
+        ComputeLayoutMemoryBufferTests,
+        ::testing::ValuesIn(GenerateParams()),
+        DawnTestBase::PrintToStringParamName("ComputeLayoutMemoryBufferTests"));
+    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(ComputeLayoutMemoryBufferTests);
+
+}  // namespace
diff --git a/src/dawn/tests/end2end/ComputeSharedMemoryTests.cpp b/src/dawn/tests/end2end/ComputeSharedMemoryTests.cpp
new file mode 100644
index 0000000..5440544
--- /dev/null
+++ b/src/dawn/tests/end2end/ComputeSharedMemoryTests.cpp
@@ -0,0 +1,205 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+
+class ComputeSharedMemoryTests : public DawnTest {
+  public:
+    static constexpr uint32_t kInstances = 11;
+
+    void BasicTest(const char* shader);
+};
+
+void ComputeSharedMemoryTests::BasicTest(const char* shader) {
+    // Set up shader and pipeline
+    auto module = utils::CreateShaderModule(device, shader);
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Set up dst storage buffer
+    wgpu::BufferDescriptor dstDesc;
+    dstDesc.size = sizeof(uint32_t);
+    dstDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
+
+    const uint32_t zero = 0;
+    queue.WriteBuffer(dst, 0, &zero, sizeof(zero));
+
+    // Set up bind group and issue dispatch
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, dst, 0, sizeof(uint32_t)},
+                                                     });
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    const uint32_t expected = kInstances;
+    EXPECT_BUFFER_U32_RANGE_EQ(&expected, dst, 0, 1);
+}
+
+// Basic shared memory test
+TEST_P(ComputeSharedMemoryTests, Basic) {
+    BasicTest(R"(
+        let kTileSize : u32 = 4u;
+        let kInstances : u32 = 11u;
+
+        struct Dst {
+            x : u32
+        }
+
+        @group(0) @binding(0) var<storage, write> dst : Dst;
+        var<workgroup> tmp : u32;
+
+        @stage(compute) @workgroup_size(4,4,1)
+        fn main(@builtin(local_invocation_id) LocalInvocationID : vec3<u32>) {
+            let index : u32 = LocalInvocationID.y * kTileSize + LocalInvocationID.x;
+            if (index == 0u) {
+                tmp = 0u;
+            }
+            workgroupBarrier();
+            for (var i : u32 = 0u; i < kInstances; i = i + 1u) {
+                if (i == index) {
+                    tmp = tmp + 1u;
+                }
+                workgroupBarrier();
+            }
+            if (index == 0u) {
+                dst.x = tmp;
+            }
+        })");
+}
+
+// Test using assorted types in workgroup memory. MSL lacks constructors
+// for matrices in threadgroup memory. Basic test that reading and
+// writing a matrix in workgroup memory works.
+TEST_P(ComputeSharedMemoryTests, AssortedTypes) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct StructValues {
+            m: mat2x2<f32>
+        }
+
+        struct Dst {
+            d_struct : StructValues,
+            d_matrix : mat2x2<f32>,
+            d_array : array<u32, 4>,
+            d_vector : vec4<f32>,
+        }
+
+        @group(0) @binding(0) var<storage, write> dst : Dst;
+
+        var<workgroup> wg_struct : StructValues;
+        var<workgroup> wg_matrix : mat2x2<f32>;
+        var<workgroup> wg_array : array<u32, 4>;
+        var<workgroup> wg_vector : vec4<f32>;
+
+        @stage(compute) @workgroup_size(4,1,1)
+        fn main(@builtin(local_invocation_id) LocalInvocationID : vec3<u32>) {
+
+            let i = 4u * LocalInvocationID.x;
+            if (LocalInvocationID.x == 0u) {
+                wg_struct.m = mat2x2<f32>(
+                    vec2<f32>(f32(i), f32(i + 1u)),
+                    vec2<f32>(f32(i + 2u), f32(i + 3u)));
+            } else if (LocalInvocationID.x == 1u) {
+                wg_matrix = mat2x2<f32>(
+                    vec2<f32>(f32(i), f32(i + 1u)),
+                    vec2<f32>(f32(i + 2u), f32(i + 3u)));
+            } else if (LocalInvocationID.x == 2u) {
+                wg_array[0u] = i;
+                wg_array[1u] = i + 1u;
+                wg_array[2u] = i + 2u;
+                wg_array[3u] = i + 3u;
+            } else if (LocalInvocationID.x == 3u) {
+                wg_vector = vec4<f32>(
+                    f32(i), f32(i + 1u), f32(i + 2u), f32(i + 3u));
+            }
+
+            workgroupBarrier();
+
+            if (LocalInvocationID.x == 0u) {
+                dst.d_struct = wg_struct;
+                dst.d_matrix = wg_matrix;
+                dst.d_array = wg_array;
+                dst.d_vector = wg_vector;
+            }
+        }
+    )");
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Set up dst storage buffer
+    wgpu::BufferDescriptor dstDesc;
+    dstDesc.size = 64;
+    dstDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
+
+    // Set up bind group and issue dispatch
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, dst},
+                                                     });
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    std::array<float, 4> expectedStruct = {0., 1., 2., 3.};
+    std::array<float, 4> expectedMatrix = {4., 5., 6., 7.};
+    std::array<uint32_t, 4> expectedArray = {8, 9, 10, 11};
+    std::array<float, 4> expectedVector = {12., 13., 14., 15.};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedStruct.data(), dst, 0, 4);
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedMatrix.data(), dst, 16, 4);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedArray.data(), dst, 32, 4);
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedVector.data(), dst, 48, 4);
+}
+
+DAWN_INSTANTIATE_TEST(ComputeSharedMemoryTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend(),
+                      VulkanBackend({}, {"use_vulkan_zero_initialize_workgroup_memory_extension"}));
diff --git a/src/dawn/tests/end2end/ComputeStorageBufferBarrierTests.cpp b/src/dawn/tests/end2end/ComputeStorageBufferBarrierTests.cpp
new file mode 100644
index 0000000..17ed6fe
--- /dev/null
+++ b/src/dawn/tests/end2end/ComputeStorageBufferBarrierTests.cpp
@@ -0,0 +1,415 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class ComputeStorageBufferBarrierTests : public DawnTest {
+  protected:
+    static constexpr uint32_t kNumValues = 100;
+    static constexpr uint32_t kIterations = 100;
+};
+
+// Test that multiple dispatches to increment values in a storage buffer are synchronized.
+TEST_P(ComputeStorageBufferBarrierTests, AddIncrement) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expected(kNumValues, 0x1234 * kIterations);
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+    wgpu::Buffer buffer = utils::CreateBufferFromData(
+        device, data.data(), bufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<u32, 100>
+        }
+
+        @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            buf.data[GlobalInvocationID.x] = buf.data[GlobalInvocationID.x] + 0x1234u;
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer, 0, bufferSize}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    for (uint32_t i = 0; i < kIterations; ++i) {
+        pass.Dispatch(kNumValues);
+    }
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, kNumValues);
+}
+
+// Test that multiple dispatches to increment values by ping-ponging between two storage buffers
+// are synchronized.
+TEST_P(ComputeStorageBufferBarrierTests, AddPingPong) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expectedA(kNumValues, 0x1234 * kIterations);
+    std::vector<uint32_t> expectedB(kNumValues, 0x1234 * (kIterations - 1));
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+
+    wgpu::Buffer bufferA = utils::CreateBufferFromData(
+        device, data.data(), bufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    wgpu::Buffer bufferB = utils::CreateBufferFromData(
+        device, data.data(), bufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<u32, 100>
+        }
+
+        @group(0) @binding(0) var<storage, read_write> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            dst.data[GlobalInvocationID.x] = src.data[GlobalInvocationID.x] + 0x1234u;
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::BindGroup bindGroupA = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferA, 0, bufferSize},
+                                                          {1, bufferB, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroupB = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferB, 0, bufferSize},
+                                                          {1, bufferA, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroups[2] = {bindGroupA, bindGroupB};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+
+    for (uint32_t i = 0; i < kIterations / 2; ++i) {
+        pass.SetBindGroup(0, bindGroups[0]);
+        pass.Dispatch(kNumValues);
+        pass.SetBindGroup(0, bindGroups[1]);
+        pass.Dispatch(kNumValues);
+    }
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedA.data(), bufferA, 0, kNumValues);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedB.data(), bufferB, 0, kNumValues);
+}
+
+// Test that multiple dispatches to increment values by ping-ponging between storage buffers and
+// read-only storage buffers are synchronized in one compute pass.
+TEST_P(ComputeStorageBufferBarrierTests, StorageAndReadonlyStoragePingPongInOnePass) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expectedA(kNumValues, 0x1234 * kIterations);
+    std::vector<uint32_t> expectedB(kNumValues, 0x1234 * (kIterations - 1));
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+
+    wgpu::Buffer bufferA = utils::CreateBufferFromData(
+        device, data.data(), bufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    wgpu::Buffer bufferB = utils::CreateBufferFromData(
+        device, data.data(), bufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<u32, 100>
+        }
+
+        @group(0) @binding(0) var<storage, read> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            dst.data[GlobalInvocationID.x] = src.data[GlobalInvocationID.x] + 0x1234u;
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::BindGroup bindGroupA = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferA, 0, bufferSize},
+                                                          {1, bufferB, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroupB = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferB, 0, bufferSize},
+                                                          {1, bufferA, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroups[2] = {bindGroupA, bindGroupB};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+
+    for (uint32_t i = 0; i < kIterations / 2; ++i) {
+        pass.SetBindGroup(0, bindGroups[0]);
+        pass.Dispatch(kNumValues);
+        pass.SetBindGroup(0, bindGroups[1]);
+        pass.Dispatch(kNumValues);
+    }
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedA.data(), bufferA, 0, kNumValues);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedB.data(), bufferB, 0, kNumValues);
+}
+
+// Test that Storage to Uniform buffer transitions work and synchronize correctly
+// by ping-ponging between Storage/Uniform usage in sequential compute passes.
+TEST_P(ComputeStorageBufferBarrierTests, UniformToStorageAddPingPong) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expectedA(kNumValues, 0x1234 * kIterations);
+    std::vector<uint32_t> expectedB(kNumValues, 0x1234 * (kIterations - 1));
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+
+    wgpu::Buffer bufferA = utils::CreateBufferFromData(
+        device, data.data(), bufferSize,
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopySrc);
+
+    wgpu::Buffer bufferB = utils::CreateBufferFromData(
+        device, data.data(), bufferSize,
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<vec4<u32>, 25>
+        }
+
+        @group(0) @binding(0) var<uniform> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            dst.data[GlobalInvocationID.x] = src.data[GlobalInvocationID.x] +
+                vec4<u32>(0x1234u, 0x1234u, 0x1234u, 0x1234u);
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::BindGroup bindGroupA = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferA, 0, bufferSize},
+                                                          {1, bufferB, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroupB = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferB, 0, bufferSize},
+                                                          {1, bufferA, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroups[2] = {bindGroupA, bindGroupB};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    for (uint32_t i = 0, b = 0; i < kIterations; ++i, b = 1 - b) {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroups[b]);
+        pass.Dispatch(kNumValues / 4);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedA.data(), bufferA, 0, kNumValues);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedB.data(), bufferB, 0, kNumValues);
+}
+
+// Test that Storage to Uniform buffer transitions work and synchronize correctly
+// by ping-ponging between Storage/Uniform usage in one compute pass.
+TEST_P(ComputeStorageBufferBarrierTests, UniformToStorageAddPingPongInOnePass) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expectedA(kNumValues, 0x1234 * kIterations);
+    std::vector<uint32_t> expectedB(kNumValues, 0x1234 * (kIterations - 1));
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+
+    wgpu::Buffer bufferA = utils::CreateBufferFromData(
+        device, data.data(), bufferSize,
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopySrc);
+
+    wgpu::Buffer bufferB = utils::CreateBufferFromData(
+        device, data.data(), bufferSize,
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<vec4<u32>, 25>
+        }
+
+        @group(0) @binding(0) var<uniform> src : Buf;
+        @group(0) @binding(1) var<storage, read_write> dst : Buf;
+
+        @stage(compute) @workgroup_size(1)
+        fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+            dst.data[GlobalInvocationID.x] = src.data[GlobalInvocationID.x] +
+                vec4<u32>(0x1234u, 0x1234u, 0x1234u, 0x1234u);
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    wgpu::BindGroup bindGroupA = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferA, 0, bufferSize},
+                                                          {1, bufferB, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroupB = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, bufferB, 0, bufferSize},
+                                                          {1, bufferA, 0, bufferSize},
+                                                      });
+
+    wgpu::BindGroup bindGroups[2] = {bindGroupA, bindGroupB};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    for (uint32_t i = 0, b = 0; i < kIterations; ++i, b = 1 - b) {
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroups[b]);
+        pass.Dispatch(kNumValues / 4);
+    }
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedA.data(), bufferA, 0, kNumValues);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedB.data(), bufferB, 0, kNumValues);
+}
+
+// Test that barriers for dispatches correctly combine Indirect | Storage in backends with explicit
+// barriers. Do this by:
+//  1 - Initializing an indirect buffer with zeros.
+//  2 - Write ones into it with a compute shader.
+//  3 - Use the indirect buffer in a Dispatch while also reading its data.
+TEST_P(ComputeStorageBufferBarrierTests, IndirectBufferCorrectBarrier) {
+    wgpu::ComputePipelineDescriptor step2PipelineDesc;
+    step2PipelineDesc.compute.entryPoint = "main";
+    step2PipelineDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<u32, 3>
+        }
+        @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            buf.data = array<u32, 3>(1u, 1u, 1u);
+        }
+    )");
+    wgpu::ComputePipeline step2Pipeline = device.CreateComputePipeline(&step2PipelineDesc);
+
+    wgpu::ComputePipelineDescriptor step3PipelineDesc;
+    step3PipelineDesc.compute.entryPoint = "main";
+    step3PipelineDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            data : array<u32, 3>
+        }
+        @group(0) @binding(0) var<storage, read> buf : Buf;
+
+        struct Result {
+            data : u32
+        }
+        @group(0) @binding(1) var<storage, read_write> result : Result;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            result.data = 2u;
+            if (buf.data[0] == 1u && buf.data[1] == 1u && buf.data[2] == 1u) {
+                result.data = 1u;
+            }
+        }
+    )");
+    wgpu::ComputePipeline step3Pipeline = device.CreateComputePipeline(&step3PipelineDesc);
+
+    //  1 - Initializing an indirect buffer with zeros.
+    wgpu::Buffer buf = utils::CreateBufferFromData<uint32_t>(
+        device, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Indirect, {0u, 0u, 0u});
+
+    //  2 - Write ones into it with a compute shader.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    wgpu::BindGroup step2Group =
+        utils::MakeBindGroup(device, step2Pipeline.GetBindGroupLayout(0), {{0, buf}});
+
+    pass.SetPipeline(step2Pipeline);
+    pass.SetBindGroup(0, step2Group);
+    pass.Dispatch(1);
+
+    //  3 - Use the indirect buffer in a Dispatch while also reading its data.
+    wgpu::Buffer resultBuffer = utils::CreateBufferFromData<uint32_t>(
+        device, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc, {0u});
+    wgpu::BindGroup step3Group = utils::MakeBindGroup(device, step3Pipeline.GetBindGroupLayout(0),
+                                                      {{0, buf}, {1, resultBuffer}});
+
+    pass.SetPipeline(step3Pipeline);
+    pass.SetBindGroup(0, step3Group);
+    pass.DispatchIndirect(buf, 0);
+
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_EQ(1u, resultBuffer, 0);
+}
+
+DAWN_INSTANTIATE_TEST(ComputeStorageBufferBarrierTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/CopyTests.cpp b/src/dawn/tests/end2end/CopyTests.cpp
new file mode 100644
index 0000000..d6920dd
--- /dev/null
+++ b/src/dawn/tests/end2end/CopyTests.cpp
@@ -0,0 +1,2540 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <array>
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// For MinimumBufferSpec bytesPerRow and rowsPerImage, compute a default from the copy extent.
+constexpr uint32_t kStrideComputeDefault = 0xFFFF'FFFEul;
+
+constexpr wgpu::TextureFormat kDefaultFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+class CopyTests {
+  protected:
+    struct TextureSpec {
+        wgpu::TextureFormat format = kDefaultFormat;
+        wgpu::Origin3D copyOrigin = {0, 0, 0};
+        wgpu::Extent3D textureSize;
+        uint32_t copyLevel = 0;
+        uint32_t levelCount = 1;
+    };
+
+    struct BufferSpec {
+        uint64_t size;
+        uint64_t offset;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+    };
+
+    static std::vector<uint8_t> GetExpectedTextureData(const utils::TextureDataCopyLayout& layout) {
+        uint32_t bytesPerTexelBlock = layout.bytesPerRow / layout.texelBlocksPerRow;
+        std::vector<uint8_t> textureData(layout.byteLength);
+        for (uint32_t layer = 0; layer < layout.mipSize.depthOrArrayLayers; ++layer) {
+            const uint32_t byteOffsetPerSlice = layout.bytesPerImage * layer;
+            for (uint32_t y = 0; y < layout.mipSize.height; ++y) {
+                for (uint32_t x = 0; x < layout.mipSize.width * bytesPerTexelBlock; ++x) {
+                    uint32_t i = x + y * layout.bytesPerRow;
+                    textureData[byteOffsetPerSlice + i] =
+                        static_cast<uint8_t>((x + 1 + (layer + 1) * y) % 256);
+                }
+            }
+        }
+        return textureData;
+    }
+
+    // TODO(crbug.com/dawn/818): remove this function when all the tests in this file support
+    // testing arbitrary formats.
+    static std::vector<RGBA8> GetExpectedTextureDataRGBA8(
+        const utils::TextureDataCopyLayout& layout) {
+        std::vector<RGBA8> textureData(layout.texelBlockCount);
+        for (uint32_t layer = 0; layer < layout.mipSize.depthOrArrayLayers; ++layer) {
+            const uint32_t texelIndexOffsetPerSlice = layout.texelBlocksPerImage * layer;
+            for (uint32_t y = 0; y < layout.mipSize.height; ++y) {
+                for (uint32_t x = 0; x < layout.mipSize.width; ++x) {
+                    uint32_t i = x + y * layout.texelBlocksPerRow;
+                    textureData[texelIndexOffsetPerSlice + i] =
+                        RGBA8(static_cast<uint8_t>((x + layer * x) % 256),
+                              static_cast<uint8_t>((y + layer * y) % 256),
+                              static_cast<uint8_t>(x / 256), static_cast<uint8_t>(y / 256));
+                }
+            }
+        }
+
+        return textureData;
+    }
+
+    static BufferSpec MinimumBufferSpec(uint32_t width,
+                                        uint32_t height,
+                                        uint32_t depth = 1,
+                                        wgpu::TextureFormat format = kDefaultFormat) {
+        return MinimumBufferSpec({width, height, depth}, kStrideComputeDefault,
+                                 depth == 1 ? wgpu::kCopyStrideUndefined : kStrideComputeDefault,
+                                 format);
+    }
+
+    static BufferSpec MinimumBufferSpec(wgpu::Extent3D copyExtent,
+                                        uint32_t overrideBytesPerRow = kStrideComputeDefault,
+                                        uint32_t overrideRowsPerImage = kStrideComputeDefault,
+                                        wgpu::TextureFormat format = kDefaultFormat) {
+        uint32_t bytesPerRow = utils::GetMinimumBytesPerRow(format, copyExtent.width);
+        if (overrideBytesPerRow != kStrideComputeDefault) {
+            bytesPerRow = overrideBytesPerRow;
+        }
+        uint32_t rowsPerImage = copyExtent.height;
+        if (overrideRowsPerImage != kStrideComputeDefault) {
+            rowsPerImage = overrideRowsPerImage;
+        }
+
+        uint32_t totalDataSize =
+            utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, format);
+        return {totalDataSize, 0, bytesPerRow, rowsPerImage};
+    }
+    static void CopyTextureData(uint32_t bytesPerTexelBlock,
+                                const void* srcData,
+                                uint32_t widthInBlocks,
+                                uint32_t heightInBlocks,
+                                uint32_t depthInBlocks,
+                                uint32_t srcBytesPerRow,
+                                uint32_t srcRowsPerImage,
+                                void* dstData,
+                                uint32_t dstBytesPerRow,
+                                uint32_t dstRowsPerImage) {
+        for (unsigned int z = 0; z < depthInBlocks; ++z) {
+            uint32_t srcDepthOffset = z * srcBytesPerRow * srcRowsPerImage;
+            uint32_t dstDepthOffset = z * dstBytesPerRow * dstRowsPerImage;
+            for (unsigned int y = 0; y < heightInBlocks; ++y) {
+                memcpy(static_cast<uint8_t*>(dstData) + dstDepthOffset + y * dstBytesPerRow,
+                       static_cast<const uint8_t*>(srcData) + srcDepthOffset + y * srcBytesPerRow,
+                       widthInBlocks * bytesPerTexelBlock);
+            }
+        }
+    }
+};
+
+class CopyTests_T2B : public CopyTests, public DawnTest {
+  protected:
+    void DoTest(const TextureSpec& textureSpec,
+                const BufferSpec& bufferSpec,
+                const wgpu::Extent3D& copySize,
+                wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D) {
+        // TODO(crbug.com/dawn/818): support testing arbitrary formats
+        ASSERT_EQ(kDefaultFormat, textureSpec.format);
+
+        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+        // Create a texture that is `width` x `height` with (`level` + 1) mip levels.
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = dimension;
+        descriptor.size = textureSpec.textureSize;
+        descriptor.sampleCount = 1;
+        descriptor.format = textureSpec.format;
+        descriptor.mipLevelCount = textureSpec.levelCount;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        // Layout for initial data upload to texture.
+        // Some parts of this result are also reused later.
+        const utils::TextureDataCopyLayout copyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                textureSpec.format, textureSpec.textureSize, textureSpec.copyLevel, dimension);
+
+        // Initialize the source texture
+        std::vector<RGBA8> textureArrayData = GetExpectedTextureDataRGBA8(copyLayout);
+        {
+            wgpu::ImageCopyTexture imageCopyTexture =
+                utils::CreateImageCopyTexture(texture, textureSpec.copyLevel, {0, 0, 0});
+            wgpu::TextureDataLayout textureDataLayout =
+                utils::CreateTextureDataLayout(0, copyLayout.bytesPerRow, copyLayout.rowsPerImage);
+            queue.WriteTexture(&imageCopyTexture, textureArrayData.data(), copyLayout.byteLength,
+                               &textureDataLayout, &copyLayout.mipSize);
+        }
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        // Create a buffer of `size` and populate it with empty data (0,0,0,0) Note:
+        // Prepopulating the buffer with empty data ensures that there is not random data in the
+        // expectation and helps ensure that the padding due to the bytes per row is not modified
+        // by the copy.
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = bufferSpec.size;
+        bufferDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+        {
+            wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+                texture, textureSpec.copyLevel, textureSpec.copyOrigin);
+            wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+                buffer, bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
+            encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        uint64_t bufferOffset = bufferSpec.offset;
+
+        uint32_t copyLayer = copySize.depthOrArrayLayers;
+        uint32_t copyDepth = 1;
+        if (dimension == wgpu::TextureDimension::e3D) {
+            copyLayer = 1;
+            copyDepth = copySize.depthOrArrayLayers;
+        }
+
+        const wgpu::Extent3D copySizePerLayer = {copySize.width, copySize.height, copyDepth};
+        // Texels in single layer.
+        const uint32_t texelCountInCopyRegion = utils::GetTexelCountInCopyRegion(
+            bufferSpec.bytesPerRow, bufferSpec.rowsPerImage, copySizePerLayer, textureSpec.format);
+        const uint32_t maxArrayLayer = textureSpec.copyOrigin.z + copyLayer;
+        std::vector<RGBA8> expected(texelCountInCopyRegion);
+        for (uint32_t layer = textureSpec.copyOrigin.z; layer < maxArrayLayer; ++layer) {
+            // Copy the data used to create the upload buffer in the specified copy region to have
+            // the same format as the expected buffer data.
+            std::fill(expected.begin(), expected.end(), RGBA8());
+            const uint32_t texelIndexOffset = copyLayout.texelBlocksPerImage * layer;
+            const uint32_t expectedTexelArrayDataStartIndex =
+                texelIndexOffset + (textureSpec.copyOrigin.x +
+                                    textureSpec.copyOrigin.y * copyLayout.texelBlocksPerRow);
+
+            CopyTextureData(bytesPerTexel,
+                            textureArrayData.data() + expectedTexelArrayDataStartIndex,
+                            copySize.width, copySize.height, copyDepth, copyLayout.bytesPerRow,
+                            copyLayout.rowsPerImage, expected.data(), bufferSpec.bytesPerRow,
+                            bufferSpec.rowsPerImage);
+
+            EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(expected.data()), buffer,
+                                       bufferOffset, static_cast<uint32_t>(expected.size()))
+                << "Texture to Buffer copy failed copying region [(" << textureSpec.copyOrigin.x
+                << ", " << textureSpec.copyOrigin.y << ", " << textureSpec.copyOrigin.z << "), ("
+                << textureSpec.copyOrigin.x + copySize.width << ", "
+                << textureSpec.copyOrigin.y + copySize.height << ", "
+                << textureSpec.copyOrigin.z + copySize.depthOrArrayLayers << ")) from "
+                << textureSpec.textureSize.width << " x " << textureSpec.textureSize.height
+                << " texture at mip level " << textureSpec.copyLevel << " layer " << layer << " to "
+                << bufferSpec.size << "-byte buffer with offset " << bufferOffset
+                << " and bytes per row " << bufferSpec.bytesPerRow << std::endl;
+
+            bufferOffset += bufferSpec.bytesPerRow * bufferSpec.rowsPerImage;
+        }
+    }
+};
+
+class CopyTests_B2T : public CopyTests, public DawnTest {
+  protected:
+    static void FillBufferData(RGBA8* data, size_t count) {
+        for (size_t i = 0; i < count; ++i) {
+            data[i] = RGBA8(static_cast<uint8_t>(i % 256), static_cast<uint8_t>((i / 256) % 256),
+                            static_cast<uint8_t>((i / 256 / 256) % 256), 255);
+        }
+    }
+
+    void DoTest(const TextureSpec& textureSpec,
+                const BufferSpec& bufferSpec,
+                const wgpu::Extent3D& copySize,
+                wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D) {
+        // TODO(crbug.com/dawn/818): support testing arbitrary formats
+        ASSERT_EQ(kDefaultFormat, textureSpec.format);
+        // Create a buffer of size `size` and populate it with data
+        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+        std::vector<RGBA8> bufferData(bufferSpec.size / bytesPerTexel);
+        FillBufferData(bufferData.data(), bufferData.size());
+        wgpu::Buffer buffer =
+            utils::CreateBufferFromData(device, bufferData.data(), bufferSpec.size,
+                                        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+        // Create a texture that is `width` x `height` with (`level` + 1) mip levels.
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = dimension;
+        descriptor.size = textureSpec.textureSize;
+        descriptor.sampleCount = 1;
+        descriptor.format = textureSpec.format;
+        descriptor.mipLevelCount = textureSpec.levelCount;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            buffer, bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, textureSpec.copyLevel, textureSpec.copyOrigin);
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        const utils::TextureDataCopyLayout copyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                textureSpec.format, textureSpec.textureSize, textureSpec.copyLevel, dimension,
+                bufferSpec.rowsPerImage);
+
+        uint32_t copyLayer = copySize.depthOrArrayLayers;
+        uint32_t copyDepth = 1;
+        if (dimension == wgpu::TextureDimension::e3D) {
+            copyLayer = 1;
+            copyDepth = copySize.depthOrArrayLayers;
+        }
+
+        uint64_t bufferOffset = bufferSpec.offset;
+        const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureSpec.format);
+        const uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureSpec.format);
+        const uint32_t texelCountPerLayer = copyDepth * (copyLayout.mipSize.width / blockWidth) *
+                                            (copyLayout.mipSize.height / blockHeight) *
+                                            bytesPerTexel;
+        for (uint32_t layer = 0; layer < copyLayer; ++layer) {
+            // Copy and pack the data used to create the buffer in the specified copy region to have
+            // the same format as the expected texture data.
+            std::vector<RGBA8> expected(texelCountPerLayer);
+            CopyTextureData(bytesPerTexel, bufferData.data() + bufferOffset / bytesPerTexel,
+                            copySize.width, copySize.height, copyDepth, bufferSpec.bytesPerRow,
+                            bufferSpec.rowsPerImage, expected.data(),
+                            copySize.width * bytesPerTexel, copySize.height);
+
+            EXPECT_TEXTURE_EQ(expected.data(), texture,
+                              {textureSpec.copyOrigin.x, textureSpec.copyOrigin.y,
+                               textureSpec.copyOrigin.z + layer},
+                              {copySize.width, copySize.height, copyDepth}, textureSpec.copyLevel)
+                << "Buffer to Texture copy failed copying " << bufferSpec.size
+                << "-byte buffer with offset " << bufferSpec.offset << " and bytes per row "
+                << bufferSpec.bytesPerRow << " to [(" << textureSpec.copyOrigin.x << ", "
+                << textureSpec.copyOrigin.y << "), (" << textureSpec.copyOrigin.x + copySize.width
+                << ", " << textureSpec.copyOrigin.y + copySize.height << ")) region of "
+                << textureSpec.textureSize.width << " x " << textureSpec.textureSize.height
+                << " texture at mip level " << textureSpec.copyLevel << " layer " << layer
+                << std::endl;
+            bufferOffset += copyLayout.bytesPerImage;
+        }
+    }
+};
+
+namespace {
+    // The CopyTests Texture to Texture in this class will validate both CopyTextureToTexture and
+    // CopyTextureToTextureInternal.
+    using UsageCopySrc = bool;
+    DAWN_TEST_PARAM_STRUCT(CopyTestsParams, UsageCopySrc);
+
+    using SrcColorFormat = wgpu::TextureFormat;
+    DAWN_TEST_PARAM_STRUCT(SrcColorFormatParams, SrcColorFormat);
+}  // namespace
+
+template <typename Parent>
+class CopyTests_T2TBase : public CopyTests, public Parent {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
+    }
+
+    void DoTest(const TextureSpec& srcSpec,
+                const TextureSpec& dstSpec,
+                const wgpu::Extent3D& copySize,
+                wgpu::TextureDimension srcDimension,
+                wgpu::TextureDimension dstDimension,
+                bool copyWithinSameTexture = false,
+                bool usageCopySrc = false) {
+        const wgpu::TextureFormat format = srcSpec.format;
+
+        wgpu::TextureDescriptor srcDescriptor;
+        srcDescriptor.dimension = srcDimension;
+        srcDescriptor.size = srcSpec.textureSize;
+        srcDescriptor.sampleCount = 1;
+        srcDescriptor.format = format;
+        srcDescriptor.mipLevelCount = srcSpec.levelCount;
+        srcDescriptor.usage = wgpu::TextureUsage::CopyDst;
+        // This test will have two versions, one where we check the normal CopyToCopy, and for that
+        // we will add the CopySrc usage, and the one where we test the CopyToCopyInternal, and then
+        // we have to add the CopySrc to the Internal usage.
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+        srcDescriptor.nextInChain = &internalDesc;
+        if (usageCopySrc) {
+            srcDescriptor.usage |= wgpu::TextureUsage::CopySrc;
+        } else {
+            internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+        }
+        wgpu::Texture srcTexture = this->device.CreateTexture(&srcDescriptor);
+
+        wgpu::Texture dstTexture;
+        if (copyWithinSameTexture) {
+            dstTexture = srcTexture;
+        } else {
+            wgpu::TextureDescriptor dstDescriptor;
+            dstDescriptor.dimension = dstDimension;
+            dstDescriptor.size = dstSpec.textureSize;
+            dstDescriptor.sampleCount = 1;
+            dstDescriptor.format = dstSpec.format;
+            dstDescriptor.mipLevelCount = dstSpec.levelCount;
+            dstDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+            dstTexture = this->device.CreateTexture(&dstDescriptor);
+        }
+
+        // Create an upload buffer and use it to completely populate the subresources of the src
+        // texture that will be copied from at the given mip level.
+        const utils::TextureDataCopyLayout srcDataCopyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                format,
+                {srcSpec.textureSize.width, srcSpec.textureSize.height,
+                 srcDimension == wgpu::TextureDimension::e3D
+                     ? srcSpec.textureSize.depthOrArrayLayers
+                     : copySize.depthOrArrayLayers},
+                srcSpec.copyLevel, srcDimension);
+
+        // Initialize the source texture
+        const std::vector<uint8_t> srcTextureCopyData = GetExpectedTextureData(srcDataCopyLayout);
+        {
+            wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+                srcTexture, srcSpec.copyLevel, {0, 0, srcSpec.copyOrigin.z});
+            wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(
+                0, srcDataCopyLayout.bytesPerRow, srcDataCopyLayout.rowsPerImage);
+            this->queue.WriteTexture(&imageCopyTexture, srcTextureCopyData.data(),
+                                     srcDataCopyLayout.byteLength, &textureDataLayout,
+                                     &srcDataCopyLayout.mipSize);
+        }
+
+        wgpu::CommandEncoder encoder = this->device.CreateCommandEncoder();
+
+        // Perform the texture to texture copy
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, srcSpec.copyLevel, srcSpec.copyOrigin);
+        wgpu::ImageCopyTexture dstImageCopyTexture =
+            utils::CreateImageCopyTexture(dstTexture, dstSpec.copyLevel, dstSpec.copyOrigin);
+
+        if (usageCopySrc) {
+            encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &copySize);
+        } else {
+            encoder.CopyTextureToTextureInternal(&srcImageCopyTexture, &dstImageCopyTexture,
+                                                 &copySize);
+        }
+
+        // Create an output buffer and use it to completely populate the subresources of the dst
+        // texture that will be copied to at the given mip level.
+        const utils::TextureDataCopyLayout dstDataCopyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                format,
+                {dstSpec.textureSize.width, dstSpec.textureSize.height,
+                 dstDimension == wgpu::TextureDimension::e3D
+                     ? dstSpec.textureSize.depthOrArrayLayers
+                     : copySize.depthOrArrayLayers},
+                dstSpec.copyLevel, dstDimension);
+        wgpu::BufferDescriptor outputBufferDescriptor;
+        outputBufferDescriptor.size = dstDataCopyLayout.byteLength;
+        outputBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer outputBuffer = this->device.CreateBuffer(&outputBufferDescriptor);
+        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+        const uint32_t expectedDstDataOffset = dstSpec.copyOrigin.x * bytesPerTexel +
+                                               dstSpec.copyOrigin.y * dstDataCopyLayout.bytesPerRow;
+        wgpu::ImageCopyBuffer outputImageCopyBuffer = utils::CreateImageCopyBuffer(
+            outputBuffer, expectedDstDataOffset, dstDataCopyLayout.bytesPerRow,
+            dstDataCopyLayout.rowsPerImage);
+        encoder.CopyTextureToBuffer(&dstImageCopyTexture, &outputImageCopyBuffer, &copySize);
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        this->queue.Submit(1, &commands);
+
+        // Validate if the data in outputBuffer is what we expected, including the untouched data
+        // outside of the copy.
+        {
+            // Validate the output buffer slice-by-slice regardless of whether the destination
+            // texture is 3D or 2D. The dimension here doesn't matter - we're only populating the
+            // CPU data to verify against.
+            uint32_t copyLayer = copySize.depthOrArrayLayers;
+            uint32_t copyDepth = 1;
+
+            const uint64_t validDataSizePerDstTextureLayer = utils::RequiredBytesInCopy(
+                dstDataCopyLayout.bytesPerRow, dstDataCopyLayout.mipSize.height,
+                dstDataCopyLayout.mipSize.width, dstDataCopyLayout.mipSize.height, copyDepth,
+                bytesPerTexel);
+
+            // expectedDstDataPerSlice stores one layer of the destination texture.
+            std::vector<uint8_t> expectedDstDataPerSlice(validDataSizePerDstTextureLayer);
+            for (uint32_t slice = 0; slice < copyLayer; ++slice) {
+                // For each source texture array slice involved in the copy, emulate the T2T copy
+                // on the CPU side by "copying" the copy data from the "source texture"
+                // (srcTextureCopyData) to the "destination texture" (expectedDstDataPerSlice).
+                std::fill(expectedDstDataPerSlice.begin(), expectedDstDataPerSlice.end(), 0);
+
+                const uint32_t srcBytesOffset = srcDataCopyLayout.bytesPerImage * slice;
+
+                // Get the offset of the srcTextureCopyData that contains the copy data on the
+                // slice-th texture array layer of the source texture.
+                const uint32_t srcTexelDataOffset =
+                    srcBytesOffset + (srcSpec.copyOrigin.x * bytesPerTexel +
+                                      srcSpec.copyOrigin.y * srcDataCopyLayout.bytesPerRow);
+                // Do the T2T "copy" on the CPU side to get the expected texel value at the
+                CopyTextureData(bytesPerTexel, &srcTextureCopyData[srcTexelDataOffset],
+                                copySize.width, copySize.height, copyDepth,
+                                srcDataCopyLayout.bytesPerRow, srcDataCopyLayout.rowsPerImage,
+                                &expectedDstDataPerSlice[expectedDstDataOffset],
+                                dstDataCopyLayout.bytesPerRow, dstDataCopyLayout.rowsPerImage);
+
+                // Compare the content of the destination texture at the (dstSpec.copyOrigin.z +
+                // slice)-th layer to its expected data after the copy (the outputBuffer contains
+                // the data of the destination texture since the dstSpec.copyOrigin.z-th layer).
+                uint64_t outputBufferExpectationBytesOffset =
+                    dstDataCopyLayout.bytesPerImage * slice;
+                EXPECT_BUFFER_U32_RANGE_EQ(
+                    reinterpret_cast<const uint32_t*>(expectedDstDataPerSlice.data()), outputBuffer,
+                    outputBufferExpectationBytesOffset,
+                    validDataSizePerDstTextureLayer / sizeof(uint32_t));
+            }
+        }
+    }
+};
+
+class CopyTests_T2T : public CopyTests_T2TBase<DawnTestWithParams<CopyTestsParams>> {
+  protected:
+    void DoTest(const TextureSpec& srcSpec,
+                const TextureSpec& dstSpec,
+                const wgpu::Extent3D& copySize,
+                bool copyWithinSameTexture = false,
+                wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D) {
+        DoTest(srcSpec, dstSpec, copySize, dimension, dimension, copyWithinSameTexture);
+    }
+
+    void DoTest(const TextureSpec& srcSpec,
+                const TextureSpec& dstSpec,
+                const wgpu::Extent3D& copySize,
+                wgpu::TextureDimension srcDimension,
+                wgpu::TextureDimension dstDimension,
+                bool copyWithinSameTexture = false) {
+        const bool usageCopySrc = GetParam().mUsageCopySrc;
+        // If we do this test with a CopyWithinSameTexture, it will need to have usageCopySrc in the
+        // public usage of the texture as it will later use a CopyTextureToBuffer, that needs the
+        // public usage of it.
+        DAWN_TEST_UNSUPPORTED_IF(!usageCopySrc && copyWithinSameTexture);
+
+        ASSERT_EQ(srcSpec.format, dstSpec.format);
+
+        CopyTests_T2TBase<DawnTestWithParams<CopyTestsParams>>::DoTest(
+            srcSpec, dstSpec, copySize, srcDimension, dstDimension, copyWithinSameTexture,
+            usageCopySrc);
+    }
+};
+
+class CopyTests_Formats : public CopyTests_T2TBase<DawnTestWithParams<SrcColorFormatParams>> {
+  protected:
+    // Texture format is compatible and could be copied to each other if the only diff is srgb-ness.
+    wgpu::TextureFormat GetCopyCompatibleFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return wgpu::TextureFormat::RGBA8UnormSrgb;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return wgpu::TextureFormat::RGBA8Unorm;
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return wgpu::TextureFormat::BGRA8UnormSrgb;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return wgpu::TextureFormat::BGRA8Unorm;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    void DoTest(TextureSpec srcSpec,
+                TextureSpec dstSpec,
+                const wgpu::Extent3D& copySize,
+                wgpu::TextureDimension srcDimension = wgpu::TextureDimension::e2D,
+                wgpu::TextureDimension dstDimension = wgpu::TextureDimension::e2D) {
+        srcSpec.format = GetParam().mSrcColorFormat;
+        dstSpec.format = GetCopyCompatibleFormat(srcSpec.format);
+
+        CopyTests_T2TBase<DawnTestWithParams<SrcColorFormatParams>>::DoTest(
+            srcSpec, dstSpec, copySize, srcDimension, dstDimension);
+    }
+};
+
+class CopyTests_B2B : public DawnTest {
+  protected:
+    // This is the same signature as CopyBufferToBuffer except that the buffers are replaced by
+    // only their size.
+    void DoTest(uint64_t sourceSize,
+                uint64_t sourceOffset,
+                uint64_t destinationSize,
+                uint64_t destinationOffset,
+                uint64_t copySize) {
+        ASSERT(sourceSize % 4 == 0);
+        ASSERT(destinationSize % 4 == 0);
+
+        // Create our two test buffers, destination filled with zeros, source filled with non-zeroes
+        std::vector<uint32_t> zeroes(static_cast<size_t>(destinationSize / sizeof(uint32_t)));
+        wgpu::Buffer destination =
+            utils::CreateBufferFromData(device, zeroes.data(), zeroes.size() * sizeof(uint32_t),
+                                        wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc);
+
+        std::vector<uint32_t> sourceData(static_cast<size_t>(sourceSize / sizeof(uint32_t)));
+        for (size_t i = 0; i < sourceData.size(); i++) {
+            sourceData[i] = i + 1;
+        }
+        wgpu::Buffer source = utils::CreateBufferFromData(device, sourceData.data(),
+                                                          sourceData.size() * sizeof(uint32_t),
+                                                          wgpu::BufferUsage::CopySrc);
+
+        // Submit the copy
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, sourceOffset, destination, destinationOffset, copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check destination is exactly the expected content.
+        EXPECT_BUFFER_U32_RANGE_EQ(zeroes.data(), destination, 0,
+                                   destinationOffset / sizeof(uint32_t));
+        EXPECT_BUFFER_U32_RANGE_EQ(sourceData.data() + sourceOffset / sizeof(uint32_t), destination,
+                                   destinationOffset, copySize / sizeof(uint32_t));
+        uint64_t copyEnd = destinationOffset + copySize;
+        EXPECT_BUFFER_U32_RANGE_EQ(zeroes.data(), destination, copyEnd,
+                                   (destinationSize - copyEnd) / sizeof(uint32_t));
+    }
+};
+
+class ClearBufferTests : public DawnTest {
+  protected:
+    // This is the same signature as ClearBuffer except that the buffers are replaced by
+    // only their size.
+    void DoTest(uint64_t bufferSize, uint64_t clearOffset, uint64_t clearSize) {
+        ASSERT(bufferSize % 4 == 0);
+        ASSERT(clearSize % 4 == 0);
+
+        // Create our test buffer, filled with non-zeroes
+        std::vector<uint32_t> bufferData(static_cast<size_t>(bufferSize / sizeof(uint32_t)));
+        for (size_t i = 0; i < bufferData.size(); i++) {
+            bufferData[i] = i + 1;
+        }
+        wgpu::Buffer buffer = utils::CreateBufferFromData(
+            device, bufferData.data(), bufferData.size() * sizeof(uint32_t),
+            wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc);
+
+        std::vector<uint8_t> fillData(static_cast<size_t>(clearSize), 0u);
+
+        // Submit the fill
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(buffer, clearOffset, clearSize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check destination is exactly the expected content.
+        EXPECT_BUFFER_U32_RANGE_EQ(bufferData.data(), buffer, 0, clearOffset / sizeof(uint32_t));
+        EXPECT_BUFFER_U8_RANGE_EQ(fillData.data(), buffer, clearOffset, clearSize);
+        uint64_t clearEnd = clearOffset + clearSize;
+        EXPECT_BUFFER_U32_RANGE_EQ(bufferData.data() + clearEnd / sizeof(uint32_t), buffer,
+                                   clearEnd, (bufferSize - clearEnd) / sizeof(uint32_t));
+    }
+};
+
+// Test that copying an entire texture with 256-byte aligned dimensions works
+TEST_P(CopyTests_T2B, FullTextureAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 1});
+}
+
+// Test noop copies
+TEST_P(CopyTests_T2B, ZeroSizedCopy) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {0, kHeight, 1});
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, 0, 1});
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 0});
+}
+
+// Test that copying an entire texture without 256-byte aligned dimensions works
+TEST_P(CopyTests_T2B, FullTextureUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 1});
+}
+
+// Test that reading pixels from a 256-byte aligned texture works
+TEST_P(CopyTests_T2B, PixelReadAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    BufferSpec pixelBuffer = MinimumBufferSpec(1, 1);
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, 1};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, 0, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {0, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 3, kHeight / 7, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 7, kHeight / 3, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+}
+
+// Test that copying pixels from a texture that is not 256-byte aligned works
+TEST_P(CopyTests_T2B, PixelReadUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    BufferSpec pixelBuffer = MinimumBufferSpec(1, 1);
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, 1};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, 0, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {0, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 3, kHeight / 7, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 7, kHeight / 3, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+}
+
+// Test that copying regions with 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, TextureRegionAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    for (unsigned int w : {64, 128, 256}) {
+        for (unsigned int h : {16, 32, 48}) {
+            TextureSpec textureSpec;
+            textureSpec.textureSize = {kWidth, kHeight, 1};
+            DoTest(textureSpec, MinimumBufferSpec(w, h), {w, h, 1});
+        }
+    }
+}
+
+// Test that copying regions without 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, TextureRegionUnaligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int w : {13, 63, 65}) {
+        for (unsigned int h : {17, 19, 63}) {
+            TextureSpec textureSpec = defaultTextureSpec;
+            DoTest(textureSpec, MinimumBufferSpec(w, h), {w, h, 1});
+        }
+    }
+}
+
+// Test that copying mips with 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, TextureMipAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i),
+               {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test that copying mips when one dimension is 256-byte aligned and another dimension reach one
+// works
+TEST_P(CopyTests_T2B, TextureMipDimensionReachOne) {
+    constexpr uint32_t mipLevelCount = 4;
+    constexpr uint32_t kWidth = 256 << mipLevelCount;
+    constexpr uint32_t kHeight = 2;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    TextureSpec textureSpec = defaultTextureSpec;
+    textureSpec.levelCount = mipLevelCount;
+
+    for (unsigned int i = 0; i < 4; ++i) {
+        textureSpec.copyLevel = i;
+        DoTest(textureSpec,
+               MinimumBufferSpec(std::max(kWidth >> i, 1u), std::max(kHeight >> i, 1u)),
+               {std::max(kWidth >> i, 1u), std::max(kHeight >> i, 1u), 1});
+    }
+}
+
+// Test that copying mips without 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, TextureMipUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i),
+               {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test that copying with a 512-byte aligned buffer offset works
+TEST_P(CopyTests_T2B, OffsetBufferAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 0; i < 3; ++i) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        uint64_t offset = 512 * i;
+        bufferSpec.size += offset;
+        bufferSpec.offset += offset;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying without a 512-byte aligned buffer offset works
+TEST_P(CopyTests_T2B, OffsetBufferUnaligned) {
+    constexpr uint32_t kWidth = 128;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+    for (uint32_t i = bytesPerTexel; i < 512; i += bytesPerTexel * 9) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        bufferSpec.size += i;
+        bufferSpec.offset += i;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying without a 512-byte aligned buffer offset that is greater than the bytes per row
+// works
+TEST_P(CopyTests_T2B, OffsetBufferUnalignedSmallBytesPerRow) {
+    constexpr uint32_t kWidth = 32;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+    for (uint32_t i = 256 + bytesPerTexel; i < 512; i += bytesPerTexel * 9) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        bufferSpec.size += i;
+        bufferSpec.offset += i;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with a greater bytes per row than needed on a 256-byte aligned texture works
+TEST_P(CopyTests_T2B, BytesPerRowAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+    for (unsigned int i = 1; i < 4; ++i) {
+        bufferSpec.bytesPerRow += 256;
+        bufferSpec.size += 256 * kHeight;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with a greater bytes per row than needed on a texture that is not 256-byte
+// aligned works
+TEST_P(CopyTests_T2B, BytesPerRowUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+    for (unsigned int i = 1; i < 4; ++i) {
+        bufferSpec.bytesPerRow += 256;
+        bufferSpec.size += 256 * kHeight;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with bytesPerRow = 0 and bytesPerRow < bytesInACompleteRow works
+// when we're copying one row only
+TEST_P(CopyTests_T2B, BytesPerRowWithOneRowCopy) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    {
+        BufferSpec bufferSpec = MinimumBufferSpec(5, 1);
+
+        // bytesPerRow undefined
+        bufferSpec.bytesPerRow = wgpu::kCopyStrideUndefined;
+        DoTest(textureSpec, bufferSpec, {5, 1, 1});
+    }
+}
+
+TEST_P(CopyTests_T2B, StrideSpecialCases) {
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {4, 4, 4};
+
+    // bytesPerRow 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{0, 2, 2}, {0, 0, 2}, {0, 2, 0}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 0, 2), copyExtent);
+    }
+
+    // bytesPerRow undefined
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 1, 1}, {2, 0, 1}, {2, 1, 0}, {2, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, wgpu::kCopyStrideUndefined, 2),
+               copyExtent);
+    }
+
+    // rowsPerImage 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 0, 2}, {2, 0, 0}, {0, 0, 2}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 256, 0), copyExtent);
+    }
+
+    // rowsPerImage undefined
+    for (const wgpu::Extent3D copyExtent : {wgpu::Extent3D{2, 2, 1}, {2, 2, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 256, wgpu::kCopyStrideUndefined),
+               copyExtent);
+    }
+}
+
+// Test copying a single slice with rowsPerImage larger than copy height and rowsPerImage will not
+// take effect. If rowsPerImage takes effect, it looks like the copy may go past the end of the
+// buffer.
+TEST_P(CopyTests_T2B, RowsPerImageShouldNotCauseBufferOOBIfDepthOrArrayLayersIsOne) {
+    // Check various offsets to cover each code path in the 2D split code in TextureCopySplitter.
+    for (uint32_t offset : {0, 4, 64}) {
+        constexpr uint32_t kWidth = 250;
+        constexpr uint32_t kHeight = 3;
+
+        TextureSpec textureSpec;
+        textureSpec.textureSize = {kWidth, kHeight, 1};
+
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        bufferSpec.rowsPerImage = 2 * kHeight;
+        bufferSpec.offset = offset;
+        bufferSpec.size += offset;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1}, wgpu::TextureDimension::e3D);
+    }
+}
+
+// Test copying a single row with bytesPerRow larger than copy width and bytesPerRow will not
+// take effect. If bytesPerRow takes effect, it looks like the copy may go past the end of the
+// buffer.
+TEST_P(CopyTests_T2B, BytesPerRowShouldNotCauseBufferOOBIfCopyHeightIsOne) {
+    // Check various offsets to cover each code path in the 2D split code in TextureCopySplitter.
+    for (uint32_t offset : {0, 4, 100}) {
+        constexpr uint32_t kWidth = 250;
+
+        TextureSpec textureSpec;
+        textureSpec.textureSize = {kWidth, 1, 1};
+
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, 1);
+        bufferSpec.bytesPerRow = 1280;  // the default bytesPerRow is 1024.
+        bufferSpec.offset = offset;
+        bufferSpec.size += offset;
+        DoTest(textureSpec, bufferSpec, {kWidth, 1, 1});
+        DoTest(textureSpec, bufferSpec, {kWidth, 1, 1}, wgpu::TextureDimension::e3D);
+    }
+}
+
+// A regression test for a bug on D3D12 backend that causes crash when doing texture-to-texture
+// copy one row with the texture format Depth32Float.
+TEST_P(CopyTests_T2B, CopyOneRowWithDepth32Float) {
+    // TODO(crbug.com/dawn/727): currently this test fails on many D3D12 drivers.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12());
+
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::Depth32Float;
+    constexpr uint32_t kPixelsPerRow = 4u;
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.format = kFormat;
+    textureDescriptor.size = {kPixelsPerRow, 1, 1};
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment;
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Initialize the depth texture with 0.5f.
+    constexpr float kClearDepthValue = 0.5f;
+    utils::ComboRenderPassDescriptor renderPass({}, texture.CreateView());
+    renderPass.UnsetDepthStencilLoadStoreOpsForFormat(kFormat);
+    renderPass.cDepthStencilAttachmentInfo.depthClearValue = kClearDepthValue;
+    renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+    renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPass);
+    renderPassEncoder.End();
+
+    constexpr uint32_t kBufferCopyOffset = kTextureBytesPerRowAlignment;
+    const uint32_t kBufferSize =
+        kBufferCopyOffset + utils::GetTexelBlockSizeInBytes(kFormat) * kPixelsPerRow;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kBufferSize;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(buffer, kBufferCopyOffset, kTextureBytesPerRowAlignment);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = textureDescriptor.size;
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    std::array<float, kPixelsPerRow> expectedValues;
+    std::fill(expectedValues.begin(), expectedValues.end(), kClearDepthValue);
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedValues.data(), buffer, kBufferCopyOffset, kPixelsPerRow);
+}
+
+// Test that copying whole texture 2D array layers in one texture-to-buffer-copy works.
+TEST_P(CopyTests_T2B, Texture2DArrayFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kLayers), {kWidth, kHeight, kLayers});
+}
+
+// Test that copying a range of texture 2D array layers in one texture-to-buffer-copy works.
+TEST_P(CopyTests_T2B, Texture2DArraySubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 3u;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kCopyLayers),
+           {kWidth, kHeight, kCopyLayers});
+}
+
+// Test that copying texture 2D array mips with 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, Texture2DArrayMip) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i, kLayers),
+               {kWidth >> i, kHeight >> i, kLayers});
+    }
+}
+
+// Test that copying from a range of texture 2D array layers in one texture-to-buffer-copy when
+// RowsPerImage is not equal to the height of the texture works.
+TEST_P(CopyTests_T2B, Texture2DArrayRegionNonzeroRowsPerImage) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 3u;
+
+    constexpr uint32_t kRowsPerImage = kHeight * 2;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test a special code path in the D3D12 backends when (BytesPerRow * RowsPerImage) is not a
+// multiple of 512.
+TEST_P(CopyTests_T2B, Texture2DArrayRegionWithOffsetOddRowsPerImage) {
+    constexpr uint32_t kWidth = 64;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 8u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 5u;
+
+    constexpr uint32_t kRowsPerImage = kHeight + 1;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.offset += 128u;
+    bufferSpec.size += 128u;
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test a special code path in the D3D12 backends when (BytesPerRow * RowsPerImage) is a multiple
+// of 512.
+TEST_P(CopyTests_T2B, Texture2DArrayRegionWithOffsetEvenRowsPerImage) {
+    constexpr uint32_t kWidth = 64;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 8u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 4u;
+
+    constexpr uint32_t kRowsPerImage = kHeight + 2;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.offset += 128u;
+    bufferSpec.size += 128u;
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test that copying whole 3D texture in one texture-to-buffer-copy works.
+TEST_P(CopyTests_T2B, Texture3DFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kDepth), {kWidth, kHeight, kDepth},
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying a range of texture 3D depths in one texture-to-buffer-copy works.
+TEST_P(CopyTests_T2B, Texture3DSubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6;
+    constexpr uint32_t kBaseDepth = 2u;
+    constexpr uint32_t kCopyDepth = 3u;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseDepth};
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kCopyDepth),
+           {kWidth / 2, kHeight / 2, kCopyDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_T2B, Texture3DNoSplitRowDataWithEmptyFirstRow) {
+    constexpr uint32_t kWidth = 2;
+    constexpr uint32_t kHeight = 4;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // Base: no split for a row + no empty first row
+    bufferSpec.offset = 60;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: no split for a row + empty first row
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_T2B, Texture3DSplitRowDataWithoutEmptyFirstRow) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The test below is designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + no empty first row for both split regions
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_T2B, Texture3DSplitRowDataWithEmptyFirstRow) {
+    constexpr uint32_t kWidth = 39;
+    constexpr uint32_t kHeight = 4;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + empty first row for the head block
+    bufferSpec.offset = 400;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: split for a row + empty first row for the tail block
+    bufferSpec.offset = 160;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_T2B, Texture3DCopyHeightIsOneCopyWidthIsTiny) {
+    constexpr uint32_t kWidth = 2;
+    constexpr uint32_t kHeight = 1;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // Base: no split for a row, no empty row, and copy height is 1
+    bufferSpec.offset = 60;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: no split for a row + empty first row, and copy height is 1
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_T2B, Texture3DCopyHeightIsOneCopyWidthIsSmall) {
+    constexpr uint32_t kWidth = 39;
+    constexpr uint32_t kHeight = 1;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + empty first row for the head block, and copy height
+    // is 1
+    bufferSpec.offset = 400;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: split for a row + empty first row for the tail block, and copy height
+    // is 1
+    bufferSpec.offset = 160;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+// Test that copying texture 3D array mips with 256-byte aligned sizes works
+TEST_P(CopyTests_T2B, Texture3DMipAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 64u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i, kDepth >> i),
+               {kWidth >> i, kHeight >> i, kDepth >> i}, wgpu::TextureDimension::e3D);
+    }
+}
+
+// Test that copying texture 3D array mips with 256-byte unaligned sizes works
+TEST_P(CopyTests_T2B, Texture3DMipUnaligned) {
+    constexpr uint32_t kWidth = 261;
+    constexpr uint32_t kHeight = 123;
+    constexpr uint32_t kDepth = 69u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i, kDepth >> i),
+               {kWidth >> i, kHeight >> i, kDepth >> i}, wgpu::TextureDimension::e3D);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(CopyTests_T2B,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// Test that copying an entire texture with 256-byte aligned dimensions works
+TEST_P(CopyTests_B2T, FullTextureAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 1});
+}
+
+// Test noop copies.
+TEST_P(CopyTests_B2T, ZeroSizedCopy) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {0, kHeight, 1});
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, 0, 1});
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 0});
+}
+
+// Test that copying an entire texture without 256-byte aligned dimensions works
+TEST_P(CopyTests_B2T, FullTextureUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight), {kWidth, kHeight, 1});
+}
+
+// Test that reading pixels from a 256-byte aligned texture works
+TEST_P(CopyTests_B2T, PixelReadAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    BufferSpec pixelBuffer = MinimumBufferSpec(1, 1);
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, 1};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, 0, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {0, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 3, kHeight / 7, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 7, kHeight / 3, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+}
+
+// Test that copying pixels from a texture that is not 256-byte aligned works
+TEST_P(CopyTests_B2T, PixelReadUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    BufferSpec pixelBuffer = MinimumBufferSpec(1, 1);
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, 1};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, 0, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {0, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth - 1, kHeight - 1, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 3, kHeight / 7, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+
+    {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyOrigin = {kWidth / 7, kHeight / 3, 0};
+        DoTest(textureSpec, pixelBuffer, kCopySize);
+    }
+}
+
+// Test that copying regions with 256-byte aligned sizes works
+TEST_P(CopyTests_B2T, TextureRegionAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    for (unsigned int w : {64, 128, 256}) {
+        for (unsigned int h : {16, 32, 48}) {
+            TextureSpec textureSpec;
+            textureSpec.textureSize = {kWidth, kHeight, 1};
+            DoTest(textureSpec, MinimumBufferSpec(w, h), {w, h, 1});
+        }
+    }
+}
+
+// Test that copying regions without 256-byte aligned sizes works
+TEST_P(CopyTests_B2T, TextureRegionUnaligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int w : {13, 63, 65}) {
+        for (unsigned int h : {17, 19, 63}) {
+            TextureSpec textureSpec = defaultTextureSpec;
+            DoTest(textureSpec, MinimumBufferSpec(w, h), {w, h, 1});
+        }
+    }
+}
+
+// Test that copying mips with 256-byte aligned sizes works
+TEST_P(CopyTests_B2T, TextureMipAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i),
+               {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test that copying mips without 256-byte aligned sizes works
+TEST_P(CopyTests_B2T, TextureMipUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i),
+               {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test that copying with a 512-byte aligned buffer offset works
+TEST_P(CopyTests_B2T, OffsetBufferAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 0; i < 3; ++i) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        uint64_t offset = 512 * i;
+        bufferSpec.size += offset;
+        bufferSpec.offset += offset;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying without a 512-byte aligned buffer offset works
+TEST_P(CopyTests_B2T, OffsetBufferUnaligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+    for (uint32_t i = bytesPerTexel; i < 512; i += bytesPerTexel * 9) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        bufferSpec.size += i;
+        bufferSpec.offset += i;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying without a 512-byte aligned buffer offset that is greater than the bytes per row
+// works
+TEST_P(CopyTests_B2T, OffsetBufferUnalignedSmallBytesPerRow) {
+    constexpr uint32_t kWidth = 32;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(textureSpec.format);
+    for (uint32_t i = 256 + bytesPerTexel; i < 512; i += bytesPerTexel * 9) {
+        BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+        bufferSpec.size += i;
+        bufferSpec.offset += i;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with a greater bytes per row than needed on a 256-byte aligned texture works
+TEST_P(CopyTests_B2T, BytesPerRowAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+    for (unsigned int i = 1; i < 4; ++i) {
+        bufferSpec.bytesPerRow += 256;
+        bufferSpec.size += 256 * kHeight;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with a greater bytes per row than needed on a texture that is not 256-byte
+// aligned works
+TEST_P(CopyTests_B2T, BytesPerRowUnaligned) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight);
+    for (unsigned int i = 1; i < 4; ++i) {
+        bufferSpec.bytesPerRow += 256;
+        bufferSpec.size += 256 * kHeight;
+        DoTest(textureSpec, bufferSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test that copying with bytesPerRow = 0 and bytesPerRow < bytesInACompleteRow works
+// when we're copying one row only
+TEST_P(CopyTests_B2T, BytesPerRowWithOneRowCopy) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+
+    {
+        BufferSpec bufferSpec = MinimumBufferSpec(5, 1);
+
+        // bytesPerRow undefined
+        bufferSpec.bytesPerRow = wgpu::kCopyStrideUndefined;
+        DoTest(textureSpec, bufferSpec, {5, 1, 1});
+    }
+}
+
+TEST_P(CopyTests_B2T, StrideSpecialCases) {
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {4, 4, 4};
+
+    // bytesPerRow 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{0, 2, 2}, {0, 0, 2}, {0, 2, 0}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 0, 2), copyExtent);
+    }
+
+    // bytesPerRow undefined
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 1, 1}, {2, 0, 1}, {2, 1, 0}, {2, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, wgpu::kCopyStrideUndefined, 2),
+               copyExtent);
+    }
+
+    // rowsPerImage 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 0, 2}, {2, 0, 0}, {0, 0, 2}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 256, 0), copyExtent);
+    }
+
+    // rowsPerImage undefined
+    for (const wgpu::Extent3D copyExtent : {wgpu::Extent3D{2, 2, 1}, {2, 2, 0}}) {
+        DoTest(textureSpec, MinimumBufferSpec(copyExtent, 256, wgpu::kCopyStrideUndefined),
+               copyExtent);
+    }
+}
+
+// Test that copying whole texture 2D array layers in one texture-to-buffer-copy works.
+TEST_P(CopyTests_B2T, Texture2DArrayFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kLayers), {kWidth, kHeight, kLayers});
+}
+
+// Test that copying a range of texture 2D array layers in one texture-to-buffer-copy works.
+TEST_P(CopyTests_B2T, Texture2DArraySubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 3u;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kCopyLayers),
+           {kWidth, kHeight, kCopyLayers});
+}
+
+// Test that copying into a range of texture 2D array layers in one texture-to-buffer-copy when
+// RowsPerImage is not equal to the height of the texture works.
+TEST_P(CopyTests_B2T, Texture2DArrayRegionNonzeroRowsPerImage) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 3u;
+
+    constexpr uint32_t kRowsPerImage = kHeight * 2;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test a special code path in the D3D12 backends when (BytesPerRow * RowsPerImage) is not a
+// multiple of 512.
+TEST_P(CopyTests_B2T, Texture2DArrayRegionWithOffsetOddRowsPerImage) {
+    constexpr uint32_t kWidth = 64;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 8u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 5u;
+
+    constexpr uint32_t kRowsPerImage = kHeight + 1;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.offset += 128u;
+    bufferSpec.size += 128u;
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test a special code path in the D3D12 backends when (BytesPerRow * RowsPerImage) is a multiple
+// of 512.
+TEST_P(CopyTests_B2T, Texture2DArrayRegionWithOffsetEvenRowsPerImage) {
+    constexpr uint32_t kWidth = 64;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 8u;
+    constexpr uint32_t kBaseLayer = 2u;
+    constexpr uint32_t kCopyLayers = 5u;
+
+    constexpr uint32_t kRowsPerImage = kHeight + 2;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseLayer};
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kRowsPerImage, kCopyLayers);
+    bufferSpec.offset += 128u;
+    bufferSpec.size += 128u;
+    bufferSpec.rowsPerImage = kRowsPerImage;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kCopyLayers});
+}
+
+// Test that copying whole texture 3D in one buffer-to-texture-copy works.
+TEST_P(CopyTests_B2T, Texture3DFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kDepth), {kWidth, kHeight, kDepth},
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying a range of texture 3D Depths in one texture-to-buffer-copy works.
+TEST_P(CopyTests_B2T, Texture3DSubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6;
+    constexpr uint32_t kBaseDepth = 2u;
+    constexpr uint32_t kCopyDepth = 3u;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, kBaseDepth};
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, MinimumBufferSpec(kWidth, kHeight, kCopyDepth),
+           {kWidth / 2, kHeight / 2, kCopyDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_B2T, Texture3DNoSplitRowDataWithEmptyFirstRow) {
+    constexpr uint32_t kWidth = 2;
+    constexpr uint32_t kHeight = 4;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // Base: no split for a row + no empty first row
+    bufferSpec.offset = 60;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: no split for a row + empty first row
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_B2T, Texture3DSplitRowDataWithoutEmptyFirstRow) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The test below is designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + no empty first row for both split regions
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_B2T, Texture3DSplitRowDataWithEmptyFirstRow) {
+    constexpr uint32_t kWidth = 39;
+    constexpr uint32_t kHeight = 4;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + empty first row for the head block
+    bufferSpec.offset = 400;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: split for a row + empty first row for the tail block
+    bufferSpec.offset = 160;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_B2T, Texture3DCopyHeightIsOneCopyWidthIsTiny) {
+    constexpr uint32_t kWidth = 2;
+    constexpr uint32_t kHeight = 1;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // Base: no split for a row, no empty row, and copy height is 1
+    bufferSpec.offset = 60;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: no split for a row + empty first row, and copy height is 1
+    bufferSpec.offset = 260;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+TEST_P(CopyTests_B2T, Texture3DCopyHeightIsOneCopyWidthIsSmall) {
+    constexpr uint32_t kWidth = 39;
+    constexpr uint32_t kHeight = 1;
+    constexpr uint32_t kDepth = 3;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    BufferSpec bufferSpec = MinimumBufferSpec(kWidth, kHeight, kDepth);
+
+    // The tests below are designed to test TextureCopySplitter for 3D textures on D3D12.
+    // This test will cover: split for a row + empty first row for the head block, and copy height
+    // is 1
+    bufferSpec.offset = 400;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+
+    // This test will cover: split for a row + empty first row for the tail block, and copy height
+    // is 1
+    bufferSpec.offset = 160;
+    bufferSpec.size += bufferSpec.offset;
+    DoTest(textureSpec, bufferSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D);
+}
+
+// Test that copying texture 3D array mips with 256-byte aligned sizes works
+TEST_P(CopyTests_B2T, Texture3DMipAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 64u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i, kDepth >> i),
+               {kWidth >> i, kHeight >> i, kDepth >> i}, wgpu::TextureDimension::e3D);
+    }
+}
+
+// Test that copying texture 3D array mips with 256-byte unaligned sizes works
+TEST_P(CopyTests_B2T, Texture3DMipUnaligned) {
+    constexpr uint32_t kWidth = 261;
+    constexpr uint32_t kHeight = 123;
+    constexpr uint32_t kDepth = 69u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, MinimumBufferSpec(kWidth >> i, kHeight >> i, kDepth >> i),
+               {kWidth >> i, kHeight >> i, kDepth >> i}, wgpu::TextureDimension::e3D);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(CopyTests_B2T,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+TEST_P(CopyTests_T2T, Texture) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, 1});
+}
+
+// Test noop copies.
+TEST_P(CopyTests_T2T, ZeroSizedCopy) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    DoTest(textureSpec, textureSpec, {0, kHeight, 1});
+    DoTest(textureSpec, textureSpec, {kWidth, 0, 1});
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, 0});
+}
+
+TEST_P(CopyTests_T2T, TextureRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int w : {64, 128, 256}) {
+        for (unsigned int h : {16, 32, 48}) {
+            TextureSpec textureSpec = defaultTextureSpec;
+            DoTest(textureSpec, textureSpec, {w, h, 1});
+        }
+    }
+}
+
+TEST_P(CopyTests_T2T, TextureMip) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, textureSpec, {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+TEST_P(CopyTests_T2T, SingleMipSrcMultipleMipDst) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec srcTextureSpec = defaultTextureSpec;
+        srcTextureSpec.textureSize = {kWidth >> i, kHeight >> i, 1};
+
+        TextureSpec dstTextureSpec = defaultTextureSpec;
+        dstTextureSpec.textureSize = {kWidth, kHeight, 1};
+        dstTextureSpec.copyLevel = i;
+        dstTextureSpec.levelCount = i + 1;
+
+        DoTest(srcTextureSpec, dstTextureSpec, {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+TEST_P(CopyTests_T2T, MultipleMipSrcSingleMipDst) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec srcTextureSpec = defaultTextureSpec;
+        srcTextureSpec.textureSize = {kWidth, kHeight, 1};
+        srcTextureSpec.copyLevel = i;
+        srcTextureSpec.levelCount = i + 1;
+
+        TextureSpec dstTextureSpec = defaultTextureSpec;
+        dstTextureSpec.textureSize = {kWidth >> i, kHeight >> i, 1};
+
+        DoTest(srcTextureSpec, dstTextureSpec, {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test that copying from one mip level to another mip level within the same 2D texture works.
+TEST_P(CopyTests_T2T, Texture2DSameTextureDifferentMipLevels) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+    defaultTextureSpec.levelCount = 6;
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec srcSpec = defaultTextureSpec;
+        srcSpec.copyLevel = i - 1;
+        TextureSpec dstSpec = defaultTextureSpec;
+        dstSpec.copyLevel = i;
+
+        DoTest(srcSpec, dstSpec, {kWidth >> i, kHeight >> i, 1}, true);
+    }
+}
+
+// Test copying the whole 2D array texture.
+TEST_P(CopyTests_T2T, Texture2DArrayFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, kLayers});
+}
+
+// Test copying a subresource region of the 2D array texture.
+TEST_P(CopyTests_T2T, Texture2DArrayRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    for (unsigned int w : {64, 128, 256}) {
+        for (unsigned int h : {16, 32, 48}) {
+            TextureSpec textureSpec = defaultTextureSpec;
+            DoTest(textureSpec, textureSpec, {w, h, kLayers});
+        }
+    }
+}
+
+// Test copying one slice of a 2D array texture.
+TEST_P(CopyTests_T2T, Texture2DArrayCopyOneSlice) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kSrcBaseLayer = 1u;
+    constexpr uint32_t kDstBaseLayer = 3u;
+    constexpr uint32_t kCopyArrayLayerCount = 1u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    TextureSpec srcTextureSpec = defaultTextureSpec;
+    srcTextureSpec.copyOrigin = {0, 0, kSrcBaseLayer};
+
+    TextureSpec dstTextureSpec = defaultTextureSpec;
+    dstTextureSpec.copyOrigin = {0, 0, kDstBaseLayer};
+
+    DoTest(srcTextureSpec, dstTextureSpec, {kWidth, kHeight, kCopyArrayLayerCount});
+}
+
+// Test copying multiple contiguous slices of a 2D array texture.
+TEST_P(CopyTests_T2T, Texture2DArrayCopyMultipleSlices) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kSrcBaseLayer = 0u;
+    constexpr uint32_t kDstBaseLayer = 3u;
+    constexpr uint32_t kCopyArrayLayerCount = 3u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    TextureSpec srcTextureSpec = defaultTextureSpec;
+    srcTextureSpec.copyOrigin = {0, 0, kSrcBaseLayer};
+
+    TextureSpec dstTextureSpec = defaultTextureSpec;
+    dstTextureSpec.copyOrigin = {0, 0, kDstBaseLayer};
+
+    DoTest(srcTextureSpec, dstTextureSpec, {kWidth, kHeight, kCopyArrayLayerCount});
+}
+
+// Test copying one texture slice within the same texture.
+TEST_P(CopyTests_T2T, CopyWithinSameTextureOneSlice) {
+    constexpr uint32_t kWidth = 256u;
+    constexpr uint32_t kHeight = 128u;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kSrcBaseLayer = 0u;
+    constexpr uint32_t kDstBaseLayer = 3u;
+    constexpr uint32_t kCopyArrayLayerCount = 1u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    TextureSpec srcTextureSpec = defaultTextureSpec;
+    srcTextureSpec.copyOrigin = {0, 0, kSrcBaseLayer};
+
+    TextureSpec dstTextureSpec = defaultTextureSpec;
+    dstTextureSpec.copyOrigin = {0, 0, kDstBaseLayer};
+
+    DoTest(srcTextureSpec, dstTextureSpec, {kWidth, kHeight, kCopyArrayLayerCount}, true);
+}
+
+// Test copying multiple contiguous texture slices within the same texture with non-overlapped
+// slices.
+TEST_P(CopyTests_T2T, CopyWithinSameTextureNonOverlappedSlices) {
+    constexpr uint32_t kWidth = 256u;
+    constexpr uint32_t kHeight = 128u;
+    constexpr uint32_t kLayers = 6u;
+    constexpr uint32_t kSrcBaseLayer = 0u;
+    constexpr uint32_t kDstBaseLayer = 3u;
+    constexpr uint32_t kCopyArrayLayerCount = 3u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+
+    TextureSpec srcTextureSpec = defaultTextureSpec;
+    srcTextureSpec.copyOrigin = {0, 0, kSrcBaseLayer};
+
+    TextureSpec dstTextureSpec = defaultTextureSpec;
+    dstTextureSpec.copyOrigin = {0, 0, kDstBaseLayer};
+
+    DoTest(srcTextureSpec, dstTextureSpec, {kWidth, kHeight, kCopyArrayLayerCount}, true);
+}
+
+// A regression test (from WebGPU CTS) for an Intel D3D12 driver bug about T2T copy with specific
+// texture formats. See http://crbug.com/1161355 for more details.
+TEST_P(CopyTests_T2T, CopyFromNonZeroMipLevelWithTexelBlockSizeLessThan4Bytes) {
+    // This test can pass on the Windows Intel Vulkan driver version 27.20.100.9168.
+    // TODO(crbug.com/dawn/819): enable this test on Intel Vulkan drivers after the upgrade of
+    // try bots.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsWindows() && IsIntel());
+
+    // This test also fails on D3D12 on Intel Windows. See http://crbug.com/1312066 for details.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWindows() && IsIntel());
+
+    constexpr std::array<wgpu::TextureFormat, 11> kFormats = {
+        {wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::RG8Uint, wgpu::TextureFormat::RG8Snorm,
+         wgpu::TextureFormat::RG8Unorm, wgpu::TextureFormat::R16Float, wgpu::TextureFormat::R16Sint,
+         wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R8Snorm, wgpu::TextureFormat::R8Unorm,
+         wgpu::TextureFormat::R8Sint, wgpu::TextureFormat::R8Uint}};
+
+    constexpr uint32_t kSrcLevelCount = 4;
+    constexpr uint32_t kDstLevelCount = 5;
+    constexpr uint32_t kSrcSize = 2 << kSrcLevelCount;
+    constexpr uint32_t kDstSize = 2 << kDstLevelCount;
+    ASSERT_LE(kSrcSize, kTextureBytesPerRowAlignment);
+    ASSERT_LE(kDstSize, kTextureBytesPerRowAlignment);
+
+    // The copyLayer to test:
+    // 1u (non-array texture), 3u (copyLayer < copyWidth), 5u (copyLayer > copyWidth)
+    constexpr std::array<uint32_t, 3> kTestTextureLayer = {1u, 3u, 5u};
+
+    for (wgpu::TextureFormat format : kFormats) {
+        if (HasToggleEnabled("disable_snorm_read") &&
+            (format == wgpu::TextureFormat::RG8Snorm || format == wgpu::TextureFormat::R8Snorm)) {
+            continue;
+        }
+
+        if (HasToggleEnabled("disable_r8_rg8_mipmaps") &&
+            (format == wgpu::TextureFormat::R8Unorm || format == wgpu::TextureFormat::RG8Unorm)) {
+            continue;
+        }
+
+        for (uint32_t textureLayer : kTestTextureLayer) {
+            const wgpu::Extent3D kUploadSize = {4u, 4u, textureLayer};
+
+            for (uint32_t srcLevel = 0; srcLevel < kSrcLevelCount; ++srcLevel) {
+                for (uint32_t dstLevel = 0; dstLevel < kDstLevelCount; ++dstLevel) {
+                    TextureSpec srcSpec;
+                    srcSpec.levelCount = kSrcLevelCount;
+                    srcSpec.format = format;
+                    srcSpec.copyLevel = srcLevel;
+                    srcSpec.textureSize = {kSrcSize, kSrcSize, textureLayer};
+
+                    TextureSpec dstSpec = srcSpec;
+                    dstSpec.levelCount = kDstLevelCount;
+                    dstSpec.copyLevel = dstLevel;
+                    dstSpec.textureSize = {kDstSize, kDstSize, textureLayer};
+
+                    DoTest(srcSpec, dstSpec, kUploadSize);
+                }
+            }
+        }
+    }
+}
+
+// Test that copying from one mip level to another mip level within the same 2D array texture works.
+TEST_P(CopyTests_T2T, Texture2DArraySameTextureDifferentMipLevels) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kLayers = 8u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kLayers};
+    defaultTextureSpec.levelCount = 6;
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec srcSpec = defaultTextureSpec;
+        srcSpec.copyLevel = i - 1;
+        TextureSpec dstSpec = defaultTextureSpec;
+        dstSpec.copyLevel = i;
+
+        DoTest(srcSpec, dstSpec, {kWidth >> i, kHeight >> i, kLayers}, true);
+    }
+}
+
+// Test that copying whole 3D texture in one texture-to-texture-copy works.
+TEST_P(CopyTests_T2T, Texture3DFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, kDepth}, false, wgpu::TextureDimension::e3D);
+}
+
+// Test that copying from one mip level to another mip level within the same 3D texture works.
+TEST_P(CopyTests_T2T, Texture3DSameTextureDifferentMipLevels) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+    textureSpec.levelCount = 2;
+
+    TextureSpec dstSpec = textureSpec;
+    dstSpec.copyLevel = 1;
+
+    DoTest(textureSpec, dstSpec, {kWidth >> 1, kHeight >> 1, kDepth >> 1}, true,
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying whole 3D texture to a 2D array in one texture-to-texture-copy works.
+TEST_P(CopyTests_T2T, Texture3DTo2DArrayFull) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+}
+
+// Test that copying between 3D texture and 2D array textures works. It includes partial copy
+// for src and/or dst texture, non-zero offset (copy origin), non-zero mip level.
+TEST_P(CopyTests_T2T, Texture3DAnd2DArraySubRegion) {
+    // TODO(crbug.com/dawn/1216): Remove this suppression.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+
+    constexpr uint32_t kWidth = 8;
+    constexpr uint32_t kHeight = 4;
+    constexpr uint32_t kDepth = 2u;
+
+    TextureSpec baseSpec;
+    baseSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    TextureSpec srcSpec = baseSpec;
+    TextureSpec dstSpec = baseSpec;
+
+    // dst texture is a partial copy
+    dstSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+
+    // src texture is a partial copy
+    srcSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    dstSpec = baseSpec;
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+
+    // Both src and dst texture is a partial copy
+    srcSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    dstSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+
+    // Non-zero offset (copy origin)
+    srcSpec = baseSpec;
+    dstSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    dstSpec.copyOrigin = {kWidth, kHeight, kDepth};
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+
+    // Non-zero mip level
+    srcSpec = baseSpec;
+    dstSpec.textureSize = {kWidth * 2, kHeight * 2, kDepth * 2};
+    dstSpec.copyOrigin = {0, 0, 0};
+    dstSpec.copyLevel = 1;
+    dstSpec.levelCount = 2;
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e3D,
+           wgpu::TextureDimension::e2D);
+    DoTest(srcSpec, dstSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying whole 2D array to a 3D texture in one texture-to-texture-copy works.
+TEST_P(CopyTests_T2T, Texture2DArrayTo3DFull) {
+    // TODO(crbug.com/dawn/1216): Remove this suppression.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, kDepth}, wgpu::TextureDimension::e2D,
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying subregion of a 3D texture in one texture-to-texture-copy works.
+TEST_P(CopyTests_T2T, Texture3DSubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth / 2, kHeight / 2, kDepth / 2}, false,
+           wgpu::TextureDimension::e3D);
+}
+
+// Test that copying subregion of a 3D texture to a 2D array in one texture-to-texture-copy works.
+TEST_P(CopyTests_T2T, Texture3DTo2DArraySubRegion) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth / 2, kHeight / 2, kDepth / 2},
+           wgpu::TextureDimension::e3D, wgpu::TextureDimension::e2D);
+}
+
+// Test that copying subregion of a 2D array to a 3D texture to in one texture-to-texture-copy
+// works.
+TEST_P(CopyTests_T2T, Texture2DArrayTo3DSubRegion) {
+    // TODO(crbug.com/dawn/1216): Remove this suppression.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 6u;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    DoTest(textureSpec, textureSpec, {kWidth / 2, kHeight / 2, kDepth / 2},
+           wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D);
+}
+
+// Test that copying texture 3D array mips in one texture-to-texture-copy works
+TEST_P(CopyTests_T2T, Texture3DMipAligned) {
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+    constexpr uint32_t kDepth = 64u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, textureSpec, {kWidth >> i, kHeight >> i, kDepth >> i},
+               wgpu::TextureDimension::e3D, wgpu::TextureDimension::e3D);
+    }
+}
+
+// Test that copying texture 3D array mips in one texture-to-texture-copy works
+TEST_P(CopyTests_T2T, Texture3DMipUnaligned) {
+    constexpr uint32_t kWidth = 261;
+    constexpr uint32_t kHeight = 123;
+    constexpr uint32_t kDepth = 69u;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = {kWidth, kHeight, kDepth};
+
+    for (unsigned int i = 1; i < 6; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.copyLevel = i;
+        textureSpec.levelCount = i + 1;
+
+        DoTest(textureSpec, textureSpec, {kWidth >> i, kHeight >> i, kDepth >> i},
+               wgpu::TextureDimension::e3D, wgpu::TextureDimension::e3D);
+    }
+}
+
+DAWN_INSTANTIATE_TEST_P(CopyTests_T2T,
+                        {D3D12Backend(),
+                         D3D12Backend({"use_temp_buffer_in_small_format_texture_to_texture_copy_"
+                                       "from_greater_to_less_mip_level"}),
+                         MetalBackend(), OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+                        {true, false});
+
+// Test copying between textures that have srgb compatible texture formats;
+TEST_P(CopyTests_Formats, SrgbCompatibility) {
+    // Skip backends because which fails to support *-srgb formats
+    // and bgra* formats.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    constexpr uint32_t kWidth = 256;
+    constexpr uint32_t kHeight = 128;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    DoTest(textureSpec, textureSpec, {kWidth, kHeight, 1});
+}
+
+DAWN_INSTANTIATE_TEST_P(CopyTests_Formats,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        {wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureFormat::RGBA8UnormSrgb,
+                         wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb});
+
+static constexpr uint64_t kSmallBufferSize = 4;
+static constexpr uint64_t kLargeBufferSize = 1 << 16;
+
+// Test copying full buffers
+TEST_P(CopyTests_B2B, FullCopy) {
+    DoTest(kSmallBufferSize, 0, kSmallBufferSize, 0, kSmallBufferSize);
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize, 0, kLargeBufferSize);
+}
+
+// Test copying small pieces of a buffer at different corner case offsets
+TEST_P(CopyTests_B2B, SmallCopyInBigBuffer) {
+    constexpr uint64_t kEndOffset = kLargeBufferSize - kSmallBufferSize;
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize, 0, kSmallBufferSize);
+    DoTest(kLargeBufferSize, kEndOffset, kLargeBufferSize, 0, kSmallBufferSize);
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize, kEndOffset, kSmallBufferSize);
+    DoTest(kLargeBufferSize, kEndOffset, kLargeBufferSize, kEndOffset, kSmallBufferSize);
+}
+
+// Test zero-size copies
+TEST_P(CopyTests_B2B, ZeroSizedCopy) {
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize, 0, 0);
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize, kLargeBufferSize, 0);
+    DoTest(kLargeBufferSize, kLargeBufferSize, kLargeBufferSize, 0, 0);
+    DoTest(kLargeBufferSize, kLargeBufferSize, kLargeBufferSize, kLargeBufferSize, 0);
+}
+
+DAWN_INSTANTIATE_TEST(CopyTests_B2B,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// Test clearing full buffers
+TEST_P(ClearBufferTests, FullClear) {
+    DoTest(kSmallBufferSize, 0, kSmallBufferSize);
+    DoTest(kLargeBufferSize, 0, kLargeBufferSize);
+}
+
+// Test clearing small pieces of a buffer at different corner case offsets
+TEST_P(ClearBufferTests, SmallClearInBigBuffer) {
+    constexpr uint64_t kEndOffset = kLargeBufferSize - kSmallBufferSize;
+    DoTest(kLargeBufferSize, 0, kSmallBufferSize);
+    DoTest(kLargeBufferSize, kSmallBufferSize, kSmallBufferSize);
+    DoTest(kLargeBufferSize, kEndOffset, kSmallBufferSize);
+}
+
+// Test zero-size clears
+TEST_P(ClearBufferTests, ZeroSizedClear) {
+    DoTest(kLargeBufferSize, 0, 0);
+    DoTest(kLargeBufferSize, kSmallBufferSize, 0);
+    DoTest(kLargeBufferSize, kLargeBufferSize, 0);
+}
+
+DAWN_INSTANTIATE_TEST(ClearBufferTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
new file mode 100644
index 0000000..207eba3
--- /dev/null
+++ b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
@@ -0,0 +1,1166 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Set default texture size to single line texture for color conversion tests.
+    static constexpr uint64_t kDefaultTextureWidth = 10;
+    static constexpr uint64_t kDefaultTextureHeight = 1;
+
+    enum class ColorSpace : uint32_t {
+        SRGB = 0x00,
+        DisplayP3 = 0x01,
+    };
+
+    using SrcFormat = wgpu::TextureFormat;
+    using DstFormat = wgpu::TextureFormat;
+    using SrcOrigin = wgpu::Origin3D;
+    using DstOrigin = wgpu::Origin3D;
+    using CopySize = wgpu::Extent3D;
+    using FlipY = bool;
+    using SrcColorSpace = ColorSpace;
+    using DstColorSpace = ColorSpace;
+    using SrcAlphaMode = wgpu::AlphaMode;
+    using DstAlphaMode = wgpu::AlphaMode;
+
+    std::ostream& operator<<(std::ostream& o, wgpu::Origin3D origin) {
+        o << origin.x << ", " << origin.y << ", " << origin.z;
+        return o;
+    }
+
+    std::ostream& operator<<(std::ostream& o, wgpu::Extent3D copySize) {
+        o << copySize.width << ", " << copySize.height << ", " << copySize.depthOrArrayLayers;
+        return o;
+    }
+
+    std::ostream& operator<<(std::ostream& o, ColorSpace space) {
+        o << static_cast<uint32_t>(space);
+        return o;
+    }
+
+    DAWN_TEST_PARAM_STRUCT(AlphaTestParams, SrcAlphaMode, DstAlphaMode);
+    DAWN_TEST_PARAM_STRUCT(FormatTestParams, SrcFormat, DstFormat);
+    DAWN_TEST_PARAM_STRUCT(SubRectTestParams, SrcOrigin, DstOrigin, CopySize, FlipY);
+    DAWN_TEST_PARAM_STRUCT(ColorSpaceTestParams,
+                           DstFormat,
+                           SrcColorSpace,
+                           DstColorSpace,
+                           SrcAlphaMode,
+                           DstAlphaMode);
+
+    // Color Space table
+    struct ColorSpaceInfo {
+        ColorSpace index;
+        std::array<float, 9> toXYZD50;    // 3x3 row major transform matrix
+        std::array<float, 9> fromXYZD50;  // inverse transform matrix of toXYZD50, precomputed
+        std::array<float, 7> gammaDecodingParams;  // Follow { A, B, G, E, epsilon, C, F } order
+        std::array<float, 7> gammaEncodingParams;  // inverse op of decoding, precomputed
+        bool isNonLinear;
+        bool isExtended;  // For extended color space.
+    };
+    static constexpr size_t kSupportedColorSpaceCount = 2;
+    static constexpr std::array<ColorSpaceInfo, kSupportedColorSpaceCount> ColorSpaceTable = {{
+        // sRGB,
+        // Got primary attributes from https://drafts.csswg.org/css-color/#predefined-sRGB
+        // Use matrices from
+        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html#WSMatrices
+        // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+        // mathematics.
+        {
+            //
+            ColorSpace::SRGB,
+            {{
+                //
+                0.4360747, 0.3850649, 0.1430804,  //
+                0.2225045, 0.7168786, 0.0606169,  //
+                0.0139322, 0.0971045, 0.7141733   //
+            }},
+
+            {{
+                //
+                3.1338561, -1.6168667, -0.4906146,  //
+                -0.9787684, 1.9161415, 0.0334540,   //
+                0.0719453, -0.2289914, 1.4052427    //
+            }},
+
+            // {G, A, B, C, D, E, F, }
+            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+            true,
+            true  //
+        },
+
+        // Display P3, got primary attributes from
+        // https://www.w3.org/TR/css-color-4/#valdef-color-display-p3
+        // Use equations found in
+        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html,
+        // Use Bradford method to do D65 to D50 transform.
+        // Get matrices with help of http://www.russellcottrell.com/photo/matrixCalculator.htm
+        // Gamma-linear conversion params is the same as Srgb.
+        {
+            //
+            ColorSpace::DisplayP3,
+            {{
+                //
+                0.5151114, 0.2919612, 0.1571274,  //
+                0.2411865, 0.6922440, 0.0665695,  //
+                -0.0010491, 0.0418832, 0.7842659  //
+            }},
+
+            {{
+                //
+                2.4039872, -0.9898498, -0.3976181,  //
+                -0.8422138, 1.7988188, 0.0160511,   //
+                0.0481937, -0.0973889, 1.2736887    //
+            }},
+
+            // {G, A, B, C, D, E, F, }
+            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+            true,
+            false  //
+        }
+        //
+    }};
+}  // anonymous namespace
+
+template <typename Parent>
+class CopyTextureForBrowserTests : public Parent {
+  protected:
+    struct TextureSpec {
+        wgpu::Origin3D copyOrigin = {};
+        wgpu::Extent3D textureSize = {kDefaultTextureWidth, kDefaultTextureHeight};
+        uint32_t level = 0;
+        wgpu::TextureFormat format = kTextureFormat;
+    };
+
+    enum class TextureCopyRole {
+        SOURCE,
+        DEST,
+    };
+
+    // Source texture contains red pixels and dst texture contains green pixels at start.
+    static std::vector<RGBA8> GetTextureData(
+        const utils::TextureDataCopyLayout& layout,
+        TextureCopyRole textureRole,
+        wgpu::AlphaMode srcAlphaMode = wgpu::AlphaMode::Premultiplied,
+        wgpu::AlphaMode dstAlphaMode = wgpu::AlphaMode::Unpremultiplied) {
+        std::array<uint8_t, 4> alpha = {0, 102, 153, 255};  // 0.0, 0.4, 0.6, 1.0
+        std::vector<RGBA8> textureData(layout.texelBlockCount);
+        for (uint32_t layer = 0; layer < layout.mipSize.depthOrArrayLayers; ++layer) {
+            const uint32_t sliceOffset = layout.texelBlocksPerImage * layer;
+            for (uint32_t y = 0; y < layout.mipSize.height; ++y) {
+                const uint32_t rowOffset = layout.texelBlocksPerRow * y;
+                for (uint32_t x = 0; x < layout.mipSize.width; ++x) {
+                    // Source textures will have variable pixel data to cover cases like
+                    // flipY.
+                    if (textureRole == TextureCopyRole::SOURCE) {
+                        if (srcAlphaMode != dstAlphaMode) {
+                            if (dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
+                                // For premultiply alpha test cases, we expect each channel in dst
+                                // texture will equal to the alpha channel value.
+                                ASSERT(srcAlphaMode == wgpu::AlphaMode::Unpremultiplied);
+                                textureData[sliceOffset + rowOffset + x] = RGBA8(
+                                    static_cast<uint8_t>(255), static_cast<uint8_t>(255),
+                                    static_cast<uint8_t>(255), static_cast<uint8_t>(alpha[x % 4]));
+                            } else {
+                                // For unpremultiply alpha test cases, we expect each channel in dst
+                                // texture will equal to 1.0.
+                                ASSERT(srcAlphaMode == wgpu::AlphaMode::Premultiplied);
+                                textureData[sliceOffset + rowOffset + x] =
+                                    RGBA8(static_cast<uint8_t>(alpha[x % 4]),
+                                          static_cast<uint8_t>(alpha[x % 4]),
+                                          static_cast<uint8_t>(alpha[x % 4]),
+                                          static_cast<uint8_t>(alpha[x % 4]));
+                            }
+
+                        } else {
+                            textureData[sliceOffset + rowOffset + x] =
+                                RGBA8(static_cast<uint8_t>((x + layer * x) % 256),
+                                      static_cast<uint8_t>((y + layer * y) % 256),
+                                      static_cast<uint8_t>(x % 256), static_cast<uint8_t>(x % 256));
+                        }
+                    } else {  // Dst textures will have be init as `green` to ensure subrect
+                              // copy not cross bound.
+                        textureData[sliceOffset + rowOffset + x] =
+                            RGBA8(static_cast<uint8_t>(0), static_cast<uint8_t>(255),
+                                  static_cast<uint8_t>(0), static_cast<uint8_t>(255));
+                    }
+                }
+            }
+        }
+
+        return textureData;
+    }
+
+    void SetUp() override {
+        Parent::SetUp();
+        pipeline = MakeTestPipeline();
+
+        uint32_t uniformBufferData[] = {
+            0,  // copy have flipY option
+            4,  // channelCount
+            0,
+            0,  // uvec2, subrect copy src origin
+            0,
+            0,  // uvec2, subrect copy dst origin
+            0,
+            0,  // uvec2, subrect copy size
+            0,  // srcAlphaMode, wgpu::AlphaMode::Premultiplied
+            0   // dstAlphaMode, wgpu::AlphaMode::Premultiplied
+        };
+
+        wgpu::BufferDescriptor uniformBufferDesc = {};
+        uniformBufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+        uniformBufferDesc.size = sizeof(uniformBufferData);
+        uniformBuffer = this->device.CreateBuffer(&uniformBufferDesc);
+    }
+
+    // Do the bit-by-bit comparison between the source and destination texture with GPU (compute
+    // shader) instead of CPU after executing CopyTextureForBrowser() to avoid the errors caused by
+    // comparing a value generated on CPU to the one generated on GPU.
+    wgpu::ComputePipeline MakeTestPipeline() {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(this->device, R"(
+            struct Uniforms {
+                dstTextureFlipY : u32,
+                channelCount    : u32,
+                srcCopyOrigin   : vec2<u32>,
+                dstCopyOrigin   : vec2<u32>,
+                copySize        : vec2<u32>,
+                srcAlphaMode    : u32,
+                dstAlphaMode    : u32,
+            }
+            struct OutputBuf {
+                result : array<u32>
+            }
+            @group(0) @binding(0) var src : texture_2d<f32>;
+            @group(0) @binding(1) var dst : texture_2d<f32>;
+            @group(0) @binding(2) var<storage, read_write> output : OutputBuf;
+            @group(0) @binding(3) var<uniform> uniforms : Uniforms;
+            fn aboutEqual(value : f32, expect : f32) -> bool {
+                // The value diff should be smaller than the hard coded tolerance.
+                return abs(value - expect) < 0.01;
+            }
+            @stage(compute) @workgroup_size(1, 1, 1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+                let srcSize = textureDimensions(src);
+                let dstSize = textureDimensions(dst);
+                let dstTexCoord = vec2<u32>(GlobalInvocationID.xy);
+                let nonCoveredColor = vec4<f32>(0.0, 1.0, 0.0, 1.0); // should be green
+
+                var success : bool = true;
+                if (dstTexCoord.x < uniforms.dstCopyOrigin.x ||
+                    dstTexCoord.y < uniforms.dstCopyOrigin.y ||
+                    dstTexCoord.x >= uniforms.dstCopyOrigin.x + uniforms.copySize.x ||
+                    dstTexCoord.y >= uniforms.dstCopyOrigin.y + uniforms.copySize.y) {
+                    success = success &&
+                              all(textureLoad(dst, vec2<i32>(dstTexCoord), 0) == nonCoveredColor);
+                } else {
+                    // Calculate source texture coord.
+                    var srcTexCoord = dstTexCoord - uniforms.dstCopyOrigin +
+                                                  uniforms.srcCopyOrigin;
+                    // Note that |flipY| equals flip src texture firstly and then do copy from src
+                    // subrect to dst subrect. This helps on blink part to handle some input texture
+                    // which is flipped and need to unpack flip during the copy.
+                    // We need to calculate the expect y coord based on this rule.
+                    if (uniforms.dstTextureFlipY == 1u) {
+                        srcTexCoord.y = u32(srcSize.y) - srcTexCoord.y - 1u;
+                    }
+
+                    var srcColor = textureLoad(src, vec2<i32>(srcTexCoord), 0);
+                    var dstColor = textureLoad(dst, vec2<i32>(dstTexCoord), 0);
+
+                    // Expect the dst texture channels should be all equal to alpha value
+                    // after premultiply.
+                    let premultiplied = 0u;
+                    let unpremultiplied = 1u;
+                    if (uniforms.srcAlphaMode != uniforms.dstAlphaMode) {
+                        if (uniforms.dstAlphaMode == premultiplied) {
+                            // srcAlphaMode == unpremultiplied
+                            srcColor = vec4<f32>(srcColor.rgb * srcColor.a, srcColor.a);
+                        }
+
+                        if (uniforms.dstAlphaMode == unpremultiplied) {
+                            // srcAlphaMode == premultiplied
+                            if (srcColor.a != 0.0) {
+                                srcColor = vec4<f32>(srcColor.rgb / srcColor.a, srcColor.a);
+                            }
+                        }
+                    }
+
+                    // Not use loop and variable index format to workaround
+                    // crbug.com/tint/638.
+                    switch(uniforms.channelCount) {
+                        case 1u: {
+                            success = success && aboutEqual(dstColor.r, srcColor.r);
+                            break;
+                        }
+                        case 2u: {
+                            success = success &&
+                                      aboutEqual(dstColor.r, srcColor.r) &&
+                                      aboutEqual(dstColor.g, srcColor.g);
+                            break;
+                        }
+                        case 4u: {
+                            success = success &&
+                                      aboutEqual(dstColor.r, srcColor.r) &&
+                                      aboutEqual(dstColor.g, srcColor.g) &&
+                                      aboutEqual(dstColor.b, srcColor.b) &&
+                                      aboutEqual(dstColor.a, srcColor.a);
+                            break;
+                        }
+                        default: {
+                            break;
+                        }
+                    }
+                }
+                let outputIndex = GlobalInvocationID.y * u32(dstSize.x) + GlobalInvocationID.x;
+                if (success) {
+                    output.result[outputIndex] = 1u;
+                } else {
+                    output.result[outputIndex] = 0u;
+                }
+            }
+         )");
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = csModule;
+        csDesc.compute.entryPoint = "main";
+
+        return this->device.CreateComputePipeline(&csDesc);
+    }
+    static uint32_t GetTextureFormatComponentCount(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+                return 4;
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RG32Float:
+                return 2;
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::R32Float:
+                return 1;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    wgpu::Texture CreateTexture(const TextureSpec& spec, wgpu::TextureUsage usage) {
+        // Create and initialize src texture.
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = spec.textureSize;
+        descriptor.format = spec.format;
+        descriptor.mipLevelCount = spec.level + 1;
+        descriptor.usage = usage;
+        wgpu::Texture texture = this->device.CreateTexture(&descriptor);
+        return texture;
+    }
+
+    wgpu::Texture CreateAndInitTexture(const TextureSpec& spec,
+                                       wgpu::TextureUsage usage,
+                                       utils::TextureDataCopyLayout copyLayout,
+                                       void const* init,
+                                       uint32_t initBytes) {
+        wgpu::Texture texture = CreateTexture(spec, usage);
+
+        wgpu::ImageCopyTexture imageTextureInit =
+            utils::CreateImageCopyTexture(texture, spec.level, {0, 0});
+
+        wgpu::TextureDataLayout textureDataLayout;
+        textureDataLayout.offset = 0;
+        textureDataLayout.bytesPerRow = copyLayout.bytesPerRow;
+        textureDataLayout.rowsPerImage = copyLayout.rowsPerImage;
+
+        this->device.GetQueue().WriteTexture(&imageTextureInit, init, initBytes, &textureDataLayout,
+                                             &copyLayout.mipSize);
+        return texture;
+    }
+
+    void RunCopyExternalImageToTexture(const TextureSpec& srcSpec,
+                                       wgpu::Texture srcTexture,
+                                       const TextureSpec& dstSpec,
+                                       wgpu::Texture dstTexture,
+                                       const wgpu::Extent3D& copySize,
+                                       const wgpu::CopyTextureForBrowserOptions options) {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, srcSpec.level, srcSpec.copyOrigin);
+        wgpu::ImageCopyTexture dstImageCopyTexture =
+            utils::CreateImageCopyTexture(dstTexture, dstSpec.level, dstSpec.copyOrigin);
+        this->device.GetQueue().CopyTextureForBrowser(&srcImageCopyTexture, &dstImageCopyTexture,
+                                                      &copySize, &options);
+    }
+
+    void CheckResultInBuiltInComputePipeline(const TextureSpec& srcSpec,
+                                             wgpu::Texture srcTexture,
+                                             const TextureSpec& dstSpec,
+                                             wgpu::Texture dstTexture,
+                                             const wgpu::Extent3D& copySize,
+                                             const wgpu::CopyTextureForBrowserOptions options) {
+        // Update uniform buffer based on test config
+        uint32_t uniformBufferData[] = {
+            options.flipY,                                   // copy have flipY option
+            GetTextureFormatComponentCount(dstSpec.format),  // channelCount
+            srcSpec.copyOrigin.x,
+            srcSpec.copyOrigin.y,  // src texture copy origin
+            dstSpec.copyOrigin.x,
+            dstSpec.copyOrigin.y,  // dst texture copy origin
+            copySize.width,
+            copySize.height,  // copy size
+            static_cast<uint32_t>(options.srcAlphaMode),
+            static_cast<uint32_t>(options.dstAlphaMode)};
+
+        this->device.GetQueue().WriteBuffer(uniformBuffer, 0, uniformBufferData,
+                                            sizeof(uniformBufferData));
+
+        // Create output buffer to store result
+        wgpu::BufferDescriptor outputDesc;
+        outputDesc.size = dstSpec.textureSize.width * dstSpec.textureSize.height * sizeof(uint32_t);
+        outputDesc.usage =
+            wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer outputBuffer = this->device.CreateBuffer(&outputDesc);
+
+        // Create texture views for test.
+        wgpu::TextureViewDescriptor srcTextureViewDesc = {};
+        srcTextureViewDesc.baseMipLevel = srcSpec.level;
+        wgpu::TextureView srcTextureView = srcTexture.CreateView(&srcTextureViewDesc);
+
+        wgpu::TextureViewDescriptor dstTextureViewDesc = {};
+        dstTextureViewDesc.baseMipLevel = dstSpec.level;
+        wgpu::TextureView dstTextureView = dstTexture.CreateView(&dstTextureViewDesc);
+
+        // Create bind group based on the config.
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            this->device, pipeline.GetBindGroupLayout(0),
+            {{0, srcTextureView}, {1, dstTextureView}, {2, outputBuffer}, {3, uniformBuffer}});
+
+        // Start a pipeline to check pixel value in bit form.
+        wgpu::CommandEncoder testEncoder = this->device.CreateCommandEncoder();
+
+        wgpu::CommandBuffer testCommands;
+        {
+            wgpu::CommandEncoder encoder = this->device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Dispatch(dstSpec.textureSize.width,
+                          dstSpec.textureSize.height);  // Verify dst texture content
+            pass.End();
+
+            testCommands = encoder.Finish();
+        }
+        this->device.GetQueue().Submit(1, &testCommands);
+
+        std::vector<uint32_t> expectResult(dstSpec.textureSize.width * dstSpec.textureSize.height,
+                                           1);
+        EXPECT_BUFFER_U32_RANGE_EQ(expectResult.data(), outputBuffer, 0,
+                                   dstSpec.textureSize.width * dstSpec.textureSize.height);
+    }
+
+    void DoTest(const TextureSpec& srcSpec,
+                const TextureSpec& dstSpec,
+                const wgpu::Extent3D& copySize = {kDefaultTextureWidth, kDefaultTextureHeight},
+                const wgpu::CopyTextureForBrowserOptions options = {}) {
+        // Create and initialize src texture.
+        const utils::TextureDataCopyLayout srcCopyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                kTextureFormat,
+                {srcSpec.textureSize.width, srcSpec.textureSize.height,
+                 copySize.depthOrArrayLayers},
+                srcSpec.level);
+
+        std::vector<RGBA8> srcTextureArrayCopyData = GetTextureData(
+            srcCopyLayout, TextureCopyRole::SOURCE, options.srcAlphaMode, options.dstAlphaMode);
+
+        wgpu::TextureUsage srcUsage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                      wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture srcTexture =
+            CreateAndInitTexture(srcSpec, srcUsage, srcCopyLayout, srcTextureArrayCopyData.data(),
+                                 srcTextureArrayCopyData.size() * sizeof(RGBA8));
+
+        bool testSubRectCopy = srcSpec.copyOrigin.x > 0 || srcSpec.copyOrigin.y > 0 ||
+                               dstSpec.copyOrigin.x > 0 || dstSpec.copyOrigin.y > 0 ||
+                               srcSpec.textureSize.width > copySize.width ||
+                               srcSpec.textureSize.height > copySize.height ||
+                               dstSpec.textureSize.width > copySize.width ||
+                               dstSpec.textureSize.height > copySize.height;
+
+        // Create and init dst texture.
+        wgpu::Texture dstTexture;
+        wgpu::TextureUsage dstUsage =
+            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+        if (testSubRectCopy) {
+            // For subrect copy tests, dst texture use kTextureFormat always.
+            const utils::TextureDataCopyLayout dstCopyLayout =
+                utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                    kTextureFormat,
+                    {dstSpec.textureSize.width, dstSpec.textureSize.height,
+                     copySize.depthOrArrayLayers},
+                    dstSpec.level);
+
+            const std::vector<RGBA8> dstTextureArrayCopyData =
+                GetTextureData(dstCopyLayout, TextureCopyRole::DEST);
+            dstTexture = CreateAndInitTexture(dstSpec, dstUsage, dstCopyLayout,
+                                              dstTextureArrayCopyData.data(),
+                                              dstTextureArrayCopyData.size() * sizeof(RGBA8));
+        } else {
+            dstTexture = CreateTexture(dstSpec, dstUsage);
+        }
+
+        // Perform the texture to texture copy
+        RunCopyExternalImageToTexture(srcSpec, srcTexture, dstSpec, dstTexture, copySize, options);
+
+        // Check Result
+        CheckResultInBuiltInComputePipeline(srcSpec, srcTexture, dstSpec, dstTexture, copySize,
+                                            options);
+    }
+
+    wgpu::Buffer uniformBuffer;
+    wgpu::ComputePipeline pipeline;
+};
+
+class CopyTextureForBrowser_Basic : public CopyTextureForBrowserTests<DawnTest> {
+  protected:
+    void DoBasicCopyTest(const wgpu::Extent3D& copySize,
+                         const wgpu::CopyTextureForBrowserOptions options = {}) {
+        TextureSpec textureSpec;
+        textureSpec.textureSize = copySize;
+
+        DoTest(textureSpec, textureSpec, copySize, options);
+    }
+};
+
+class CopyTextureForBrowser_Formats
+    : public CopyTextureForBrowserTests<DawnTestWithParams<FormatTestParams>> {
+  protected:
+    bool IsDstFormatSrgbFormats() {
+        return GetParam().mDstFormat == wgpu::TextureFormat::RGBA8UnormSrgb ||
+               GetParam().mDstFormat == wgpu::TextureFormat::BGRA8UnormSrgb;
+    }
+
+    wgpu::TextureFormat GetNonSrgbFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return wgpu::TextureFormat::RGBA8Unorm;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return wgpu::TextureFormat::BGRA8Unorm;
+            default:
+                return format;
+        }
+    }
+
+    void DoColorConversionTest() {
+        TextureSpec srcTextureSpec;
+        srcTextureSpec.format = GetParam().mSrcFormat;
+
+        TextureSpec dstTextureSpec;
+        dstTextureSpec.format = GetParam().mDstFormat;
+
+        wgpu::Extent3D copySize = {kDefaultTextureWidth, kDefaultTextureHeight};
+        wgpu::CopyTextureForBrowserOptions options = {};
+
+        // Create and init source texture.
+        // This fixed source texture data is for color conversion tests.
+        // The source data can fill a texture in default width and height.
+        std::vector<RGBA8> srcTextureArrayCopyData{
+            // Take RGBA8Unorm as example:
+            // R channel has different values
+            RGBA8(0, 255, 255, 255),    // r = 0.0
+            RGBA8(102, 255, 255, 255),  // r = 0.4
+            RGBA8(153, 255, 255, 255),  // r = 0.6
+
+            // G channel has different values
+            RGBA8(255, 0, 255, 255),    // g = 0.0
+            RGBA8(255, 102, 255, 255),  // g = 0.4
+            RGBA8(255, 153, 255, 255),  // g = 0.6
+
+            // B channel has different values
+            RGBA8(255, 255, 0, 255),    // b = 0.0
+            RGBA8(255, 255, 102, 255),  // b = 0.4
+            RGBA8(255, 255, 153, 255),  // b = 0.6
+
+            // A channel set to 0
+            RGBA8(255, 255, 255, 0)  // a = 0
+        };
+
+        const utils::TextureDataCopyLayout srcCopyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                kTextureFormat,
+                {srcTextureSpec.textureSize.width, srcTextureSpec.textureSize.height,
+                 copySize.depthOrArrayLayers},
+                srcTextureSpec.level);
+
+        wgpu::TextureUsage srcUsage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                      wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture srcTexture = CreateAndInitTexture(
+            srcTextureSpec, srcUsage, srcCopyLayout, srcTextureArrayCopyData.data(),
+            srcTextureArrayCopyData.size() * sizeof(RGBA8));
+
+        // Create dst texture.
+        wgpu::Texture dstTexture = CreateTexture(
+            dstTextureSpec, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+        // Perform the texture to texture copy
+        RunCopyExternalImageToTexture(srcTextureSpec, srcTexture, dstTextureSpec, dstTexture,
+                                      copySize, options);
+
+        wgpu::Texture result;
+        TextureSpec resultSpec = dstTextureSpec;
+
+        // To construct the expected value for the case that dst texture is srgb format,
+        // we need to ensure it is byte level equal to the comparable non-srgb format texture.
+        // We schedule an copy from srgb texture to non-srgb texture which keeps the bytes
+        // same and bypass the sampler to do gamma correction when comparing the expected values
+        // in compute shader.
+        if (IsDstFormatSrgbFormats()) {
+            resultSpec.format = GetNonSrgbFormat(dstTextureSpec.format);
+            wgpu::Texture intermediateTexture = CreateTexture(
+                resultSpec, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            // Perform the texture to texture copy
+            wgpu::ImageCopyTexture dstImageCopyTexture =
+                utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+            wgpu::ImageCopyTexture intermediateImageCopyTexture =
+                utils::CreateImageCopyTexture(intermediateTexture, 0, {0, 0, 0});
+
+            encoder.CopyTextureToTexture(&dstImageCopyTexture, &intermediateImageCopyTexture,
+                                         &(dstTextureSpec.textureSize));
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+
+            result = intermediateTexture;
+        } else {
+            result = dstTexture;
+        }
+
+        // Check Result
+        CheckResultInBuiltInComputePipeline(srcTextureSpec, srcTexture, resultSpec, result,
+                                            copySize, options);
+    }
+};
+
+class CopyTextureForBrowser_SubRects
+    : public CopyTextureForBrowserTests<DawnTestWithParams<SubRectTestParams>> {
+  protected:
+    void DoCopySubRectTest() {
+        TextureSpec srcTextureSpec;
+        srcTextureSpec.copyOrigin = GetParam().mSrcOrigin;
+        srcTextureSpec.textureSize = {6, 7};
+
+        TextureSpec dstTextureSpec;
+        dstTextureSpec.copyOrigin = GetParam().mDstOrigin;
+        dstTextureSpec.textureSize = {8, 5};
+        wgpu::CopyTextureForBrowserOptions options = {};
+        options.flipY = GetParam().mFlipY;
+
+        wgpu::Extent3D copySize = GetParam().mCopySize;
+
+        DoTest(srcTextureSpec, dstTextureSpec, copySize, options);
+    }
+};
+
+class CopyTextureForBrowser_AlphaMode
+    : public CopyTextureForBrowserTests<DawnTestWithParams<AlphaTestParams>> {
+  protected:
+    void DoAlphaModeTest() {
+        constexpr uint32_t kWidth = 10;
+        constexpr uint32_t kHeight = 10;
+
+        TextureSpec textureSpec;
+        textureSpec.textureSize = {kWidth, kHeight};
+
+        wgpu::CopyTextureForBrowserOptions options = {};
+        options.srcAlphaMode = GetParam().mSrcAlphaMode;
+        options.dstAlphaMode = GetParam().mDstAlphaMode;
+
+        DoTest(textureSpec, textureSpec, {kWidth, kHeight}, options);
+    }
+};
+
+class CopyTextureForBrowser_ColorSpace
+    : public CopyTextureForBrowserTests<DawnTestWithParams<ColorSpaceTestParams>> {
+  protected:
+    const ColorSpaceInfo& GetColorSpaceInfo(ColorSpace colorSpace) {
+        uint32_t index = static_cast<uint32_t>(colorSpace);
+        ASSERT(index < ColorSpaceTable.size());
+        ASSERT(ColorSpaceTable[index].index == colorSpace);
+        return ColorSpaceTable[index];
+    }
+
+    std::array<float, 9> GetConversionMatrix(ColorSpace src, ColorSpace dst) {
+        const ColorSpaceInfo& srcColorSpace = GetColorSpaceInfo(src);
+        const ColorSpaceInfo& dstColorSpace = GetColorSpaceInfo(dst);
+
+        const std::array<float, 9> toXYZD50 = srcColorSpace.toXYZD50;
+        const std::array<float, 9> fromXYZD50 = dstColorSpace.fromXYZD50;
+
+        // Fuse the transform matrix. The color space transformation equation is:
+        // Pixels = fromXYZD50 * toXYZD50 * Pixels.
+        // Calculate fromXYZD50 * toXYZD50 to simplify
+        // Add a padding in each row for Mat3x3 in wgsl uniform(mat3x3, Align(16), Size(48)).
+        std::array<float, 9> fuseMatrix = {};
+
+        // Mat3x3 * Mat3x3
+        for (uint32_t row = 0; row < 3; ++row) {
+            for (uint32_t col = 0; col < 3; ++col) {
+                // Transpose the matrix from row major to column major for wgsl.
+                fuseMatrix[col * 3 + row] = fromXYZD50[row * 3 + 0] * toXYZD50[col] +
+                                            fromXYZD50[row * 3 + 1] * toXYZD50[3 + col] +
+                                            fromXYZD50[row * 3 + 2] * toXYZD50[3 * 2 + col];
+            }
+        }
+
+        return fuseMatrix;
+    }
+
+    // TODO(crbug.com/dawn/1140): Generate source data automatically.
+    std::vector<RGBA8> GetSourceData(wgpu::AlphaMode srcTextureAlphaMode) {
+        if (srcTextureAlphaMode == wgpu::AlphaMode::Premultiplied) {
+            return std::vector<RGBA8>{
+                RGBA8(0, 102, 102, 102),  // a = 0.4
+                RGBA8(102, 0, 0, 102),    // a = 0.4
+                RGBA8(153, 0, 0, 153),    // a = 0.6
+                RGBA8(255, 0, 0, 255),    // a = 1.0
+
+                RGBA8(153, 0, 153, 153),  // a = 0.6
+                RGBA8(0, 102, 0, 102),    // a = 0.4
+                RGBA8(0, 153, 0, 153),    // a = 0.6
+                RGBA8(0, 255, 0, 255),    // a = 1.0
+
+                RGBA8(255, 255, 0, 255),  // a = 1.0
+                RGBA8(0, 0, 102, 102),    // a = 0.4
+                RGBA8(0, 0, 153, 153),    // a = 0.6
+                RGBA8(0, 0, 255, 255),    // a = 1.0
+            };
+        }
+
+        return std::vector<RGBA8>{
+            // Take RGBA8Unorm as example:
+            // R channel has different values
+            RGBA8(0, 255, 255, 255),  // r = 0.0
+            RGBA8(102, 0, 0, 255),    // r = 0.4
+            RGBA8(153, 0, 0, 255),    // r = 0.6
+            RGBA8(255, 0, 0, 255),    // r = 1.0
+
+            // G channel has different values
+            RGBA8(255, 0, 255, 255),  // g = 0.0
+            RGBA8(0, 102, 0, 255),    // g = 0.4
+            RGBA8(0, 153, 0, 255),    // g = 0.6
+            RGBA8(0, 255, 0, 255),    // g = 1.0
+
+            // B channel has different values
+            RGBA8(255, 255, 0, 255),  // b = 0.0
+            RGBA8(0, 0, 102, 255),    // b = 0.4
+            RGBA8(0, 0, 153, 255),    // b = 0.6
+            RGBA8(0, 0, 255, 255),    // b = 1.0
+        };
+    }
+
+    // TODO(crbug.com/dawn/1140): Current expected values are from ColorSync utils
+    // tool on Mac. Should implement CPU or compute shader algorithm to do color
+    // conversion and use the result as expected data.
+    std::vector<float> GetExpectedData(ColorSpace srcColorSpace,
+                                       ColorSpace dstColorSpace,
+                                       wgpu::AlphaMode srcTextureAlphaMode,
+                                       wgpu::AlphaMode dstTextureAlphaMode) {
+        if (srcTextureAlphaMode == wgpu::AlphaMode::Premultiplied) {
+            return GetExpectedDataForPremultipliedSource(srcColorSpace, dstColorSpace,
+                                                         dstTextureAlphaMode);
+        }
+
+        return GetExpectedDataForSeperateSource(srcColorSpace, dstColorSpace);
+    }
+
+    std::vector<float> GeneratePremultipliedResult(std::vector<float> result) {
+        // Four channels per pixel
+        for (uint32_t i = 0; i < result.size(); i += 4) {
+            result[i] *= result[i + 3];
+            result[i + 1] *= result[i + 3];
+            result[i + 2] *= result[i + 3];
+        }
+
+        return result;
+    }
+
+    std::vector<float> GetExpectedDataForPremultipliedSource(ColorSpace srcColorSpace,
+                                                             ColorSpace dstColorSpace,
+                                                             wgpu::AlphaMode dstTextureAlphaMode) {
+        if (srcColorSpace == dstColorSpace) {
+            std::vector<float> expected = {
+                0.0, 1.0, 1.0, 0.4,  //
+                1.0, 0.0, 0.0, 0.4,  //
+                1.0, 0.0, 0.0, 0.6,  //
+                1.0, 0.0, 0.0, 1.0,  //
+
+                1.0, 0.0, 1.0, 0.6,  //
+                0.0, 1.0, 0.0, 0.4,  //
+                0.0, 1.0, 0.0, 0.6,  //
+                0.0, 1.0, 0.0, 1.0,  //
+
+                1.0, 1.0, 0.0, 1.0,  //
+                0.0, 0.0, 1.0, 0.4,  //
+                0.0, 0.0, 1.0, 0.6,  //
+                0.0, 0.0, 1.0, 1.0,  //
+            };
+
+            return dstTextureAlphaMode == wgpu::AlphaMode::Premultiplied
+                       ? GeneratePremultipliedResult(expected)
+                       : expected;
+        }
+
+        switch (srcColorSpace) {
+            case ColorSpace::DisplayP3: {
+                switch (dstColorSpace) {
+                    case ColorSpace::SRGB: {
+                        std::vector<float> expected = {
+                            -0.5118, 1.0183,  1.0085,  0.4,  //
+                            1.093,   -0.2267, -0.1501, 0.4,  //
+                            1.093,   -0.2267, -0.1501, 0.6,  //
+                            1.093,   -0.2267, -0.1501, 1.0,  //
+
+                            1.093,   -0.2266, 1.0337,  0.6,  //
+                            -0.5118, 1.0183,  -0.3107, 0.4,  //
+                            -0.5118, 1.0183,  -0.3107, 0.6,  //
+                            -0.5118, 1.0183,  -0.3107, 1.0,  //
+
+                            0.9999,  1.0001,  -0.3462, 1.0,  //
+                            0.0002,  0.0004,  1.0419,  0.4,  //
+                            0.0002,  0.0004,  1.0419,  0.6,  //
+                            0.0002,  0.0004,  1.0419,  1.0,  //
+                        };
+
+                        return dstTextureAlphaMode == wgpu::AlphaMode::Premultiplied
+                                   ? GeneratePremultipliedResult(expected)
+                                   : expected;
+                    }
+                    default:
+                        UNREACHABLE();
+                }
+            }
+            default:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    std::vector<float> GetExpectedDataForSeperateSource(ColorSpace srcColorSpace,
+                                                        ColorSpace dstColorSpace) {
+        if (srcColorSpace == dstColorSpace) {
+            return std::vector<float>{
+                0.0, 1.0, 1.0, 1.0,  //
+                0.4, 0.0, 0.0, 1.0,  //
+                0.6, 0.0, 0.0, 1.0,  //
+                1.0, 0.0, 0.0, 1.0,  //
+
+                1.0, 0.0, 1.0, 1.0,  //
+                0.0, 0.4, 0.0, 1.0,  //
+                0.0, 0.6, 0.0, 1.0,  //
+                0.0, 1.0, 0.0, 1.0,  //
+
+                1.0, 1.0, 0.0, 1.0,  //
+                0.0, 0.0, 0.4, 1.0,  //
+                0.0, 0.0, 0.6, 1.0,  //
+                0.0, 0.0, 1.0, 1.0,  //
+            };
+        }
+
+        switch (srcColorSpace) {
+            case ColorSpace::DisplayP3: {
+                switch (dstColorSpace) {
+                    case ColorSpace::SRGB: {
+                        return std::vector<float>{
+                            -0.5118, 1.0183,  1.0085,  1.0,  //
+                            0.4401,  -0.0665, -0.0337, 1.0,  //
+                            0.6578,  -0.1199, -0.0723, 1.0,  //
+                            1.093,   -0.2267, -0.1501, 1.0,  //
+
+                            1.093,   -0.2266, 1.0337,  1.0,  //
+                            -0.1894, 0.4079,  -0.1027, 1.0,  //
+                            -0.2969, 0.6114,  -0.1720, 1.0,  //
+                            -0.5118, 1.0183,  -0.3107, 1.0,  //
+
+                            0.9999,  1.0001,  -0.3462, 1.0,  //
+                            0.0000,  0.0001,  0.4181,  1.0,  //
+                            0.0001,  0.0001,  0.6260,  1.0,  //
+                            0.0002,  0.0004,  1.0419,  1.0,  //
+                        };
+                    }
+                    default:
+                        UNREACHABLE();
+                }
+            }
+            default:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    void DoColorSpaceConversionTest() {
+        constexpr uint32_t kWidth = 12;
+        constexpr uint32_t kHeight = 1;
+
+        TextureSpec srcTextureSpec;
+        srcTextureSpec.textureSize = {kWidth, kHeight};
+
+        TextureSpec dstTextureSpec;
+        dstTextureSpec.textureSize = {kWidth, kHeight};
+        dstTextureSpec.format = GetParam().mDstFormat;
+
+        ColorSpace srcColorSpace = GetParam().mSrcColorSpace;
+        ColorSpace dstColorSpace = GetParam().mDstColorSpace;
+
+        ColorSpaceInfo srcColorSpaceInfo = GetColorSpaceInfo(srcColorSpace);
+        ColorSpaceInfo dstColorSpaceInfo = GetColorSpaceInfo(dstColorSpace);
+
+        std::array<float, 9> matrix = GetConversionMatrix(srcColorSpace, dstColorSpace);
+
+        wgpu::CopyTextureForBrowserOptions options = {};
+        options.needsColorSpaceConversion = srcColorSpace != dstColorSpace;
+        options.srcAlphaMode = GetParam().mSrcAlphaMode;
+        options.srcTransferFunctionParameters = srcColorSpaceInfo.gammaDecodingParams.data();
+        options.conversionMatrix = matrix.data();
+        options.dstTransferFunctionParameters = dstColorSpaceInfo.gammaEncodingParams.data();
+        options.dstAlphaMode = GetParam().mDstAlphaMode;
+
+        std::vector<RGBA8> sourceTextureData = GetSourceData(options.srcAlphaMode);
+        const wgpu::Extent3D& copySize = {kWidth, kHeight};
+
+        const utils::TextureDataCopyLayout srcCopyLayout =
+            utils::GetTextureDataCopyLayoutForTextureAtLevel(
+                kTextureFormat,
+                {srcTextureSpec.textureSize.width, srcTextureSpec.textureSize.height},
+                srcTextureSpec.level);
+
+        wgpu::TextureUsage srcUsage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                      wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture srcTexture = this->CreateAndInitTexture(
+            srcTextureSpec, srcUsage, srcCopyLayout, sourceTextureData.data(),
+            sourceTextureData.size() * sizeof(RGBA8));
+
+        // Create dst texture.
+        wgpu::Texture dstTexture = this->CreateTexture(
+            dstTextureSpec, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+        // Perform the texture to texture copy
+        this->RunCopyExternalImageToTexture(srcTextureSpec, srcTexture, dstTextureSpec, dstTexture,
+                                            copySize, options);
+
+        std::vector<float> expectedData = GetExpectedData(
+            srcColorSpace, dstColorSpace, options.srcAlphaMode, options.dstAlphaMode);
+
+        // The value provided by Apple's ColorSync Utility.
+        float tolerance = 0.001;
+        if (dstTextureSpec.format == wgpu::TextureFormat::RGBA16Float) {
+            EXPECT_TEXTURE_FLOAT16_EQ(expectedData.data(), dstTexture, {0, 0}, {kWidth, kHeight},
+                                      dstTextureSpec.format, tolerance);
+        } else {
+            EXPECT_TEXTURE_EQ(expectedData.data(), dstTexture, {0, 0}, {kWidth, kHeight},
+                              dstTextureSpec.format, tolerance);
+        }
+    }
+};
+
+// Verify CopyTextureForBrowserTests works with internal pipeline.
+// The case do copy without any transform.
+TEST_P(CopyTextureForBrowser_Basic, PassthroughCopy) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    DoBasicCopyTest({10, 1});
+}
+
+TEST_P(CopyTextureForBrowser_Basic, VerifyCopyOnXDirection) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    DoBasicCopyTest({1000, 1});
+}
+
+TEST_P(CopyTextureForBrowser_Basic, VerifyCopyOnYDirection) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    DoBasicCopyTest({1, 1000});
+}
+
+TEST_P(CopyTextureForBrowser_Basic, VerifyCopyFromLargeTexture) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    // TODO(crbug.com/dawn/1070): Flaky VK_DEVICE_LOST
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    DoBasicCopyTest({899, 999});
+}
+
+TEST_P(CopyTextureForBrowser_Basic, VerifyFlipY) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    wgpu::CopyTextureForBrowserOptions options = {};
+    options.flipY = true;
+
+    DoBasicCopyTest({901, 1001}, options);
+}
+
+TEST_P(CopyTextureForBrowser_Basic, VerifyFlipYInSlimTexture) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    wgpu::CopyTextureForBrowserOptions options = {};
+    options.flipY = true;
+
+    DoBasicCopyTest({1, 1001}, options);
+}
+
+DAWN_INSTANTIATE_TEST(CopyTextureForBrowser_Basic,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// Verify |CopyTextureForBrowser| doing color conversion correctly when
+// the source texture is RGBA8Unorm format.
+TEST_P(CopyTextureForBrowser_Formats, ColorConversion) {
+    // Skip OpenGLES backend because it fails on using RGBA8Unorm as
+    // source texture format.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    // Skip OpenGL backend on linux because it fails on using *-srgb format as
+    // dst texture format
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux() && IsDstFormatSrgbFormats());
+
+    DoColorConversionTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(
+    CopyTextureForBrowser_Formats,
+    {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+    std::vector<wgpu::TextureFormat>({wgpu::TextureFormat::RGBA8Unorm,
+                                      wgpu::TextureFormat::BGRA8Unorm}),
+    std::vector<wgpu::TextureFormat>(
+        {wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R16Float, wgpu::TextureFormat::R32Float,
+         wgpu::TextureFormat::RG8Unorm, wgpu::TextureFormat::RG16Float,
+         wgpu::TextureFormat::RG32Float, wgpu::TextureFormat::RGBA8Unorm,
+         wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::BGRA8Unorm,
+         wgpu::TextureFormat::BGRA8UnormSrgb, wgpu::TextureFormat::RGB10A2Unorm,
+         wgpu::TextureFormat::RGBA16Float, wgpu::TextureFormat::RGBA32Float}));
+
+// Verify |CopyTextureForBrowser| doing subrect copy.
+// Source texture is a full red texture and dst texture is a full
+// green texture originally. After the subrect copy, affected part
+// in dst texture should be red and other part should remain green.
+TEST_P(CopyTextureForBrowser_SubRects, CopySubRect) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    // Tests skip due to crbug.com/dawn/592.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    // Tests skip due to crbug.com/dawn/1104.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    DoCopySubRectTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(CopyTextureForBrowser_SubRects,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::Origin3D>({{1, 1}, {1, 2}, {2, 1}}),
+                        std::vector<wgpu::Origin3D>({{1, 1}, {1, 2}, {2, 1}}),
+                        std::vector<wgpu::Extent3D>({{1, 1}, {2, 1}, {1, 2}, {2, 2}}),
+                        std::vector<bool>({true, false}));
+
+// Verify |CopyTextureForBrowser| doing alpha changes.
+// Test srcAlphaMode and dstAlphaMode: Premultiplied, Unpremultiplied.
+TEST_P(CopyTextureForBrowser_AlphaMode, alphaMode) {
+    // Skip OpenGLES backend because it fails on using RGBA8Unorm as
+    // source texture format.
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    // Tests skip due to crbug.com/dawn/1104.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    DoAlphaModeTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(CopyTextureForBrowser_AlphaMode,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::AlphaMode>({wgpu::AlphaMode::Premultiplied,
+                                                      wgpu::AlphaMode::Unpremultiplied}),
+                        std::vector<wgpu::AlphaMode>({wgpu::AlphaMode::Premultiplied,
+                                                      wgpu::AlphaMode::Unpremultiplied}));
+
+// Verify |CopyTextureForBrowser| doing color space conversion.
+TEST_P(CopyTextureForBrowser_ColorSpace, colorSpaceConversion) {
+    // TODO(crbug.com/dawn/1232): Program link error on OpenGLES backend
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() && IsLinux());
+
+    // Tests skip due to crbug.com/dawn/1104.
+    DAWN_SUPPRESS_TEST_IF(IsWARP());
+
+    DoColorSpaceConversionTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(CopyTextureForBrowser_ColorSpace,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>({wgpu::TextureFormat::RGBA16Float,
+                                                          wgpu::TextureFormat::RGBA32Float}),
+                        std::vector<ColorSpace>({ColorSpace::SRGB, ColorSpace::DisplayP3}),
+                        std::vector<ColorSpace>({ColorSpace::SRGB}),
+                        std::vector<wgpu::AlphaMode>({wgpu::AlphaMode::Premultiplied,
+                                                      wgpu::AlphaMode::Unpremultiplied}),
+                        std::vector<wgpu::AlphaMode>({wgpu::AlphaMode::Premultiplied,
+                                                      wgpu::AlphaMode::Unpremultiplied}));
diff --git a/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
new file mode 100644
index 0000000..4c6d742
--- /dev/null
+++ b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
@@ -0,0 +1,965 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    struct CreatePipelineAsyncTask {
+        wgpu::ComputePipeline computePipeline = nullptr;
+        wgpu::RenderPipeline renderPipeline = nullptr;
+        bool isCompleted = false;
+        std::string message;
+    };
+}  // anonymous namespace
+
+class CreatePipelineAsyncTest : public DawnTest {
+  protected:
+    void ValidateCreateComputePipelineAsync(CreatePipelineAsyncTask* currentTask) {
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(uint32_t);
+        bufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer ssbo = device.CreateBuffer(&bufferDesc);
+
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+            while (!currentTask->isCompleted) {
+                WaitABit();
+            }
+            ASSERT_TRUE(currentTask->message.empty());
+            ASSERT_NE(nullptr, currentTask->computePipeline.Get());
+            wgpu::BindGroup bindGroup =
+                utils::MakeBindGroup(device, currentTask->computePipeline.GetBindGroupLayout(0),
+                                     {
+                                         {0, ssbo, 0, sizeof(uint32_t)},
+                                     });
+            pass.SetBindGroup(0, bindGroup);
+            pass.SetPipeline(currentTask->computePipeline);
+
+            pass.Dispatch(1);
+            pass.End();
+
+            commands = encoder.Finish();
+        }
+
+        queue.Submit(1, &commands);
+
+        constexpr uint32_t kExpected = 1u;
+        EXPECT_BUFFER_U32_EQ(kExpected, ssbo, 0);
+    }
+
+    void ValidateCreateComputePipelineAsync() {
+        ValidateCreateComputePipelineAsync(&task);
+    }
+
+    void ValidateCreateRenderPipelineAsync(CreatePipelineAsyncTask* currentTask) {
+        constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.size = {1, 1, 1};
+        textureDescriptor.format = kRenderAttachmentFormat;
+        textureDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture outputTexture = device.CreateTexture(&textureDescriptor);
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({outputTexture.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        renderPassDescriptor.cColorAttachments[0].clearValue = {1.f, 0.f, 0.f, 1.f};
+
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(&renderPassDescriptor);
+
+            while (!currentTask->isCompleted) {
+                WaitABit();
+            }
+            ASSERT_TRUE(currentTask->message.empty());
+            ASSERT_NE(nullptr, currentTask->renderPipeline.Get());
+
+            renderPassEncoder.SetPipeline(currentTask->renderPipeline);
+            renderPassEncoder.Draw(1);
+            renderPassEncoder.End();
+            commands = encoder.Finish();
+        }
+
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), outputTexture, 0, 0);
+    }
+
+    void ValidateCreateRenderPipelineAsync() {
+        ValidateCreateRenderPipelineAsync(&task);
+    }
+
+    void DoCreateRenderPipelineAsync(
+        const utils::ComboRenderPipelineDescriptor& renderPipelineDescriptor) {
+        device.CreateRenderPipelineAsync(
+            &renderPipelineDescriptor,
+            [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+               const char* message, void* userdata) {
+                EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success,
+                          status);
+
+                CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+                task->renderPipeline = wgpu::RenderPipeline::Acquire(returnPipeline);
+                task->isCompleted = true;
+                task->message = message;
+            },
+            &task);
+    }
+
+    CreatePipelineAsyncTask task;
+};
+
+// Verify the basic use of CreateComputePipelineAsync works on all backends.
+TEST_P(CreatePipelineAsyncTest, BasicUseOfCreateComputePipelineAsync) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : u32
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            ssbo.value = 1u;
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    device.CreateComputePipelineAsync(
+        &csDesc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success, status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+
+    ValidateCreateComputePipelineAsync();
+}
+
+// This is a regression test for a bug on the member "entryPoint" of FlatComputePipelineDescriptor.
+TEST_P(CreatePipelineAsyncTest, ReleaseEntryPointAfterCreatComputePipelineAsync) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : u32
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            ssbo.value = 1u;
+        })");
+
+    std::string entryPoint = "main";
+
+    csDesc.compute.entryPoint = entryPoint.c_str();
+
+    device.CreateComputePipelineAsync(
+        &csDesc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success, status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+
+    entryPoint = "";
+    ValidateCreateComputePipelineAsync();
+}
+
+// Verify CreateComputePipelineAsync() works as expected when there is any error that happens during
+// the creation of the compute pipeline. The SPEC requires that during the call of
+// CreateComputePipelineAsync() any error won't be forwarded to the error scope / unhandled error
+// callback.
+TEST_P(CreatePipelineAsyncTest, CreateComputePipelineFailed) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : u32
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            ssbo.value = 1u;
+        })");
+    csDesc.compute.entryPoint = "main0";
+
+    device.CreateComputePipelineAsync(
+        &csDesc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Error, status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+
+    while (!task.isCompleted) {
+        WaitABit();
+    }
+
+    ASSERT_FALSE(task.message.empty());
+    ASSERT_EQ(nullptr, task.computePipeline.Get());
+}
+
+// Verify the basic use of CreateRenderPipelineAsync() works on all backends.
+TEST_P(CreatePipelineAsyncTest, BasicUseOfCreateRenderPipelineAsync) {
+    constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = kRenderAttachmentFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+
+    ValidateCreateRenderPipelineAsync();
+}
+
+// Verify the render pipeline created with CreateRenderPipelineAsync() still works when the entry
+// points are released after the creation of the render pipeline.
+TEST_P(CreatePipelineAsyncTest, ReleaseEntryPointsAfterCreateRenderPipelineAsync) {
+    constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = kRenderAttachmentFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    std::string vertexEntryPoint = "main";
+    std::string fragmentEntryPoint = "main";
+    renderPipelineDescriptor.vertex.entryPoint = vertexEntryPoint.c_str();
+    renderPipelineDescriptor.cFragment.entryPoint = fragmentEntryPoint.c_str();
+
+    DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+
+    vertexEntryPoint = "";
+    fragmentEntryPoint = "";
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.format = kRenderAttachmentFormat;
+    textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture outputTexture = device.CreateTexture(&textureDescriptor);
+
+    utils::ComboRenderPassDescriptor renderPassDescriptor({outputTexture.CreateView()});
+    renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPassDescriptor.cColorAttachments[0].clearValue = {1.f, 0.f, 0.f, 1.f};
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+
+        while (!task.isCompleted) {
+            WaitABit();
+        }
+        ASSERT_TRUE(task.message.empty());
+        ASSERT_NE(nullptr, task.renderPipeline.Get());
+
+        renderPassEncoder.SetPipeline(task.renderPipeline);
+        renderPassEncoder.Draw(1);
+        renderPassEncoder.End();
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), outputTexture, 0, 0);
+}
+
+// Verify CreateRenderPipelineAsync() works as expected when there is any error that happens during
+// the creation of the render pipeline. The SPEC requires that during the call of
+// CreateRenderPipelineAsync() any error won't be forwarded to the error scope / unhandled error
+// callback.
+TEST_P(CreatePipelineAsyncTest, CreateRenderPipelineFailed) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::Depth32Float;
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = kRenderAttachmentFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    device.CreateRenderPipelineAsync(
+        &renderPipelineDescriptor,
+        [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Error, status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->renderPipeline = wgpu::RenderPipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+
+    while (!task.isCompleted) {
+        WaitABit();
+    }
+
+    ASSERT_FALSE(task.message.empty());
+    ASSERT_EQ(nullptr, task.computePipeline.Get());
+}
+
+// Verify there is no error when the device is released before the callback of
+// CreateComputePipelineAsync() is called.
+TEST_P(CreatePipelineAsyncTest, ReleaseDeviceBeforeCallbackOfCreateComputePipelineAsync) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    device.CreateComputePipelineAsync(
+        &csDesc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
+                      status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+}
+
+// Verify there is no error when the device is released before the callback of
+// CreateRenderPipelineAsync() is called.
+TEST_P(CreatePipelineAsyncTest, ReleaseDeviceBeforeCallbackOfCreateRenderPipelineAsync) {
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    device.CreateRenderPipelineAsync(
+        &renderPipelineDescriptor,
+        [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
+                      status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->renderPipeline = wgpu::RenderPipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+}
+
+// Verify there is no error when the device is destroyed before the callback of
+// CreateComputePipelineAsync() is called.
+TEST_P(CreatePipelineAsyncTest, DestroyDeviceBeforeCallbackOfCreateComputePipelineAsync) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    device.CreateComputePipelineAsync(
+        &csDesc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
+                      status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+    ExpectDeviceDestruction();
+    device.Destroy();
+}
+
+// Verify there is no error when the device is destroyed before the callback of
+// CreateRenderPipelineAsync() is called.
+TEST_P(CreatePipelineAsyncTest, DestroyDeviceBeforeCallbackOfCreateRenderPipelineAsync) {
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    device.CreateRenderPipelineAsync(
+        &renderPipelineDescriptor,
+        [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
+                      status);
+
+            CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+            task->renderPipeline = wgpu::RenderPipeline::Acquire(returnPipeline);
+            task->isCompleted = true;
+            task->message = message;
+        },
+        &task);
+    ExpectDeviceDestruction();
+    device.Destroy();
+}
+
+// Verify the code path of CreateComputePipelineAsync() to directly return the compute pipeline
+// object from cache works correctly.
+TEST_P(CreatePipelineAsyncTest, CreateSameComputePipelineTwice) {
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : u32
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            ssbo.value = 1u;
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+                       const char* message, void* userdata) {
+        EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success, status);
+
+        CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+        task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+        task->isCompleted = true;
+        task->message = message;
+    };
+
+    // Create a pipeline object and save it into anotherTask.computePipeline.
+    CreatePipelineAsyncTask anotherTask;
+    device.CreateComputePipelineAsync(&csDesc, callback, &anotherTask);
+    while (!anotherTask.isCompleted) {
+        WaitABit();
+    }
+    ASSERT_TRUE(anotherTask.message.empty());
+    ASSERT_NE(nullptr, anotherTask.computePipeline.Get());
+
+    // Create another pipeline object task.comnputepipeline with the same compute pipeline
+    // descriptor used in the creation of anotherTask.computePipeline. This time the pipeline
+    // object should be directly got from the pipeline object cache.
+    device.CreateComputePipelineAsync(&csDesc, callback, &task);
+    ValidateCreateComputePipelineAsync();
+}
+
+// Verify creating compute pipeline with same descriptor and CreateComputePipelineAsync() at the
+// same time works correctly.
+TEST_P(CreatePipelineAsyncTest, CreateSameComputePipelineTwiceAtSameTime) {
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Storage;
+    binding.visibility = wgpu::ShaderStage::Compute;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&desc);
+
+    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+    pipelineLayoutDesc.bindGroupLayoutCount = 1;
+    pipelineLayoutDesc.bindGroupLayouts = &bindGroupLayout;
+
+    wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.layout = pipelineLayout;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        struct SSBO {
+            value : u32
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            ssbo.value = 1u;
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+                       const char* message, void* userdata) {
+        EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success, status);
+
+        CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+        task->computePipeline = wgpu::ComputePipeline::Acquire(returnPipeline);
+        task->isCompleted = true;
+        task->message = message;
+    };
+
+    // Create two pipeline objects with same descriptor.
+    CreatePipelineAsyncTask anotherTask;
+    device.CreateComputePipelineAsync(&csDesc, callback, &task);
+    device.CreateComputePipelineAsync(&csDesc, callback, &anotherTask);
+
+    // Verify both task.computePipeline and anotherTask.computePipeline are created correctly.
+    ValidateCreateComputePipelineAsync(&anotherTask);
+    ValidateCreateComputePipelineAsync(&task);
+
+    // Verify task.computePipeline and anotherTask.computePipeline are pointing to the same Dawn
+    // object.
+    if (!UsesWire()) {
+        EXPECT_EQ(task.computePipeline.Get(), anotherTask.computePipeline.Get());
+    }
+}
+
+// Verify the basic use of CreateRenderPipelineAsync() works on all backends.
+TEST_P(CreatePipelineAsyncTest, CreateSameRenderPipelineTwiceAtSameTime) {
+    constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    renderPipelineDescriptor.vertex.module = vsModule;
+    renderPipelineDescriptor.cFragment.module = fsModule;
+    renderPipelineDescriptor.cTargets[0].format = kRenderAttachmentFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+                       const char* message, void* userdata) {
+        EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success, status);
+
+        CreatePipelineAsyncTask* task = static_cast<CreatePipelineAsyncTask*>(userdata);
+        task->renderPipeline = wgpu::RenderPipeline::Acquire(returnPipeline);
+        task->isCompleted = true;
+        task->message = message;
+    };
+
+    // Create two render pipelines with same descriptor.
+    CreatePipelineAsyncTask anotherTask;
+    device.CreateRenderPipelineAsync(&renderPipelineDescriptor, callback, &task);
+    device.CreateRenderPipelineAsync(&renderPipelineDescriptor, callback, &anotherTask);
+
+    // Verify task.renderPipeline and anotherTask.renderPipeline are both created correctly.
+    ValidateCreateRenderPipelineAsync(&task);
+    ValidateCreateRenderPipelineAsync(&anotherTask);
+
+    // Verify task.renderPipeline and anotherTask.renderPipeline are pointing to the same Dawn
+    // object.
+    if (!UsesWire()) {
+        EXPECT_EQ(task.renderPipeline.Get(), anotherTask.renderPipeline.Get());
+    }
+}
+
+// Verify calling CreateRenderPipelineAsync() with valid VertexBufferLayouts works on all backends.
+TEST_P(CreatePipelineAsyncTest, CreateRenderPipelineAsyncWithVertexBufferLayouts) {
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture renderTarget = device.CreateTexture(&textureDescriptor);
+    wgpu::TextureView renderTargetView = renderTarget.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderTargetView});
+    {
+        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        struct VertexInput {
+            @location(0) input0: u32,
+            @location(1) input1: u32,
+        }
+
+        struct VertexOutput {
+            @location(0) vertexColorOut: vec4<f32>,
+            @builtin(position) position: vec4<f32>,
+        }
+
+        @stage(vertex)
+        fn main(vertexInput : VertexInput) -> VertexOutput {
+            var vertexOutput : VertexOutput;
+            vertexOutput.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            if (vertexInput.input0 == 1u && vertexInput.input1 == 2u) {
+                vertexOutput.vertexColorOut = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            } else {
+                vertexOutput.vertexColorOut = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+            return vertexOutput;
+        })");
+        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main(@location(0) fragColorIn : vec4<f32>) -> @location(0) vec4<f32> {
+            return fragColorIn;
+        })");
+
+        renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        renderPipelineDescriptor.cFragment.targetCount = 1;
+        renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        // Create a render pipeline with two VertexBufferLayouts
+        renderPipelineDescriptor.vertex.buffers = renderPipelineDescriptor.cBuffers.data();
+        renderPipelineDescriptor.vertex.bufferCount = 2;
+        renderPipelineDescriptor.cBuffers[0].attributeCount = 1;
+        renderPipelineDescriptor.cBuffers[0].attributes = &renderPipelineDescriptor.cAttributes[0];
+        renderPipelineDescriptor.cAttributes[0].format = wgpu::VertexFormat::Uint32;
+        renderPipelineDescriptor.cAttributes[0].shaderLocation = 0;
+        renderPipelineDescriptor.cBuffers[1].attributeCount = 1;
+        renderPipelineDescriptor.cBuffers[1].attributes = &renderPipelineDescriptor.cAttributes[1];
+        renderPipelineDescriptor.cAttributes[1].format = wgpu::VertexFormat::Uint32;
+        renderPipelineDescriptor.cAttributes[1].shaderLocation = 1;
+
+        DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+    }
+
+    wgpu::Buffer vertexBuffer1 = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Vertex, {1u});
+    wgpu::Buffer vertexBuffer2 = utils::CreateBufferFromData(
+        device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Vertex, {2u});
+
+    // Do the draw call with the render pipeline
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+        while (!task.isCompleted) {
+            WaitABit();
+        }
+        ASSERT_TRUE(task.message.empty());
+        ASSERT_NE(nullptr, task.renderPipeline.Get());
+        pass.SetPipeline(task.renderPipeline);
+
+        pass.SetVertexBuffer(0, vertexBuffer1);
+        pass.SetVertexBuffer(1, vertexBuffer2);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The color attachment will have the expected color when the vertex attribute values are
+    // fetched correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), renderTarget, 0, 0);
+}
+
+// Verify calling CreateRenderPipelineAsync() with valid depthStencilState works on all backends.
+TEST_P(CreatePipelineAsyncTest, CreateRenderPipelineAsyncWithDepthStencilState) {
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture renderTarget = device.CreateTexture(&textureDescriptor);
+    wgpu::TextureView renderTargetView = renderTarget.CreateView();
+
+    textureDescriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+    wgpu::Texture depthStencilTarget = device.CreateTexture(&textureDescriptor);
+    wgpu::TextureView depthStencilView = depthStencilTarget.CreateView();
+
+    // Clear the color attachment to green and the stencil aspect of the depth stencil attachment
+    // to 0.
+    utils::ComboRenderPassDescriptor renderPass({renderTargetView}, depthStencilView);
+    renderPass.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPass.cColorAttachments[0].clearValue = {0.0, 1.0, 0.0, 1.0};
+    renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+    renderPass.cDepthStencilAttachmentInfo.stencilClearValue = 0u;
+
+    wgpu::RenderPipeline pipeline;
+    {
+        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        })");
+
+        renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        renderPipelineDescriptor.cFragment.targetCount = 1;
+        renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        // Create a render pipeline with stencil compare function "Equal".
+        renderPipelineDescriptor.depthStencil = &renderPipelineDescriptor.cDepthStencil;
+        renderPipelineDescriptor.cDepthStencil.stencilFront.compare = wgpu::CompareFunction::Equal;
+
+        DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+        while (!task.isCompleted) {
+            WaitABit();
+        }
+        ASSERT_TRUE(task.message.empty());
+        ASSERT_NE(nullptr, task.renderPipeline.Get());
+        pass.SetPipeline(task.renderPipeline);
+
+        // The stencil reference is set to 1, so there should be no pixel that can pass the stencil
+        // test.
+        pass.SetStencilReference(1);
+
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The color in the color attachment should not be changed after the draw call as no pixel can
+    // pass the stencil test.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), renderTarget, 0, 0);
+}
+
+// Verify calling CreateRenderPipelineAsync() with multisample.Count > 1 works on all backends.
+TEST_P(CreatePipelineAsyncTest, CreateRenderPipelineWithMultisampleState) {
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture resolveTarget = device.CreateTexture(&textureDescriptor);
+    wgpu::TextureView resolveTargetView = resolveTarget.CreateView();
+
+    textureDescriptor.sampleCount = 4;
+    wgpu::Texture renderTarget = device.CreateTexture(&textureDescriptor);
+    wgpu::TextureView renderTargetView = renderTarget.CreateView();
+
+    // Set the multi-sampled render target, its resolve target to render pass and clear color to
+    // (1, 0, 0, 1).
+    utils::ComboRenderPassDescriptor renderPass({renderTargetView});
+    renderPass.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPass.cColorAttachments[0].clearValue = {1.0, 0.0, 0.0, 1.0};
+    renderPass.cColorAttachments[0].resolveTarget = resolveTargetView;
+
+    wgpu::RenderPipeline pipeline;
+    {
+        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+
+        renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        renderPipelineDescriptor.cFragment.targetCount = 1;
+        renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        // Create a render pipeline with multisample.count == 4.
+        renderPipelineDescriptor.multisample.count = 4;
+
+        DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+        while (!task.isCompleted) {
+            WaitABit();
+        }
+        ASSERT_TRUE(task.message.empty());
+        ASSERT_NE(nullptr, task.renderPipeline.Get());
+        pass.SetPipeline(task.renderPipeline);
+
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The color in resolveTarget should be the expected color (0, 1, 0, 1).
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), resolveTarget, 0, 0);
+}
+
+// Verify calling CreateRenderPipelineAsync() with valid BlendState works on all backends.
+TEST_P(CreatePipelineAsyncTest, CreateRenderPipelineAsyncWithBlendState) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_indexed_draw_buffers"));
+
+    std::array<wgpu::Texture, 2> renderTargets;
+    std::array<wgpu::TextureView, 2> renderTargetViews;
+
+    {
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.size = {1, 1, 1};
+        textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        textureDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+        for (uint32_t i = 0; i < renderTargets.size(); ++i) {
+            renderTargets[i] = device.CreateTexture(&textureDescriptor);
+            renderTargetViews[i] = renderTargets[i].CreateView();
+        }
+    }
+
+    // Prepare two color attachments
+    utils::ComboRenderPassDescriptor renderPass({renderTargetViews[0], renderTargetViews[1]});
+    renderPass.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPass.cColorAttachments[0].clearValue = {0.2, 0.0, 0.0, 0.2};
+    renderPass.cColorAttachments[1].loadOp = wgpu::LoadOp::Clear;
+    renderPass.cColorAttachments[1].clearValue = {0.0, 0.2, 0.0, 0.2};
+
+    {
+        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+         struct FragmentOut {
+            @location(0) fragColor0 : vec4<f32>,
+            @location(1) fragColor1 : vec4<f32>,
+        }
+
+        @stage(fragment) fn main() -> FragmentOut {
+            var output : FragmentOut;
+            output.fragColor0 = vec4<f32>(0.4, 0.0, 0.0, 0.4);
+            output.fragColor1 = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            return output;
+        })");
+
+        renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+        // Create a render pipeline with blending states
+        renderPipelineDescriptor.cFragment.targetCount = renderTargets.size();
+
+        // The blend operation for the first render target is "add".
+        wgpu::BlendComponent blendComponent0;
+        blendComponent0.operation = wgpu::BlendOperation::Add;
+        blendComponent0.srcFactor = wgpu::BlendFactor::One;
+        blendComponent0.dstFactor = wgpu::BlendFactor::One;
+
+        wgpu::BlendState blend0;
+        blend0.color = blendComponent0;
+        blend0.alpha = blendComponent0;
+
+        // The blend operation for the first render target is "subtract".
+        wgpu::BlendComponent blendComponent1;
+        blendComponent1.operation = wgpu::BlendOperation::Subtract;
+        blendComponent1.srcFactor = wgpu::BlendFactor::One;
+        blendComponent1.dstFactor = wgpu::BlendFactor::One;
+
+        wgpu::BlendState blend1;
+        blend1.color = blendComponent1;
+        blend1.alpha = blendComponent1;
+
+        renderPipelineDescriptor.cTargets[0].blend = &blend0;
+        renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+        renderPipelineDescriptor.cTargets[1].blend = &blend1;
+        renderPipelineDescriptor.cTargets[1].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        DoCreateRenderPipelineAsync(renderPipelineDescriptor);
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+        while (!task.isCompleted) {
+            WaitABit();
+        }
+        ASSERT_TRUE(task.message.empty());
+        ASSERT_NE(nullptr, task.renderPipeline.Get());
+        pass.SetPipeline(task.renderPipeline);
+
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // When the blend states are all set correctly, the color of renderTargets[0] should be
+    // (0.6, 0, 0, 0.6) = colorAttachment0.clearValue + (0.4, 0.0, 0.0, 0.4), and the color of
+    // renderTargets[1] should be (0.8, 0, 0, 0.8) = (1, 0, 0, 1) - colorAttachment1.clearValue.
+    RGBA8 expected0 = {153, 0, 0, 153};
+    RGBA8 expected1 = {0, 204, 0, 204};
+    EXPECT_PIXEL_RGBA8_EQ(expected0, renderTargets[0], 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(expected1, renderTargets[1], 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(CreatePipelineAsyncTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/CullingTests.cpp b/src/dawn/tests/end2end/CullingTests.cpp
new file mode 100644
index 0000000..d37a70b
--- /dev/null
+++ b/src/dawn/tests/end2end/CullingTests.cpp
@@ -0,0 +1,133 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class CullingTest : public DawnTest {
+  protected:
+    wgpu::RenderPipeline CreatePipelineForTest(wgpu::FrontFace frontFace, wgpu::CullMode cullMode) {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        // Draw two triangles with different winding orders:
+        // 1. The top-left one is counterclockwise (CCW)
+        // 2. The bottom-right one is clockwise (CW)
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>(-1.0,  0.0),
+                    vec2<f32>( 0.0,  1.0),
+                    vec2<f32>( 0.0, -1.0),
+                    vec2<f32>( 1.0,  0.0),
+                    vec2<f32>( 1.0, -1.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        // FragCoord of pixel(x, y) in framebuffer coordinate is (x + 0.5, y + 0.5). And we use
+        // RGBA8 format for the back buffer. So (FragCoord.xy - vec2(0.5)) / 255 in shader code
+        // will make the pixel's R and G channels exactly equal to the pixel's x and y coordinates.
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+                return vec4<f32>(
+                    (FragCoord.xy - vec2<f32>(0.5, 0.5)) / vec2<f32>(255.0, 255.0),
+                    0.0, 1.0);
+            })");
+
+        // Set culling mode and front face according to the parameters
+        pipelineDescriptor.primitive.frontFace = frontFace;
+        pipelineDescriptor.primitive.cullMode = cullMode;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::Texture Create2DTextureForTest(wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+        textureDescriptor.format = format;
+        textureDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        textureDescriptor.mipLevelCount = 1;
+        textureDescriptor.sampleCount = 1;
+        textureDescriptor.size = {kSize, kSize, 1};
+        return device.CreateTexture(&textureDescriptor);
+    }
+
+    void DoTest(wgpu::FrontFace frontFace,
+                wgpu::CullMode cullMode,
+                bool isCCWTriangleCulled,
+                bool isCWTriangleCulled) {
+        wgpu::Texture colorTexture = Create2DTextureForTest(wgpu::TextureFormat::RGBA8Unorm);
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].clearValue = {0.0, 0.0, 1.0, 1.0};
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = commandEncoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(CreatePipelineForTest(frontFace, cullMode));
+        renderPass.Draw(6);
+        renderPass.End();
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        const RGBA8 kBackgroundColor = RGBA8::kBlue;
+        const RGBA8 kTopLeftColor = RGBA8::kBlack;
+        constexpr RGBA8 kBottomRightColor = RGBA8(3, 3, 0, 255);
+
+        RGBA8 kCCWTriangleTopLeftColor = isCCWTriangleCulled ? kBackgroundColor : kTopLeftColor;
+        EXPECT_PIXEL_RGBA8_EQ(kCCWTriangleTopLeftColor, colorTexture, 0, 0);
+
+        RGBA8 kCWTriangleBottomRightColor =
+            isCWTriangleCulled ? kBackgroundColor : kBottomRightColor;
+        EXPECT_PIXEL_RGBA8_EQ(kCWTriangleBottomRightColor, colorTexture, kSize - 1, kSize - 1);
+    }
+
+    static constexpr uint32_t kSize = 4;
+};
+
+TEST_P(CullingTest, CullNoneWhenCCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CCW, wgpu::CullMode::None, false, false);
+}
+
+TEST_P(CullingTest, CullFrontFaceWhenCCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CCW, wgpu::CullMode::Front, true, false);
+}
+
+TEST_P(CullingTest, CullBackFaceWhenCCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CCW, wgpu::CullMode::Back, false, true);
+}
+
+TEST_P(CullingTest, CullNoneWhenCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CW, wgpu::CullMode::None, false, false);
+}
+
+TEST_P(CullingTest, CullFrontFaceWhenCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CW, wgpu::CullMode::Front, false, true);
+}
+
+TEST_P(CullingTest, CullBackFaceWhenCWIsFrontFace) {
+    DoTest(wgpu::FrontFace::CW, wgpu::CullMode::Back, true, false);
+}
+
+DAWN_INSTANTIATE_TEST(CullingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/D3D12CachingTests.cpp b/src/dawn/tests/end2end/D3D12CachingTests.cpp
new file mode 100644
index 0000000..8ba2a07
--- /dev/null
+++ b/src/dawn/tests/end2end/D3D12CachingTests.cpp
@@ -0,0 +1,259 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#define EXPECT_CACHE_HIT(N, statement)              \
+    do {                                            \
+        size_t before = mPersistentCache.mHitCount; \
+        statement;                                  \
+        FlushWire();                                \
+        size_t after = mPersistentCache.mHitCount;  \
+        EXPECT_EQ(N, after - before);               \
+    } while (0)
+
+// FakePersistentCache implements a in-memory persistent cache.
+class FakePersistentCache : public dawn::platform::CachingInterface {
+  public:
+    // PersistentCache API
+    void StoreData(const WGPUDevice device,
+                   const void* key,
+                   size_t keySize,
+                   const void* value,
+                   size_t valueSize) override {
+        if (mIsDisabled)
+            return;
+        const std::string keyStr(reinterpret_cast<const char*>(key), keySize);
+
+        const uint8_t* value_start = reinterpret_cast<const uint8_t*>(value);
+        std::vector<uint8_t> entry_value(value_start, value_start + valueSize);
+
+        EXPECT_TRUE(mCache.insert({keyStr, std::move(entry_value)}).second);
+    }
+
+    size_t LoadData(const WGPUDevice device,
+                    const void* key,
+                    size_t keySize,
+                    void* value,
+                    size_t valueSize) override {
+        const std::string keyStr(reinterpret_cast<const char*>(key), keySize);
+        auto entry = mCache.find(keyStr);
+        if (entry == mCache.end()) {
+            return 0;
+        }
+        if (valueSize >= entry->second.size()) {
+            memcpy(value, entry->second.data(), entry->second.size());
+        }
+        mHitCount++;
+        return entry->second.size();
+    }
+
+    using Blob = std::vector<uint8_t>;
+    using FakeCache = std::unordered_map<std::string, Blob>;
+
+    FakeCache mCache;
+
+    size_t mHitCount = 0;
+    bool mIsDisabled = false;
+};
+
+// Test platform that only supports caching.
+class DawnTestPlatform : public dawn::platform::Platform {
+  public:
+    DawnTestPlatform(dawn::platform::CachingInterface* cachingInterface)
+        : mCachingInterface(cachingInterface) {
+    }
+    ~DawnTestPlatform() override = default;
+
+    dawn::platform::CachingInterface* GetCachingInterface(const void* fingerprint,
+                                                          size_t fingerprintSize) override {
+        return mCachingInterface;
+    }
+
+    dawn::platform::CachingInterface* mCachingInterface = nullptr;
+};
+
+class D3D12CachingTests : public DawnTest {
+  protected:
+    std::unique_ptr<dawn::platform::Platform> CreateTestPlatform() override {
+        return std::make_unique<DawnTestPlatform>(&mPersistentCache);
+    }
+
+    FakePersistentCache mPersistentCache;
+};
+
+// Test that duplicate WGSL still re-compiles HLSL even when the cache is not enabled.
+TEST_P(D3D12CachingTests, SameShaderNoCache) {
+    mPersistentCache.mIsDisabled = true;
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn vertex_main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        }
+
+        @stage(fragment) fn fragment_main() -> @location(0) vec4<f32> {
+          return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        }
+    )");
+
+    // Store the WGSL shader into the cache.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = module;
+        desc.vertex.entryPoint = "vertex_main";
+        desc.cFragment.module = module;
+        desc.cFragment.entryPoint = "fragment_main";
+
+        EXPECT_CACHE_HIT(0u, device.CreateRenderPipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 0u);
+
+    // Load the same WGSL shader from the cache.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = module;
+        desc.vertex.entryPoint = "vertex_main";
+        desc.cFragment.module = module;
+        desc.cFragment.entryPoint = "fragment_main";
+
+        EXPECT_CACHE_HIT(0u, device.CreateRenderPipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 0u);
+}
+
+// Test creating a pipeline from two entrypoints in multiple stages will cache the correct number
+// of HLSL shaders. WGSL shader should result into caching 2 HLSL shaders (stage x
+// entrypoints)
+TEST_P(D3D12CachingTests, ReuseShaderWithMultipleEntryPointsPerStage) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn vertex_main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        }
+
+        @stage(fragment) fn fragment_main() -> @location(0) vec4<f32> {
+          return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        }
+    )");
+
+    // Store the WGSL shader into the cache.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = module;
+        desc.vertex.entryPoint = "vertex_main";
+        desc.cFragment.module = module;
+        desc.cFragment.entryPoint = "fragment_main";
+
+        EXPECT_CACHE_HIT(0u, device.CreateRenderPipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 2u);
+
+    // Load the same WGSL shader from the cache.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = module;
+        desc.vertex.entryPoint = "vertex_main";
+        desc.cFragment.module = module;
+        desc.cFragment.entryPoint = "fragment_main";
+
+        // Cached HLSL shader calls LoadData twice (once to peek, again to get), so check 2 x
+        // kNumOfShaders hits.
+        EXPECT_CACHE_HIT(4u, device.CreateRenderPipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 2u);
+
+    // Modify the WGSL shader functions and make sure it doesn't hit.
+    wgpu::ShaderModule newModule = utils::CreateShaderModule(device, R"(
+      @stage(vertex) fn vertex_main() -> @builtin(position) vec4<f32> {
+          return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+      }
+
+      @stage(fragment) fn fragment_main() -> @location(0) vec4<f32> {
+        return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+      }
+  )");
+
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = newModule;
+        desc.vertex.entryPoint = "vertex_main";
+        desc.cFragment.module = newModule;
+        desc.cFragment.entryPoint = "fragment_main";
+        EXPECT_CACHE_HIT(0u, device.CreateRenderPipeline(&desc));
+    }
+
+    // Cached HLSL shader calls LoadData twice (once to peek, again to get), so check 2 x
+    // kNumOfShaders hits.
+    EXPECT_EQ(mPersistentCache.mCache.size(), 4u);
+}
+
+// Test creating a WGSL shader with two entrypoints in the same stage will cache the correct number
+// of HLSL shaders. WGSL shader should result into caching 1 HLSL shader (stage x entrypoints)
+TEST_P(D3D12CachingTests, ReuseShaderWithMultipleEntryPoints) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Data {
+            data : u32
+        }
+        @binding(0) @group(0) var<storage, read_write> data : Data;
+
+        @stage(compute) @workgroup_size(1) fn write1() {
+            data.data = 1u;
+        }
+
+        @stage(compute) @workgroup_size(1) fn write42() {
+            data.data = 42u;
+        }
+    )");
+
+    // Store the WGSL shader into the cache.
+    {
+        wgpu::ComputePipelineDescriptor desc;
+        desc.compute.module = module;
+        desc.compute.entryPoint = "write1";
+        EXPECT_CACHE_HIT(0u, device.CreateComputePipeline(&desc));
+
+        desc.compute.module = module;
+        desc.compute.entryPoint = "write42";
+        EXPECT_CACHE_HIT(0u, device.CreateComputePipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 2u);
+
+    // Load the same WGSL shader from the cache.
+    {
+        wgpu::ComputePipelineDescriptor desc;
+        desc.compute.module = module;
+        desc.compute.entryPoint = "write1";
+
+        // Cached HLSL shader calls LoadData twice (once to peek, again to get), so check 2 x
+        // kNumOfShaders hits.
+        EXPECT_CACHE_HIT(2u, device.CreateComputePipeline(&desc));
+
+        desc.compute.module = module;
+        desc.compute.entryPoint = "write42";
+
+        // Cached HLSL shader calls LoadData twice, so check 2 x kNumOfShaders hits.
+        EXPECT_CACHE_HIT(2u, device.CreateComputePipeline(&desc));
+    }
+
+    EXPECT_EQ(mPersistentCache.mCache.size(), 2u);
+}
+
+DAWN_INSTANTIATE_TEST(D3D12CachingTests, D3D12Backend());
diff --git a/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
new file mode 100644
index 0000000..519f646
--- /dev/null
+++ b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
@@ -0,0 +1,744 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <d3d11.h>
+#include <d3d12.h>
+#include <dxgi1_4.h>
+#include <wrl/client.h>
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace {
+
+    using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
+
+    class D3D12ResourceTestBase : public DawnTest {
+      protected:
+        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+            return {wgpu::FeatureName::DawnInternalUsages};
+        }
+
+      public:
+        void SetUp() override {
+            DawnTest::SetUp();
+            if (UsesWire()) {
+                return;
+            }
+
+            // Create the D3D11 device/contexts that will be used in subsequent tests
+            ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device.Get());
+
+            const LUID adapterLuid = d3d12Device->GetAdapterLuid();
+
+            ComPtr<IDXGIFactory4> dxgiFactory;
+            HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+            ASSERT_EQ(hr, S_OK);
+
+            ComPtr<IDXGIAdapter> dxgiAdapter;
+            hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
+            ASSERT_EQ(hr, S_OK);
+
+            ComPtr<ID3D11Device> d3d11Device;
+            D3D_FEATURE_LEVEL d3dFeatureLevel;
+            ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+            hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0,
+                                     nullptr, 0, D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
+                                     &d3d11DeviceContext);
+            ASSERT_EQ(hr, S_OK);
+
+            mD3d11Device = std::move(d3d11Device);
+            mD3d11DeviceContext = std::move(d3d11DeviceContext);
+
+            baseDawnDescriptor.dimension = wgpu::TextureDimension::e2D;
+            baseDawnDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            baseDawnDescriptor.size = {kTestWidth, kTestHeight, 1};
+            baseDawnDescriptor.sampleCount = 1;
+            baseDawnDescriptor.mipLevelCount = 1;
+            baseDawnDescriptor.usage =
+                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
+
+            baseD3dDescriptor.Width = kTestWidth;
+            baseD3dDescriptor.Height = kTestHeight;
+            baseD3dDescriptor.MipLevels = 1;
+            baseD3dDescriptor.ArraySize = 1;
+            baseD3dDescriptor.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+            baseD3dDescriptor.SampleDesc.Count = 1;
+            baseD3dDescriptor.SampleDesc.Quality = 0;
+            baseD3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
+            baseD3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+            baseD3dDescriptor.CPUAccessFlags = 0;
+            baseD3dDescriptor.MiscFlags =
+                D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+        }
+
+      protected:
+        void WrapSharedHandle(const wgpu::TextureDescriptor* dawnDesc,
+                              const D3D11_TEXTURE2D_DESC* baseD3dDescriptor,
+                              wgpu::Texture* dawnTexture,
+                              ID3D11Texture2D** d3d11TextureOut,
+                              std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI>*
+                                  externalImageOut = nullptr) const {
+            ComPtr<ID3D11Texture2D> d3d11Texture;
+            HRESULT hr = mD3d11Device->CreateTexture2D(baseD3dDescriptor, nullptr, &d3d11Texture);
+            ASSERT_EQ(hr, S_OK);
+
+            ComPtr<IDXGIResource1> dxgiResource;
+            hr = d3d11Texture.As(&dxgiResource);
+            ASSERT_EQ(hr, S_OK);
+
+            HANDLE sharedHandle;
+            hr = dxgiResource->CreateSharedHandle(
+                nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+                &sharedHandle);
+            ASSERT_EQ(hr, S_OK);
+
+            dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
+            externalImageDesc.cTextureDescriptor =
+                reinterpret_cast<const WGPUTextureDescriptor*>(dawnDesc);
+            externalImageDesc.sharedHandle = sharedHandle;
+
+            std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
+                dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
+
+            // Now that we've created all of our resources, we can close the handle
+            // since we no longer need it.
+            ::CloseHandle(sharedHandle);
+
+            // Cannot access a non-existent external image (ex. validation error).
+            if (externalImage == nullptr) {
+                return;
+            }
+
+            dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+            externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDesc->usage);
+
+            *dawnTexture = wgpu::Texture::Acquire(
+                externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+            *d3d11TextureOut = d3d11Texture.Detach();
+
+            if (externalImageOut != nullptr) {
+                *externalImageOut = std::move(externalImage);
+            }
+        }
+
+        static constexpr size_t kTestWidth = 10;
+        static constexpr size_t kTestHeight = 10;
+
+        ComPtr<ID3D11Device> mD3d11Device;
+        ComPtr<ID3D11DeviceContext> mD3d11DeviceContext;
+
+        D3D11_TEXTURE2D_DESC baseD3dDescriptor;
+        wgpu::TextureDescriptor baseDawnDescriptor;
+    };
+
+}  // anonymous namespace
+
+// A small fixture used to initialize default data for the D3D12Resource validation tests.
+// These tests are skipped if the harness is using the wire.
+class D3D12SharedHandleValidation : public D3D12ResourceTestBase {};
+
+// Test a successful wrapping of an D3D12Resource in a texture
+TEST_P(D3D12SharedHandleValidation, Success) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture);
+
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Test a successful wrapping of an D3D12Resource with DawnTextureInternalUsageDescriptor
+TEST_P(D3D12SharedHandleValidation, SuccessWithInternalUsageDescriptor) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    baseDawnDescriptor.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture);
+
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Test an error occurs if an invalid sType is the nextInChain
+TEST_P(D3D12SharedHandleValidation, InvalidTextureDescriptor) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
+    baseDawnDescriptor.nextInChain = &chainedDescriptor;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor mip level count isn't 1
+TEST_P(D3D12SharedHandleValidation, InvalidMipLevelCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.mipLevelCount = 2;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor depth isn't 1
+TEST_P(D3D12SharedHandleValidation, InvalidDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.size.depthOrArrayLayers = 2;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor sample count isn't 1
+TEST_P(D3D12SharedHandleValidation, InvalidSampleCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.sampleCount = 4;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor width doesn't match the texture's
+TEST_P(D3D12SharedHandleValidation, InvalidWidth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.size.width = kTestWidth + 1;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor height doesn't match the texture's
+TEST_P(D3D12SharedHandleValidation, InvalidHeight) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.size.height = kTestHeight + 1;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor format isn't compatible with the D3D12 Resource
+TEST_P(D3D12SharedHandleValidation, InvalidFormat) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseDawnDescriptor.format = wgpu::TextureFormat::R8Unorm;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the number of D3D mip levels is greater than 1.
+TEST_P(D3D12SharedHandleValidation, InvalidNumD3DMipLevels) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseD3dDescriptor.MipLevels = 2;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the number of array levels is greater than 1.
+TEST_P(D3D12SharedHandleValidation, InvalidD3DArraySize) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    baseD3dDescriptor.ArraySize = 2;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ASSERT_DEVICE_ERROR(
+        WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+class D3D12SharedHandleUsageTests : public D3D12ResourceTestBase {
+  protected:
+    // Submits a 1x1x1 copy from source to destination
+    void SimpleCopyTextureToTexture(wgpu::Texture source, wgpu::Texture destination) {
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Clear a texture on a given device
+    void ClearImage(wgpu::Texture wrappedTexture,
+                    const wgpu::Color& clearColor,
+                    wgpu::Device wgpuDevice) {
+        wgpu::TextureView wrappedView = wrappedTexture.CreateView();
+
+        // Submit a clear operation
+        utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
+        renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
+
+        wgpu::CommandEncoder encoder = wgpuDevice.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::Queue queue = wgpuDevice.GetQueue();
+        queue.Submit(1, &commands);
+    }
+
+    void WrapAndClearD3D11Texture(const wgpu::TextureDescriptor* dawnDescriptor,
+                                  const D3D11_TEXTURE2D_DESC* d3dDescriptor,
+                                  wgpu::Texture* dawnTextureOut,
+                                  const wgpu::Color& clearColor,
+                                  ID3D11Texture2D** d3d11TextureOut,
+                                  IDXGIKeyedMutex** dxgiKeyedMutexOut,
+                                  bool isInitialized = true) const {
+        ComPtr<ID3D11Texture2D> d3d11Texture;
+        HRESULT hr = mD3d11Device->CreateTexture2D(d3dDescriptor, nullptr, &d3d11Texture);
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<IDXGIResource1> dxgiResource;
+        hr = d3d11Texture.As(&dxgiResource);
+        ASSERT_EQ(hr, S_OK);
+
+        HANDLE sharedHandle;
+        hr = dxgiResource->CreateSharedHandle(
+            nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+            &sharedHandle);
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+        hr = d3d11Texture.As(&dxgiKeyedMutex);
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<ID3D11RenderTargetView> d3d11RTV;
+        hr = mD3d11Device->CreateRenderTargetView(d3d11Texture.Get(), nullptr, &d3d11RTV);
+        ASSERT_EQ(hr, S_OK);
+
+        hr = dxgiKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE);
+        ASSERT_EQ(hr, S_OK);
+
+        const float colorRGBA[] = {
+            static_cast<float>(clearColor.r), static_cast<float>(clearColor.g),
+            static_cast<float>(clearColor.b), static_cast<float>(clearColor.a)};
+        mD3d11DeviceContext->ClearRenderTargetView(d3d11RTV.Get(), colorRGBA);
+
+        hr = dxgiKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+        ASSERT_EQ(hr, S_OK);
+
+        dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc = {};
+        externalImageDesc.sharedHandle = sharedHandle;
+        externalImageDesc.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(dawnDescriptor);
+
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
+            dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
+
+        dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+        externalAccessDesc.isInitialized = isInitialized;
+        externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDescriptor->usage);
+
+        *dawnTextureOut = wgpu::Texture::Acquire(
+            externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+        *d3d11TextureOut = d3d11Texture.Detach();
+        *dxgiKeyedMutexOut = dxgiKeyedMutex.Detach();
+    }
+
+    void ExpectPixelRGBA8EQ(ID3D11Texture2D* d3d11Texture,
+                            IDXGIKeyedMutex* dxgiKeyedMutex,
+                            const wgpu::Color& color) {
+        HRESULT hr = dxgiKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE);
+        ASSERT_EQ(hr, S_OK);
+
+        D3D11_TEXTURE2D_DESC texture2DDesc;
+        d3d11Texture->GetDesc(&texture2DDesc);
+
+        const CD3D11_TEXTURE2D_DESC texture2DStagingDesc(
+            texture2DDesc.Format,                             // Format
+            texture2DDesc.Width,                              // Width
+            texture2DDesc.Height,                             // Height
+            1,                                                // ArraySize
+            1,                                                // MipLevels
+            0,                                                // BindFlags
+            D3D11_USAGE_STAGING,                              // Usage
+            D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE);  // CPUAccessFlags
+
+        ComPtr<ID3D11Texture2D> spD3DTextureStaging;
+        hr = mD3d11Device->CreateTexture2D(&texture2DStagingDesc, nullptr, &spD3DTextureStaging);
+        ASSERT_EQ(hr, S_OK);
+
+        D3D11_BOX d3dRc;
+        d3dRc.back = 1;
+        d3dRc.front = 0;
+        d3dRc.top = 0;
+        d3dRc.left = 0;
+        d3dRc.bottom = texture2DDesc.Height;
+        d3dRc.right = texture2DDesc.Width;
+
+        mD3d11DeviceContext->CopySubresourceRegion(spD3DTextureStaging.Get(),  // pDstResource
+                                                   0,                          // DstSubresource
+                                                   0,                          // DstX
+                                                   0,                          // DstY
+                                                   0,                          // DstZ
+                                                   d3d11Texture,               // pSrcResource
+                                                   0,                          // SrcSubresource
+                                                   &d3dRc);                    // pSrcBox
+
+        D3D11_MAPPED_SUBRESOURCE mappedResource;
+        hr = mD3d11DeviceContext->Map(spD3DTextureStaging.Get(), 0, D3D11_MAP_READ_WRITE, 0,
+                                      &mappedResource);
+        ASSERT_EQ(hr, S_OK);
+
+        const uint8_t* colorData = static_cast<uint8_t*>(mappedResource.pData);
+        EXPECT_EQ(colorData[0], color.r * 255u);
+        EXPECT_EQ(colorData[1], color.g * 255u);
+        EXPECT_EQ(colorData[2], color.b * 255u);
+        EXPECT_EQ(colorData[3], color.a * 255u);
+
+        mD3d11DeviceContext->Unmap(spD3DTextureStaging.Get(), 0);
+
+        hr = dxgiKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+        ASSERT_EQ(hr, S_OK);
+    }
+};
+
+// 1. Create and clear a D3D11 texture
+// 2. Copy the wrapped texture to another dawn texture
+// 3. Readback the copied texture and ensure the color matches the original clear color.
+TEST_P(D3D12SharedHandleUsageTests, ClearInD3D11CopyAndReadbackInD3D12) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    const wgpu::Color clearColor{1.0f, 1.0f, 0.0f, 1.0f};
+    wgpu::Texture dawnSrcTexture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    WrapAndClearD3D11Texture(&baseDawnDescriptor, &baseD3dDescriptor, &dawnSrcTexture, clearColor,
+                             &d3d11Texture, &dxgiKeyedMutex);
+    ASSERT_NE(dawnSrcTexture.Get(), nullptr);
+
+    // Create a texture on the device and copy the source texture to it.
+    wgpu::Texture dawnCopyDestTexture = device.CreateTexture(&baseDawnDescriptor);
+    SimpleCopyTextureToTexture(dawnSrcTexture, dawnCopyDestTexture);
+
+    // Readback the destination texture and ensure it contains the colors we used
+    // to clear the source texture on the D3D device.
+    EXPECT_PIXEL_RGBA8_EQ(
+        RGBA8(clearColor.r * 255u, clearColor.g * 255u, clearColor.b * 255u, clearColor.a * 255u),
+        dawnCopyDestTexture, 0, 0);
+}
+
+// 1. Create and clear a D3D11 texture
+// 2. Readback the wrapped texture and ensure the color matches the original clear color.
+TEST_P(D3D12SharedHandleUsageTests, ClearInD3D11ReadbackInD3D12) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    const wgpu::Color clearColor{1.0f, 1.0f, 0.0f, 1.0f};
+    wgpu::Texture dawnTexture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    WrapAndClearD3D11Texture(&baseDawnDescriptor, &baseD3dDescriptor, &dawnTexture, clearColor,
+                             &d3d11Texture, &dxgiKeyedMutex);
+    ASSERT_NE(dawnTexture.Get(), nullptr);
+
+    // Readback the destination texture and ensure it contains the colors we used
+    // to clear the source texture on the D3D device.
+    EXPECT_PIXEL_RGBA8_EQ(
+        RGBA8(clearColor.r * 255, clearColor.g * 255, clearColor.b * 255, clearColor.a * 255),
+        dawnTexture, 0, 0);
+}
+
+// 1. Create and clear a D3D11 texture
+// 2. Wrap it in a Dawn texture and clear it to a different color
+// 3. Readback the texture with D3D11 and ensure we receive the color we cleared with Dawn.
+TEST_P(D3D12SharedHandleUsageTests, ClearInD3D12ReadbackInD3D11) {
+    // TODO(crbug.com/dawn/735): This test appears to hang for
+    // D3D12_Microsoft_Basic_Render_Driver_CPU when validation is enabled.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP() && IsBackendValidationEnabled());
+
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    const wgpu::Color d3d11ClearColor{1.0f, 1.0f, 0.0f, 1.0f};
+    wgpu::Texture dawnTexture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    WrapAndClearD3D11Texture(&baseDawnDescriptor, &baseD3dDescriptor, &dawnTexture, d3d11ClearColor,
+                             &d3d11Texture, &dxgiKeyedMutex);
+    ASSERT_NE(dawnTexture.Get(), nullptr);
+
+    const wgpu::Color d3d12ClearColor{0.0f, 0.0f, 1.0f, 1.0f};
+    ClearImage(dawnTexture, d3d12ClearColor, device);
+
+    dawnTexture.Destroy();
+
+    // Now that Dawn (via D3D12) has finished writing to the texture, we should be
+    // able to read it back by copying it to a staging texture and verifying the
+    // color matches the D3D12 clear color.
+    ExpectPixelRGBA8EQ(d3d11Texture.Get(), dxgiKeyedMutex.Get(), d3d12ClearColor);
+}
+
+// 1. Create and clear a D3D11 texture
+// 2. Wrap it in a Dawn texture and clear the texture to two different colors.
+// 3. Readback the texture with D3D11.
+// 4. Verify the readback color was the final color cleared.
+TEST_P(D3D12SharedHandleUsageTests, ClearTwiceInD3D12ReadbackInD3D11) {
+    // TODO(crbug.com/dawn/735): This test appears to hang for
+    // D3D12_Microsoft_Basic_Render_Driver_CPU when validation is enabled.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP() && IsBackendValidationEnabled());
+
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    const wgpu::Color d3d11ClearColor{1.0f, 1.0f, 0.0f, 1.0f};
+    wgpu::Texture dawnTexture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    WrapAndClearD3D11Texture(&baseDawnDescriptor, &baseD3dDescriptor, &dawnTexture, d3d11ClearColor,
+                             &d3d11Texture, &dxgiKeyedMutex);
+    ASSERT_NE(dawnTexture.Get(), nullptr);
+
+    const wgpu::Color d3d12ClearColor1{0.0f, 0.0f, 1.0f, 1.0f};
+    ClearImage(dawnTexture, d3d12ClearColor1, device);
+
+    const wgpu::Color d3d12ClearColor2{0.0f, 1.0f, 1.0f, 1.0f};
+    ClearImage(dawnTexture, d3d12ClearColor2, device);
+
+    dawnTexture.Destroy();
+
+    // Now that Dawn (via D3D12) has finished writing to the texture, we should be
+    // able to read it back by copying it to a staging texture and verifying the
+    // color matches the last D3D12 clear color.
+    ExpectPixelRGBA8EQ(d3d11Texture.Get(), dxgiKeyedMutex.Get(), d3d12ClearColor2);
+}
+
+// 1. Create and clear a D3D11 texture with clearColor
+// 2. Import the texture with isInitialized = false
+// 3. Verify clearColor is not visible in wrapped texture
+TEST_P(D3D12SharedHandleUsageTests, UninitializedTextureIsCleared) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    const wgpu::Color clearColor{1.0f, 0.0f, 0.0f, 1.0f};
+    wgpu::Texture dawnTexture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    WrapAndClearD3D11Texture(&baseDawnDescriptor, &baseD3dDescriptor, &dawnTexture, clearColor,
+                             &d3d11Texture, &dxgiKeyedMutex, false);
+    ASSERT_NE(dawnTexture.Get(), nullptr);
+
+    // Readback the destination texture and ensure it contains the colors we used
+    // to clear the source texture on the D3D device.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), dawnTexture, 0, 0);
+}
+
+// 1. Create an external image from the DX11 texture.
+// 2. Produce two Dawn textures from the external image.
+// 3. Clear each Dawn texture and verify the texture was cleared to a unique color.
+TEST_P(D3D12SharedHandleUsageTests, ReuseExternalImage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    // Create the first Dawn texture then clear it to red.
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage;
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture,
+                     &externalImage);
+    {
+        const wgpu::Color solidRed{1.0f, 0.0f, 0.0f, 1.0f};
+        ASSERT_NE(texture.Get(), nullptr);
+        ClearImage(texture.Get(), solidRed, device);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0xFF, 0, 0, 0xFF), texture.Get(), 0, 0);
+    }
+
+    // Once finished with the first texture, destroy it so we may re-acquire the external image
+    // again.
+    texture.Destroy();
+
+    // Create another Dawn texture then clear it with another color.
+    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+    externalAccessDesc.isInitialized = true;
+    externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(baseDawnDescriptor.usage);
+
+    texture =
+        wgpu::Texture::Acquire(externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+
+    // Check again that the new texture is still red
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0xFF, 0, 0, 0xFF), texture.Get(), 0, 0);
+
+    // Clear the new texture to blue
+    {
+        const wgpu::Color solidBlue{0.0f, 0.0f, 1.0f, 1.0f};
+        ASSERT_NE(texture.Get(), nullptr);
+        ClearImage(texture.Get(), solidBlue, device);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0xFF, 0xFF), texture.Get(), 0, 0);
+    }
+}
+
+TEST_P(D3D12SharedHandleUsageTests, RecursiveExternalImageAccess) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    // Create the first Dawn texture then clear it to red.
+    wgpu::Texture texture1;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage;
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture1, &d3d11Texture,
+                     &externalImage);
+    {
+        const wgpu::Color solidRed{1.0f, 0.0f, 0.0f, 1.0f};
+        ASSERT_NE(texture1.Get(), nullptr);
+        ClearImage(texture1.Get(), solidRed, device);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0xFF, 0, 0, 0xFF), texture1.Get(), 0, 0);
+    }
+
+    // Create another Dawn texture then clear it with another color.
+    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+    externalAccessDesc.isInitialized = true;
+    externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(baseDawnDescriptor.usage);
+
+    // Acquire the ExternalImageDXGI again without destroying the original texture.
+    wgpu::Texture texture2 =
+        wgpu::Texture::Acquire(externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+
+    // Check again that the new texture is still red
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0xFF, 0, 0, 0xFF), texture2.Get(), 0, 0);
+
+    // Clear the new texture to blue
+    {
+        const wgpu::Color solidBlue{0.0f, 0.0f, 1.0f, 1.0f};
+        ASSERT_NE(texture2.Get(), nullptr);
+        ClearImage(texture2.Get(), solidBlue, device);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0xFF, 0xFF), texture2.Get(), 0, 0);
+    }
+
+    // Check that the original texture is also blue
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0xFF, 0xFF), texture1.Get(), 0, 0);
+
+    texture1.Destroy();
+    texture2.Destroy();
+}
+
+// Produce a new texture with a usage not specified in the external image.
+TEST_P(D3D12SharedHandleUsageTests, ExternalImageUsage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+    externalAccessDesc.isInitialized = true;
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage;
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture,
+                     &externalImage);
+    ASSERT_NE(texture.Get(), nullptr);
+
+    externalAccessDesc.usage = WGPUTextureUsage_StorageBinding;
+    texture =
+        wgpu::Texture::Acquire(externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+    ASSERT_EQ(texture.Get(), nullptr);
+
+    externalAccessDesc.usage = WGPUTextureUsage_TextureBinding;
+    texture =
+        wgpu::Texture::Acquire(externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Verify two Dawn devices can reuse the same external image.
+TEST_P(D3D12SharedHandleUsageTests, ReuseExternalImageWithMultipleDevices) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::Texture texture;
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage;
+
+    // Create the Dawn texture then clear it to red using the first (default) device.
+    WrapSharedHandle(&baseDawnDescriptor, &baseD3dDescriptor, &texture, &d3d11Texture,
+                     &externalImage);
+    const wgpu::Color solidRed{1.0f, 0.0f, 0.0f, 1.0f};
+    ASSERT_NE(texture.Get(), nullptr);
+    ClearImage(texture.Get(), solidRed, device);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0xFF, 0, 0, 0xFF), texture.Get(), 0, 0);
+
+    // Release the texture so we can re-acquire another one from the same external image.
+    texture.Destroy();
+
+    // Create the Dawn texture then clear it to blue using the second device.
+    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+    externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(baseDawnDescriptor.usage);
+
+    wgpu::Device otherDevice = wgpu::Device::Acquire(GetAdapter().CreateDevice());
+
+    wgpu::Texture otherTexture = wgpu::Texture::Acquire(
+        externalImage->ProduceTexture(otherDevice.Get(), &externalAccessDesc));
+
+    ASSERT_NE(otherTexture.Get(), nullptr);
+    const wgpu::Color solidBlue{0.0f, 0.0f, 1.0f, 1.0f};
+    ClearImage(otherTexture.Get(), solidBlue, otherDevice);
+
+    otherTexture.Destroy();
+
+    // Re-create the Dawn texture using the first (default) device.
+    externalAccessDesc.isInitialized = true;
+    texture =
+        wgpu::Texture::Acquire(externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+    ASSERT_NE(texture.Get(), nullptr);
+
+    // Ensure the texture is still blue.
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0xFF, 0xFF), texture.Get(), 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(D3D12SharedHandleValidation, D3D12Backend());
+DAWN_INSTANTIATE_TEST(D3D12SharedHandleUsageTests, D3D12Backend());
diff --git a/src/dawn/tests/end2end/DebugMarkerTests.cpp b/src/dawn/tests/end2end/DebugMarkerTests.cpp
new file mode 100644
index 0000000..1a30b0d
--- /dev/null
+++ b/src/dawn/tests/end2end/DebugMarkerTests.cpp
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class DebugMarkerTests : public DawnTest {};
+
+// Make sure that calling a marker API without a debugging tool attached doesn't cause a failure.
+TEST_P(DebugMarkerTests, NoFailureWithoutDebugToolAttached) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 4, 4);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+DAWN_INSTANTIATE_TEST(DebugMarkerTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DeprecatedAPITests.cpp b/src/dawn/tests/end2end/DeprecatedAPITests.cpp
new file mode 100644
index 0000000..d182bc4
--- /dev/null
+++ b/src/dawn/tests/end2end/DeprecatedAPITests.cpp
@@ -0,0 +1,148 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains test for deprecated parts of Dawn's API while following WebGPU's evolution.
+// It contains test for the "old" behavior that will be deleted once users are migrated, tests that
+// a deprecation warning is emitted when the "old" behavior is used, and tests that an error is
+// emitted when both the old and the new behavior are used (when applicable).
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+class DeprecationTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        // Skip when validation is off because warnings might be emitted during validation calls
+        DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+    }
+};
+
+// Test that setting attachment rather than view for render pass color and depth/stencil attachments
+// is deprecated.
+TEST_P(DeprecationTests, ReadOnlyDepthStencilStoreLoadOpsAttachment) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass;
+
+    // Check that setting load/store ops with read only depth/stencil attachments gives a warning.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {1, 1, 1};
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    wgpu::Texture depthStencil = device.CreateTexture(&descriptor);
+
+    wgpu::RenderPassDepthStencilAttachment* depthAttachment =
+        &renderPass.renderPassInfo.cDepthStencilAttachmentInfo;
+    renderPass.renderPassInfo.depthStencilAttachment = depthAttachment;
+    depthAttachment->view = depthStencil.CreateView();
+    depthAttachment->depthReadOnly = true;
+    depthAttachment->stencilReadOnly = true;
+
+    depthAttachment->depthLoadOp = wgpu::LoadOp::Load;
+    depthAttachment->depthStoreOp = wgpu::StoreOp::Store;
+
+    EXPECT_DEPRECATION_WARNING(pass = encoder.BeginRenderPass(&renderPass.renderPassInfo));
+
+    depthAttachment->depthLoadOp = wgpu::LoadOp::Undefined;
+    depthAttachment->depthStoreOp = wgpu::StoreOp::Undefined;
+    depthAttachment->stencilLoadOp = wgpu::LoadOp::Load;
+    depthAttachment->stencilStoreOp = wgpu::StoreOp::Store;
+
+    EXPECT_DEPRECATION_WARNING(pass = encoder.BeginRenderPass(&renderPass.renderPassInfo));
+
+    pass.End();
+}
+
+// Test that setting the clearColor, clearDepth, or clearStencil values for render pass attachments
+// is deprecated. (dawn:1269)
+TEST_P(DeprecationTests, AttachmentClearColor) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass;
+
+    // Check that setting load/store ops with read only depth/stencil attachments gives a warning.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {1, 1, 1};
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    wgpu::Texture depthStencil = device.CreateTexture(&descriptor);
+
+    wgpu::RenderPassDepthStencilAttachment* depthAttachment =
+        &renderPass.renderPassInfo.cDepthStencilAttachmentInfo;
+    renderPass.renderPassInfo.depthStencilAttachment = depthAttachment;
+    depthAttachment->view = depthStencil.CreateView();
+    depthAttachment->depthLoadOp = wgpu::LoadOp::Clear;
+    depthAttachment->stencilLoadOp = wgpu::LoadOp::Clear;
+
+    // A pass that uses none of the deprecated value should be fine.
+    pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.End();
+
+    depthAttachment->clearStencil = 1;
+
+    EXPECT_DEPRECATION_WARNING(pass = encoder.BeginRenderPass(&renderPass.renderPassInfo));
+    pass.End();
+
+    depthAttachment->clearStencil = 0;
+    depthAttachment->depthClearValue = 0.0f;
+    depthAttachment->clearDepth = 1.0f;
+
+    EXPECT_DEPRECATION_WARNING(pass = encoder.BeginRenderPass(&renderPass.renderPassInfo));
+    pass.End();
+
+    renderPass.renderPassInfo.depthStencilAttachment = nullptr;
+    renderPass.renderPassInfo.cColorAttachments[0].clearColor = {1.0, 2.0, 3.0, 4.0};
+    renderPass.renderPassInfo.cColorAttachments[0].clearValue = {5.0, 4.0, 3.0, 2.0};
+
+    EXPECT_DEPRECATION_WARNING(pass = encoder.BeginRenderPass(&renderPass.renderPassInfo));
+    pass.End();
+}
+
+// Test that endPass() is deprecated for both render and compute passes.
+TEST_P(DeprecationTests, EndPass) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    {
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        EXPECT_DEPRECATION_WARNING(pass.EndPass());
+    }
+
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+        EXPECT_DEPRECATION_WARNING(pass.EndPass());
+    }
+}
+
+DAWN_INSTANTIATE_TEST(DeprecationTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      NullBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DepthBiasTests.cpp b/src/dawn/tests/end2end/DepthBiasTests.cpp
new file mode 100644
index 0000000..f1b31de
--- /dev/null
+++ b/src/dawn/tests/end2end/DepthBiasTests.cpp
@@ -0,0 +1,366 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 2;
+
+enum class QuadAngle { Flat, TiltedX };
+
+class DepthBiasTests : public DawnTest {
+  protected:
+    void RunDepthBiasTest(wgpu::TextureFormat depthFormat,
+                          float depthClear,
+                          QuadAngle quadAngle,
+                          int32_t bias,
+                          float biasSlopeScale,
+                          float biasClamp) {
+        const char* vertexSource = nullptr;
+        switch (quadAngle) {
+            case QuadAngle::Flat:
+                // Draw a square at z = 0.25
+                vertexSource = R"(
+    @stage(vertex)
+    fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+        var pos = array<vec2<f32>, 6>(
+            vec2<f32>(-1.0, -1.0),
+            vec2<f32>( 1.0, -1.0),
+            vec2<f32>(-1.0,  1.0),
+            vec2<f32>(-1.0,  1.0),
+            vec2<f32>( 1.0, -1.0),
+            vec2<f32>( 1.0,  1.0));
+        return vec4<f32>(pos[VertexIndex], 0.25, 1.0);
+    })";
+                break;
+
+            case QuadAngle::TiltedX:
+                // Draw a square ranging from 0 to 0.5, bottom to top
+                vertexSource = R"(
+    @stage(vertex)
+    fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+        var pos = array<vec3<f32>, 6>(
+            vec3<f32>(-1.0, -1.0, 0.0),
+            vec3<f32>( 1.0, -1.0, 0.0),
+            vec3<f32>(-1.0,  1.0, 0.5),
+            vec3<f32>(-1.0,  1.0, 0.5),
+            vec3<f32>( 1.0, -1.0, 0.0),
+            vec3<f32>( 1.0,  1.0, 0.5));
+        return vec4<f32>(pos[VertexIndex], 1.0);
+    })";
+                break;
+        }
+
+        wgpu::ShaderModule vertexModule = utils::CreateShaderModule(device, vertexSource);
+
+        wgpu::ShaderModule fragmentModule = utils::CreateShaderModule(device, R"(
+    @stage(fragment) fn main() -> @location(0) vec4<f32> {
+        return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+    })");
+
+        {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.size = {kRTSize, kRTSize, 1};
+            descriptor.format = depthFormat;
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+            mDepthTexture = device.CreateTexture(&descriptor);
+        }
+
+        {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.size = {kRTSize, kRTSize, 1};
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+            mRenderTarget = device.CreateTexture(&descriptor);
+        }
+
+        // Create a render pass which clears depth to depthClear
+        utils::ComboRenderPassDescriptor renderPassDesc({mRenderTarget.CreateView()},
+                                                        mDepthTexture.CreateView());
+        renderPassDesc.UnsetDepthStencilLoadStoreOpsForFormat(depthFormat);
+        renderPassDesc.cDepthStencilAttachmentInfo.depthClearValue = depthClear;
+
+        // Create a render pipeline to render the quad
+        utils::ComboRenderPipelineDescriptor renderPipelineDesc;
+
+        renderPipelineDesc.vertex.module = vertexModule;
+        renderPipelineDesc.cFragment.module = fragmentModule;
+        wgpu::DepthStencilState* depthStencil = renderPipelineDesc.EnableDepthStencil(depthFormat);
+        depthStencil->depthWriteEnabled = true;
+        depthStencil->depthBias = bias;
+        depthStencil->depthBiasSlopeScale = biasSlopeScale;
+        depthStencil->depthBiasClamp = biasClamp;
+
+        if (depthFormat != wgpu::TextureFormat::Depth32Float) {
+            depthStencil->depthCompare = wgpu::CompareFunction::Greater;
+        }
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&renderPipelineDesc);
+
+        // Draw the quad (two triangles)
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPassDesc);
+        pass.SetPipeline(pipeline);
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Floating point depth buffers use the following formula to calculate bias
+    // bias = depthBias * 2 ** (exponent(max z of primitive) - number of bits in mantissa) +
+    //        slopeScale * maxSlope
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d11/d3d10-graphics-programming-guide-output-merger-stage-depth-bias
+    // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdSetDepthBias.html
+    // https://developer.apple.com/documentation/metal/mtlrendercommandencoder/1516269-setdepthbias
+    //
+    // To get a final bias of 0.25 for primitives with z = 0.25, we can use
+    // depthBias = 0.25 / (2 ** (-2 - 23)) = 8388608
+    static constexpr int32_t kPointTwoFiveBiasForPointTwoFiveZOnFloat = 8388608;
+
+    wgpu::Texture mDepthTexture;
+    wgpu::Texture mRenderTarget;
+};
+
+// Test adding positive bias to output
+TEST_P(DepthBiasTests, PositiveBiasOnFloat) {
+    // NVIDIA GPUs under Vulkan seem to be using a different scale than everyone else.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    // OpenGL uses a different scale than the other APIs
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Draw quad flat on z = 0.25 with 0.25 bias
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::Flat,
+                     kPointTwoFiveBiasForPointTwoFiveZOnFloat, 0, 0);
+
+    // Quad at z = 0.25 + 0.25 bias = 0.5
+    std::vector<float> expected = {
+        0.5, 0.5,  //
+        0.5, 0.5,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding positive bias to output with a clamp
+TEST_P(DepthBiasTests, PositiveBiasOnFloatWithClamp) {
+    // Clamping support in OpenGL is spotty
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Draw quad flat on z = 0.25 with 0.25 bias clamped at 0.125.
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::Flat,
+                     kPointTwoFiveBiasForPointTwoFiveZOnFloat, 0, 0.125);
+
+    // Quad at z = 0.25 + min(0.25 bias, 0.125 clamp) = 0.375
+    std::vector<float> expected = {
+        0.375, 0.375,  //
+        0.375, 0.375,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding negative bias to output
+TEST_P(DepthBiasTests, NegativeBiasOnFloat) {
+    // NVIDIA GPUs seems to be using a different scale than everyone else
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    // OpenGL uses a different scale than the other APIs
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+
+    // Draw quad flat on z = 0.25 with -0.25 bias, depth clear of 0.125
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0.125, QuadAngle::Flat,
+                     -kPointTwoFiveBiasForPointTwoFiveZOnFloat, 0, 0);
+
+    // Quad at z = 0.25 - 0.25 bias = 0
+    std::vector<float> expected = {
+        0.0, 0.0,  //
+        0.0, 0.0,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding negative bias to output with a clamp
+TEST_P(DepthBiasTests, NegativeBiasOnFloatWithClamp) {
+    // Clamping support in OpenGL is spotty
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Draw quad flat on z = 0.25 with -0.25 bias clamped at -0.125.
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::Flat,
+                     -kPointTwoFiveBiasForPointTwoFiveZOnFloat, 0, -0.125);
+
+    // Quad at z = 0.25 + max(-0.25 bias, -0.125 clamp) = 0.125
+    std::vector<float> expected = {
+        0.125, 0.125,  //
+        0.125, 0.125,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding positive infinite slope bias to output
+TEST_P(DepthBiasTests, PositiveInfinitySlopeBiasOnFloat) {
+    // NVIDIA GPUs do not clamp values to 1 when using Inf slope bias.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    // Draw quad with z from 0 to 0.5 with inf slope bias
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0.125, QuadAngle::TiltedX, 0,
+                     std::numeric_limits<float>::infinity(), 0);
+
+    // Value at the center of the pixel + (0.25 slope * Inf slope bias) = 1 (clamped)
+    std::vector<float> expected = {
+        1.0, 1.0,  //
+        1.0, 1.0,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding positive infinite slope bias to output
+TEST_P(DepthBiasTests, NegativeInfinityBiasOnFloat) {
+    // NVIDIA GPUs do not clamp values to 0 when using -Inf slope bias.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    // Draw quad with z from 0 to 0.5 with -inf slope bias
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0.125, QuadAngle::TiltedX, 0,
+                     -std::numeric_limits<float>::infinity(), 0);
+
+    // Value at the center of the pixel + (0.25 slope * -Inf slope bias) = 0 (clamped)
+    std::vector<float> expected = {
+        0.0, 0.0,  //
+        0.0, 0.0,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test tiledX quad with no bias
+TEST_P(DepthBiasTests, NoBiasTiltedXOnFloat) {
+    // Draw quad with z from 0 to 0.5 with no bias
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::TiltedX, 0, 0, 0);
+
+    // Depth values of TiltedX quad. Values at the center of the pixels.
+    std::vector<float> expected = {
+        0.375, 0.375,  //
+        0.125, 0.125,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding positive slope bias to output
+TEST_P(DepthBiasTests, PositiveSlopeBiasOnFloat) {
+    // Draw quad with z from 0 to 0.5 with a slope bias of 1
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::TiltedX, 0, 1, 0);
+
+    // Value at the center of the pixel + (0.25 slope * 1.0 slope bias)
+    std::vector<float> expected = {
+        0.625, 0.625,  //
+        0.375, 0.375,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding negative half slope bias to output
+TEST_P(DepthBiasTests, NegativeHalfSlopeBiasOnFloat) {
+    // Draw quad with z from 0 to 0.5 with a slope bias of -0.5
+    RunDepthBiasTest(wgpu::TextureFormat::Depth32Float, 0, QuadAngle::TiltedX, 0, -0.5, 0);
+
+    // Value at the center of the pixel + (0.25 slope * -0.5 slope bias)
+    std::vector<float> expected = {
+        0.25, 0.25,  //
+        0.0, 0.0,    //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mDepthTexture, {0, 0}, {kRTSize, kRTSize}, 0,
+                      wgpu::TextureAspect::DepthOnly);
+}
+
+// Test adding positive bias to output
+TEST_P(DepthBiasTests, PositiveBiasOn24bit) {
+    // Draw quad flat on z = 0.25 with 0.25 bias
+    RunDepthBiasTest(wgpu::TextureFormat::Depth24PlusStencil8, 0.4f, QuadAngle::Flat,
+                     0.25f * (1 << 25), 0, 0);
+
+    // Only the bottom left quad has colors. 0.5 quad > 0.4 clear.
+    // TODO(crbug.com/dawn/820): Switch to depth sampling once feature has been enabled.
+    std::vector<RGBA8> expected = {
+        RGBA8::kRed, RGBA8::kRed,  //
+        RGBA8::kRed, RGBA8::kRed,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mRenderTarget, {0, 0}, {kRTSize, kRTSize});
+}
+
+// Test adding positive bias to output with a clamp
+TEST_P(DepthBiasTests, PositiveBiasOn24bitWithClamp) {
+    // Clamping support in OpenGL is spotty
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Draw quad flat on z = 0.25 with 0.25 bias clamped at 0.125.
+    RunDepthBiasTest(wgpu::TextureFormat::Depth24PlusStencil8, 0.4f, QuadAngle::Flat,
+                     0.25f * (1 << 25), 0, 0.1f);
+
+    // Since we cleared with a depth of 0.4 and clamped bias at 0.4, the depth test will fail. 0.25
+    // + 0.125 < 0.4 clear.
+    // TODO(crbug.com/dawn/820): Switch to depth sampling once feature has been enabled.
+    std::vector<RGBA8> zero = {
+        RGBA8::kZero, RGBA8::kZero,  //
+        RGBA8::kZero, RGBA8::kZero,  //
+    };
+
+    EXPECT_TEXTURE_EQ(zero.data(), mRenderTarget, {0, 0}, {kRTSize, kRTSize});
+}
+
+// Test adding positive bias to output
+TEST_P(DepthBiasTests, PositiveSlopeBiasOn24bit) {
+    // Draw quad with z from 0 to 0.5 with a slope bias of 1
+    RunDepthBiasTest(wgpu::TextureFormat::Depth24PlusStencil8, 0.4f, QuadAngle::TiltedX, 0, 1, 0);
+
+    // Only the top half of the quad has a depth > 0.4 clear
+    // TODO(crbug.com/dawn/820): Switch to depth sampling once feature has been enabled.
+    std::vector<RGBA8> expected = {
+        RGBA8::kRed, RGBA8::kRed,    //
+        RGBA8::kZero, RGBA8::kZero,  //
+    };
+
+    EXPECT_TEXTURE_EQ(expected.data(), mRenderTarget, {0, 0}, {kRTSize, kRTSize});
+}
+
+DAWN_INSTANTIATE_TEST(DepthBiasTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DepthStencilCopyTests.cpp b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
new file mode 100644
index 0000000..a74359e
--- /dev/null
+++ b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
@@ -0,0 +1,777 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <array>
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    using TextureFormat = wgpu::TextureFormat;
+    DAWN_TEST_PARAM_STRUCT(DepthStencilCopyTestParams, TextureFormat);
+
+    constexpr std::array<wgpu::TextureFormat, 3> kValidDepthCopyTextureFormats = {
+        wgpu::TextureFormat::Depth16Unorm,
+        wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth32FloatStencil8,
+    };
+
+    constexpr std::array<wgpu::TextureFormat, 1> kValidDepthCopyFromBufferFormats = {
+        wgpu::TextureFormat::Depth16Unorm,
+    };
+}  // namespace
+
+class DepthStencilCopyTests : public DawnTestWithParams<DepthStencilCopyTestParams> {
+  protected:
+    void SetUp() override {
+        DawnTestWithParams<DepthStencilCopyTestParams>::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
+
+        // Draw a square in the bottom left quarter of the screen.
+        mVertexModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 0.0, -1.0),
+                    vec2<f32>(-1.0,  0.0),
+                    vec2<f32>(-1.0,  0.0),
+                    vec2<f32>( 0.0, -1.0),
+                    vec2<f32>( 0.0,  0.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        switch (GetParam().mTextureFormat) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth24UnormStencil8};
+                }
+                return {};
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth32FloatStencil8};
+                }
+                return {};
+            default:
+                mIsFormatSupported = true;
+                return {};
+        }
+    }
+
+    bool IsValidDepthCopyTextureFormat() {
+        switch (GetParam().mTextureFormat) {
+            case wgpu::TextureFormat::Depth16Unorm:
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    wgpu::Texture CreateTexture(uint32_t width,
+                                uint32_t height,
+                                wgpu::TextureUsage usage,
+                                uint32_t mipLevelCount = 1) {
+        wgpu::TextureDescriptor texDescriptor = {};
+        texDescriptor.size = {width, height, 1};
+        texDescriptor.format = GetParam().mTextureFormat;
+        texDescriptor.usage = usage;
+        texDescriptor.mipLevelCount = mipLevelCount;
+        return device.CreateTexture(&texDescriptor);
+    }
+
+    wgpu::Texture CreateDepthStencilTexture(uint32_t width,
+                                            uint32_t height,
+                                            wgpu::TextureUsage usage,
+                                            uint32_t mipLevelCount = 1) {
+        wgpu::TextureDescriptor texDescriptor = {};
+        texDescriptor.size = {width, height, 1};
+        texDescriptor.format = GetParam().mTextureFormat;
+        texDescriptor.usage = usage;
+        texDescriptor.mipLevelCount = mipLevelCount;
+        return device.CreateTexture(&texDescriptor);
+    }
+
+    wgpu::Texture CreateDepthTexture(uint32_t width,
+                                     uint32_t height,
+                                     wgpu::TextureUsage usage,
+                                     uint32_t mipLevelCount = 1) {
+        wgpu::TextureDescriptor texDescriptor = {};
+        texDescriptor.size = {width, height, 1};
+        texDescriptor.format = GetParam().mTextureFormat;
+        texDescriptor.usage = usage;
+        texDescriptor.mipLevelCount = mipLevelCount;
+        return device.CreateTexture(&texDescriptor);
+    }
+
+    // Initialize the depth/stencil values for the texture using a render pass.
+    // The texture will be cleared to the "clear" values, and then bottom left corner will
+    // be written with the "region" values.
+    void InitializeDepthStencilTextureRegion(wgpu::Texture texture,
+                                             float clearDepth,
+                                             float regionDepth,
+                                             uint8_t clearStencil,
+                                             uint8_t regionStencil,
+                                             uint32_t mipLevel = 0) {
+        wgpu::TextureFormat format = GetParam().mTextureFormat;
+        // Create the render pass used for the initialization.
+        utils::ComboRenderPipelineDescriptor renderPipelineDesc;
+        renderPipelineDesc.vertex.module = mVertexModule;
+        renderPipelineDesc.cFragment.targetCount = 0;
+
+        wgpu::DepthStencilState* depthStencil = renderPipelineDesc.EnableDepthStencil(format);
+
+        if (utils::IsStencilOnlyFormat(format)) {
+            depthStencil->depthCompare = wgpu::CompareFunction::Always;
+            renderPipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+                @stage(fragment) fn main() {}
+            )");
+        } else {
+            depthStencil->depthWriteEnabled = true;
+            renderPipelineDesc.cFragment.module = utils::CreateShaderModule(device, std::string(R"(
+                @stage(fragment) fn main() -> @builtin(frag_depth) f32 {
+                    return )" + std::to_string(regionDepth) + R"(;
+                })")
+                                                                                        .c_str());
+        }
+        if (!utils::IsDepthOnlyFormat(format)) {
+            depthStencil->stencilFront.passOp = wgpu::StencilOperation::Replace;
+        }
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&renderPipelineDesc);
+
+        // Build the render pass used for initialization.
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.baseMipLevel = mipLevel;
+        viewDesc.mipLevelCount = 1;
+
+        utils::ComboRenderPassDescriptor renderPassDesc({}, texture.CreateView(&viewDesc));
+        renderPassDesc.UnsetDepthStencilLoadStoreOpsForFormat(format);
+        renderPassDesc.cDepthStencilAttachmentInfo.depthClearValue = clearDepth;
+        renderPassDesc.cDepthStencilAttachmentInfo.stencilClearValue = clearStencil;
+
+        // Draw the quad (two triangles)
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPassDesc);
+        pass.SetPipeline(pipeline);
+        pass.SetStencilReference(regionStencil);
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    wgpu::Texture CreateInitializeDepthStencilTextureAndCopyT2T(float clearDepth,
+                                                                float regionDepth,
+                                                                uint8_t clearStencil,
+                                                                uint8_t regionStencil,
+                                                                uint32_t width,
+                                                                uint32_t height,
+                                                                wgpu::TextureUsage usage,
+                                                                uint32_t mipLevel = 0) {
+        wgpu::Texture src = CreateDepthStencilTexture(
+            width, height, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+            mipLevel + 1);
+
+        wgpu::Texture dst = CreateDepthStencilTexture(
+            width, height, usage | wgpu::TextureUsage::CopyDst, mipLevel + 1);
+
+        InitializeDepthStencilTextureRegion(src, clearDepth, regionDepth, clearStencil,
+                                            regionStencil, mipLevel);
+
+        // Perform a T2T copy of all aspects
+        {
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            wgpu::ImageCopyTexture srcView =
+                utils::CreateImageCopyTexture(src, mipLevel, {0, 0, 0});
+            wgpu::ImageCopyTexture dstView =
+                utils::CreateImageCopyTexture(dst, mipLevel, {0, 0, 0});
+            wgpu::Extent3D copySize = {width >> mipLevel, height >> mipLevel, 1};
+            commandEncoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+
+            wgpu::CommandBuffer commands = commandEncoder.Finish();
+            queue.Submit(1, &commands);
+        }
+
+        return dst;
+    }
+
+    uint32_t BufferSizeForTextureCopy(
+        uint32_t width,
+        uint32_t height,
+        uint32_t depth,
+        wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
+        uint32_t bytesPerPixel = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t bytesPerRow = Align(width * bytesPerPixel, kTextureBytesPerRowAlignment);
+        return (bytesPerRow * (height - 1) + width * bytesPerPixel) * depth;
+    }
+
+    wgpu::ShaderModule mVertexModule;
+
+  private:
+    bool mIsFormatSupported = false;
+};
+
+// Test copying both aspects in a T2T copy, then copying only stencil.
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyStencil) {
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    // Maybe has to do with the RenderAttachment usage. Notably, a later test
+    // T2TBothAspectsThenCopyNonRenderableStencil does not use RenderAttachment and works correctly.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/667): Work around some platforms' inability to read back stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, kWidth, kHeight,
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment);
+
+    // Check the stencil
+    std::vector<uint8_t> expectedData = {
+        1u, 1u, 1u, 1u,  //
+        1u, 1u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+// Test that part of a non-renderable stencil aspect can be copied. Notably,
+// this test has different behavior on some platforms than T2TBothAspectsThenCopyStencil.
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyNonRenderableStencil) {
+    // TODO(crbug.com/dawn/667): Work around some platforms' inability to read back stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, kWidth, kHeight, wgpu::TextureUsage::CopySrc);
+
+    // Check the stencil
+    std::vector<uint8_t> expectedData = {
+        1u, 1u, 1u, 1u,  //
+        1u, 1u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+// Test that part of a non-renderable, non-zero mip stencil aspect can be copied. Notably,
+// this test has different behavior on some platforms than T2TBothAspectsThenCopyStencil.
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyNonRenderableNonZeroMipStencil) {
+    /// TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    // Maybe has to do with the non-zero mip. Notably, a previous test
+    // T2TBothAspectsThenCopyNonRenderableStencil works correctly.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/667): Work around some platforms' inability to read back stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, 9, 9, wgpu::TextureUsage::CopySrc, 1);
+
+    // Check the stencil
+    std::vector<uint8_t> expectedData = {
+        1u, 1u, 1u, 1u,  //
+        1u, 1u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {4, 4}, 1,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+// Test copying both aspects in a T2T copy, then copying only depth.
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsValidDepthCopyTextureFormat());
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, kWidth, kHeight, wgpu::TextureUsage::RenderAttachment);
+
+    // Check the depth
+    ExpectAttachmentDepthTestData(texture, GetParam().mTextureFormat, kWidth, kHeight, 0, 0,
+                                  {
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                  });
+}
+
+// Test copying both aspects in a T2T copy, then copying only depth at a nonzero mip.
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyNonZeroMipDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsValidDepthCopyTextureFormat());
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, 8, 8, wgpu::TextureUsage::RenderAttachment, 1);
+
+    // Check the depth
+    ExpectAttachmentDepthTestData(texture, GetParam().mTextureFormat, 4, 4, 0, 1,
+                                  {
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                  });
+}
+
+// Test copying both aspects in a T2T copy, then copying stencil, then copying depth
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyStencilThenDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsValidDepthCopyTextureFormat());
+
+    // TODO(crbug.com/dawn/667): Work around some platforms' inability to read back stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, kWidth, kHeight,
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment);
+
+    // Check the stencil
+    std::vector<uint8_t> expectedData = {
+        1u, 1u, 1u, 1u,  //
+        1u, 1u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+
+    // Check the depth
+    ExpectAttachmentDepthTestData(texture, GetParam().mTextureFormat, kWidth, kHeight, 0, 0,
+                                  {
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                  });
+}
+
+// Test copying both aspects in a T2T copy, then copying depth, then copying stencil
+TEST_P(DepthStencilCopyTests, T2TBothAspectsThenCopyDepthThenStencil) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsValidDepthCopyTextureFormat());
+
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    // It seems like the depth readback copy mutates the stencil because the previous
+    // test T2TBothAspectsThenCopyStencil passes.
+    // T2TBothAspectsThenCopyStencilThenDepth which checks stencil first also passes.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms are unable to read
+    // stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateInitializeDepthStencilTextureAndCopyT2T(
+        0.1f, 0.3f, 1u, 3u, kWidth, kHeight,
+        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment);
+
+    // Check the depth
+    ExpectAttachmentDepthTestData(texture, GetParam().mTextureFormat, kWidth, kHeight, 0, 0,
+                                  {
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.1, 0.1, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                      0.3, 0.3, 0.1, 0.1,  //
+                                  });
+
+    // Check the stencil
+    std::vector<uint8_t> expectedData = {
+        1u, 1u, 1u, 1u,  //
+        1u, 1u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+        3u, 3u, 1u, 1u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+class DepthCopyTests : public DepthStencilCopyTests {};
+
+// Test copying the depth-only aspect into a buffer.
+TEST_P(DepthCopyTests, FromDepthAspect) {
+    // TODO(crbug.com/dawn/1237): Depth16Unorm test failed on OpenGL and OpenGLES which says
+    // Invalid format and type combination in glReadPixels
+    DAWN_TEST_UNSUPPORTED_IF(GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm &&
+                             (IsOpenGL() || IsOpenGLES()));
+
+    // TODO(crbug.com/dawn/1291): These tests are failing on NVidia GLES
+    // when using Tint/GLSL.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES() && IsNvidia());
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture texture = CreateTexture(
+        kWidth, kHeight, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    constexpr float kInitDepth = 0.2f;
+    InitializeDepthStencilTextureRegion(texture, 0.f, kInitDepth, 0, 0);
+
+    // This expectation is the test as it performs the CopyTextureToBuffer.
+    if (GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm) {
+        uint16_t expected = FloatToUnorm<uint16_t>(kInitDepth);
+        std::vector<uint16_t> expectedData = {
+            0,        0,        0, 0,  //
+            0,        0,        0, 0,  //
+            expected, expected, 0, 0,  //
+            expected, expected, 0, 0,  //
+        };
+        EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                          wgpu::TextureAspect::DepthOnly);
+    } else {
+        std::vector<float> expectedData = {
+            0.0,        0.0,        0.0, 0.0,  //
+            0.0,        0.0,        0.0, 0.0,  //
+            kInitDepth, kInitDepth, 0.0, 0.0,  //
+            kInitDepth, kInitDepth, 0.0, 0.0,  //
+        };
+        EXPECT_TEXTURE_EQ(expectedData.data(), texture, {0, 0}, {kWidth, kHeight}, 0,
+                          wgpu::TextureAspect::DepthOnly);
+    }
+}
+
+// Test copying the non-zero mip, depth-only aspect into a buffer.
+TEST_P(DepthCopyTests, FromNonZeroMipDepthAspect) {
+    // TODO(crbug.com/dawn/1237): Depth16Unorm test failed on OpenGL and OpenGLES which says
+    // Invalid format and type combination in glReadPixels
+    DAWN_TEST_UNSUPPORTED_IF(GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm &&
+                             (IsOpenGL() || IsOpenGLES()));
+
+    // TODO(crbug.com/dawn/1291): These tests are failing on NVidia GLES
+    // when using Tint/GLSL.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES() && IsNvidia());
+
+    wgpu::Texture depthTexture = CreateDepthTexture(
+        9, 9, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, 2);
+
+    constexpr float kInitDepth = 0.4f;
+    InitializeDepthStencilTextureRegion(depthTexture, 0.f, kInitDepth, 0, 0, /*mipLevel*/ 1);
+
+    // This expectation is the test as it performs the CopyTextureToBuffer.
+    if (GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm) {
+        uint16_t expected = FloatToUnorm<uint16_t>(kInitDepth);
+        std::vector<uint16_t> expectedData = {
+            0,        0,        0, 0,  //
+            0,        0,        0, 0,  //
+            expected, expected, 0, 0,  //
+            expected, expected, 0, 0,  //
+        };
+        EXPECT_TEXTURE_EQ(expectedData.data(), depthTexture, {0, 0}, {4, 4}, 1,
+                          wgpu::TextureAspect::DepthOnly);
+    } else {
+        std::vector<float> expectedData = {
+            0.0,        0.0,        0.0, 0.0,  //
+            0.0,        0.0,        0.0, 0.0,  //
+            kInitDepth, kInitDepth, 0.0, 0.0,  //
+            kInitDepth, kInitDepth, 0.0, 0.0,  //
+        };
+        EXPECT_TEXTURE_EQ(expectedData.data(), depthTexture, {0, 0}, {4, 4}, 1,
+                          wgpu::TextureAspect::DepthOnly);
+    }
+}
+
+class DepthCopyFromBufferTests : public DepthStencilCopyTests {};
+
+// Test copying the depth-only aspect from a buffer.
+TEST_P(DepthCopyFromBufferTests, BufferToDepthAspect) {
+    // TODO(crbug.com/dawn/1237): Depth16Unorm test failed on OpenGL and OpenGLES which says
+    // Invalid format and type combination in glReadPixels
+    DAWN_TEST_UNSUPPORTED_IF(GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm &&
+                             (IsOpenGL() || IsOpenGLES()));
+
+    constexpr uint32_t kWidth = 8;
+    constexpr uint32_t kHeight = 1;
+
+    wgpu::Texture destTexture =
+        CreateTexture(kWidth, kHeight, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = BufferSizeForTextureCopy(kWidth, kHeight, 1, GetParam().mTextureFormat);
+    descriptor.usage = wgpu::BufferUsage::CopySrc;
+    descriptor.mappedAtCreation = true;
+    wgpu::Buffer srcBuffer = device.CreateBuffer(&descriptor);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(srcBuffer, 0, 256, kHeight);
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(destTexture, 0, {0, 0, 0}, wgpu::TextureAspect::DepthOnly);
+    wgpu::Extent3D extent = {kWidth, kHeight, 1};
+
+    constexpr float kInitDepth = 0.2f;
+
+    // This expectation is the test as it performs the CopyTextureToBuffer.
+    if (GetParam().mTextureFormat == wgpu::TextureFormat::Depth16Unorm) {
+        uint16_t expected = FloatToUnorm<uint16_t>(kInitDepth);
+        std::vector<uint16_t> expectedData = {
+            0, 0, expected, expected, 0, 0, expected, expected,
+        };
+        size_t expectedSize = expectedData.size() * sizeof(uint16_t);
+
+        memcpy(srcBuffer.GetMappedRange(0, expectedSize), expectedData.data(), expectedSize);
+        srcBuffer.Unmap();
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &extent);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_TEXTURE_EQ(expectedData.data(), destTexture, {0, 0}, {kWidth, kHeight}, 0,
+                          wgpu::TextureAspect::DepthOnly);
+    } else {
+        std::vector<float> expectedData = {
+            0.0, 0.0, kInitDepth, kInitDepth, 0.0, 0.0, kInitDepth, kInitDepth,
+        };
+        size_t expectedSize = expectedData.size() * sizeof(float);
+
+        memcpy(srcBuffer.GetMappedRange(0, expectedSize), expectedData.data(), expectedSize);
+        srcBuffer.Unmap();
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &extent);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_TEXTURE_EQ(expectedData.data(), destTexture, {0, 0}, {kWidth, kHeight}, 0,
+                          wgpu::TextureAspect::DepthOnly);
+    }
+}
+
+class StencilCopyTests : public DepthStencilCopyTests {};
+
+// Test copying the stencil-only aspect into a buffer.
+TEST_P(StencilCopyTests, FromStencilAspect) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms are unable to read
+    // stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    wgpu::Texture depthStencilTexture = CreateDepthStencilTexture(
+        kWidth, kHeight, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    InitializeDepthStencilTextureRegion(depthStencilTexture, 0.f, 0.3f, 0u, 1u);
+
+    // This expectation is the test as it performs the CopyTextureToBuffer.
+    std::vector<uint8_t> expectedData = {
+        0u, 0u, 0u, 0u,  //
+        0u, 0u, 0u, 0u,  //
+        1u, 1u, 0u, 0u,  //
+        1u, 1u, 0u, 0u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), depthStencilTexture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+// Test copying the non-zero mip, stencil-only aspect into a buffer.
+TEST_P(StencilCopyTests, FromNonZeroMipStencilAspect) {
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/667): Work around some platforms' inability to read back stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    wgpu::Texture depthStencilTexture = CreateDepthStencilTexture(
+        9, 9, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, 2);
+
+    InitializeDepthStencilTextureRegion(depthStencilTexture, 0.f, 0.3f, 0u, 1u, 1u);
+
+    // This expectation is the test as it performs the CopyTextureToBuffer.
+    std::vector<uint8_t> expectedData = {
+        0u, 0u, 0u, 0u,  //
+        0u, 0u, 0u, 0u,  //
+        1u, 1u, 0u, 0u,  //
+        1u, 1u, 0u, 0u,  //
+    };
+    EXPECT_TEXTURE_EQ(expectedData.data(), depthStencilTexture, {0, 0}, {4, 4}, 1,
+                      wgpu::TextureAspect::StencilOnly);
+}
+
+// Test copying to the stencil-aspect of a buffer
+TEST_P(StencilCopyTests, ToStencilAspect) {
+    // Copies to a single aspect are unsupported on OpenGL.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/1273): Fails on Win11 with D3D12 debug layer and full validation
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    // Create a stencil texture
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+    const bool hasDepth = !utils::IsStencilOnlyFormat(GetParam().mTextureFormat);
+
+    wgpu::Texture depthStencilTexture =
+        CreateDepthStencilTexture(kWidth, kHeight,
+                                  wgpu::TextureUsage::RenderAttachment |
+                                      wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst);
+
+    if (hasDepth) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+        // Clear depth to 0.7, so we can check that the stencil copy doesn't mutate the depth.
+        utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
+        passDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mTextureFormat);
+        passDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.7;
+
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    std::vector<uint8_t> stencilData = {
+        1u,  2u,  3u,  4u,   //
+        5u,  6u,  7u,  8u,   //
+        9u,  10u, 11u, 12u,  //
+        13u, 14u, 15u, 16u,  //
+    };
+
+    // After copying stencil data in, we will decrement stencil values in the bottom left
+    // of the screen. This is the expected result.
+    std::vector<uint8_t> expectedStencilData = {
+        1u,  2u,  3u,  4u,   //
+        5u,  6u,  7u,  8u,   //
+        8u,  9u,  11u, 12u,  //
+        12u, 13u, 15u, 16u,  //
+    };
+
+    // Upload the stencil data.
+    wgpu::TextureDataLayout stencilDataLayout = {};
+    stencilDataLayout.bytesPerRow = kWidth * sizeof(uint8_t);
+
+    wgpu::ImageCopyTexture stencilDataCopyTexture = utils::CreateImageCopyTexture(
+        depthStencilTexture, 0, {0, 0, 0}, wgpu::TextureAspect::StencilOnly);
+
+    wgpu::Extent3D writeSize = {kWidth, kHeight, 1};
+    queue.WriteTexture(&stencilDataCopyTexture, stencilData.data(),
+                       stencilData.size() * sizeof(uint8_t), &stencilDataLayout, &writeSize);
+
+    // Decrement the stencil value in a render pass to ensure the data is visible to the pipeline.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        // Create a render pipline which decrements the stencil value for passing fragments.
+        // A quad is drawn in the bottom left.
+        utils::ComboRenderPipelineDescriptor renderPipelineDesc;
+        renderPipelineDesc.vertex.module = mVertexModule;
+        renderPipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() {
+            })");
+        renderPipelineDesc.cFragment.targetCount = 0;
+        wgpu::DepthStencilState* depthStencil =
+            renderPipelineDesc.EnableDepthStencil(GetParam().mTextureFormat);
+        depthStencil->stencilFront.passOp = wgpu::StencilOperation::DecrementClamp;
+        if (!hasDepth) {
+            depthStencil->depthWriteEnabled = false;
+            depthStencil->depthCompare = wgpu::CompareFunction::Always;
+        }
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&renderPipelineDesc);
+
+        // Create a render pass which loads the stencil. We want to load the values we
+        // copied in. Also load the canary depth values so they're not lost.
+        utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
+        passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        passDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mTextureFormat);
+
+        // Draw the quad in the bottom left (two triangles).
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.SetPipeline(pipeline);
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Copy back the stencil data and check it is correct.
+    EXPECT_TEXTURE_EQ(expectedStencilData.data(), depthStencilTexture, {0, 0}, {kWidth, kHeight}, 0,
+                      wgpu::TextureAspect::StencilOnly);
+
+    if (hasDepth) {
+        ExpectAttachmentDepthTestData(depthStencilTexture, GetParam().mTextureFormat, kWidth,
+                                      kHeight, 0, 0,
+                                      {
+                                          0.7, 0.7, 0.7, 0.7,  //
+                                          0.7, 0.7, 0.7, 0.7,  //
+                                          0.7, 0.7, 0.7, 0.7,  //
+                                          0.7, 0.7, 0.7, 0.7,  //
+                                      });
+    }
+}
+
+DAWN_INSTANTIATE_TEST_P(DepthStencilCopyTests,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         // Test with the vulkan_use_s8 toggle forced on and off.
+                         VulkanBackend({"vulkan_use_s8"}, {}),
+                         VulkanBackend({}, {"vulkan_use_s8"})},
+                        std::vector<wgpu::TextureFormat>(utils::kDepthAndStencilFormats.begin(),
+                                                         utils::kDepthAndStencilFormats.end()));
+
+DAWN_INSTANTIATE_TEST_P(DepthCopyTests,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(kValidDepthCopyTextureFormats.begin(),
+                                                         kValidDepthCopyTextureFormats.end()));
+
+DAWN_INSTANTIATE_TEST_P(DepthCopyFromBufferTests,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(kValidDepthCopyFromBufferFormats.begin(),
+                                                         kValidDepthCopyFromBufferFormats.end()));
+
+DAWN_INSTANTIATE_TEST_P(StencilCopyTests,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         // Test with the vulkan_use_s8 toggle forced on and off.
+                         VulkanBackend({"vulkan_use_s8"}, {}),
+                         VulkanBackend({}, {"vulkan_use_s8"})},
+                        std::vector<wgpu::TextureFormat>(utils::kStencilFormats.begin(),
+                                                         utils::kStencilFormats.end()));
diff --git a/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
new file mode 100644
index 0000000..f9714cf
--- /dev/null
+++ b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
@@ -0,0 +1,286 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    using Format = wgpu::TextureFormat;
+    enum class Check {
+        CopyStencil,
+        StencilTest,
+        CopyDepth,
+        DepthTest,
+        SampleDepth,
+    };
+
+    std::ostream& operator<<(std::ostream& o, Check check) {
+        switch (check) {
+            case Check::CopyStencil:
+                o << "CopyStencil";
+                break;
+            case Check::StencilTest:
+                o << "StencilTest";
+                break;
+            case Check::CopyDepth:
+                o << "CopyDepth";
+                break;
+            case Check::DepthTest:
+                o << "DepthTest";
+                break;
+            case Check::SampleDepth:
+                o << "SampleDepth";
+                break;
+        }
+        return o;
+    }
+
+    DAWN_TEST_PARAM_STRUCT(DepthStencilLoadOpTestParams, Format, Check);
+
+    constexpr static uint32_t kRTSize = 16;
+    constexpr uint32_t kMipLevelCount = 2u;
+    constexpr std::array<float, kMipLevelCount> kDepthValues = {0.125f, 0.875f};
+    constexpr std::array<uint16_t, kMipLevelCount> kU16DepthValues = {8192u, 57343u};
+    constexpr std::array<uint8_t, kMipLevelCount> kStencilValues = {7u, 3u};
+
+    class DepthStencilLoadOpTests : public DawnTestWithParams<DepthStencilLoadOpTestParams> {
+      protected:
+        void SetUp() override {
+            DawnTestWithParams<DepthStencilLoadOpTestParams>::SetUp();
+
+            DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
+
+            // Readback of Depth/Stencil textures not fully supported on GL right now.
+            // Also depends on glTextureView which is not supported on ES.
+            DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+            wgpu::TextureDescriptor descriptor;
+            descriptor.size = {kRTSize, kRTSize};
+            descriptor.format = GetParam().mFormat;
+            descriptor.mipLevelCount = kMipLevelCount;
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
+                               wgpu::TextureUsage::TextureBinding;
+
+            texture = device.CreateTexture(&descriptor);
+
+            wgpu::TextureViewDescriptor textureViewDesc = {};
+            textureViewDesc.mipLevelCount = 1;
+
+            for (uint32_t mipLevel = 0; mipLevel < kMipLevelCount; ++mipLevel) {
+                textureViewDesc.baseMipLevel = mipLevel;
+                textureViews[mipLevel] = texture.CreateView(&textureViewDesc);
+
+                utils::ComboRenderPassDescriptor renderPassDescriptor({}, textureViews[mipLevel]);
+                renderPassDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mFormat);
+                renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue =
+                    kDepthValues[mipLevel];
+                renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue =
+                    kStencilValues[mipLevel];
+                renderPassDescriptors.push_back(renderPassDescriptor);
+            }
+        }
+
+        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+            switch (GetParam().mFormat) {
+                case wgpu::TextureFormat::Depth24UnormStencil8:
+                    if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
+                        mIsFormatSupported = true;
+                        return {wgpu::FeatureName::Depth24UnormStencil8};
+                    }
+                    return {};
+                case wgpu::TextureFormat::Depth32FloatStencil8:
+                    if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                        mIsFormatSupported = true;
+                        return {wgpu::FeatureName::Depth32FloatStencil8};
+                    }
+                    return {};
+                default:
+                    mIsFormatSupported = true;
+                    return {};
+            }
+        }
+
+        void CheckMipLevel(uint32_t mipLevel) {
+            uint32_t mipSize = std::max(kRTSize >> mipLevel, 1u);
+
+            switch (GetParam().mCheck) {
+                case Check::SampleDepth: {
+                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                    ExpectSampledDepthData(texture, mipSize, mipSize, 0, mipLevel,
+                                           new detail::ExpectEq<float>(
+                                               expectedDepth.data(), expectedDepth.size(), 0.0001))
+                        << "sample depth mip " << mipLevel;
+                    break;
+                }
+
+                case Check::CopyDepth: {
+                    if (GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm) {
+                        std::vector<uint16_t> expectedDepth(mipSize * mipSize,
+                                                            kU16DepthValues[mipLevel]);
+                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                          mipLevel, wgpu::TextureAspect::DepthOnly)
+                            << "copy depth mip " << mipLevel;
+                    } else {
+                        std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                          mipLevel, wgpu::TextureAspect::DepthOnly)
+                            << "copy depth mip " << mipLevel;
+                    }
+
+                    break;
+                }
+
+                case Check::CopyStencil: {
+                    std::vector<uint8_t> expectedStencil(mipSize * mipSize,
+                                                         kStencilValues[mipLevel]);
+                    EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0}, {mipSize, mipSize},
+                                      mipLevel, wgpu::TextureAspect::StencilOnly)
+                        << "copy stencil mip " << mipLevel;
+                    break;
+                }
+
+                case Check::DepthTest: {
+                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                    ExpectAttachmentDepthTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
+                                                  mipLevel, expectedDepth)
+                        << "depth test mip " << mipLevel;
+                    break;
+                }
+
+                case Check::StencilTest: {
+                    ExpectAttachmentStencilTestData(texture, GetParam().mFormat, mipSize, mipSize,
+                                                    0, mipLevel, kStencilValues[mipLevel])
+                        << "stencil test mip " << mipLevel;
+                    break;
+                }
+            }
+        }
+
+        wgpu::Texture texture;
+        std::array<wgpu::TextureView, kMipLevelCount> textureViews;
+        // Vector instead of array because there is no default constructor.
+        std::vector<utils::ComboRenderPassDescriptor> renderPassDescriptors;
+
+      private:
+        bool mIsFormatSupported = false;
+    };
+
+}  // anonymous namespace
+
+// Check that clearing a mip level works at all.
+TEST_P(DepthStencilLoadOpTests, ClearMip0) {
+    // TODO(https://issuetracker.google.com/issues/204919030): SwiftShader does not clear
+    // Depth16Unorm correctly with some values.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() &&
+                          GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPassDescriptors[0]).End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    CheckMipLevel(0u);
+}
+
+// Check that clearing a non-zero mip level works at all.
+TEST_P(DepthStencilLoadOpTests, ClearMip1) {
+    // TODO(crbug.com/dawn/838): Sampling from the non-zero mip does not work.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && GetParam().mCheck == Check::SampleDepth);
+
+    // TODO(crbug.com/dawn/838): Copying from the non-zero mip here sometimes returns uninitialized
+    // data! (from mip 0 of a previous test run).
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && GetParam().mCheck == Check::CopyDepth);
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && GetParam().mCheck == Check::CopyStencil);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPassDescriptors[1]).End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    CheckMipLevel(1u);
+}
+
+// Clear first mip then the second mip.  Check both mip levels.
+TEST_P(DepthStencilLoadOpTests, ClearBothMip0Then1) {
+    // TODO(crbug.com/dawn/838): Sampling from the non-zero mip does not work.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && GetParam().mCheck == Check::SampleDepth);
+
+    // TODO(https://issuetracker.google.com/issues/204919030): SwiftShader does not clear
+    // Depth16Unorm correctly with some values.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() &&
+                          GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPassDescriptors[0]).End();
+    encoder.BeginRenderPass(&renderPassDescriptors[1]).End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    CheckMipLevel(0u);
+    CheckMipLevel(1u);
+}
+
+// Clear second mip then the first mip. Check both mip levels.
+TEST_P(DepthStencilLoadOpTests, ClearBothMip1Then0) {
+    // TODO(crbug.com/dawn/838): Sampling from the non-zero mip does not work.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && GetParam().mCheck == Check::SampleDepth);
+
+    // TODO(https://issuetracker.google.com/issues/204919030): SwiftShader does not clear
+    // Depth16Unorm correctly with some values.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() &&
+                          GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPassDescriptors[1]).End();
+    encoder.BeginRenderPass(&renderPassDescriptors[0]).End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    CheckMipLevel(0u);
+    CheckMipLevel(1u);
+}
+
+namespace {
+
+    auto GenerateParams() {
+        auto params1 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+            {wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Depth16Unorm},
+            {Check::CopyDepth, Check::DepthTest, Check::SampleDepth});
+
+        auto params2 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+            {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth24UnormStencil8,
+             wgpu::TextureFormat::Depth32FloatStencil8},
+            {Check::CopyStencil, Check::StencilTest, Check::DepthTest, Check::SampleDepth});
+
+        std::vector<DepthStencilLoadOpTestParams> allParams;
+        allParams.insert(allParams.end(), params1.begin(), params1.end());
+        allParams.insert(allParams.end(), params2.begin(), params2.end());
+
+        return allParams;
+    }
+
+    INSTANTIATE_TEST_SUITE_P(,
+                             DepthStencilLoadOpTests,
+                             ::testing::ValuesIn(GenerateParams()),
+                             DawnTestBase::PrintToStringParamName("DepthStencilLoadOpTests"));
+    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DepthStencilLoadOpTests);
+
+}  // namespace
diff --git a/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
new file mode 100644
index 0000000..3047aeb
--- /dev/null
+++ b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
@@ -0,0 +1,818 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    using TextureFormat = wgpu::TextureFormat;
+    DAWN_TEST_PARAM_STRUCT(DepthStencilSamplingTestParams, TextureFormat);
+
+    constexpr wgpu::CompareFunction kCompareFunctions[] = {
+        wgpu::CompareFunction::Never,        wgpu::CompareFunction::Less,
+        wgpu::CompareFunction::LessEqual,    wgpu::CompareFunction::Greater,
+        wgpu::CompareFunction::GreaterEqual, wgpu::CompareFunction::Equal,
+        wgpu::CompareFunction::NotEqual,     wgpu::CompareFunction::Always,
+    };
+
+    // Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
+    constexpr float kCompareRefs[] = {-0.1, 0.4, 1.2};
+
+    // Test 0, below the ref, equal to, above the ref, and 1.
+    const std::vector<float> kNormalizedTextureValues = {0.0, 0.3, 0.4, 0.5, 1.0};
+
+    // Test the limits, and some values in between.
+    const std::vector<uint32_t> kStencilValues = {0, 1, 38, 255};
+
+}  // anonymous namespace
+
+class DepthStencilSamplingTest : public DawnTestWithParams<DepthStencilSamplingTestParams> {
+  protected:
+    enum class TestAspect {
+        Depth,
+        Stencil,
+    };
+
+    void SetUp() override {
+        DawnTestWithParams<DepthStencilSamplingTestParams>::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
+
+        wgpu::BufferDescriptor uniformBufferDesc;
+        uniformBufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+        uniformBufferDesc.size = sizeof(float);
+        mUniformBuffer = device.CreateBuffer(&uniformBufferDesc);
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        switch (GetParam().mTextureFormat) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth24UnormStencil8};
+                }
+                return {};
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth32FloatStencil8};
+                }
+                return {};
+            default:
+                mIsFormatSupported = true;
+                return {};
+        }
+    }
+
+    void GenerateSamplingShader(const std::vector<TestAspect>& aspects,
+                                const std::vector<uint32_t> components,
+                                std::ostringstream& shaderSource,
+                                std::ostringstream& shaderBody) {
+        shaderSource << "type StencilValues = array<u32, " << components.size() << ">;\n";
+        shaderSource << R"(
+            struct DepthResult {
+                value : f32
+            }
+            struct StencilResult {
+                values : StencilValues
+            })";
+        shaderSource << "\n";
+
+        uint32_t index = 0;
+        for (TestAspect aspect : aspects) {
+            switch (aspect) {
+                case TestAspect::Depth:
+                    shaderSource << "@group(0) @binding(" << 2 * index << ") var tex" << index
+                                 << " : texture_depth_2d;\n";
+
+                    shaderSource << "@group(0) @binding(" << 2 * index + 1
+                                 << ") var<storage, read_write> result" << index
+                                 << " : DepthResult;\n";
+
+                    ASSERT(components.size() == 1 && components[0] == 0);
+                    shaderBody << "\nresult" << index << ".value = textureLoad(tex" << index
+                               << ", vec2<i32>(0, 0), 0);";
+                    break;
+                case TestAspect::Stencil:
+                    shaderSource << "@group(0) @binding(" << 2 * index << ") var tex" << index
+                                 << " : texture_2d<u32>;\n";
+
+                    shaderSource << "@group(0) @binding(" << 2 * index + 1
+                                 << ") var<storage, read_write> result" << index
+                                 << " : StencilResult;\n";
+
+                    shaderBody << "var texel = textureLoad(tex" << index
+                               << ", vec2<i32>(0, 0), 0);";
+
+                    for (uint32_t i = 0; i < components.size(); ++i) {
+                        shaderBody << "\nresult" << index << ".values[" << i << "] = texel["
+                                   << components[i] << "];";
+                    }
+                    break;
+            }
+
+            index++;
+        }
+    }
+
+    wgpu::RenderPipeline CreateSamplingRenderPipeline(std::vector<TestAspect> aspects,
+                                                      std::vector<uint32_t> components) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        std::ostringstream shaderSource;
+        std::ostringstream shaderOutputStruct;
+        std::ostringstream shaderBody;
+
+        GenerateSamplingShader(aspects, components, shaderSource, shaderBody);
+
+        shaderSource << "@stage(fragment) fn main() -> @location(0) vec4<f32> {\n";
+        shaderSource << shaderBody.str() << "return vec4<f32>();\n }";
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, shaderSource.str().c_str());
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::ComputePipeline CreateSamplingComputePipeline(std::vector<TestAspect> aspects,
+                                                        std::vector<uint32_t> components) {
+        std::ostringstream shaderSource;
+        std::ostringstream shaderBody;
+        GenerateSamplingShader(aspects, components, shaderSource, shaderBody);
+
+        shaderSource << "@stage(compute) @workgroup_size(1) fn main() { " << shaderBody.str()
+                     << "\n}";
+
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, shaderSource.str().c_str());
+
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.compute.module = csModule;
+        pipelineDescriptor.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&pipelineDescriptor);
+    }
+
+    wgpu::RenderPipeline CreateSamplingRenderPipeline(std::vector<TestAspect> aspects,
+                                                      uint32_t componentIndex) {
+        return CreateSamplingRenderPipeline(std::move(aspects),
+                                            std::vector<uint32_t>{componentIndex});
+    }
+
+    wgpu::ComputePipeline CreateSamplingComputePipeline(std::vector<TestAspect> aspects,
+                                                        uint32_t componentIndex) {
+        return CreateSamplingComputePipeline(std::move(aspects),
+                                             std::vector<uint32_t>{componentIndex});
+    }
+
+    wgpu::RenderPipeline CreateComparisonRenderPipeline() {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var samp : sampler_comparison;
+            @group(0) @binding(1) var tex : texture_depth_2d;
+            struct Uniforms {
+                compareRef : f32
+            }
+            @group(0) @binding(2) var<uniform> uniforms : Uniforms;
+
+            @stage(fragment) fn main() -> @location(0) f32 {
+                return textureSampleCompare(tex, samp, vec2<f32>(0.5, 0.5), uniforms.compareRef);
+            })");
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R32Float;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::ComputePipeline CreateComparisonComputePipeline() {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var samp : sampler_comparison;
+            @group(0) @binding(1) var tex : texture_depth_2d;
+            struct Uniforms {
+                compareRef : f32
+            }
+            @group(0) @binding(2) var<uniform> uniforms : Uniforms;
+
+            struct SamplerResult {
+                value : f32
+            }
+            @group(0) @binding(3) var<storage, read_write> samplerResult : SamplerResult;
+
+            @stage(compute) @workgroup_size(1) fn main() {
+                samplerResult.value = textureSampleCompare(tex, samp, vec2<f32>(0.5, 0.5), uniforms.compareRef);
+            })");
+
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.compute.module = csModule;
+        pipelineDescriptor.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&pipelineDescriptor);
+    }
+
+    wgpu::Texture CreateInputTexture(wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor inputTextureDesc;
+        inputTextureDesc.usage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+        inputTextureDesc.size = {1, 1, 1};
+        inputTextureDesc.format = format;
+        return device.CreateTexture(&inputTextureDesc);
+    }
+
+    wgpu::Texture CreateOutputTexture(wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor outputTextureDesc;
+        outputTextureDesc.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        outputTextureDesc.size = {1, 1, 1};
+        outputTextureDesc.format = format;
+        return device.CreateTexture(&outputTextureDesc);
+    }
+
+    wgpu::Buffer CreateOutputBuffer(uint32_t componentCount = 1) {
+        wgpu::BufferDescriptor outputBufferDesc;
+        outputBufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+        outputBufferDesc.size = sizeof(uint32_t) * componentCount;
+        return device.CreateBuffer(&outputBufferDesc);
+    }
+
+    void UpdateInputDepth(wgpu::CommandEncoder commandEncoder,
+                          wgpu::Texture texture,
+                          wgpu::TextureFormat format,
+                          float depthValue) {
+        utils::ComboRenderPassDescriptor passDescriptor({}, texture.CreateView());
+        passDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(format);
+        passDescriptor.cDepthStencilAttachmentInfo.depthClearValue = depthValue;
+
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.End();
+    }
+
+    void UpdateInputStencil(wgpu::CommandEncoder commandEncoder,
+                            wgpu::Texture texture,
+                            wgpu::TextureFormat format,
+                            uint8_t stencilValue) {
+        utils::ComboRenderPassDescriptor passDescriptor({}, texture.CreateView());
+        passDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(format);
+        passDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = stencilValue;
+
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.End();
+    }
+
+    template <typename T, typename CheckBufferFn>
+    void DoSamplingTestImpl(TestAspect aspect,
+                            wgpu::RenderPipeline pipeline,
+                            wgpu::TextureFormat format,
+                            std::vector<T> textureValues,
+                            uint32_t componentCount,
+                            CheckBufferFn CheckBuffer) {
+        wgpu::Texture inputTexture = CreateInputTexture(format);
+        wgpu::TextureViewDescriptor inputViewDesc = {};
+        switch (aspect) {
+            case TestAspect::Depth:
+                inputViewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+                break;
+            case TestAspect::Stencil:
+                inputViewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+                break;
+        }
+
+        wgpu::Buffer outputBuffer = CreateOutputBuffer(componentCount);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, inputTexture.CreateView(&inputViewDesc)}, {1, outputBuffer}});
+
+        for (size_t i = 0; i < textureValues.size(); ++i) {
+            // Set the input depth texture to the provided texture value
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            switch (aspect) {
+                case TestAspect::Depth:
+                    UpdateInputDepth(commandEncoder, inputTexture, format, textureValues[i]);
+                    break;
+                case TestAspect::Stencil:
+                    UpdateInputStencil(commandEncoder, inputTexture, format, textureValues[i]);
+                    break;
+            }
+
+            // Render into the output texture
+            {
+                utils::BasicRenderPass renderPass =
+                    utils::CreateBasicRenderPass(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+                wgpu::RenderPassEncoder pass =
+                    commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+                pass.SetPipeline(pipeline);
+                pass.SetBindGroup(0, bindGroup);
+                pass.Draw(1);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = commandEncoder.Finish();
+            queue.Submit(1, &commands);
+
+            CheckBuffer(textureValues[i], outputBuffer);
+        }
+    }
+
+    template <typename T, typename CheckBufferFn>
+    void DoSamplingTestImpl(TestAspect aspect,
+                            wgpu::ComputePipeline pipeline,
+                            wgpu::TextureFormat format,
+                            std::vector<T> textureValues,
+                            uint32_t componentCount,
+                            CheckBufferFn CheckBuffer) {
+        wgpu::Texture inputTexture = CreateInputTexture(format);
+        wgpu::TextureViewDescriptor inputViewDesc = {};
+        switch (aspect) {
+            case TestAspect::Depth:
+                inputViewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+                break;
+            case TestAspect::Stencil:
+                inputViewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+                break;
+        }
+
+        wgpu::Buffer outputBuffer = CreateOutputBuffer(componentCount);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, inputTexture.CreateView(&inputViewDesc)}, {1, outputBuffer}});
+
+        for (size_t i = 0; i < textureValues.size(); ++i) {
+            // Set the input depth texture to the provided texture value
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            switch (aspect) {
+                case TestAspect::Depth:
+                    UpdateInputDepth(commandEncoder, inputTexture, format, textureValues[i]);
+                    break;
+                case TestAspect::Stencil:
+                    UpdateInputStencil(commandEncoder, inputTexture, format, textureValues[i]);
+                    break;
+            }
+
+            // Sample into the output buffer
+            {
+                wgpu::ComputePassEncoder pass = commandEncoder.BeginComputePass();
+                pass.SetPipeline(pipeline);
+                pass.SetBindGroup(0, bindGroup);
+                pass.Dispatch(1);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = commandEncoder.Finish();
+            queue.Submit(1, &commands);
+
+            CheckBuffer(textureValues[i], outputBuffer);
+        }
+    }
+
+    template <typename T>
+    void DoSamplingTest(TestAspect aspect,
+                        wgpu::RenderPipeline pipeline,
+                        wgpu::TextureFormat format,
+                        std::vector<T> textureValues,
+                        T tolerance = {}) {
+        DoSamplingTestImpl(aspect, pipeline, format, textureValues, 1,
+                           [this, tolerance](T expected, wgpu::Buffer buffer) {
+                               EXPECT_BUFFER(buffer, 0, sizeof(T),
+                                             new ::detail::ExpectEq<T>(expected, tolerance));
+                           });
+    }
+
+    template <typename T>
+    void DoSamplingTest(TestAspect aspect,
+                        wgpu::ComputePipeline pipeline,
+                        wgpu::TextureFormat format,
+                        std::vector<T> textureValues,
+                        T tolerance = {}) {
+        DoSamplingTestImpl(aspect, pipeline, format, textureValues, 1,
+                           [this, tolerance](T expected, wgpu::Buffer buffer) {
+                               EXPECT_BUFFER(buffer, 0, sizeof(T),
+                                             new ::detail::ExpectEq<T>(expected, tolerance));
+                           });
+    }
+
+    class ExtraStencilComponentsExpectation : public detail::Expectation {
+        using StencilData = std::array<uint32_t, 4>;
+
+      public:
+        ExtraStencilComponentsExpectation(uint32_t expected) : mExpected(expected) {
+        }
+
+        ~ExtraStencilComponentsExpectation() override = default;
+
+        testing::AssertionResult Check(const void* rawData, size_t size) override {
+            ASSERT(size == sizeof(StencilData));
+            const uint32_t* data = static_cast<const uint32_t*>(rawData);
+
+            StencilData ssss = {mExpected, mExpected, mExpected, mExpected};
+            StencilData s001 = {mExpected, 0, 0, 1};
+
+            if (memcmp(data, ssss.data(), size) == 0 || memcmp(data, s001.data(), size) == 0) {
+                return testing::AssertionSuccess();
+            }
+
+            return testing::AssertionFailure() << "Expected stencil data to be "
+                                               << "(" << ssss[0] << ", " << ssss[1] << ", "
+                                               << ssss[2] << ", " << ssss[3] << ") or "
+                                               << "(" << s001[0] << ", " << s001[1] << ", "
+                                               << s001[2] << ", " << s001[3] << "). Got "
+                                               << "(" << data[0] << ", " << data[1] << ", "
+                                               << data[2] << ", " << data[3] << ").";
+        }
+
+      private:
+        uint32_t mExpected;
+    };
+
+    void DoSamplingExtraStencilComponentsRenderTest(TestAspect aspect,
+                                                    wgpu::TextureFormat format,
+                                                    std::vector<uint8_t> textureValues) {
+        DoSamplingTestImpl(aspect,
+                           CreateSamplingRenderPipeline({TestAspect::Stencil}, {0, 1, 2, 3}),
+                           format, textureValues, 4, [&](uint32_t expected, wgpu::Buffer buffer) {
+                               EXPECT_BUFFER(buffer, 0, 4 * sizeof(uint32_t),
+                                             new ExtraStencilComponentsExpectation(expected));
+                           });
+    }
+
+    void DoSamplingExtraStencilComponentsComputeTest(TestAspect aspect,
+                                                     wgpu::TextureFormat format,
+                                                     std::vector<uint8_t> textureValues) {
+        DoSamplingTestImpl(aspect,
+                           CreateSamplingComputePipeline({TestAspect::Stencil}, {0, 1, 2, 3}),
+                           format, textureValues, 4, [&](uint32_t expected, wgpu::Buffer buffer) {
+                               EXPECT_BUFFER(buffer, 0, 4 * sizeof(uint32_t),
+                                             new ExtraStencilComponentsExpectation(expected));
+                           });
+    }
+
+    static bool CompareFunctionPasses(float compareRef,
+                                      wgpu::CompareFunction compare,
+                                      float textureValue) {
+        switch (compare) {
+            case wgpu::CompareFunction::Never:
+                return false;
+            case wgpu::CompareFunction::Less:
+                return compareRef < textureValue;
+            case wgpu::CompareFunction::LessEqual:
+                return compareRef <= textureValue;
+            case wgpu::CompareFunction::Greater:
+                return compareRef > textureValue;
+            case wgpu::CompareFunction::GreaterEqual:
+                return compareRef >= textureValue;
+            case wgpu::CompareFunction::Equal:
+                return compareRef == textureValue;
+            case wgpu::CompareFunction::NotEqual:
+                return compareRef != textureValue;
+            case wgpu::CompareFunction::Always:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    void DoDepthCompareRefTest(wgpu::RenderPipeline pipeline,
+                               wgpu::TextureFormat format,
+                               float compareRef,
+                               wgpu::CompareFunction compare,
+                               std::vector<float> textureValues) {
+        queue.WriteBuffer(mUniformBuffer, 0, &compareRef, sizeof(float));
+
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.compare = compare;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
+
+        wgpu::Texture inputTexture = CreateInputTexture(format);
+        wgpu::TextureViewDescriptor inputViewDesc = {};
+        inputViewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {
+                                     {0, sampler},
+                                     {1, inputTexture.CreateView(&inputViewDesc)},
+                                     {2, mUniformBuffer},
+                                 });
+
+        wgpu::Texture outputTexture = CreateOutputTexture(wgpu::TextureFormat::R32Float);
+        for (float textureValue : textureValues) {
+            // Set the input depth texture to the provided texture value
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            UpdateInputDepth(commandEncoder, inputTexture, format, textureValue);
+
+            // Render into the output texture
+            {
+                utils::ComboRenderPassDescriptor passDescriptor({outputTexture.CreateView()});
+                wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+                pass.SetPipeline(pipeline);
+                pass.SetBindGroup(0, bindGroup);
+                pass.Draw(1);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = commandEncoder.Finish();
+            queue.Submit(1, &commands);
+
+            EXPECT_TEXTURE_EQ(CompareFunctionPasses(compareRef, compare, textureValue) ? 1.f : 0.f,
+                              outputTexture, {0, 0});
+        }
+    }
+
+    void DoDepthCompareRefTest(wgpu::ComputePipeline pipeline,
+                               wgpu::TextureFormat format,
+                               float compareRef,
+                               wgpu::CompareFunction compare,
+                               std::vector<float> textureValues) {
+        queue.WriteBuffer(mUniformBuffer, 0, &compareRef, sizeof(float));
+
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.compare = compare;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
+
+        wgpu::Texture inputTexture = CreateInputTexture(format);
+        wgpu::TextureViewDescriptor inputViewDesc = {};
+        inputViewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::Buffer outputBuffer = CreateOutputBuffer();
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, sampler},
+                                  {1, inputTexture.CreateView(&inputViewDesc)},
+                                  {2, mUniformBuffer},
+                                  {3, outputBuffer}});
+
+        for (float textureValue : textureValues) {
+            // Set the input depth texture to the provided texture value
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            UpdateInputDepth(commandEncoder, inputTexture, format, textureValue);
+
+            // Sample into the output buffer
+            {
+                wgpu::ComputePassEncoder pass = commandEncoder.BeginComputePass();
+                pass.SetPipeline(pipeline);
+                pass.SetBindGroup(0, bindGroup);
+                pass.Dispatch(1);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = commandEncoder.Finish();
+            queue.Submit(1, &commands);
+
+            float float0 = 0.f;
+            float float1 = 1.f;
+            float* expected =
+                CompareFunctionPasses(compareRef, compare, textureValue) ? &float1 : &float0;
+
+            EXPECT_BUFFER_U32_EQ(*reinterpret_cast<uint32_t*>(expected), outputBuffer, 0);
+        }
+    }
+
+  private:
+    wgpu::Buffer mUniformBuffer;
+    bool mIsFormatSupported = false;
+};
+
+// Test that sampling a depth/stencil texture at components 1, 2, and 3 yield 0, 0, and 1
+// respectively
+TEST_P(DepthStencilSamplingTest, SampleExtraComponents) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureFormat format = GetParam().mTextureFormat;
+
+    // TODO(crbug.com/dawn/1239): depth24unorm-stencil8 fails on D3D12 Nvidia old driver version.
+    DAWN_SUPPRESS_TEST_IF(format == wgpu::TextureFormat::Depth24UnormStencil8 && IsD3D12() &&
+                          IsNvidia());
+
+    DoSamplingExtraStencilComponentsRenderTest(TestAspect::Stencil, format,
+                                               {uint8_t(42), uint8_t(37)});
+
+    DoSamplingExtraStencilComponentsComputeTest(TestAspect::Stencil, format,
+                                                {uint8_t(42), uint8_t(37)});
+}
+
+// Test sampling both depth and stencil with a render/compute pipeline works.
+TEST_P(DepthStencilSamplingTest, SampleDepthAndStencilRender) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureFormat format = GetParam().mTextureFormat;
+
+    wgpu::SamplerDescriptor samplerDesc;
+    wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
+
+    wgpu::Texture inputTexture = CreateInputTexture(format);
+
+    wgpu::TextureViewDescriptor depthViewDesc = {};
+    depthViewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+    wgpu::TextureViewDescriptor stencilViewDesc = {};
+    stencilViewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+
+    float tolerance = format == wgpu::TextureFormat::Depth24UnormStencil8 ? 0.001f : 0.0f;
+
+    // With render pipeline
+    {
+        wgpu::RenderPipeline pipeline =
+            CreateSamplingRenderPipeline({TestAspect::Depth, TestAspect::Stencil}, 0);
+
+        wgpu::Buffer depthOutput = CreateOutputBuffer();
+        wgpu::Buffer stencilOutput = CreateOutputBuffer();
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {
+                                     {0, inputTexture.CreateView(&depthViewDesc)},
+                                     {1, depthOutput},
+                                     {2, inputTexture.CreateView(&stencilViewDesc)},
+                                     {3, stencilOutput},
+                                 });
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+        // Initialize both depth and stencil aspects.
+        utils::ComboRenderPassDescriptor passDescriptor({}, inputTexture.CreateView());
+        passDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.43f;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 31;
+
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.End();
+
+        // Render into the output textures
+        {
+            utils::BasicRenderPass renderPass =
+                utils::CreateBasicRenderPass(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+            wgpu::RenderPassEncoder pass =
+                commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(1);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+
+        float expectedDepth = 0.0f;
+        memcpy(&expectedDepth, &passDescriptor.cDepthStencilAttachmentInfo.depthClearValue,
+               sizeof(float));
+        EXPECT_BUFFER(depthOutput, 0, sizeof(float),
+                      new ::detail::ExpectEq<float>(expectedDepth, tolerance));
+
+        uint8_t expectedStencil = 0;
+        memcpy(&expectedStencil, &passDescriptor.cDepthStencilAttachmentInfo.stencilClearValue,
+               sizeof(uint8_t));
+        EXPECT_BUFFER_U32_EQ(expectedStencil, stencilOutput, 0);
+    }
+
+    // With compute pipeline
+    {
+        wgpu::ComputePipeline pipeline =
+            CreateSamplingComputePipeline({TestAspect::Depth, TestAspect::Stencil}, 0);
+
+        wgpu::Buffer depthOutput = CreateOutputBuffer();
+        wgpu::Buffer stencilOutput = CreateOutputBuffer();
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, inputTexture.CreateView(&depthViewDesc)},
+                                  {1, depthOutput},
+                                  {2, inputTexture.CreateView(&stencilViewDesc)},
+                                  {3, stencilOutput}});
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        // Initialize both depth and stencil aspects.
+        utils::ComboRenderPassDescriptor passDescriptor({}, inputTexture.CreateView());
+        passDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.43f;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 31;
+
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        pass.End();
+
+        // Sample into the output buffers
+        {
+            wgpu::ComputePassEncoder pass = commandEncoder.BeginComputePass();
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Dispatch(1);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+
+        float expectedDepth = 0.0f;
+        memcpy(&expectedDepth, &passDescriptor.cDepthStencilAttachmentInfo.depthClearValue,
+               sizeof(float));
+        EXPECT_BUFFER(depthOutput, 0, sizeof(float),
+                      new ::detail::ExpectEq<float>(expectedDepth, tolerance));
+
+        uint8_t expectedStencil = 0;
+        memcpy(&expectedStencil, &passDescriptor.cDepthStencilAttachmentInfo.stencilClearValue,
+               sizeof(uint8_t));
+        EXPECT_BUFFER_U32_EQ(expectedStencil, stencilOutput, 0);
+    }
+}
+
+class DepthSamplingTest : public DepthStencilSamplingTest {};
+
+// Test that sampling a depth texture with a render/compute pipeline works
+TEST_P(DepthSamplingTest, SampleDepthOnly) {
+    wgpu::TextureFormat format = GetParam().mTextureFormat;
+    float tolerance = format == wgpu::TextureFormat::Depth16Unorm ||
+                              format == wgpu::TextureFormat::Depth24UnormStencil8
+                          ? 0.001f
+                          : 0.0f;
+
+    // Test 0, between [0, 1], and 1.
+    DoSamplingTest(TestAspect::Depth, CreateSamplingRenderPipeline({TestAspect::Depth}, 0), format,
+                   kNormalizedTextureValues, tolerance);
+
+    DoSamplingTest(TestAspect::Depth, CreateSamplingComputePipeline({TestAspect::Depth}, 0), format,
+                   kNormalizedTextureValues, tolerance);
+}
+
+// Test that sampling in a render pipeline with all of the compare functions works.
+TEST_P(DepthSamplingTest, CompareFunctionsRender) {
+    // Initialization via renderPass loadOp doesn't work on Mac Intel.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    wgpu::TextureFormat format = GetParam().mTextureFormat;
+    // Test does not account for precision issues when comparison testing Depth16Unorm and
+    // Depth24UnormStencil8.
+    DAWN_TEST_UNSUPPORTED_IF(format == wgpu::TextureFormat::Depth16Unorm ||
+                             format == wgpu::TextureFormat::Depth24UnormStencil8);
+
+    wgpu::RenderPipeline pipeline = CreateComparisonRenderPipeline();
+
+    // Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
+    for (float compareRef : kCompareRefs) {
+        // Test 0, below the ref, equal to, above the ref, and 1.
+        for (wgpu::CompareFunction f : kCompareFunctions) {
+            DoDepthCompareRefTest(pipeline, format, compareRef, f, kNormalizedTextureValues);
+        }
+    }
+}
+
+class StencilSamplingTest : public DepthStencilSamplingTest {};
+
+// Test that sampling a stencil texture with a render/compute pipeline works
+TEST_P(StencilSamplingTest, SampleStencilOnly) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureFormat format = GetParam().mTextureFormat;
+
+    DoSamplingTest(TestAspect::Stencil, CreateSamplingRenderPipeline({TestAspect::Stencil}, 0),
+                   format, kStencilValues);
+
+    DoSamplingTest(TestAspect::Stencil, CreateSamplingComputePipeline({TestAspect::Stencil}, 0),
+                   format, kStencilValues);
+}
+
+DAWN_INSTANTIATE_TEST_P(DepthStencilSamplingTest,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kDepthAndStencilFormats.begin(),
+                                                         utils::kDepthAndStencilFormats.end()));
+
+DAWN_INSTANTIATE_TEST_P(DepthSamplingTest,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kDepthFormats.begin(),
+                                                         utils::kDepthFormats.end()));
+
+DAWN_INSTANTIATE_TEST_P(StencilSamplingTest,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
+                         VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kStencilFormats.begin(),
+                                                         utils::kStencilFormats.end()));
diff --git a/src/dawn/tests/end2end/DepthStencilStateTests.cpp b/src/dawn/tests/end2end/DepthStencilStateTests.cpp
new file mode 100644
index 0000000..6c6acd0
--- /dev/null
+++ b/src/dawn/tests/end2end/DepthStencilStateTests.cpp
@@ -0,0 +1,837 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 64;
+
+class DepthStencilStateTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // TODO(crbug.com/dawn/737): Test output is wrong with D3D12 + WARP.
+        DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+        wgpu::TextureDescriptor renderTargetDescriptor;
+        renderTargetDescriptor.dimension = wgpu::TextureDimension::e2D;
+        renderTargetDescriptor.size.width = kRTSize;
+        renderTargetDescriptor.size.height = kRTSize;
+        renderTargetDescriptor.size.depthOrArrayLayers = 1;
+        renderTargetDescriptor.sampleCount = 1;
+        renderTargetDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        renderTargetDescriptor.mipLevelCount = 1;
+        renderTargetDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        renderTarget = device.CreateTexture(&renderTargetDescriptor);
+
+        renderTargetView = renderTarget.CreateView();
+
+        wgpu::TextureDescriptor depthDescriptor;
+        depthDescriptor.dimension = wgpu::TextureDimension::e2D;
+        depthDescriptor.size.width = kRTSize;
+        depthDescriptor.size.height = kRTSize;
+        depthDescriptor.size.depthOrArrayLayers = 1;
+        depthDescriptor.sampleCount = 1;
+        depthDescriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        depthDescriptor.mipLevelCount = 1;
+        depthDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        depthTexture = device.CreateTexture(&depthDescriptor);
+
+        depthTextureView = depthTexture.CreateView();
+
+        vsModule = utils::CreateShaderModule(device, R"(
+            struct UBO {
+                color : vec3<f32>,
+                depth : f32,
+            }
+            @group(0) @binding(0) var<uniform> ubo : UBO;
+
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                        vec2<f32>(-1.0,  1.0),
+                        vec2<f32>(-1.0, -1.0),
+                        vec2<f32>( 1.0, -1.0), // front-facing
+                        vec2<f32>(-1.0,  1.0),
+                        vec2<f32>( 1.0,  1.0),
+                        vec2<f32>( 1.0, -1.0)); // back-facing
+                return vec4<f32>(pos[VertexIndex], ubo.depth, 1.0);
+            })");
+
+        fsModule = utils::CreateShaderModule(device, R"(
+            struct UBO {
+                color : vec3<f32>,
+                depth : f32,
+            }
+            @group(0) @binding(0) var<uniform> ubo : UBO;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(ubo.color, 1.0);
+            })");
+    }
+
+    struct TestSpec {
+        const wgpu::DepthStencilState& depthStencil;
+        RGBA8 color;
+        float depth;
+        uint32_t stencil;
+        wgpu::FrontFace frontFace = wgpu::FrontFace::CCW;
+        bool setStencilReference = true;
+    };
+
+    // Check whether a depth comparison function works as expected
+    // The less, equal, greater booleans denote wether the respective triangle should be visible
+    // based on the comparison function
+    void CheckDepthCompareFunction(wgpu::CompareFunction compareFunction,
+                                   bool less,
+                                   bool equal,
+                                   bool greater) {
+        wgpu::StencilFaceState stencilFace;
+        stencilFace.compare = wgpu::CompareFunction::Always;
+        stencilFace.failOp = wgpu::StencilOperation::Keep;
+        stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+        stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+        wgpu::DepthStencilState baseState;
+        baseState.depthWriteEnabled = true;
+        baseState.depthCompare = wgpu::CompareFunction::Always;
+        baseState.stencilBack = stencilFace;
+        baseState.stencilFront = stencilFace;
+        baseState.stencilReadMask = 0xff;
+        baseState.stencilWriteMask = 0xff;
+
+        wgpu::DepthStencilState state;
+        state.depthWriteEnabled = true;
+        state.depthCompare = compareFunction;
+        state.stencilBack = stencilFace;
+        state.stencilFront = stencilFace;
+        state.stencilReadMask = 0xff;
+        state.stencilWriteMask = 0xff;
+
+        RGBA8 baseColor = RGBA8(255, 255, 255, 255);
+        RGBA8 lessColor = RGBA8(255, 0, 0, 255);
+        RGBA8 equalColor = RGBA8(0, 255, 0, 255);
+        RGBA8 greaterColor = RGBA8(0, 0, 255, 255);
+
+        // Base triangle at depth 0.5, depth always, depth write enabled
+        TestSpec base = {baseState, baseColor, 0.5f, 0u};
+
+        // Draw the base triangle, then a triangle in stencilFront of the base triangle with the
+        // given depth comparison function
+        DoTest({base, {state, lessColor, 0.f, 0u}}, less ? lessColor : baseColor);
+
+        // Draw the base triangle, then a triangle in at the same depth as the base triangle with
+        // the given depth comparison function
+        DoTest({base, {state, equalColor, 0.5f, 0u}}, equal ? equalColor : baseColor);
+
+        // Draw the base triangle, then a triangle behind the base triangle with the given depth
+        // comparison function
+        DoTest({base, {state, greaterColor, 1.0f, 0u}}, greater ? greaterColor : baseColor);
+    }
+
+    // Check whether a stencil comparison function works as expected
+    // The less, equal, greater booleans denote wether the respective triangle should be visible
+    // based on the comparison function
+    void CheckStencilCompareFunction(wgpu::CompareFunction compareFunction,
+                                     bool less,
+                                     bool equal,
+                                     bool greater) {
+        wgpu::StencilFaceState baseStencilFaceDescriptor;
+        baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+        baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+        baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+        baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+        wgpu::DepthStencilState baseState;
+        baseState.depthWriteEnabled = false;
+        baseState.depthCompare = wgpu::CompareFunction::Always;
+        baseState.stencilBack = baseStencilFaceDescriptor;
+        baseState.stencilFront = baseStencilFaceDescriptor;
+        baseState.stencilReadMask = 0xff;
+        baseState.stencilWriteMask = 0xff;
+
+        wgpu::StencilFaceState stencilFaceDescriptor;
+        stencilFaceDescriptor.compare = compareFunction;
+        stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+        wgpu::DepthStencilState state;
+        state.depthWriteEnabled = false;
+        state.depthCompare = wgpu::CompareFunction::Always;
+        state.stencilBack = stencilFaceDescriptor;
+        state.stencilFront = stencilFaceDescriptor;
+        state.stencilReadMask = 0xff;
+        state.stencilWriteMask = 0xff;
+
+        RGBA8 baseColor = RGBA8(255, 255, 255, 255);
+        RGBA8 lessColor = RGBA8(255, 0, 0, 255);
+        RGBA8 equalColor = RGBA8(0, 255, 0, 255);
+        RGBA8 greaterColor = RGBA8(0, 0, 255, 255);
+
+        // Base triangle with stencil reference 1
+        TestSpec base = {baseState, baseColor, 0.0f, 1u};
+
+        // Draw the base triangle, then a triangle with stencil reference 0 with the given stencil
+        // comparison function
+        DoTest({base, {state, lessColor, 0.f, 0u}}, less ? lessColor : baseColor);
+
+        // Draw the base triangle, then a triangle with stencil reference 1 with the given stencil
+        // comparison function
+        DoTest({base, {state, equalColor, 0.f, 1u}}, equal ? equalColor : baseColor);
+
+        // Draw the base triangle, then a triangle with stencil reference 2 with the given stencil
+        // comparison function
+        DoTest({base, {state, greaterColor, 0.f, 2u}}, greater ? greaterColor : baseColor);
+    }
+
+    // Given the provided `initialStencil` and `reference`, check that applying the
+    // `stencilOperation` produces the `expectedStencil`
+    void CheckStencilOperation(wgpu::StencilOperation stencilOperation,
+                               uint32_t initialStencil,
+                               uint32_t reference,
+                               uint32_t expectedStencil) {
+        wgpu::StencilFaceState baseStencilFaceDescriptor;
+        baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+        baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+        baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+        baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+        wgpu::DepthStencilState baseState;
+        baseState.depthWriteEnabled = false;
+        baseState.depthCompare = wgpu::CompareFunction::Always;
+        baseState.stencilBack = baseStencilFaceDescriptor;
+        baseState.stencilFront = baseStencilFaceDescriptor;
+        baseState.stencilReadMask = 0xff;
+        baseState.stencilWriteMask = 0xff;
+
+        wgpu::StencilFaceState stencilFaceDescriptor;
+        stencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+        stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.passOp = stencilOperation;
+        wgpu::DepthStencilState state;
+        state.depthWriteEnabled = false;
+        state.depthCompare = wgpu::CompareFunction::Always;
+        state.stencilBack = stencilFaceDescriptor;
+        state.stencilFront = stencilFaceDescriptor;
+        state.stencilReadMask = 0xff;
+        state.stencilWriteMask = 0xff;
+
+        CheckStencil(
+            {
+                // Wipe the stencil buffer with the initialStencil value
+                {baseState, RGBA8(255, 255, 255, 255), 0.f, initialStencil},
+
+                // Draw a triangle with the provided stencil operation and reference
+                {state, RGBA8(255, 0, 0, 255), 0.f, reference},
+            },
+            expectedStencil);
+    }
+
+    // Draw a list of test specs, and check if the stencil value is equal to the expected value
+    void CheckStencil(std::vector<TestSpec> testParams, uint32_t expectedStencil) {
+        wgpu::StencilFaceState stencilFaceDescriptor;
+        stencilFaceDescriptor.compare = wgpu::CompareFunction::Equal;
+        stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+        stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+        wgpu::DepthStencilState state;
+        state.depthWriteEnabled = false;
+        state.depthCompare = wgpu::CompareFunction::Always;
+        state.stencilBack = stencilFaceDescriptor;
+        state.stencilFront = stencilFaceDescriptor;
+        state.stencilReadMask = 0xff;
+        state.stencilWriteMask = 0xff;
+
+        testParams.push_back({state, RGBA8(0, 255, 0, 255), 0, expectedStencil});
+        DoTest(testParams, RGBA8(0, 255, 0, 255));
+    }
+
+    // Each test param represents a pair of triangles with a color, depth, stencil value, and
+    // depthStencil state, one frontfacing, one backfacing Draw the triangles in order and check the
+    // expected colors for the frontfaces and backfaces
+    void DoTest(const std::vector<TestSpec>& testParams,
+                const RGBA8& expectedFront,
+                const RGBA8& expectedBack,
+                bool isSingleEncoderMultiplePass = false) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        struct TriangleData {
+            float color[3];
+            float depth;
+        };
+
+        utils::ComboRenderPassDescriptor renderPass({renderTargetView}, depthTextureView);
+        wgpu::RenderPassEncoder pass;
+
+        if (isSingleEncoderMultiplePass) {
+            // The render pass to clear up the depthTextureView (using LoadOp = clear)
+            utils::ComboRenderPassDescriptor clearingPass({renderTargetView}, depthTextureView);
+
+            // The render pass to do the test with depth and stencil result kept
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+            // Clear the depthStencilView at the beginning
+            {
+                pass = encoder.BeginRenderPass(&renderPass);
+                pass.End();
+            }
+        } else {
+            pass = encoder.BeginRenderPass(&renderPass);
+        }
+
+        for (size_t i = 0; i < testParams.size(); ++i) {
+            const TestSpec& test = testParams[i];
+
+            if (isSingleEncoderMultiplePass) {
+                pass = encoder.BeginRenderPass(&renderPass);
+            }
+
+            TriangleData data = {
+                {static_cast<float>(test.color.r) / 255.f, static_cast<float>(test.color.g) / 255.f,
+                 static_cast<float>(test.color.b) / 255.f},
+                test.depth,
+            };
+            // Upload a buffer for each triangle's depth and color data
+            wgpu::Buffer buffer = utils::CreateBufferFromData(device, &data, sizeof(TriangleData),
+                                                              wgpu::BufferUsage::Uniform);
+
+            // Create a pipeline for the triangles with the test spec's depth stencil state
+
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vsModule;
+            descriptor.cFragment.module = fsModule;
+            wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+            *depthStencil = test.depthStencil;
+            depthStencil->format = wgpu::TextureFormat::Depth24PlusStencil8;
+            descriptor.primitive.frontFace = test.frontFace;
+
+            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+            // Create a bind group for the data
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+                device, pipeline.GetBindGroupLayout(0), {{0, buffer, 0, sizeof(TriangleData)}});
+
+            pass.SetPipeline(pipeline);
+            if (test.setStencilReference) {
+                pass.SetStencilReference(test.stencil);  // Set the stencil reference
+            }
+            pass.SetBindGroup(0,
+                              bindGroup);  // Set the bind group which contains color and depth data
+            pass.Draw(6);
+
+            if (isSingleEncoderMultiplePass) {
+                pass.End();
+            }
+        }
+
+        if (!isSingleEncoderMultiplePass) {
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expectedFront, renderTarget, kRTSize / 4, kRTSize / 2)
+            << "Front face check failed";
+        EXPECT_PIXEL_RGBA8_EQ(expectedBack, renderTarget, 3 * kRTSize / 4, kRTSize / 2)
+            << "Back face check failed";
+    }
+
+    void DoTest(const std::vector<TestSpec>& testParams,
+                const RGBA8& expected,
+                bool isSingleEncoderMultiplePass = false) {
+        DoTest(testParams, expected, expected, isSingleEncoderMultiplePass);
+    }
+
+    wgpu::Texture renderTarget;
+    wgpu::Texture depthTexture;
+    wgpu::TextureView renderTargetView;
+    wgpu::TextureView depthTextureView;
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+};
+
+// Test compilation and usage of the fixture
+TEST_P(DepthStencilStateTest, Basic) {
+    wgpu::StencilFaceState stencilFace;
+    stencilFace.compare = wgpu::CompareFunction::Always;
+    stencilFace.failOp = wgpu::StencilOperation::Keep;
+    stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = false;
+    state.depthCompare = wgpu::CompareFunction::Always;
+    state.stencilBack = stencilFace;
+    state.stencilFront = stencilFace;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    DoTest(
+        {
+            {state, RGBA8(0, 255, 0, 255), 0.5f, 0u},
+        },
+        RGBA8(0, 255, 0, 255));
+}
+
+// Test defaults: depth and stencil tests disabled
+TEST_P(DepthStencilStateTest, DepthStencilDisabled) {
+    wgpu::StencilFaceState stencilFace;
+    stencilFace.compare = wgpu::CompareFunction::Always;
+    stencilFace.failOp = wgpu::StencilOperation::Keep;
+    stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = false;
+    state.depthCompare = wgpu::CompareFunction::Always;
+    state.stencilBack = stencilFace;
+    state.stencilFront = stencilFace;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    TestSpec specs[3] = {
+        {state, RGBA8(255, 0, 0, 255), 0.0f, 0u},
+        {state, RGBA8(0, 255, 0, 255), 0.5f, 0u},
+        {state, RGBA8(0, 0, 255, 255), 1.0f, 0u},
+    };
+
+    // Test that for all combinations, the last triangle drawn is the one visible
+    // We check against three triangles because the stencil test may modify results
+    for (uint32_t last = 0; last < 3; ++last) {
+        uint32_t i = (last + 1) % 3;
+        uint32_t j = (last + 2) % 3;
+        DoTest({specs[i], specs[j], specs[last]}, specs[last].color);
+        DoTest({specs[j], specs[i], specs[last]}, specs[last].color);
+    }
+}
+
+// The following tests check that each depth comparison function works
+TEST_P(DepthStencilStateTest, DepthAlways) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::Always, true, true, true);
+}
+
+TEST_P(DepthStencilStateTest, DepthEqual) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::Equal, false, true, false);
+}
+
+TEST_P(DepthStencilStateTest, DepthGreater) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::Greater, false, false, true);
+}
+
+TEST_P(DepthStencilStateTest, DepthGreaterEqual) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::GreaterEqual, false, true, true);
+}
+
+TEST_P(DepthStencilStateTest, DepthLess) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::Less, true, false, false);
+}
+
+TEST_P(DepthStencilStateTest, DepthLessEqual) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::LessEqual, true, true, false);
+}
+
+TEST_P(DepthStencilStateTest, DepthNever) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::Never, false, false, false);
+}
+
+TEST_P(DepthStencilStateTest, DepthNotEqual) {
+    CheckDepthCompareFunction(wgpu::CompareFunction::NotEqual, true, false, true);
+}
+
+// Test that disabling depth writes works and leaves the depth buffer unchanged
+TEST_P(DepthStencilStateTest, DepthWriteDisabled) {
+    wgpu::StencilFaceState stencilFace;
+    stencilFace.compare = wgpu::CompareFunction::Always;
+    stencilFace.failOp = wgpu::StencilOperation::Keep;
+    stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = true;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = stencilFace;
+    baseState.stencilFront = stencilFace;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0xff;
+
+    wgpu::DepthStencilState noDepthWrite;
+    noDepthWrite.depthWriteEnabled = false;
+    noDepthWrite.depthCompare = wgpu::CompareFunction::Always;
+    noDepthWrite.stencilBack = stencilFace;
+    noDepthWrite.stencilFront = stencilFace;
+    noDepthWrite.stencilReadMask = 0xff;
+    noDepthWrite.stencilWriteMask = 0xff;
+
+    wgpu::DepthStencilState checkState;
+    checkState.depthWriteEnabled = false;
+    checkState.depthCompare = wgpu::CompareFunction::Equal;
+    checkState.stencilBack = stencilFace;
+    checkState.stencilFront = stencilFace;
+    checkState.stencilReadMask = 0xff;
+    checkState.stencilWriteMask = 0xff;
+
+    DoTest(
+        {
+            {baseState, RGBA8(255, 255, 255, 255), 1.f,
+             0u},  // Draw a base triangle with depth enabled
+            {noDepthWrite, RGBA8(0, 0, 0, 255), 0.f,
+             0u},  // Draw a second triangle without depth enabled
+            {checkState, RGBA8(0, 255, 0, 255), 1.f,
+             0u},  // Draw a third triangle which should occlude the second even though it is behind
+                   // it
+        },
+        RGBA8(0, 255, 0, 255));
+}
+
+// The following tests check that each stencil comparison function works
+TEST_P(DepthStencilStateTest, StencilAlways) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::Always, true, true, true);
+}
+
+TEST_P(DepthStencilStateTest, StencilEqual) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::Equal, false, true, false);
+}
+
+TEST_P(DepthStencilStateTest, StencilGreater) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::Greater, false, false, true);
+}
+
+TEST_P(DepthStencilStateTest, StencilGreaterEqual) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::GreaterEqual, false, true, true);
+}
+
+TEST_P(DepthStencilStateTest, StencilLess) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::Less, true, false, false);
+}
+
+TEST_P(DepthStencilStateTest, StencilLessEqual) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::LessEqual, true, true, false);
+}
+
+TEST_P(DepthStencilStateTest, StencilNever) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::Never, false, false, false);
+}
+
+TEST_P(DepthStencilStateTest, StencilNotEqual) {
+    CheckStencilCompareFunction(wgpu::CompareFunction::NotEqual, true, false, true);
+}
+
+// The following tests check that each stencil operation works
+TEST_P(DepthStencilStateTest, StencilKeep) {
+    CheckStencilOperation(wgpu::StencilOperation::Keep, 1, 3, 1);
+}
+
+TEST_P(DepthStencilStateTest, StencilZero) {
+    CheckStencilOperation(wgpu::StencilOperation::Zero, 1, 3, 0);
+}
+
+TEST_P(DepthStencilStateTest, StencilReplace) {
+    CheckStencilOperation(wgpu::StencilOperation::Replace, 1, 3, 3);
+}
+
+TEST_P(DepthStencilStateTest, StencilInvert) {
+    CheckStencilOperation(wgpu::StencilOperation::Invert, 0xf0, 3, 0x0f);
+}
+
+TEST_P(DepthStencilStateTest, StencilIncrementClamp) {
+    CheckStencilOperation(wgpu::StencilOperation::IncrementClamp, 1, 3, 2);
+    CheckStencilOperation(wgpu::StencilOperation::IncrementClamp, 0xff, 3, 0xff);
+}
+
+TEST_P(DepthStencilStateTest, StencilIncrementWrap) {
+    CheckStencilOperation(wgpu::StencilOperation::IncrementWrap, 1, 3, 2);
+    CheckStencilOperation(wgpu::StencilOperation::IncrementWrap, 0xff, 3, 0);
+}
+
+TEST_P(DepthStencilStateTest, StencilDecrementClamp) {
+    CheckStencilOperation(wgpu::StencilOperation::DecrementClamp, 1, 3, 0);
+    CheckStencilOperation(wgpu::StencilOperation::DecrementClamp, 0, 3, 0);
+}
+
+TEST_P(DepthStencilStateTest, StencilDecrementWrap) {
+    CheckStencilOperation(wgpu::StencilOperation::DecrementWrap, 1, 3, 0);
+    CheckStencilOperation(wgpu::StencilOperation::DecrementWrap, 0, 3, 0xff);
+}
+
+// Check that the setting a stencil read mask works
+TEST_P(DepthStencilStateTest, StencilReadMask) {
+    wgpu::StencilFaceState baseStencilFaceDescriptor;
+    baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+    baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = false;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = baseStencilFaceDescriptor;
+    baseState.stencilFront = baseStencilFaceDescriptor;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0xff;
+
+    wgpu::StencilFaceState stencilFaceDescriptor;
+    stencilFaceDescriptor.compare = wgpu::CompareFunction::Equal;
+    stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = false;
+    state.depthCompare = wgpu::CompareFunction::Always;
+    state.stencilBack = stencilFaceDescriptor;
+    state.stencilFront = stencilFaceDescriptor;
+    state.stencilReadMask = 0x2;
+    state.stencilWriteMask = 0xff;
+
+    RGBA8 baseColor = RGBA8(255, 255, 255, 255);
+    RGBA8 red = RGBA8(255, 0, 0, 255);
+    RGBA8 green = RGBA8(0, 255, 0, 255);
+
+    TestSpec base = {baseState, baseColor, 0.5f, 3u};  // Base triangle to set the stencil to 3
+    DoTest({base, {state, red, 0.f, 1u}}, baseColor);  // Triangle with stencil reference 1 and read
+                                                       // mask 2 does not draw because (3 & 2 != 1)
+    DoTest({base, {state, green, 0.f, 2u}},
+           green);  // Triangle with stencil reference 2 and read mask 2 draws because (3 & 2 == 2)
+}
+
+// Check that setting a stencil write mask works
+TEST_P(DepthStencilStateTest, StencilWriteMask) {
+    wgpu::StencilFaceState baseStencilFaceDescriptor;
+    baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+    baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = false;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = baseStencilFaceDescriptor;
+    baseState.stencilFront = baseStencilFaceDescriptor;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0x1;
+
+    wgpu::StencilFaceState stencilFaceDescriptor;
+    stencilFaceDescriptor.compare = wgpu::CompareFunction::Equal;
+    stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = false;
+    state.depthCompare = wgpu::CompareFunction::Always;
+    state.stencilBack = stencilFaceDescriptor;
+    state.stencilFront = stencilFaceDescriptor;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    RGBA8 baseColor = RGBA8(255, 255, 255, 255);
+    RGBA8 green = RGBA8(0, 255, 0, 255);
+
+    TestSpec base = {baseState, baseColor, 0.5f,
+                     3u};  // Base triangle with stencil reference 3 and mask 1 to set the stencil 1
+    DoTest({base, {state, green, 0.f, 2u}},
+           baseColor);  // Triangle with stencil reference 2 does not draw because 2 != (3 & 1)
+    DoTest({base, {state, green, 0.f, 1u}},
+           green);  // Triangle with stencil reference 1 draws because 1 == (3 & 1)
+}
+
+// Test that the stencil operation is executed on stencil fail
+TEST_P(DepthStencilStateTest, StencilFail) {
+    wgpu::StencilFaceState baseStencilFaceDescriptor;
+    baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+    baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = false;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = baseStencilFaceDescriptor;
+    baseState.stencilFront = baseStencilFaceDescriptor;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0xff;
+
+    wgpu::StencilFaceState stencilFaceDescriptor;
+    stencilFaceDescriptor.compare = wgpu::CompareFunction::Less;
+    stencilFaceDescriptor.failOp = wgpu::StencilOperation::Replace;
+    stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = false;
+    state.depthCompare = wgpu::CompareFunction::Always;
+    state.stencilBack = stencilFaceDescriptor;
+    state.stencilFront = stencilFaceDescriptor;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    CheckStencil(
+        {
+            {baseState, RGBA8(255, 255, 255, 255), 1.f, 1},  // Triangle to set stencil value to 1
+            {state, RGBA8(0, 0, 0, 255), 0.f,
+             2}  // Triangle with stencil reference 2 fails the Less comparison function
+        },
+        2);  // Replace the stencil on failure, so it should be 2
+}
+
+// Test that the stencil operation is executed on stencil pass, depth fail
+TEST_P(DepthStencilStateTest, StencilDepthFail) {
+    wgpu::StencilFaceState baseStencilFaceDescriptor;
+    baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+    baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = true;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = baseStencilFaceDescriptor;
+    baseState.stencilFront = baseStencilFaceDescriptor;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0xff;
+
+    wgpu::StencilFaceState stencilFaceDescriptor;
+    stencilFaceDescriptor.compare = wgpu::CompareFunction::Greater;
+    stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Replace;
+    stencilFaceDescriptor.passOp = wgpu::StencilOperation::Keep;
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = true;
+    state.depthCompare = wgpu::CompareFunction::Less;
+    state.stencilBack = stencilFaceDescriptor;
+    state.stencilFront = stencilFaceDescriptor;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    CheckStencil({{baseState, RGBA8(255, 255, 255, 255), 0.f,
+                   1},  // Triangle to set stencil value to 1. Depth is 0
+                  {state, RGBA8(0, 0, 0, 255), 1.f,
+                   2}},  // Triangle with stencil reference 2 passes the Greater comparison
+                         // function. At depth 1, it fails the Less depth test
+                 2);     // Replace the stencil on stencil pass, depth failure, so it should be 2
+}
+
+// Test that the stencil operation is executed on stencil pass, depth pass
+TEST_P(DepthStencilStateTest, StencilDepthPass) {
+    wgpu::StencilFaceState baseStencilFaceDescriptor;
+    baseStencilFaceDescriptor.compare = wgpu::CompareFunction::Always;
+    baseStencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    baseStencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState baseState;
+    baseState.depthWriteEnabled = true;
+    baseState.depthCompare = wgpu::CompareFunction::Always;
+    baseState.stencilBack = baseStencilFaceDescriptor;
+    baseState.stencilFront = baseStencilFaceDescriptor;
+    baseState.stencilReadMask = 0xff;
+    baseState.stencilWriteMask = 0xff;
+
+    wgpu::StencilFaceState stencilFaceDescriptor;
+    stencilFaceDescriptor.compare = wgpu::CompareFunction::Greater;
+    stencilFaceDescriptor.failOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.depthFailOp = wgpu::StencilOperation::Keep;
+    stencilFaceDescriptor.passOp = wgpu::StencilOperation::Replace;
+    wgpu::DepthStencilState state;
+    state.depthWriteEnabled = true;
+    state.depthCompare = wgpu::CompareFunction::Less;
+    state.stencilBack = stencilFaceDescriptor;
+    state.stencilFront = stencilFaceDescriptor;
+    state.stencilReadMask = 0xff;
+    state.stencilWriteMask = 0xff;
+
+    CheckStencil({{baseState, RGBA8(255, 255, 255, 255), 1.f,
+                   1},  // Triangle to set stencil value to 1. Depth is 0
+                  {state, RGBA8(0, 0, 0, 255), 0.f,
+                   2}},  // Triangle with stencil reference 2 passes the Greater comparison
+                         // function. At depth 0, it pass the Less depth test
+                 2);     // Replace the stencil on stencil pass, depth pass, so it should be 2
+}
+
+// Test that creating a render pipeline works with for all depth and combined formats
+TEST_P(DepthStencilStateTest, CreatePipelineWithAllFormats) {
+    constexpr wgpu::TextureFormat kDepthStencilFormats[] = {
+        wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24Plus,
+        wgpu::TextureFormat::Depth16Unorm,
+    };
+
+    for (wgpu::TextureFormat depthStencilFormat : kDepthStencilFormats) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.EnableDepthStencil(depthStencilFormat);
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+// Test that the front and back stencil states are set correctly (and take frontFace into account)
+TEST_P(DepthStencilStateTest, StencilFrontAndBackFace) {
+    wgpu::DepthStencilState state;
+    state.stencilFront.compare = wgpu::CompareFunction::Always;
+    state.stencilBack.compare = wgpu::CompareFunction::Never;
+
+    // The front facing triangle passes the stencil comparison but the back facing one doesn't.
+    DoTest({{state, RGBA8::kRed, 0.f, 0u, wgpu::FrontFace::CCW}}, RGBA8::kRed, RGBA8::kZero);
+    DoTest({{state, RGBA8::kRed, 0.f, 0u, wgpu::FrontFace::CW}}, RGBA8::kZero, RGBA8::kRed);
+}
+
+// Test that the depth reference of a new render pass is initialized to default value 0
+TEST_P(DepthStencilStateTest, StencilReferenceInitialized) {
+    wgpu::DepthStencilState stencilAlwaysReplaceState;
+    stencilAlwaysReplaceState.stencilFront.compare = wgpu::CompareFunction::Always;
+    stencilAlwaysReplaceState.stencilFront.passOp = wgpu::StencilOperation::Replace;
+    stencilAlwaysReplaceState.stencilBack.compare = wgpu::CompareFunction::Always;
+    stencilAlwaysReplaceState.stencilBack.passOp = wgpu::StencilOperation::Replace;
+
+    wgpu::DepthStencilState stencilEqualKeepState;
+    stencilEqualKeepState.stencilFront.compare = wgpu::CompareFunction::Equal;
+    stencilEqualKeepState.stencilFront.passOp = wgpu::StencilOperation::Keep;
+    stencilEqualKeepState.stencilBack.compare = wgpu::CompareFunction::Equal;
+    stencilEqualKeepState.stencilBack.passOp = wgpu::StencilOperation::Keep;
+
+    // Test that stencil reference is not inherited
+    {
+        // First pass sets the stencil to 0x1, and the second pass tests the stencil
+        // Only set the stencil reference in the first pass, and test that for other pass it should
+        // be default value rather than inherited
+        std::vector<TestSpec> testParams = {
+            {stencilAlwaysReplaceState, RGBA8::kRed, 0.f, 0x1, wgpu::FrontFace::CCW, true},
+            {stencilEqualKeepState, RGBA8::kGreen, 0.f, 0x0, wgpu::FrontFace::CCW, false}};
+
+        // Since the stencil reference is not inherited, second draw won't pass the stencil test
+        DoTest(testParams, RGBA8::kZero, RGBA8::kZero, true);
+    }
+
+    // Test that stencil reference is initialized as zero for new render pass
+    {
+        // First pass sets the stencil to 0x1, the second pass sets the stencil to its default
+        // value, and the third pass tests if the stencil is zero
+        std::vector<TestSpec> testParams = {
+            {stencilAlwaysReplaceState, RGBA8::kRed, 0.f, 0x1, wgpu::FrontFace::CCW, true},
+            {stencilAlwaysReplaceState, RGBA8::kGreen, 0.f, 0x1, wgpu::FrontFace::CCW, false},
+            {stencilEqualKeepState, RGBA8::kBlue, 0.f, 0x0, wgpu::FrontFace::CCW, true}};
+
+        // The third draw should pass the stencil test since the second pass set it to default zero
+        DoTest(testParams, RGBA8::kBlue, RGBA8::kBlue, true);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(DepthStencilStateTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend({"vulkan_use_d32s8"}, {}),
+                      VulkanBackend({}, {"vulkan_use_d32s8"}));
diff --git a/src/dawn/tests/end2end/DestroyTests.cpp b/src/dawn/tests/end2end/DestroyTests.cpp
new file mode 100644
index 0000000..d908c7e
--- /dev/null
+++ b/src/dawn/tests/end2end/DestroyTests.cpp
@@ -0,0 +1,203 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+using ::testing::HasSubstr;
+
+constexpr uint32_t kRTSize = 4;
+
+class DestroyTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+              @stage(vertex)
+              fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                  return pos;
+              })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+              @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                  return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+              })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// The bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.BeginRenderPass(&renderPass.renderPassInfo).End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+
+    wgpu::CommandBuffer CreateTriangleCommandBuffer() {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.Draw(3);
+            pass.End();
+        }
+        wgpu::CommandBuffer commands = encoder.Finish();
+        return commands;
+    }
+};
+
+// Destroy before submit will result in error, and nothing drawn
+TEST_P(DestroyTest, BufferDestroyBeforeSubmit) {
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    vertexBuffer.Destroy();
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 1, 3);
+}
+
+// Destroy after submit will draw successfully
+TEST_P(DestroyTest, BufferDestroyAfterSubmit) {
+    RGBA8 filled(0, 255, 0, 255);
+
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    vertexBuffer.Destroy();
+}
+
+// First submit succeeds, draws triangle, second submit fails
+// after destroy is called on the buffer, pixel does not change
+TEST_P(DestroyTest, BufferSubmitDestroySubmit) {
+    RGBA8 filled(0, 255, 0, 255);
+
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+
+    vertexBuffer.Destroy();
+
+    // Submit fails because vertex buffer was destroyed
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+    // Pixel stays the same
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+}
+
+// Destroy texture before submit should fail submit
+TEST_P(DestroyTest, TextureDestroyBeforeSubmit) {
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    renderPass.color.Destroy();
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Destroy after submit will draw successfully
+TEST_P(DestroyTest, TextureDestroyAfterSubmit) {
+    RGBA8 filled(0, 255, 0, 255);
+
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    renderPass.color.Destroy();
+}
+
+// First submit succeeds, draws triangle, second submit fails
+// after destroy is called on the texture
+TEST_P(DestroyTest, TextureSubmitDestroySubmit) {
+    RGBA8 filled(0, 255, 0, 255);
+
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+
+    renderPass.color.Destroy();
+
+    // Submit fails because texture was destroyed
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Attempting to set an object label after it has been destroyed should not cause an error.
+TEST_P(DestroyTest, DestroyThenSetLabel) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    std::string label = "test";
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    buffer.Destroy();
+    buffer.SetLabel(label.c_str());
+}
+
+// Device destroy before buffer submit will result in error.
+TEST_P(DestroyTest, DestroyDeviceBeforeSubmit) {
+    // TODO(crbug.com/dawn/628) Add more comprehensive tests with destroy and backends.
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    wgpu::CommandBuffer commands = CreateTriangleCommandBuffer();
+
+    // Tests normally don't expect a device lost error, but since we are destroying the device, we
+    // actually do, so we need to override the default device lost callback.
+    ExpectDeviceDestruction();
+    device.Destroy();
+    ASSERT_DEVICE_ERROR_MSG(queue.Submit(1, &commands), HasSubstr("[Device] is lost."));
+}
+
+// Regression test for crbug.com/1276928 where a lingering BGL reference in Vulkan with at least one
+// BG instance could cause bad memory reads because members in the BGL whose destuctors expected a
+// live device were not released until after the device was destroyed.
+TEST_P(DestroyTest, DestroyDeviceLingeringBGL) {
+    // Create and hold the layout reference so that its destructor gets called after the device has
+    // been destroyed via device.Destroy().
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+    utils::MakeBindGroup(device, layout, {{0, device.CreateSampler()}});
+
+    // Tests normally don't expect a device lost error, but since we are destroying the device, we
+    // actually do, so we need to override the default device lost callback.
+    ExpectDeviceDestruction();
+    device.Destroy();
+}
+
+DAWN_INSTANTIATE_TEST(DestroyTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DeviceInitializationTests.cpp b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
new file mode 100644
index 0000000..f30aa72
--- /dev/null
+++ b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
@@ -0,0 +1,106 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_proc.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class DeviceInitializationTest : public testing::Test {
+    void SetUp() override {
+        dawnProcSetProcs(&dawn::native::GetProcs());
+    }
+
+    void TearDown() override {
+        dawnProcSetProcs(nullptr);
+    }
+};
+
+// Test that device operations are still valid if the reference to the instance
+// is dropped.
+TEST_F(DeviceInitializationTest, DeviceOutlivesInstance) {
+    // Get properties of all available adapters and then free the instance.
+    // We want to create a device on a fresh instance and adapter each time.
+    std::vector<wgpu::AdapterProperties> availableAdapterProperties;
+    {
+        auto instance = std::make_unique<dawn::native::Instance>();
+        instance->DiscoverDefaultAdapters();
+        for (const dawn::native::Adapter& adapter : instance->GetAdapters()) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            if (properties.backendType == wgpu::BackendType::Null) {
+                continue;
+            }
+            availableAdapterProperties.push_back(properties);
+        }
+    }
+
+    for (const wgpu::AdapterProperties& desiredProperties : availableAdapterProperties) {
+        wgpu::Device device;
+
+        auto instance = std::make_unique<dawn::native::Instance>();
+        instance->DiscoverDefaultAdapters();
+        for (dawn::native::Adapter& adapter : instance->GetAdapters()) {
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+
+            if (properties.deviceID == desiredProperties.deviceID &&
+                properties.vendorID == desiredProperties.vendorID &&
+                properties.adapterType == desiredProperties.adapterType &&
+                properties.backendType == desiredProperties.backendType) {
+                // Create the device, destroy the instance, and break out of the loop.
+                device = wgpu::Device::Acquire(adapter.CreateDevice());
+                instance.reset();
+                break;
+            }
+        }
+
+        // Now, test that the device can still be used by testing a buffer copy.
+        wgpu::Buffer src =
+            utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::CopySrc, {1, 2, 3, 4});
+
+        wgpu::Buffer dst = utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead, {0, 0, 0, 0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(src, 0, dst, 0, 4 * sizeof(uint32_t));
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        device.GetQueue().Submit(1, &commands);
+
+        bool done = false;
+        dst.MapAsync(
+            wgpu::MapMode::Read, 0, 4 * sizeof(uint32_t),
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                EXPECT_EQ(status, WGPUBufferMapAsyncStatus_Success);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done);
+
+        // Note: we can't actually test this if Tick moves over to
+        // wgpuInstanceProcessEvents. We can still test that object creation works
+        // without crashing.
+        while (!done) {
+            device.Tick();
+            utils::USleep(100);
+        }
+
+        const uint32_t* mapping = static_cast<const uint32_t*>(dst.GetConstMappedRange());
+        EXPECT_EQ(mapping[0], 1u);
+        EXPECT_EQ(mapping[1], 2u);
+        EXPECT_EQ(mapping[2], 3u);
+        EXPECT_EQ(mapping[3], 4u);
+    }
+}
diff --git a/src/dawn/tests/end2end/DeviceLostTests.cpp b/src/dawn/tests/end2end/DeviceLostTests.cpp
new file mode 100644
index 0000000..e255a6d
--- /dev/null
+++ b/src/dawn/tests/end2end/DeviceLostTests.cpp
@@ -0,0 +1,523 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <gmock/gmock.h>
+#include "dawn/tests/MockCallback.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cstring>
+
+using namespace testing;
+
+class MockDeviceLostCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata));
+};
+
+static std::unique_ptr<MockDeviceLostCallback> mockDeviceLostCallback;
+static void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
+                                     const char* message,
+                                     void* userdata) {
+    mockDeviceLostCallback->Call(reason, message, userdata);
+    DawnTestBase* self = static_cast<DawnTestBase*>(userdata);
+    self->StartExpectDeviceError();
+}
+
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+static const int fakeUserData = 0;
+
+class DeviceLostTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        mockDeviceLostCallback = std::make_unique<MockDeviceLostCallback>();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
+        // SetDeviceLostCallback will trigger the callback task manager and clean all deferred
+        // callback tasks, so it should be called at the beginning of each test to prevent
+        // unexpectedly triggering callback tasks created during test
+        device.SetDeviceLostCallback(ToMockDeviceLostCallback, this);
+    }
+
+    void TearDown() override {
+        mockDeviceLostCallback = nullptr;
+        mockQueueWorkDoneCallback = nullptr;
+        DawnTest::TearDown();
+    }
+
+    void LoseForTesting() {
+        EXPECT_CALL(*mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+            .Times(1);
+        device.LoseForTesting();
+    }
+
+    static void MapFailCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+        EXPECT_EQ(WGPUBufferMapAsyncStatus_DeviceLost, status);
+        EXPECT_EQ(&fakeUserData, userdata);
+    }
+};
+
+// Test that DeviceLostCallback is invoked when LostForTestimg is called
+TEST_P(DeviceLostTest, DeviceLostCallbackIsCalled) {
+    LoseForTesting();
+}
+
+// Test that submit fails when device is lost
+TEST_P(DeviceLostTest, SubmitFails) {
+    wgpu::CommandBuffer commands;
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    commands = encoder.Finish();
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(queue.Submit(0, &commands));
+}
+
+// Test that CreateBindGroupLayout fails when device is lost
+TEST_P(DeviceLostTest, CreateBindGroupLayoutFails) {
+    LoseForTesting();
+
+    wgpu::BindGroupLayoutEntry entry;
+    entry.binding = 0;
+    entry.visibility = wgpu::ShaderStage::None;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor));
+}
+
+// Test that GetBindGroupLayout fails when device is lost
+TEST_P(DeviceLostTest, GetBindGroupLayoutFails) {
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+        struct UniformBuffer {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> ubo : UniformBuffer;
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(pipeline.GetBindGroupLayout(0).Get());
+}
+
+// Test that CreateBindGroup fails when device is lost
+TEST_P(DeviceLostTest, CreateBindGroupFails) {
+    LoseForTesting();
+
+    wgpu::BindGroupEntry entry;
+    entry.binding = 0;
+    entry.sampler = nullptr;
+    entry.textureView = nullptr;
+    entry.buffer = nullptr;
+    entry.offset = 0;
+    entry.size = 0;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+}
+
+// Test that CreatePipelineLayout fails when device is lost
+TEST_P(DeviceLostTest, CreatePipelineLayoutFails) {
+    LoseForTesting();
+
+    wgpu::PipelineLayoutDescriptor descriptor;
+    descriptor.bindGroupLayoutCount = 0;
+    descriptor.bindGroupLayouts = nullptr;
+    ASSERT_DEVICE_ERROR(device.CreatePipelineLayout(&descriptor));
+}
+
+// Tests that CreateRenderBundleEncoder fails when device is lost
+TEST_P(DeviceLostTest, CreateRenderBundleEncoderFails) {
+    LoseForTesting();
+
+    wgpu::RenderBundleEncoderDescriptor descriptor;
+    descriptor.colorFormatsCount = 0;
+    descriptor.colorFormats = nullptr;
+    ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&descriptor));
+}
+
+// Tests that CreateComputePipeline fails when device is lost
+TEST_P(DeviceLostTest, CreateComputePipelineFails) {
+    LoseForTesting();
+
+    wgpu::ComputePipelineDescriptor descriptor = {};
+    descriptor.layout = nullptr;
+    descriptor.compute.module = nullptr;
+    ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&descriptor));
+}
+
+// Tests that CreateRenderPipeline fails when device is lost
+TEST_P(DeviceLostTest, CreateRenderPipelineFails) {
+    LoseForTesting();
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Tests that CreateSampler fails when device is lost
+TEST_P(DeviceLostTest, CreateSamplerFails) {
+    LoseForTesting();
+
+    ASSERT_DEVICE_ERROR(device.CreateSampler());
+}
+
+// Tests that CreateShaderModule fails when device is lost
+TEST_P(DeviceLostTest, CreateShaderModuleFails) {
+    LoseForTesting();
+
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+            return color;
+        })"));
+}
+
+// Tests that CreateSwapChain fails when device is lost
+TEST_P(DeviceLostTest, CreateSwapChainFails) {
+    LoseForTesting();
+
+    wgpu::SwapChainDescriptor descriptor = {};
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(nullptr, &descriptor));
+}
+
+// Tests that CreateTexture fails when device is lost
+TEST_P(DeviceLostTest, CreateTextureFails) {
+    LoseForTesting();
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size.width = 4;
+    descriptor.size.height = 4;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.mipLevelCount = 1;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that CreateBuffer fails when device is lost
+TEST_P(DeviceLostTest, CreateBufferFails) {
+    LoseForTesting();
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(float);
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc;
+    ASSERT_DEVICE_ERROR(device.CreateBuffer(&bufferDescriptor));
+}
+
+// Test that buffer.MapAsync for writing fails after device is lost
+TEST_P(DeviceLostTest, BufferMapAsyncFailsForWriting) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 4;
+    bufferDescriptor.usage = wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(buffer.MapAsync(wgpu::MapMode::Write, 0, 4, MapFailCallback,
+                                        const_cast<int*>(&fakeUserData)));
+}
+
+// Test that BufferMapAsync for writing calls back with device lost status when device lost after
+// mapping
+TEST_P(DeviceLostTest, BufferMapAsyncBeforeLossFailsForWriting) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 4;
+    bufferDescriptor.usage = wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    buffer.MapAsync(wgpu::MapMode::Write, 0, 4, MapFailCallback, const_cast<int*>(&fakeUserData));
+
+    LoseForTesting();
+}
+
+// Test that buffer.Unmap fails after device is lost
+TEST_P(DeviceLostTest, BufferUnmapFails) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(float);
+    bufferDescriptor.usage = wgpu::BufferUsage::MapWrite;
+    bufferDescriptor.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(buffer.Unmap());
+}
+
+// Test that mappedAtCreation fails after device is lost
+TEST_P(DeviceLostTest, CreateBufferMappedAtCreationFails) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(float);
+    bufferDescriptor.usage = wgpu::BufferUsage::MapWrite;
+    bufferDescriptor.mappedAtCreation = true;
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(device.CreateBuffer(&bufferDescriptor));
+}
+
+// Test that BufferMapAsync for reading fails after device is lost
+TEST_P(DeviceLostTest, BufferMapAsyncFailsForReading) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 4;
+    bufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(buffer.MapAsync(wgpu::MapMode::Read, 0, 4, MapFailCallback,
+                                        const_cast<int*>(&fakeUserData)));
+}
+
+// Test that BufferMapAsync for reading calls back with device lost status when device lost after
+// mapping
+TEST_P(DeviceLostTest, BufferMapAsyncBeforeLossFailsForReading) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(float);
+    bufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    buffer.MapAsync(wgpu::MapMode::Read, 0, 4, MapFailCallback, const_cast<int*>(&fakeUserData));
+
+    LoseForTesting();
+}
+
+// Test that WriteBuffer fails after device is lost
+TEST_P(DeviceLostTest, WriteBufferFails) {
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(float);
+    bufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    LoseForTesting();
+    float data = 12.0f;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 0, &data, sizeof(data)));
+}
+
+// Test it's possible to GetMappedRange on a buffer created mapped after device loss
+TEST_P(DeviceLostTest, GetMappedRange_CreateBufferMappedAtCreationAfterLoss) {
+    LoseForTesting();
+
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::CopySrc;
+    desc.mappedAtCreation = true;
+    ASSERT_DEVICE_ERROR(wgpu::Buffer buffer = device.CreateBuffer(&desc));
+
+    ASSERT_NE(buffer.GetMappedRange(), nullptr);
+}
+
+// Test that device loss doesn't change the result of GetMappedRange, mappedAtCreation version.
+TEST_P(DeviceLostTest, GetMappedRange_CreateBufferMappedAtCreationBeforeLoss) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::CopySrc;
+    desc.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    void* rangeBeforeLoss = buffer.GetMappedRange();
+    LoseForTesting();
+
+    ASSERT_NE(buffer.GetMappedRange(), nullptr);
+    ASSERT_EQ(buffer.GetMappedRange(), rangeBeforeLoss);
+}
+
+// Test that device loss doesn't change the result of GetMappedRange, mapping for reading version.
+TEST_P(DeviceLostTest, GetMappedRange_MapAsyncReading) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    buffer.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+    queue.Submit(0, nullptr);
+
+    const void* rangeBeforeLoss = buffer.GetConstMappedRange();
+    LoseForTesting();
+
+    ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+    ASSERT_EQ(buffer.GetConstMappedRange(), rangeBeforeLoss);
+}
+
+// Test that device loss doesn't change the result of GetMappedRange, mapping for writing version.
+TEST_P(DeviceLostTest, GetMappedRange_MapAsyncWriting) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    buffer.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
+    queue.Submit(0, nullptr);
+
+    const void* rangeBeforeLoss = buffer.GetConstMappedRange();
+    LoseForTesting();
+
+    ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+    ASSERT_EQ(buffer.GetConstMappedRange(), rangeBeforeLoss);
+}
+
+// TODO mapasync read + resolve + loss getmappedrange != nullptr.
+// TODO mapasync write + resolve + loss getmappedrange != nullptr.
+
+// Test that Command Encoder Finish fails when device lost
+TEST_P(DeviceLostTest, CommandEncoderFinishFails) {
+    wgpu::CommandBuffer commands;
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    LoseForTesting();
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test that QueueOnSubmittedWorkDone fails after device is lost.
+TEST_P(DeviceLostTest, QueueOnSubmittedWorkDoneFails) {
+    LoseForTesting();
+
+    // callback should have device lost status
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, nullptr))
+        .Times(1);
+    ASSERT_DEVICE_ERROR(queue.OnSubmittedWorkDone(0, ToMockQueueWorkDone, nullptr));
+}
+
+// Test that QueueOnSubmittedWorkDone when the device is lost after calling OnSubmittedWorkDone
+TEST_P(DeviceLostTest, QueueOnSubmittedWorkDoneBeforeLossFails) {
+    // callback should have device lost status
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, nullptr))
+        .Times(1);
+    queue.OnSubmittedWorkDone(0, ToMockQueueWorkDone, nullptr);
+
+    LoseForTesting();
+}
+
+// Test that LostForTesting can only be called on one time
+TEST_P(DeviceLostTest, LoseForTestingOnce) {
+    // First LoseForTesting call should occur normally. The callback is already set in SetUp.
+    EXPECT_CALL(*mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this)).Times(1);
+    device.LoseForTesting();
+
+    // Second LoseForTesting call should result in no callbacks. The LoseForTesting will return
+    // without doing anything when it sees that device has already been lost.
+    device.SetDeviceLostCallback(ToMockDeviceLostCallback, this);
+    EXPECT_CALL(*mockDeviceLostCallback, Call(_, _, this)).Times(0);
+    device.LoseForTesting();
+}
+
+TEST_P(DeviceLostTest, DeviceLostDoesntCallUncapturedError) {
+    // Set no callback.
+    device.SetDeviceLostCallback(nullptr, nullptr);
+
+    // Set the uncaptured error callback which should not be called on
+    // device lost.
+    MockCallback<WGPUErrorCallback> mockErrorCallback;
+    device.SetUncapturedErrorCallback(mockErrorCallback.Callback(),
+                                      mockErrorCallback.MakeUserdata(nullptr));
+    EXPECT_CALL(mockErrorCallback, Call(_, _, _)).Times(Exactly(0));
+    device.LoseForTesting();
+}
+
+// Test that WGPUCreatePipelineAsyncStatus_DeviceLost can be correctly returned when device is lost
+// before the callback of Create*PipelineAsync() is called.
+TEST_P(DeviceLostTest, DeviceLostBeforeCreatePipelineAsyncCallback) {
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+                       const char* message, void* userdata) {
+        EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_DeviceLost, status);
+    };
+
+    device.CreateComputePipelineAsync(&descriptor, callback, nullptr);
+    LoseForTesting();
+}
+
+// This is a regression test for crbug.com/1212385 where Dawn didn't clean up all
+// references to bind group layouts such that the cache was non-empty at the end
+// of shut down.
+TEST_P(DeviceLostTest, FreeBindGroupAfterDeviceLossWithPendingCommands) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = sizeof(float);
+    bufferDesc.usage = wgpu::BufferUsage::Storage;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(float)}});
+
+    // Advance the pending command serial. We only a need a couple of these to repro the bug,
+    // but include extra so this does not become a change-detecting test if the specific serial
+    // value is sensitive.
+    queue.Submit(0, nullptr);
+    queue.Submit(0, nullptr);
+    queue.Submit(0, nullptr);
+    queue.Submit(0, nullptr);
+    queue.Submit(0, nullptr);
+    queue.Submit(0, nullptr);
+
+    LoseForTesting();
+
+    // Releasing the bing group places the bind group layout into a queue in the Vulkan backend
+    // for recycling of descriptor sets. So, after these release calls there is still one last
+    // reference to the BGL which wouldn't be freed until the pending serial passes.
+    // Since the device is lost, destruction will clean up immediately without waiting for the
+    // serial. The implementation needs to be sure to clear these BGL references. At the end of
+    // Device shut down, we ASSERT that the BGL cache is empty.
+    bgl = nullptr;
+    bg = nullptr;
+}
+
+// Attempting to set an object label after device loss should not cause an error.
+TEST_P(DeviceLostTest, SetLabelAfterDeviceLoss) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    std::string label = "test";
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    LoseForTesting();
+    buffer.SetLabel(label.c_str());
+}
+
+DAWN_INSTANTIATE_TEST(DeviceLostTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      NullBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DrawIndexedIndirectTests.cpp b/src/dawn/tests/end2end/DrawIndexedIndirectTests.cpp
new file mode 100644
index 0000000..7c6e511
--- /dev/null
+++ b/src/dawn/tests/end2end/DrawIndexedIndirectTests.cpp
@@ -0,0 +1,711 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+
+class DrawIndexedIndirectTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
+        descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Uint32;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// First quad: the first 3 vertices represent the bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f,
+             0.0f, 1.0f,
+
+             // Second quad: the first 3 vertices represent the top right triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, -1.0f, -1.0f,
+             0.0f, 1.0f});
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+
+    wgpu::Buffer CreateIndirectBuffer(std::initializer_list<uint32_t> indirectParamList) {
+        return utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage, indirectParamList);
+    }
+
+    wgpu::Buffer CreateIndexBuffer(std::initializer_list<uint32_t> indexList) {
+        return utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, indexList);
+    }
+
+    wgpu::CommandBuffer EncodeDrawCommands(std::initializer_list<uint32_t> bufferList,
+                                           wgpu::Buffer indexBuffer,
+                                           uint64_t indexOffset,
+                                           uint64_t indirectOffset) {
+        wgpu::Buffer indirectBuffer = CreateIndirectBuffer(bufferList);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, indexOffset);
+            pass.DrawIndexedIndirect(indirectBuffer, indirectOffset);
+            pass.End();
+        }
+
+        return encoder.Finish();
+    }
+
+    void TestDraw(wgpu::CommandBuffer commands, RGBA8 bottomLeftExpected, RGBA8 topRightExpected) {
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(bottomLeftExpected, renderPass.color, 1, 3);
+        EXPECT_PIXEL_RGBA8_EQ(topRightExpected, renderPass.color, 3, 1);
+    }
+
+    void Test(std::initializer_list<uint32_t> bufferList,
+              uint64_t indexOffset,
+              uint64_t indirectOffset,
+              RGBA8 bottomLeftExpected,
+              RGBA8 topRightExpected) {
+        wgpu::Buffer indexBuffer =
+            CreateIndexBuffer({0, 1, 2, 0, 3, 1,
+                               // The indices below are added to test negatve baseVertex
+                               0 + 4, 1 + 4, 2 + 4, 0 + 4, 3 + 4, 1 + 4});
+        TestDraw(EncodeDrawCommands(bufferList, indexBuffer, indexOffset, indirectOffset),
+                 bottomLeftExpected, topRightExpected);
+    }
+};
+
+// The most basic DrawIndexed triangle draw.
+TEST_P(DrawIndexedIndirectTest, Uint32) {
+    // TODO(crbug.com/dawn/789): Test is failing after a roll on SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with no indices.
+    Test({0, 0, 0, 0, 0}, 0, 0, notFilled, notFilled);
+
+    // Test a draw with only the first 3 indices of the first quad (bottom left triangle)
+    Test({3, 1, 0, 0, 0}, 0, 0, filled, notFilled);
+
+    // Test a draw with only the last 3 indices of the first quad (top right triangle)
+    Test({3, 1, 3, 0, 0}, 0, 0, notFilled, filled);
+
+    // Test a draw with all 6 indices (both triangles).
+    Test({6, 1, 0, 0, 0}, 0, 0, filled, filled);
+}
+
+// Test the parameter 'baseVertex' of DrawIndexed() works.
+TEST_P(DrawIndexedIndirectTest, BaseVertex) {
+    // TODO(crbug.com/dawn/161): add workaround for OpenGL index buffer offset (could be compute
+    // shader that adds it to the draw calls)
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // TODO(crbug.com/dawn/966): Fails on Metal Intel, likely because @builtin(vertex_index)
+    // doesn't take into account BaseVertex, which breaks programmable vertex pulling.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with only the first 3 indices of the second quad (top right triangle)
+    Test({3, 1, 0, 4, 0}, 0, 0, notFilled, filled);
+
+    // Test a draw with only the last 3 indices of the second quad (bottom left triangle)
+    Test({3, 1, 3, 4, 0}, 0, 0, filled, notFilled);
+
+    const int negFour = -4;
+    uint32_t unsignedNegFour;
+    std::memcpy(&unsignedNegFour, &negFour, sizeof(int));
+
+    // Test negative baseVertex
+    // Test a draw with only the first 3 indices of the first quad (bottom left triangle)
+    Test({3, 1, 0, unsignedNegFour, 0}, 6 * sizeof(uint32_t), 0, filled, notFilled);
+
+    // Test a draw with only the last 3 indices of the first quad (top right triangle)
+    Test({3, 1, 3, unsignedNegFour, 0}, 6 * sizeof(uint32_t), 0, notFilled, filled);
+}
+
+TEST_P(DrawIndexedIndirectTest, IndirectOffset) {
+    // TODO(crbug.com/dawn/789): Test is failing after a roll on SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/966): Fails on Metal Intel, likely because @builtin(vertex_index)
+    // doesn't take into account BaseVertex, which breaks programmable vertex pulling.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test an offset draw call, with indirect buffer containing 2 calls:
+    // 1) first 3 indices of the second quad (top right triangle)
+    // 2) last 3 indices of the second quad
+
+    // Test #1 (no offset)
+    Test({3, 1, 0, 4, 0, 3, 1, 3, 4, 0}, 0, 0, notFilled, filled);
+
+    // Offset to draw #2
+    Test({3, 1, 0, 4, 0, 3, 1, 3, 4, 0}, 0, 5 * sizeof(uint32_t), filled, notFilled);
+}
+
+TEST_P(DrawIndexedIndirectTest, BasicValidation) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1});
+
+    // Test a draw with an excessive indexCount. Should draw nothing.
+    TestDraw(EncodeDrawCommands({7, 1, 0, 0, 0}, indexBuffer, 0, 0), notFilled, notFilled);
+
+    // Test a draw with an excessive firstIndex. Should draw nothing.
+    TestDraw(EncodeDrawCommands({3, 1, 7, 0, 0}, indexBuffer, 0, 0), notFilled, notFilled);
+
+    // Test a valid draw. Should draw only the second triangle.
+    TestDraw(EncodeDrawCommands({3, 1, 3, 0, 0}, indexBuffer, 0, 0), notFilled, filled);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateWithOffsets) {
+    // TODO(crbug.com/dawn/161): The GL/GLES backend doesn't support indirect index buffer offsets
+    // yet.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    // Test that validation properly accounts for index buffer offset.
+    TestDraw(EncodeDrawCommands({3, 1, 0, 0, 0}, indexBuffer, 6 * sizeof(uint32_t), 0), filled,
+             notFilled);
+    TestDraw(EncodeDrawCommands({4, 1, 0, 0, 0}, indexBuffer, 6 * sizeof(uint32_t), 0), notFilled,
+             notFilled);
+    TestDraw(EncodeDrawCommands({3, 1, 4, 0, 0}, indexBuffer, 3 * sizeof(uint32_t), 0), notFilled,
+             notFilled);
+
+    // Test that validation properly accounts for indirect buffer offset.
+    TestDraw(
+        EncodeDrawCommands({3, 1, 0, 0, 0, 1000, 1, 0, 0, 0}, indexBuffer, 0, 4 * sizeof(uint32_t)),
+        notFilled, notFilled);
+    TestDraw(EncodeDrawCommands({3, 1, 0, 0, 0, 1000, 1, 0, 0, 0}, indexBuffer, 0, 0), filled,
+             notFilled);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateMultiplePasses) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    // Test validation with multiple passes in a row. Namely this is exercising that scratch buffer
+    // data for use with a previous pass's validation commands is not overwritten before it can be
+    // used.
+    TestDraw(EncodeDrawCommands({10, 1, 0, 0, 0}, indexBuffer, 0, 0), notFilled, notFilled);
+    TestDraw(EncodeDrawCommands({6, 1, 0, 0, 0}, indexBuffer, 0, 0), filled, filled);
+    TestDraw(EncodeDrawCommands({4, 1, 6, 0, 0}, indexBuffer, 0, 0), notFilled, notFilled);
+    TestDraw(EncodeDrawCommands({3, 1, 6, 0, 0}, indexBuffer, 0, 0), filled, notFilled);
+    TestDraw(EncodeDrawCommands({3, 1, 3, 0, 0}, indexBuffer, 0, 0), notFilled, filled);
+    TestDraw(EncodeDrawCommands({6, 1, 3, 0, 0}, indexBuffer, 0, 0), filled, filled);
+    TestDraw(EncodeDrawCommands({6, 1, 6, 0, 0}, indexBuffer, 0, 0), notFilled, notFilled);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateMultipleDraws) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Validate multiple draw calls using the same index and indirect buffers as input, but with
+    // different indirect offsets.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::Buffer indirectBuffer =
+            CreateIndirectBuffer({3, 1, 3, 0, 0, 10, 1, 0, 0, 0, 3, 1, 6, 0, 0});
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(indirectBuffer, 0);
+        pass.DrawIndexedIndirect(indirectBuffer, 20);
+        pass.DrawIndexedIndirect(indirectBuffer, 40);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+
+    // Validate multiple draw calls using the same indirect buffer but different index buffers as
+    // input.
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::Buffer indirectBuffer =
+            CreateIndirectBuffer({3, 1, 3, 0, 0, 10, 1, 0, 0, 0, 3, 1, 6, 0, 0});
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(indirectBuffer, 0);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 3, 1, 0, 2, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(indirectBuffer, 20);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 2, 1}),
+                            wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(indirectBuffer, 40);
+        pass.End();
+    }
+    commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+
+    // Validate multiple draw calls using the same index buffer but different indirect buffers as
+    // input.
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({3, 1, 3, 0, 0}), 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({10, 1, 0, 0, 0}), 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({3, 1, 6, 0, 0}), 0);
+        pass.End();
+    }
+    commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+
+    // Validate multiple draw calls across different index and indirect buffers.
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({3, 1, 3, 0, 0}), 0);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 1, 2, 0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({10, 1, 0, 0, 0}), 0);
+        pass.SetIndexBuffer(CreateIndexBuffer({0, 3, 1}), wgpu::IndexFormat::Uint32, 0);
+        pass.DrawIndexedIndirect(CreateIndirectBuffer({3, 1, 3, 0, 0}), 0);
+        pass.End();
+    }
+    commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateEncodeMultipleThenSubmitInOrder) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    wgpu::CommandBuffer commands[7];
+    commands[0] = EncodeDrawCommands({10, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[1] = EncodeDrawCommands({6, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[2] = EncodeDrawCommands({4, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[3] = EncodeDrawCommands({3, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[4] = EncodeDrawCommands({3, 1, 3, 0, 0}, indexBuffer, 0, 0);
+    commands[5] = EncodeDrawCommands({6, 1, 3, 0, 0}, indexBuffer, 0, 0);
+    commands[6] = EncodeDrawCommands({6, 1, 6, 0, 0}, indexBuffer, 0, 0);
+
+    TestDraw(commands[0], notFilled, notFilled);
+    TestDraw(commands[1], filled, filled);
+    TestDraw(commands[2], notFilled, notFilled);
+    TestDraw(commands[3], filled, notFilled);
+    TestDraw(commands[4], notFilled, filled);
+    TestDraw(commands[5], filled, filled);
+    TestDraw(commands[6], notFilled, notFilled);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateEncodeMultipleThenSubmitAtOnce) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1124): Fails on Intel+Vulkan+Windows for drivers
+    // older than 27.20.100.8587, which bots are actively using.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsVulkan() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    wgpu::CommandBuffer commands[5];
+    commands[0] = EncodeDrawCommands({10, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[1] = EncodeDrawCommands({6, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[2] = EncodeDrawCommands({4, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[3] = EncodeDrawCommands({3, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[4] = EncodeDrawCommands({3, 1, 3, 0, 0}, indexBuffer, 0, 0);
+
+    queue.Submit(5, commands);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateEncodeMultipleThenSubmitOutOfOrder) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    wgpu::CommandBuffer commands[7];
+    commands[0] = EncodeDrawCommands({10, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[1] = EncodeDrawCommands({6, 1, 0, 0, 0}, indexBuffer, 0, 0);
+    commands[2] = EncodeDrawCommands({4, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[3] = EncodeDrawCommands({3, 1, 6, 0, 0}, indexBuffer, 0, 0);
+    commands[4] = EncodeDrawCommands({3, 1, 3, 0, 0}, indexBuffer, 0, 0);
+    commands[5] = EncodeDrawCommands({6, 1, 3, 0, 0}, indexBuffer, 0, 0);
+    commands[6] = EncodeDrawCommands({6, 1, 6, 0, 0}, indexBuffer, 0, 0);
+
+    TestDraw(commands[6], notFilled, notFilled);
+    TestDraw(commands[5], filled, filled);
+    TestDraw(commands[4], notFilled, filled);
+    TestDraw(commands[3], filled, notFilled);
+    TestDraw(commands[2], notFilled, notFilled);
+    TestDraw(commands[1], filled, filled);
+    TestDraw(commands[0], notFilled, notFilled);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateWithBundlesInSamePass) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indirectBuffer =
+        CreateIndirectBuffer({3, 1, 3, 0, 0, 10, 1, 0, 0, 0, 3, 1, 6, 0, 0});
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    std::vector<wgpu::RenderBundle> bundles;
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        bundleEncoder.SetPipeline(pipeline);
+        bundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        bundleEncoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+        bundleEncoder.DrawIndexedIndirect(indirectBuffer, 20);
+        bundles.push_back(bundleEncoder.Finish());
+    }
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        bundleEncoder.SetPipeline(pipeline);
+        bundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        bundleEncoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+        bundleEncoder.DrawIndexedIndirect(indirectBuffer, 40);
+        bundles.push_back(bundleEncoder.Finish());
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.ExecuteBundles(bundles.size(), bundles.data());
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 3, 1);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateWithBundlesInDifferentPasses) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows only.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indirectBuffer =
+        CreateIndirectBuffer({3, 1, 3, 0, 0, 10, 1, 0, 0, 0, 3, 1, 6, 0, 0});
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1, 0, 1, 2});
+
+    wgpu::CommandBuffer commands[2];
+    {
+        wgpu::RenderBundle bundle;
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        bundleEncoder.SetPipeline(pipeline);
+        bundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        bundleEncoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+        bundleEncoder.DrawIndexedIndirect(indirectBuffer, 20);
+        bundle = bundleEncoder.Finish();
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        renderPass.renderPassInfo.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.ExecuteBundles(1, &bundle);
+        pass.End();
+
+        commands[0] = encoder.Finish();
+    }
+
+    {
+        wgpu::RenderBundle bundle;
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        bundleEncoder.SetPipeline(pipeline);
+        bundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        bundleEncoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+        bundleEncoder.DrawIndexedIndirect(indirectBuffer, 40);
+        bundle = bundleEncoder.Finish();
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        renderPass.renderPassInfo.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.ExecuteBundles(1, &bundle);
+        pass.End();
+
+        commands[1] = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands[1]);
+    queue.Submit(1, &commands[0]);
+
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, 3, 1);
+}
+
+TEST_P(DrawIndexedIndirectTest, ValidateReusedBundleWithChangingParams) {
+    // TODO(crbug.com/dawn/789): Test is failing under SwANGLE on Windows.
+    DAWN_SUPPRESS_TEST_IF(IsANGLE() && IsWindows());
+
+    // TODO(crbug.com/dawn/1124): Fails on Intel+Vulkan+Windows for drivers
+    // older than 27.20.100.8587, which bots are actively using.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsVulkan() && IsWindows());
+
+    // TODO(crbug.com/dawn/1292): Some Intel OpenGL drivers don't seem to like
+    // the offsets that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL() && IsLinux());
+
+    // It doesn't make sense to test invalid inputs when validation is disabled.
+    DAWN_SUPPRESS_TEST_IF(HasToggleEnabled("skip_validation"));
+
+    RGBA8 filled(0, 255, 0, 255);
+    // RGBA8 notFilled(0, 0, 0, 0);
+
+    wgpu::Buffer indirectBuffer = CreateIndirectBuffer({0, 0, 0, 0, 0});
+    wgpu::Buffer indexBuffer = CreateIndexBuffer({0, 1, 2, 0, 3, 1});
+
+    // Encode a single bundle that always uses indirectBuffer offset 0 for its params.
+    wgpu::RenderBundle bundle;
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    bundleEncoder.SetPipeline(pipeline);
+    bundleEncoder.SetVertexBuffer(0, vertexBuffer);
+    bundleEncoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+    bundleEncoder.DrawIndexedIndirect(indirectBuffer, 0);
+    bundle = bundleEncoder.Finish();
+
+    wgpu::ShaderModule paramWriterModule = utils::CreateShaderModule(device,
+                                                                     R"(
+            struct Input { firstIndex: u32 }
+            struct Params {
+                indexCount: u32,
+                instanceCount: u32,
+                firstIndex: u32,
+            }
+            @group(0) @binding(0) var<uniform> input: Input;
+            @group(0) @binding(1) var<storage, write> params: Params;
+            @stage(compute) @workgroup_size(1) fn main() {
+                params.indexCount = 3u;
+                params.instanceCount = 1u;
+                params.firstIndex = input.firstIndex;
+            }
+        )");
+
+    wgpu::ComputePipelineDescriptor computeDesc;
+    computeDesc.compute.module = paramWriterModule;
+    computeDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline computePipeline = device.CreateComputePipeline(&computeDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    auto encodeComputePassToUpdateFirstIndex = [&](uint32_t newFirstIndex) {
+        wgpu::Buffer input = utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::Uniform, {newFirstIndex});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, computePipeline.GetBindGroupLayout(0),
+            {{0, input, 0, sizeof(uint32_t)}, {1, indirectBuffer, 0, 5 * sizeof(uint32_t)}});
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(computePipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+    };
+
+    auto encodeRenderPassToExecuteBundle = [&](wgpu::LoadOp colorLoadOp) {
+        renderPass.renderPassInfo.cColorAttachments[0].loadOp = colorLoadOp;
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.ExecuteBundles(1, &bundle);
+        pass.End();
+    };
+
+    encodeComputePassToUpdateFirstIndex(0);
+    encodeRenderPassToExecuteBundle(wgpu::LoadOp::Clear);
+    encodeComputePassToUpdateFirstIndex(3);
+    encodeRenderPassToExecuteBundle(wgpu::LoadOp::Load);
+    encodeComputePassToUpdateFirstIndex(6);
+    encodeRenderPassToExecuteBundle(wgpu::LoadOp::Load);
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 3, 1);
+}
+
+DAWN_INSTANTIATE_TEST(DrawIndexedIndirectTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DrawIndexedTests.cpp b/src/dawn/tests/end2end/DrawIndexedTests.cpp
new file mode 100644
index 0000000..c28b297
--- /dev/null
+++ b/src/dawn/tests/end2end/DrawIndexedTests.cpp
@@ -0,0 +1,163 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+
+class DrawIndexedTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
+        descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Uint32;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// First quad: the first 3 vertices represent the bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f,
+             0.0f, 1.0f,
+
+             // Second quad: the first 3 vertices represent the top right triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, -1.0f, -1.0f,
+             0.0f, 1.0f});
+        indexBuffer = utils::CreateBufferFromData<uint32_t>(
+            device, wgpu::BufferUsage::Index,
+            {0, 1, 2, 0, 3, 1,
+             // The indices below are added to test negatve baseVertex
+             0 + 4, 1 + 4, 2 + 4, 0 + 4, 3 + 4, 1 + 4});
+        zeroSizedIndexBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {});
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+    wgpu::Buffer indexBuffer;
+    wgpu::Buffer zeroSizedIndexBuffer;
+
+    void Test(uint32_t indexCount,
+              uint32_t instanceCount,
+              uint32_t firstIndex,
+              int32_t baseVertex,
+              uint32_t firstInstance,
+              uint64_t bufferOffset,
+              RGBA8 bottomLeftExpected,
+              RGBA8 topRightExpected) {
+        // Regular draw with a reasonable index buffer
+        TestImplementation(indexCount, instanceCount, firstIndex, baseVertex, firstInstance,
+                           bufferOffset, indexBuffer, bottomLeftExpected, topRightExpected);
+    }
+
+    void TestZeroSizedIndexBufferDraw(uint32_t indexCount,
+                                      uint32_t firstIndex,
+                                      RGBA8 bottomLeftExpected,
+                                      RGBA8 topRightExpected) {
+        TestImplementation(indexCount, 1, firstIndex, 0, 0, 0, zeroSizedIndexBuffer,
+                           bottomLeftExpected, topRightExpected);
+    }
+
+    void TestImplementation(uint32_t indexCount,
+                            uint32_t instanceCount,
+                            uint32_t firstIndex,
+                            int32_t baseVertex,
+                            uint32_t firstInstance,
+                            uint64_t bufferOffset,
+                            const wgpu::Buffer& curIndexBuffer,
+                            RGBA8 bottomLeftExpected,
+                            RGBA8 topRightExpected) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.SetIndexBuffer(curIndexBuffer, wgpu::IndexFormat::Uint32, bufferOffset);
+            pass.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(bottomLeftExpected, renderPass.color, 1, 3);
+        EXPECT_PIXEL_RGBA8_EQ(topRightExpected, renderPass.color, 3, 1);
+    }
+};
+
+// The most basic DrawIndexed triangle draw.
+TEST_P(DrawIndexedTest, Uint32) {
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with no indices.
+    Test(0, 0, 0, 0, 0, 0, notFilled, notFilled);
+    // Test a draw with only the first 3 indices of the first quad (bottom left triangle)
+    Test(3, 1, 0, 0, 0, 0, filled, notFilled);
+    // Test a draw with only the last 3 indices of the first quad (top right triangle)
+    Test(3, 1, 3, 0, 0, 0, notFilled, filled);
+    // Test a draw with all 6 indices (both triangles).
+    Test(6, 1, 0, 0, 0, 0, filled, filled);
+}
+
+// Test the parameter 'baseVertex' of DrawIndexed() works.
+TEST_P(DrawIndexedTest, BaseVertex) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_base_vertex"));
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with only the first 3 indices of the second quad (top right triangle)
+    Test(3, 1, 0, 4, 0, 0, notFilled, filled);
+    // Test a draw with only the last 3 indices of the second quad (bottom left triangle)
+    Test(3, 1, 3, 4, 0, 0, filled, notFilled);
+
+    // Test negative baseVertex
+    // Test a draw with only the first 3 indices of the first quad (bottom left triangle)
+    Test(3, 1, 0, -4, 0, 6 * sizeof(uint32_t), filled, notFilled);
+    // Test a draw with only the last 3 indices of the first quad (top right triangle)
+    Test(3, 1, 3, -4, 0, 6 * sizeof(uint32_t), notFilled, filled);
+}
+
+DAWN_INSTANTIATE_TEST(DrawIndexedTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DrawIndirectTests.cpp b/src/dawn/tests/end2end/DrawIndirectTests.cpp
new file mode 100644
index 0000000..e878281
--- /dev/null
+++ b/src/dawn/tests/end2end/DrawIndirectTests.cpp
@@ -0,0 +1,128 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+
+class DrawIndirectTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
+        descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Uint32;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// The bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f,
+
+             // The top right triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+
+    void Test(std::initializer_list<uint32_t> bufferList,
+              uint64_t indirectOffset,
+              RGBA8 bottomLeftExpected,
+              RGBA8 topRightExpected) {
+        wgpu::Buffer indirectBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, bufferList);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.DrawIndirect(indirectBuffer, indirectOffset);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(bottomLeftExpected, renderPass.color, 1, 3);
+        EXPECT_PIXEL_RGBA8_EQ(topRightExpected, renderPass.color, 3, 1);
+    }
+};
+
+// The basic triangle draw.
+TEST_P(DrawIndirectTest, Uint32) {
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with no indices.
+    Test({0, 0, 0, 0}, 0, notFilled, notFilled);
+
+    // Test a draw with only the first 3 indices (bottom left triangle)
+    Test({3, 1, 0, 0}, 0, filled, notFilled);
+
+    // Test a draw with only the last 3 indices (top right triangle)
+    Test({3, 1, 3, 0}, 0, notFilled, filled);
+
+    // Test a draw with all 6 indices (both triangles).
+    Test({6, 1, 0, 0}, 0, filled, filled);
+}
+
+TEST_P(DrawIndirectTest, IndirectOffset) {
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test an offset draw call, with indirect buffer containing 2 calls:
+    // 1) only the first 3 indices (bottom left triangle)
+    // 2) only the last 3 indices (top right triangle)
+
+    // Test #1 (no offset)
+    Test({3, 1, 0, 0, 3, 1, 3, 0}, 0, filled, notFilled);
+
+    // Offset to draw #2
+    Test({3, 1, 0, 0, 3, 1, 3, 0}, 4 * sizeof(uint32_t), notFilled, filled);
+}
+
+DAWN_INSTANTIATE_TEST(DrawIndirectTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DrawTests.cpp b/src/dawn/tests/end2end/DrawTests.cpp
new file mode 100644
index 0000000..55e86ce
--- /dev/null
+++ b/src/dawn/tests/end2end/DrawTests.cpp
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+
+class DrawTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// The bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f,
+
+             // The top right triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+
+    void Test(uint32_t vertexCount,
+              uint32_t instanceCount,
+              uint32_t firstIndex,
+              uint32_t firstInstance,
+              RGBA8 bottomLeftExpected,
+              RGBA8 topRightExpected) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.Draw(vertexCount, instanceCount, firstIndex, firstInstance);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(bottomLeftExpected, renderPass.color, 1, 3);
+        EXPECT_PIXEL_RGBA8_EQ(topRightExpected, renderPass.color, 3, 1);
+    }
+};
+
+// The basic triangle draw.
+TEST_P(DrawTest, Uint32) {
+    RGBA8 filled(0, 255, 0, 255);
+    RGBA8 notFilled(0, 0, 0, 0);
+
+    // Test a draw with no indices.
+    Test(0, 0, 0, 0, notFilled, notFilled);
+    // Test a draw with only the first 3 indices (bottom left triangle)
+    Test(3, 1, 0, 0, filled, notFilled);
+    // Test a draw with only the last 3 indices (top right triangle)
+    Test(3, 1, 3, 0, notFilled, filled);
+    // Test a draw with all 6 indices (both triangles).
+    Test(6, 1, 0, 0, filled, filled);
+}
+
+DAWN_INSTANTIATE_TEST(DrawTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
new file mode 100644
index 0000000..3c11b17
--- /dev/null
+++ b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
@@ -0,0 +1,586 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <numeric>
+
+constexpr uint32_t kRTSize = 400;
+constexpr uint32_t kBindingSize = 8;
+
+class DynamicBufferOffsetTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        mMinUniformBufferOffsetAlignment =
+            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+
+        // Mix up dynamic and non dynamic resources in one bind group and using not continuous
+        // binding number to cover more cases.
+        std::vector<uint32_t> uniformData(mMinUniformBufferOffsetAlignment / sizeof(uint32_t) + 2);
+        uniformData[0] = 1;
+        uniformData[1] = 2;
+
+        mUniformBuffers[0] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
+                                                         wgpu::BufferUsage::Uniform);
+
+        uniformData[uniformData.size() - 2] = 5;
+        uniformData[uniformData.size() - 1] = 6;
+
+        // Dynamic uniform buffer
+        mUniformBuffers[1] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
+                                                         wgpu::BufferUsage::Uniform);
+
+        wgpu::BufferDescriptor storageBufferDescriptor;
+        storageBufferDescriptor.size = sizeof(uint32_t) * uniformData.size();
+        storageBufferDescriptor.usage =
+            wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+        mStorageBuffers[0] = device.CreateBuffer(&storageBufferDescriptor);
+
+        // Dynamic storage buffer
+        mStorageBuffers[1] = device.CreateBuffer(&storageBufferDescriptor);
+
+        // Default bind group layout
+        mBindGroupLayouts[0] = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform},
+                     {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Storage},
+                     {3, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform, true},
+                     {4, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Storage, true}});
+
+        // Default bind group
+        mBindGroups[0] = utils::MakeBindGroup(device, mBindGroupLayouts[0],
+                                              {{0, mUniformBuffers[0], 0, kBindingSize},
+                                               {1, mStorageBuffers[0], 0, kBindingSize},
+                                               {3, mUniformBuffers[1], 0, kBindingSize},
+                                               {4, mStorageBuffers[1], 0, kBindingSize}});
+
+        // Extra uniform buffer for inheriting test
+        mUniformBuffers[2] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
+                                                         wgpu::BufferUsage::Uniform);
+
+        // Bind group layout for inheriting test
+        mBindGroupLayouts[1] = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform}});
+
+        // Bind group for inheriting test
+        mBindGroups[1] = utils::MakeBindGroup(device, mBindGroupLayouts[1],
+                                              {{0, mUniformBuffers[2], 0, kBindingSize}});
+    }
+    // Create objects to use as resources inside test bind groups.
+
+    uint32_t mMinUniformBufferOffsetAlignment;
+    wgpu::BindGroup mBindGroups[2];
+    wgpu::BindGroupLayout mBindGroupLayouts[2];
+    wgpu::Buffer mUniformBuffers[3];
+    wgpu::Buffer mStorageBuffers[2];
+    wgpu::Texture mColorAttachment;
+
+    wgpu::RenderPipeline CreateRenderPipeline(bool isInheritedPipeline = false) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0, 0.0),
+                    vec2<f32>(-1.0, 1.0),
+                    vec2<f32>( 0.0, 1.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        // Construct fragment shader source
+        std::ostringstream fs;
+        std::string multipleNumber = isInheritedPipeline ? "2" : "1";
+        fs << R"(
+            struct Buf {
+                value : vec2<u32>
+            }
+
+            @group(0) @binding(0) var<uniform> uBufferNotDynamic : Buf;
+            @group(0) @binding(1) var<storage, read_write> sBufferNotDynamic : Buf;
+            @group(0) @binding(3) var<uniform> uBuffer : Buf;
+            @group(0) @binding(4) var<storage, read_write> sBuffer : Buf;
+        )";
+
+        if (isInheritedPipeline) {
+            fs << R"(
+                @group(1) @binding(0) var<uniform> paddingBlock : Buf;
+            )";
+        }
+
+        fs << "let multipleNumber : u32 = " << multipleNumber << "u;\n";
+        fs << R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                sBufferNotDynamic.value = uBufferNotDynamic.value.xy;
+                sBuffer.value = vec2<u32>(multipleNumber, multipleNumber) * (uBuffer.value.xy + uBufferNotDynamic.value.xy);
+                return vec4<f32>(f32(uBuffer.value.x) / 255.0, f32(uBuffer.value.y) / 255.0,
+                                      1.0, 1.0);
+            }
+        )";
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fs.str().c_str());
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor;
+        if (isInheritedPipeline) {
+            pipelineLayoutDescriptor.bindGroupLayoutCount = 2;
+        } else {
+            pipelineLayoutDescriptor.bindGroupLayoutCount = 1;
+        }
+        pipelineLayoutDescriptor.bindGroupLayouts = mBindGroupLayouts;
+        pipelineDescriptor.layout = device.CreatePipelineLayout(&pipelineLayoutDescriptor);
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::ComputePipeline CreateComputePipeline(bool isInheritedPipeline = false) {
+        // Construct compute shader source
+        std::ostringstream cs;
+        std::string multipleNumber = isInheritedPipeline ? "2" : "1";
+        cs << R"(
+            struct Buf {
+                value : vec2<u32>
+            }
+
+            @group(0) @binding(0) var<uniform> uBufferNotDynamic : Buf;
+            @group(0) @binding(1) var<storage, read_write> sBufferNotDynamic : Buf;
+            @group(0) @binding(3) var<uniform> uBuffer : Buf;
+            @group(0) @binding(4) var<storage, read_write> sBuffer : Buf;
+        )";
+
+        if (isInheritedPipeline) {
+            cs << R"(
+                @group(1) @binding(0) var<uniform> paddingBlock : Buf;
+            )";
+        }
+
+        cs << "let multipleNumber : u32 = " << multipleNumber << "u;\n";
+        cs << R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+                sBufferNotDynamic.value = uBufferNotDynamic.value.xy;
+                sBuffer.value = vec2<u32>(multipleNumber, multipleNumber) * (uBuffer.value.xy + uBufferNotDynamic.value.xy);
+            }
+        )";
+
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, cs.str().c_str());
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = csModule;
+        csDesc.compute.entryPoint = "main";
+
+        wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor;
+        if (isInheritedPipeline) {
+            pipelineLayoutDescriptor.bindGroupLayoutCount = 2;
+        } else {
+            pipelineLayoutDescriptor.bindGroupLayoutCount = 1;
+        }
+        pipelineLayoutDescriptor.bindGroupLayouts = mBindGroupLayouts;
+        csDesc.layout = device.CreatePipelineLayout(&pipelineLayoutDescriptor);
+
+        return device.CreateComputePipeline(&csDesc);
+    }
+};
+
+// Dynamic offsets are all zero and no effect to result.
+TEST_P(DynamicBufferOffsetTests, BasicRenderPipeline) {
+    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    std::array<uint32_t, 2> offsets = {0, 0};
+    wgpu::RenderPassEncoder renderPassEncoder =
+        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+    renderPassEncoder.SetPipeline(pipeline);
+    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {2, 4};
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 255, 255), renderPass.color, 0, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
+}
+
+// Have non-zero dynamic offsets.
+TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsRenderPipeline) {
+    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+    wgpu::RenderPassEncoder renderPassEncoder =
+        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+    renderPassEncoder.SetPipeline(pipeline);
+    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {6, 8};
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
+}
+
+// Dynamic offsets are all zero and no effect to result.
+TEST_P(DynamicBufferOffsetTests, BasicComputePipeline) {
+    wgpu::ComputePipeline pipeline = CreateComputePipeline();
+
+    std::array<uint32_t, 2> offsets = {0, 0};
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {2, 4};
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
+}
+
+// Have non-zero dynamic offsets.
+TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsComputePipeline) {
+    wgpu::ComputePipeline pipeline = CreateComputePipeline();
+
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {6, 8};
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
+}
+
+// Test inherit dynamic offsets on render pipeline
+TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsRenderPipeline) {
+    // Using default pipeline and setting dynamic offsets
+    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
+    wgpu::RenderPipeline testPipeline = CreateRenderPipeline(true);
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+    wgpu::RenderPassEncoder renderPassEncoder =
+        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+    renderPassEncoder.SetPipeline(pipeline);
+    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.SetPipeline(testPipeline);
+    renderPassEncoder.SetBindGroup(1, mBindGroups[1]);
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {12, 16};
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
+}
+
+// Test inherit dynamic offsets on compute pipeline
+// TODO(shaobo.yan@intel.com) : Try this test on GTX1080 and cannot reproduce the failure.
+// Suspect it is due to dawn doesn't handle sync between two dispatch and disable this case.
+// Will double check root cause after got GTX1660.
+TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsComputePipeline) {
+    DAWN_SUPPRESS_TEST_IF(IsWindows());
+    wgpu::ComputePipeline pipeline = CreateComputePipeline();
+    wgpu::ComputePipeline testPipeline = CreateComputePipeline(true);
+
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.SetPipeline(testPipeline);
+    computePassEncoder.SetBindGroup(1, mBindGroups[1]);
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {12, 16};
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
+}
+
+// Setting multiple dynamic offsets for the same bindgroup in one render pass.
+TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesRenderPipeline) {
+    // Using default pipeline and setting dynamic offsets
+    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> testOffsets = {0, 0};
+
+    wgpu::RenderPassEncoder renderPassEncoder =
+        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
+    renderPassEncoder.SetPipeline(pipeline);
+    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.SetBindGroup(0, mBindGroups[0], testOffsets.size(), testOffsets.data());
+    renderPassEncoder.Draw(3);
+    renderPassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {2, 4};
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 255, 255), renderPass.color, 0, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
+}
+
+// Setting multiple dynamic offsets for the same bindgroup in one compute pass.
+TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesComputePipeline) {
+    wgpu::ComputePipeline pipeline = CreateComputePipeline();
+
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> testOffsets = {0, 0};
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.SetBindGroup(0, mBindGroups[0], testOffsets.size(), testOffsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint32_t> expectedData = {2, 4};
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
+}
+
+namespace {
+    using ReadBufferUsage = wgpu::BufferUsage;
+    using OOBRead = bool;
+    using OOBWrite = bool;
+
+    DAWN_TEST_PARAM_STRUCT(ClampedOOBDynamicBufferOffsetParams, ReadBufferUsage, OOBRead, OOBWrite);
+}  // anonymous namespace
+
+class ClampedOOBDynamicBufferOffsetTests
+    : public DawnTestWithParams<ClampedOOBDynamicBufferOffsetParams> {};
+
+// Test robust buffer access behavior for out of bounds accesses to dynamic buffer bindings.
+TEST_P(ClampedOOBDynamicBufferOffsetTests, CheckOOBAccess) {
+    static constexpr uint32_t kArrayLength = 10u;
+
+    // Out-of-bounds access will start halfway into the array and index off the end.
+    static constexpr uint32_t kOOBOffset = kArrayLength / 2;
+
+    wgpu::BufferBindingType sourceBindingType;
+    switch (GetParam().mReadBufferUsage) {
+        case wgpu::BufferUsage::Uniform:
+            sourceBindingType = wgpu::BufferBindingType::Uniform;
+            break;
+        case wgpu::BufferUsage::Storage:
+            sourceBindingType = wgpu::BufferBindingType::ReadOnlyStorage;
+            break;
+        default:
+            UNREACHABLE();
+    }
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, sourceBindingType, true},
+                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage, true}});
+    wgpu::PipelineLayout layout = utils::MakeBasicPipelineLayout(device, &bgl);
+
+    wgpu::ComputePipeline pipeline;
+    {
+        std::ostringstream shader;
+        shader << "let kArrayLength: u32 = " << kArrayLength << "u;\n";
+        if (GetParam().mOOBRead) {
+            shader << "let kReadOffset: u32 = " << kOOBOffset << "u;\n";
+        } else {
+            shader << "let kReadOffset: u32 = 0u;\n";
+        }
+
+        if (GetParam().mOOBWrite) {
+            shader << "let kWriteOffset: u32 = " << kOOBOffset << "u;\n";
+        } else {
+            shader << "let kWriteOffset: u32 = 0u;\n";
+        }
+        switch (GetParam().mReadBufferUsage) {
+            case wgpu::BufferUsage::Uniform:
+                shader << R"(
+                    struct Src {
+                        values : array<vec4<u32>, kArrayLength>
+                    }
+                    @group(0) @binding(0) var<uniform> src : Src;
+                )";
+                break;
+            case wgpu::BufferUsage::Storage:
+                shader << R"(
+                    struct Src {
+                        values : array<vec4<u32>>
+                    }
+                    @group(0) @binding(0) var<storage, read> src : Src;
+                )";
+                break;
+            default:
+                UNREACHABLE();
+        }
+
+        shader << R"(
+            struct Dst {
+                values : array<vec4<u32>>
+            }
+            @group(0) @binding(1) var<storage, read_write> dst : Dst;
+        )";
+        shader << R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+                for (var i: u32 = 0u; i < kArrayLength; i = i + 1u) {
+                    dst.values[i + kWriteOffset] = src.values[i + kReadOffset];
+                }
+            }
+        )";
+        wgpu::ComputePipelineDescriptor pipelineDesc;
+        pipelineDesc.layout = layout;
+        pipelineDesc.compute.module = utils::CreateShaderModule(device, shader.str().c_str());
+        pipelineDesc.compute.entryPoint = "main";
+        pipeline = device.CreateComputePipeline(&pipelineDesc);
+    }
+
+    uint32_t minUniformBufferOffsetAlignment =
+        GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+    uint32_t minStorageBufferOffsetAlignment =
+        GetSupportedLimits().limits.minStorageBufferOffsetAlignment;
+
+    uint32_t arrayByteLength = kArrayLength * 4 * sizeof(uint32_t);
+
+    uint32_t uniformBufferOffset = Align(arrayByteLength, minUniformBufferOffsetAlignment);
+    uint32_t storageBufferOffset = Align(arrayByteLength, minStorageBufferOffsetAlignment);
+
+    // Enough space to bind at a dynamic offset.
+    uint32_t uniformBufferSize = uniformBufferOffset + arrayByteLength;
+    uint32_t storageBufferSize = storageBufferOffset + arrayByteLength;
+
+    // Buffers are padded so we can check that bytes after the bound range are not changed.
+    static constexpr uint32_t kEndPadding = 16;
+
+    uint64_t srcBufferSize;
+    uint32_t srcBufferByteOffset;
+    uint32_t dstBufferByteOffset = storageBufferOffset;
+    uint64_t dstBufferSize = storageBufferSize + kEndPadding;
+    switch (GetParam().mReadBufferUsage) {
+        case wgpu::BufferUsage::Uniform:
+            srcBufferSize = uniformBufferSize + kEndPadding;
+            srcBufferByteOffset = uniformBufferOffset;
+            break;
+        case wgpu::BufferUsage::Storage:
+            srcBufferSize = storageBufferSize + kEndPadding;
+            srcBufferByteOffset = storageBufferOffset;
+            break;
+        default:
+            UNREACHABLE();
+    }
+
+    std::vector<uint32_t> srcData(srcBufferSize / sizeof(uint32_t));
+    std::vector<uint32_t> expectedDst(dstBufferSize / sizeof(uint32_t));
+
+    // Fill the src buffer with 0, 1, 2, ...
+    std::iota(srcData.begin(), srcData.end(), 0);
+    wgpu::Buffer src = utils::CreateBufferFromData(device, &srcData[0], srcBufferSize,
+                                                   GetParam().mReadBufferUsage);
+
+    // Fill the dst buffer with 0xFF.
+    memset(expectedDst.data(), 0xFF, dstBufferSize);
+    wgpu::Buffer dst =
+        utils::CreateBufferFromData(device, &expectedDst[0], dstBufferSize,
+                                    wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+
+    // Produce expected data assuming the implementation performs clamping.
+    for (uint32_t i = 0; i < kArrayLength; ++i) {
+        uint32_t readIndex = GetParam().mOOBRead ? std::min(kOOBOffset + i, kArrayLength - 1) : i;
+        uint32_t writeIndex = GetParam().mOOBWrite ? std::min(kOOBOffset + i, kArrayLength - 1) : i;
+
+        for (uint32_t c = 0; c < 4; ++c) {
+            uint32_t value = srcData[srcBufferByteOffset / 4 + 4 * readIndex + c];
+            expectedDst[dstBufferByteOffset / 4 + 4 * writeIndex + c] = value;
+        }
+    }
+
+    std::array<uint32_t, 2> dynamicOffsets = {srcBufferByteOffset, dstBufferByteOffset};
+
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl,
+                                                     {
+                                                         {0, src, 0, arrayByteLength},
+                                                         {1, dst, 0, arrayByteLength},
+                                                     });
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+    computePassEncoder.SetPipeline(pipeline);
+    computePassEncoder.SetBindGroup(0, bindGroup, dynamicOffsets.size(), dynamicOffsets.data());
+    computePassEncoder.Dispatch(1);
+    computePassEncoder.End();
+    wgpu::CommandBuffer commands = commandEncoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedDst.data(), dst, 0, dstBufferSize / sizeof(uint32_t));
+}
+
+DAWN_INSTANTIATE_TEST(DynamicBufferOffsetTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// Only instantiate on D3D12 / Metal where we are sure of the robustness implementation.
+// Tint injects clamping in the shader. OpenGL(ES) / Vulkan robustness is less constrained.
+DAWN_INSTANTIATE_TEST_P(ClampedOOBDynamicBufferOffsetTests,
+                        {D3D12Backend(), MetalBackend()},
+                        {wgpu::BufferUsage::Uniform, wgpu::BufferUsage::Storage},
+                        {false, true},
+                        {false, true});
diff --git a/src/dawn/tests/end2end/EntryPointTests.cpp b/src/dawn/tests/end2end/EntryPointTests.cpp
new file mode 100644
index 0000000..2503db2
--- /dev/null
+++ b/src/dawn/tests/end2end/EntryPointTests.cpp
@@ -0,0 +1,150 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class EntryPointTests : public DawnTest {};
+
+// Test creating a render pipeline from two entryPoints in the same module.
+TEST_P(EntryPointTests, FragAndVertexSameModule) {
+    // TODO(crbug.com/dawn/658): Crashes on bots
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn vertex_main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        }
+
+        @stage(fragment) fn fragment_main() -> @location(0) vec4<f32> {
+          return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        }
+    )");
+
+    // Create a point pipeline from the module.
+    utils::ComboRenderPipelineDescriptor desc;
+    desc.vertex.module = module;
+    desc.vertex.entryPoint = "vertex_main";
+    desc.cFragment.module = module;
+    desc.cFragment.entryPoint = "fragment_main";
+    desc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    desc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+
+    // Render the point and check that it was rendered.
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.Draw(1);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Test creating two compute pipelines from the same module.
+TEST_P(EntryPointTests, TwoComputeInModule) {
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Storage;
+    binding.visibility = wgpu::ShaderStage::Compute;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&desc);
+
+    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+    pipelineLayoutDesc.bindGroupLayoutCount = 1;
+    pipelineLayoutDesc.bindGroupLayouts = &bindGroupLayout;
+
+    wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Data {
+            data : u32
+        }
+        @binding(0) @group(0) var<storage, read_write> data : Data;
+
+        @stage(compute) @workgroup_size(1) fn write1() {
+            data.data = 1u;
+            return;
+        }
+
+        @stage(compute) @workgroup_size(1) fn write42() {
+            data.data = 42u;
+            return;
+        }
+    )");
+
+    // Create both pipelines from the module.
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.layout = pipelineLayout;
+    pipelineDesc.compute.module = module;
+
+    pipelineDesc.compute.entryPoint = "write1";
+    wgpu::ComputePipeline write1 = device.CreateComputePipeline(&pipelineDesc);
+
+    pipelineDesc.compute.entryPoint = "write42";
+    wgpu::ComputePipeline write42 = device.CreateComputePipeline(&pipelineDesc);
+
+    // Create the bindGroup.
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 4;
+    bufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    wgpu::BindGroup group = utils::MakeBindGroup(device, bindGroupLayout, {{0, buffer}});
+
+    // Use the first pipeline and check it wrote 1.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(write1);
+        pass.SetBindGroup(0, group);
+        pass.Dispatch(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER_U32_EQ(1, buffer, 0);
+    }
+
+    // Use the second pipeline and check it wrote 42.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(write42);
+        pass.SetBindGroup(0, group);
+        pass.Dispatch(42);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER_U32_EQ(42, buffer, 0);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(EntryPointTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ExternalTextureTests.cpp b/src/dawn/tests/end2end/ExternalTextureTests.cpp
new file mode 100644
index 0000000..5843764
--- /dev/null
+++ b/src/dawn/tests/end2end/ExternalTextureTests.cpp
@@ -0,0 +1,261 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    wgpu::Texture Create2DTexture(wgpu::Device device,
+                                  uint32_t width,
+                                  uint32_t height,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    class ExternalTextureTests : public DawnTest {
+      protected:
+        static constexpr uint32_t kWidth = 4;
+        static constexpr uint32_t kHeight = 4;
+        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+        static constexpr wgpu::TextureUsage kSampledUsage = wgpu::TextureUsage::TextureBinding;
+    };
+}  // anonymous namespace
+
+TEST_P(ExternalTextureTests, CreateExternalTextureSuccess) {
+    wgpu::Texture texture = Create2DTexture(device, kWidth, kHeight, kFormat, kSampledUsage);
+
+    // Create a texture view for the external texture
+    wgpu::TextureView view = texture.CreateView();
+
+    // Create an ExternalTextureDescriptor from the texture view
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = view;
+
+    // Import the external texture
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    ASSERT_NE(externalTexture.Get(), nullptr);
+}
+
+TEST_P(ExternalTextureTests, SampleExternalTexture) {
+    // TODO(crbug.com/dawn/1263): SPIR-V has an issue compiling the output from Tint's external
+    // texture transform. Re-enable this test for OpenGL when the switch to Tint is complete.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    const wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var positions = array<vec4<f32>, 3>(
+                vec4<f32>(-1.0, 1.0, 0.0, 1.0),
+                vec4<f32>(-1.0, -1.0, 0.0, 1.0),
+                vec4<f32>(1.0, 1.0, 0.0, 1.0)
+            );
+            return positions[VertexIndex];
+        })");
+
+    const wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var s : sampler;
+        @group(0) @binding(1) var t : texture_external;
+
+        @stage(fragment) fn main(@builtin(position) FragCoord : vec4<f32>)
+                                 -> @location(0) vec4<f32> {
+            return textureSampleLevel(t, s, FragCoord.xy / vec2<f32>(4.0, 4.0));
+        })");
+
+    wgpu::Texture sampledTexture =
+        Create2DTexture(device, kWidth, kHeight, kFormat,
+                        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment);
+    wgpu::Texture renderTexture =
+        Create2DTexture(device, kWidth, kHeight, kFormat,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment);
+
+    // Create a texture view for the external texture
+    wgpu::TextureView externalView = sampledTexture.CreateView();
+
+    // Initialize texture with green to ensure it is sampled from later.
+    {
+        utils::ComboRenderPassDescriptor renderPass({externalView}, nullptr);
+        renderPass.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Pipeline Creation
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.cTargets[0].format = kFormat;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    // Create an ExternalTextureDescriptor from the texture view
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = externalView;
+
+    // Import the external texture
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a sampler and bind group
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {{0, sampler}, {1, externalTexture}});
+
+    // Run the shader, which should sample from the external texture and draw a triangle into the
+    // upper left corner of the render texture.
+    wgpu::TextureView renderView = renderTexture.CreateView();
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+    {
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderTexture, 0, 0);
+}
+
+TEST_P(ExternalTextureTests, SampleMultiplanarExternalTexture) {
+    // TODO(crbug.com/dawn/1263): SPIR-V has an issue compiling the output from Tint's external
+    // texture transform. Re-enable this test for OpenGL when the switch to Tint is complete.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    const wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var positions = array<vec4<f32>, 3>(
+                vec4<f32>(-1.0, 1.0, 0.0, 1.0),
+                vec4<f32>(-1.0, -1.0, 0.0, 1.0),
+                vec4<f32>(1.0, 1.0, 0.0, 1.0)
+            );
+            return positions[VertexIndex];
+        })");
+
+    const wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var s : sampler;
+        @group(0) @binding(1) var t : texture_external;
+
+        @stage(fragment) fn main(@builtin(position) FragCoord : vec4<f32>)
+                                 -> @location(0) vec4<f32> {
+            return textureSampleLevel(t, s, FragCoord.xy / vec2<f32>(4.0, 4.0));
+        })");
+
+    wgpu::Texture sampledTexturePlane0 =
+        Create2DTexture(device, kWidth, kHeight, wgpu::TextureFormat::R8Unorm,
+                        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment);
+    wgpu::Texture sampledTexturePlane1 =
+        Create2DTexture(device, kWidth, kHeight, wgpu::TextureFormat::RG8Unorm,
+                        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment);
+
+    wgpu::Texture renderTexture =
+        Create2DTexture(device, kWidth, kHeight, kFormat,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment);
+
+    // Create a texture view for the external texture
+    wgpu::TextureView externalViewPlane0 = sampledTexturePlane0.CreateView();
+    wgpu::TextureView externalViewPlane1 = sampledTexturePlane1.CreateView();
+
+    struct ConversionExpectation {
+        double y;
+        double u;
+        double v;
+        RGBA8 rgba;
+    };
+
+    std::array<ConversionExpectation, 4> expectations = {{{0.0f, 0.5f, 0.5f, RGBA8::kBlack},
+                                                          {0.298f, 0.329f, 1.0f, RGBA8::kRed},
+                                                          {0.584f, -0.168f, -0.823f, RGBA8::kGreen},
+                                                          {0.113f, 1.0f, 0.419f, RGBA8::kBlue}}};
+
+    for (ConversionExpectation expectation : expectations) {
+        // Initialize the texture planes with YUV data
+        {
+            utils::ComboRenderPassDescriptor renderPass({externalViewPlane0, externalViewPlane1},
+                                                        nullptr);
+            renderPass.cColorAttachments[0].clearValue = {expectation.y, 0.0f, 0.0f, 0.0f};
+            renderPass.cColorAttachments[1].clearValue = {expectation.u, expectation.v, 0.0f, 0.0f};
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+        }
+
+        // Pipeline Creation
+        utils::ComboRenderPipelineDescriptor descriptor;
+        // descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = kFormat;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        // Create an ExternalTextureDescriptor from the texture views
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = externalViewPlane0;
+        externalDesc.plane1 = externalViewPlane1;
+
+        // Import the external texture
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a sampler and bind group
+        wgpu::Sampler sampler = device.CreateSampler();
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, sampler}, {1, externalTexture}});
+
+        // Run the shader, which should sample from the external texture and draw a triangle into
+        // the upper left corner of the render texture.
+        wgpu::TextureView renderView = renderTexture.CreateView();
+        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expectation.rgba, renderTexture, 0, 0);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(ExternalTextureTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
new file mode 100644
index 0000000..525cdac
--- /dev/null
+++ b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
@@ -0,0 +1,287 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <sstream>
+#include <vector>
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 1;
+
+enum class DrawMode {
+    NonIndexed,
+    Indexed,
+    NonIndexedIndirect,
+    IndexedIndirect,
+};
+
+enum class CheckIndex : uint32_t {
+    Vertex = 0x0000001,
+    Instance = 0x0000002,
+};
+
+namespace dawn {
+    template <>
+    struct IsDawnBitmask<CheckIndex> {
+        static constexpr bool enable = true;
+    };
+}  // namespace dawn
+
+class FirstIndexOffsetTests : public DawnTest {
+  public:
+    void TestVertexIndex(DrawMode mode, uint32_t firstVertex);
+    void TestInstanceIndex(DrawMode mode, uint32_t firstInstance);
+    void TestBothIndices(DrawMode mode, uint32_t firstVertex, uint32_t firstInstance);
+
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        // TODO(tint:451): Remove once "flat" is supported under OpenGL(ES).
+        DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+    }
+
+  private:
+    void TestImpl(DrawMode mode,
+                  CheckIndex checkIndex,
+                  uint32_t vertexIndex,
+                  uint32_t instanceIndex);
+};
+
+void FirstIndexOffsetTests::TestVertexIndex(DrawMode mode, uint32_t firstVertex) {
+    TestImpl(mode, CheckIndex::Vertex, firstVertex, 0);
+}
+
+void FirstIndexOffsetTests::TestInstanceIndex(DrawMode mode, uint32_t firstInstance) {
+    TestImpl(mode, CheckIndex::Instance, 0, firstInstance);
+}
+
+void FirstIndexOffsetTests::TestBothIndices(DrawMode mode,
+                                            uint32_t firstVertex,
+                                            uint32_t firstInstance) {
+    using wgpu::operator|;
+    TestImpl(mode, CheckIndex::Vertex | CheckIndex::Instance, firstVertex, firstInstance);
+}
+
+// Conditionally tests if first/baseVertex and/or firstInstance have been correctly passed to the
+// vertex shader. Since vertex shaders can't write to storage buffers, we pass vertex/instance
+// indices to a fragment shader via u32 attributes. The fragment shader runs once and writes the
+// values to a storage buffer. If vertex index is used, the vertex buffer is padded with 0s.
+void FirstIndexOffsetTests::TestImpl(DrawMode mode,
+                                     CheckIndex checkIndex,
+                                     uint32_t firstVertex,
+                                     uint32_t firstInstance) {
+    using wgpu::operator&;
+
+    std::stringstream vertexInputs;
+    std::stringstream vertexOutputs;
+    std::stringstream vertexBody;
+    std::stringstream fragmentInputs;
+    std::stringstream fragmentBody;
+
+    vertexInputs << "  @location(0) position : vec4<f32>,\n";
+    vertexOutputs << "  @builtin(position) position : vec4<f32>,\n";
+
+    if ((checkIndex & CheckIndex::Vertex) != 0) {
+        vertexInputs << "  @builtin(vertex_index) vertex_index : u32,\n";
+        vertexOutputs << "  @location(1) @interpolate(flat) vertex_index : u32,\n";
+        vertexBody << "  output.vertex_index = input.vertex_index;\n";
+
+        fragmentInputs << "  @location(1) @interpolate(flat) vertex_index : u32,\n";
+        fragmentBody << "  _ = atomicMin(&idx_vals.vertex_index, input.vertex_index);\n";
+    }
+    if ((checkIndex & CheckIndex::Instance) != 0) {
+        vertexInputs << "  @builtin(instance_index) instance_index : u32,\n";
+        vertexOutputs << "  @location(2) @interpolate(flat) instance_index : u32,\n";
+        vertexBody << "  output.instance_index = input.instance_index;\n";
+
+        fragmentInputs << "  @location(2) @interpolate(flat) instance_index : u32,\n";
+        fragmentBody << "  _ = atomicMin(&idx_vals.instance_index, input.instance_index);\n";
+    }
+
+    std::string vertexShader = R"(
+struct VertexInputs {
+)" + vertexInputs.str() + R"(
+}
+struct VertexOutputs {
+)" + vertexOutputs.str() + R"(
+}
+@stage(vertex) fn main(input : VertexInputs) -> VertexOutputs {
+  var output : VertexOutputs;
+)" + vertexBody.str() + R"(
+  output.position = input.position;
+  return output;
+})";
+
+    std::string fragmentShader = R"(
+struct IndexVals {
+  vertex_index : atomic<u32>,
+  instance_index : atomic<u32>,
+}
+@group(0) @binding(0) var<storage, read_write> idx_vals : IndexVals;
+
+struct FragInputs {
+)" + fragmentInputs.str() + R"(
+}
+@stage(fragment) fn main(input : FragInputs) {
+)" + fragmentBody.str() + R"(
+})";
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    constexpr uint32_t kComponentsPerVertex = 4;
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, vertexShader.c_str());
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, fragmentShader.c_str());
+    pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    pipelineDesc.vertex.bufferCount = 1;
+    pipelineDesc.cBuffers[0].arrayStride = kComponentsPerVertex * sizeof(float);
+    pipelineDesc.cBuffers[0].attributeCount = 1;
+    pipelineDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+    pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    std::vector<float> vertexData(firstVertex * kComponentsPerVertex);
+    vertexData.insert(vertexData.end(), {0, 0, 0, 1});
+    vertexData.insert(vertexData.end(), {0, 0, 0, 1});
+    wgpu::Buffer vertices = utils::CreateBufferFromData(
+        device, vertexData.data(), vertexData.size() * sizeof(float), wgpu::BufferUsage::Vertex);
+    wgpu::Buffer indices =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0});
+
+    const uint32_t bufferInitialVertex =
+        checkIndex & CheckIndex::Vertex ? std::numeric_limits<uint32_t>::max() : 0;
+    const uint32_t bufferInitialInstance =
+        checkIndex & CheckIndex::Instance ? std::numeric_limits<uint32_t>::max() : 0;
+    wgpu::Buffer buffer =
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage,
+                                    {bufferInitialVertex, bufferInitialInstance});
+
+    wgpu::Buffer indirectBuffer;
+    switch (mode) {
+        case DrawMode::NonIndexed:
+        case DrawMode::Indexed:
+            break;
+        case DrawMode::NonIndexedIndirect:
+            // With DrawIndirect firstInstance is reserved and must be 0 according to spec.
+            ASSERT_EQ(firstInstance, 0u);
+            indirectBuffer = utils::CreateBufferFromData<uint32_t>(
+                device, wgpu::BufferUsage::Indirect, {1, 1, firstVertex, firstInstance});
+            break;
+        case DrawMode::IndexedIndirect:
+            // With DrawIndexedIndirect firstInstance is reserved and must be 0 according to spec.
+            ASSERT_EQ(firstInstance, 0u);
+            indirectBuffer = utils::CreateBufferFromData<uint32_t>(
+                device, wgpu::BufferUsage::Indirect, {1, 1, 0, firstVertex, firstInstance});
+            break;
+        default:
+            FAIL();
+    }
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetVertexBuffer(0, vertices);
+    pass.SetBindGroup(0, bindGroup);
+    // Do a first draw to make sure the offset values are correctly updated on the next draw.
+    // We should only see the values from the second draw.
+    pass.Draw(1, 1, firstVertex + 1, firstInstance + 1);
+    switch (mode) {
+        case DrawMode::NonIndexed:
+            pass.Draw(1, 1, firstVertex, firstInstance);
+            break;
+        case DrawMode::Indexed:
+            pass.SetIndexBuffer(indices, wgpu::IndexFormat::Uint32);
+            pass.DrawIndexed(1, 1, 0, firstVertex, firstInstance);
+            break;
+        case DrawMode::NonIndexedIndirect:
+            pass.DrawIndirect(indirectBuffer, 0);
+            break;
+        case DrawMode::IndexedIndirect:
+            pass.SetIndexBuffer(indices, wgpu::IndexFormat::Uint32);
+            pass.DrawIndexedIndirect(indirectBuffer, 0);
+            break;
+        default:
+            FAIL();
+    }
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::array<uint32_t, 2> expected = {firstVertex, firstInstance};
+    // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
+    // Until then the expected values should always be {0, 0}.
+    if (IsD3D12() && (mode == DrawMode::NonIndexedIndirect || mode == DrawMode::IndexedIndirect)) {
+        expected = {0, 0};
+    }
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, expected.size());
+}
+
+// Test that vertex_index starts at 7 when drawn using Draw()
+TEST_P(FirstIndexOffsetTests, NonIndexedVertexOffset) {
+    TestVertexIndex(DrawMode::NonIndexed, 7);
+}
+
+// Test that instance_index starts at 11 when drawn using Draw()
+TEST_P(FirstIndexOffsetTests, NonIndexedInstanceOffset) {
+    TestInstanceIndex(DrawMode::NonIndexed, 11);
+}
+
+// Test that vertex_index and instance_index start at 7 and 11 respectively when drawn using Draw()
+TEST_P(FirstIndexOffsetTests, NonIndexedBothOffset) {
+    TestBothIndices(DrawMode::NonIndexed, 7, 11);
+}
+
+// Test that vertex_index starts at 7 when drawn using DrawIndexed()
+TEST_P(FirstIndexOffsetTests, IndexedVertex) {
+    TestVertexIndex(DrawMode::Indexed, 7);
+}
+
+// Test that instance_index starts at 11 when drawn using DrawIndexed()
+TEST_P(FirstIndexOffsetTests, IndexedInstance) {
+    TestInstanceIndex(DrawMode::Indexed, 11);
+}
+
+// Test that vertex_index and instance_index start at 7 and 11 respectively when drawn using
+// DrawIndexed()
+TEST_P(FirstIndexOffsetTests, IndexedBothOffset) {
+    TestBothIndices(DrawMode::Indexed, 7, 11);
+}
+
+// There are no instance_index tests because the spec forces it to be 0.
+
+// Test that vertex_index starts at 7 when drawn using DrawIndirect()
+TEST_P(FirstIndexOffsetTests, NonIndexedIndirectVertexOffset) {
+    TestVertexIndex(DrawMode::NonIndexedIndirect, 7);
+}
+
+// Test that vertex_index starts at 7 when drawn using DrawIndexedIndirect()
+TEST_P(FirstIndexOffsetTests, IndexedIndirectVertex) {
+    TestVertexIndex(DrawMode::IndexedIndirect, 7);
+}
+
+DAWN_INSTANTIATE_TEST(FirstIndexOffsetTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/GpuMemorySynchronizationTests.cpp b/src/dawn/tests/end2end/GpuMemorySynchronizationTests.cpp
new file mode 100644
index 0000000..0dd0aec
--- /dev/null
+++ b/src/dawn/tests/end2end/GpuMemorySynchronizationTests.cpp
@@ -0,0 +1,650 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class GpuMemorySyncTests : public DawnTest {
+  protected:
+    wgpu::Buffer CreateBuffer() {
+        wgpu::BufferDescriptor srcDesc;
+        srcDesc.size = 4;
+        srcDesc.usage =
+            wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage;
+        wgpu::Buffer buffer = device.CreateBuffer(&srcDesc);
+
+        int myData = 0;
+        queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
+        return buffer;
+    }
+
+    std::tuple<wgpu::ComputePipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForCompute(
+        const wgpu::Buffer& buffer) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+            struct Data {
+                a : i32
+            }
+            @group(0) @binding(0) var<storage, read_write> data : Data;
+            @stage(compute) @workgroup_size(1) fn main() {
+                data.a = data.a + 1;
+            })");
+
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.compute.module = csModule;
+        cpDesc.compute.entryPoint = "main";
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+        return std::make_tuple(pipeline, bindGroup);
+    }
+
+    std::tuple<wgpu::RenderPipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForRender(
+        const wgpu::Buffer& buffer,
+        wgpu::TextureFormat colorFormat) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            struct Data {
+                i : i32
+            }
+            @group(0) @binding(0) var<storage, read_write> data : Data;
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                data.i = data.i + 1;
+                return vec4<f32>(f32(data.i) / 255.0, 0.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor rpDesc;
+        rpDesc.vertex.module = vsModule;
+        rpDesc.cFragment.module = fsModule;
+        rpDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        rpDesc.cTargets[0].format = colorFormat;
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+        return std::make_tuple(pipeline, bindGroup);
+    }
+};
+
+// Clear storage buffer with zero. Then read data, add one, and write the result to storage buffer
+// in compute pass. Iterate this read-add-write steps per compute pass a few time. The successive
+// iteration reads the result in buffer from last iteration, which makes the iterations a data
+// dependency chain. The test verifies that data in buffer among iterations in compute passes is
+// correctly synchronized.
+TEST_P(GpuMemorySyncTests, ComputePass) {
+    // Create pipeline, bind group, and buffer for compute pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    auto [compute, bindGroup] = CreatePipelineAndBindGroupForCompute(buffer);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Iterate the read-add-write operations in compute pass a few times.
+    int iteration = 3;
+    for (int i = 0; i < iteration; ++i) {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(compute);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_BUFFER_U32_EQ(iteration, buffer, 0);
+}
+
+// Clear storage buffer with zero. Then read data, add one, and write the result to storage buffer
+// in render pass. Iterate this read-add-write steps per render pass a few time. The successive
+// iteration reads the result in buffer from last iteration, which makes the iterations a data
+// dependency chain. In addition, color output by fragment shader depends on the data in storage
+// buffer, so we can check color in render target to verify that data in buffer among iterations in
+// render passes is correctly synchronized.
+TEST_P(GpuMemorySyncTests, RenderPass) {
+    // Create pipeline, bind group, and buffer for render pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    auto [render, bindGroup] = CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Iterate the read-add-write operations in render pass a few times.
+    int iteration = 3;
+    for (int i = 0; i < iteration; ++i) {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(render);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(iteration, 0, 0, 255), renderPass.color, 0, 0);
+}
+
+// Write into a storage buffer in a render pass. Then read that data in a compute
+// pass. And verify the data flow is correctly synchronized.
+TEST_P(GpuMemorySyncTests, RenderPassToComputePass) {
+    // Create pipeline, bind group, and buffer for render pass and compute pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+
+    auto [render, bindGroup0] = CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
+    auto [compute, bindGroup1] = CreatePipelineAndBindGroupForCompute(buffer);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Write data into a storage buffer in render pass.
+    wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass0.SetPipeline(render);
+    pass0.SetBindGroup(0, bindGroup0);
+    pass0.Draw(1);
+    pass0.End();
+
+    // Read that data in compute pass.
+    wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+    pass1.SetPipeline(compute);
+    pass1.SetBindGroup(0, bindGroup1);
+    pass1.Dispatch(1);
+    pass1.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_BUFFER_U32_EQ(2, buffer, 0);
+}
+
+// Write into a storage buffer in a compute pass. Then read that data in a render
+// pass. And verify the data flow is correctly synchronized.
+TEST_P(GpuMemorySyncTests, ComputePassToRenderPass) {
+    // Create pipeline, bind group, and buffer for compute pass and render pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    auto [compute, bindGroup1] = CreatePipelineAndBindGroupForCompute(buffer);
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    auto [render, bindGroup0] = CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Write data into a storage buffer in compute pass.
+    wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+    pass0.SetPipeline(compute);
+    pass0.SetBindGroup(0, bindGroup1);
+    pass0.Dispatch(1);
+    pass0.End();
+
+    // Read that data in render pass.
+    wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(render);
+    pass1.SetBindGroup(0, bindGroup0);
+    pass1.Draw(1);
+    pass1.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(2, 0, 0, 255), renderPass.color, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(GpuMemorySyncTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class StorageToUniformSyncTests : public DawnTest {
+  protected:
+    void CreateBuffer() {
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = sizeof(float);
+        bufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform;
+        mBuffer = device.CreateBuffer(&bufferDesc);
+    }
+
+    std::tuple<wgpu::ComputePipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForCompute() {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+            struct Data {
+                a : f32
+            }
+            @group(0) @binding(0) var<storage, read_write> data : Data;
+            @stage(compute) @workgroup_size(1) fn main() {
+                data.a = 1.0;
+            })");
+
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.compute.module = csModule;
+        cpDesc.compute.entryPoint = "main";
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, mBuffer}});
+        return std::make_tuple(pipeline, bindGroup);
+    }
+
+    std::tuple<wgpu::RenderPipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForRender(
+        wgpu::TextureFormat colorFormat) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            struct Contents {
+                color : f32
+            }
+            @group(0) @binding(0) var<uniform> contents : Contents;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(contents.color, 0.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor rpDesc;
+        rpDesc.vertex.module = vsModule;
+        rpDesc.cFragment.module = fsModule;
+        rpDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        rpDesc.cTargets[0].format = colorFormat;
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, mBuffer}});
+        return std::make_tuple(pipeline, bindGroup);
+    }
+
+    wgpu::Buffer mBuffer;
+};
+
+// Write into a storage buffer in compute pass in a command buffer. Then read that data in a render
+// pass. The two passes use the same command buffer.
+TEST_P(StorageToUniformSyncTests, ReadAfterWriteWithSameCommandBuffer) {
+    // Create pipeline, bind group, and buffer for compute pass and render pass.
+    CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    auto [compute, computeBindGroup] = CreatePipelineAndBindGroupForCompute();
+    auto [render, renderBindGroup] = CreatePipelineAndBindGroupForRender(renderPass.colorFormat);
+
+    // Write data into a storage buffer in compute pass.
+    wgpu::CommandEncoder encoder0 = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass0 = encoder0.BeginComputePass();
+    pass0.SetPipeline(compute);
+    pass0.SetBindGroup(0, computeBindGroup);
+    pass0.Dispatch(1);
+    pass0.End();
+
+    // Read that data in render pass.
+    wgpu::RenderPassEncoder pass1 = encoder0.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(render);
+    pass1.SetBindGroup(0, renderBindGroup);
+    pass1.Draw(1);
+    pass1.End();
+
+    wgpu::CommandBuffer commands = encoder0.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the rendering result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Write into a storage buffer in compute pass in a command buffer. Then read that data in a render
+// pass. The two passes use the different command buffers. The command buffers are submitted to the
+// queue in one shot.
+TEST_P(StorageToUniformSyncTests, ReadAfterWriteWithDifferentCommandBuffers) {
+    // Create pipeline, bind group, and buffer for compute pass and render pass.
+    CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    auto [compute, computeBindGroup] = CreatePipelineAndBindGroupForCompute();
+    auto [render, renderBindGroup] = CreatePipelineAndBindGroupForRender(renderPass.colorFormat);
+
+    // Write data into a storage buffer in compute pass.
+    wgpu::CommandBuffer cb[2];
+    wgpu::CommandEncoder encoder0 = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass0 = encoder0.BeginComputePass();
+    pass0.SetPipeline(compute);
+    pass0.SetBindGroup(0, computeBindGroup);
+    pass0.Dispatch(1);
+    pass0.End();
+    cb[0] = encoder0.Finish();
+
+    // Read that data in render pass.
+    wgpu::CommandEncoder encoder1 = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass1 = encoder1.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(render);
+    pass1.SetBindGroup(0, renderBindGroup);
+    pass1.Draw(1);
+    pass1.End();
+
+    cb[1] = encoder1.Finish();
+    queue.Submit(2, cb);
+
+    // Verify the rendering result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Write into a storage buffer in compute pass in a command buffer. Then read that data in a render
+// pass. The two passes use the different command buffers. The command buffers are submitted to the
+// queue separately.
+TEST_P(StorageToUniformSyncTests, ReadAfterWriteWithDifferentQueueSubmits) {
+    // Create pipeline, bind group, and buffer for compute pass and render pass.
+    CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    auto [compute, computeBindGroup] = CreatePipelineAndBindGroupForCompute();
+    auto [render, renderBindGroup] = CreatePipelineAndBindGroupForRender(renderPass.colorFormat);
+
+    // Write data into a storage buffer in compute pass.
+    wgpu::CommandBuffer cb[2];
+    wgpu::CommandEncoder encoder0 = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass0 = encoder0.BeginComputePass();
+    pass0.SetPipeline(compute);
+    pass0.SetBindGroup(0, computeBindGroup);
+    pass0.Dispatch(1);
+    pass0.End();
+    cb[0] = encoder0.Finish();
+    queue.Submit(1, &cb[0]);
+
+    // Read that data in render pass.
+    wgpu::CommandEncoder encoder1 = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass1 = encoder1.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(render);
+    pass1.SetBindGroup(0, renderBindGroup);
+    pass1.Draw(1);
+    pass1.End();
+
+    cb[1] = encoder1.Finish();
+    queue.Submit(1, &cb[1]);
+
+    // Verify the rendering result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(StorageToUniformSyncTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+constexpr int kRTSize = 8;
+constexpr int kVertexBufferStride = 4 * sizeof(float);
+
+class MultipleWriteThenMultipleReadTests : public DawnTest {
+  protected:
+    wgpu::Buffer CreateZeroedBuffer(uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor srcDesc;
+        srcDesc.size = size;
+        srcDesc.usage = usage;
+        wgpu::Buffer buffer = device.CreateBuffer(&srcDesc);
+
+        std::vector<uint8_t> zeros(size, 0);
+        queue.WriteBuffer(buffer, 0, zeros.data(), size);
+
+        return buffer;
+    }
+};
+
+// Write into a few storage buffers in compute pass. Then read that data in a render pass. The
+// readonly buffers in render pass include vertex buffer, index buffer, uniform buffer, and readonly
+// storage buffer. Data to be read in all of these buffers in render pass depend on the write
+// operation in compute pass.
+TEST_P(MultipleWriteThenMultipleReadTests, SeparateBuffers) {
+    // Create pipeline, bind group, and different buffers for compute pass.
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+        struct VBContents {
+            pos : array<vec4<f32>, 4>
+        }
+        @group(0) @binding(0) var<storage, read_write> vbContents : VBContents;
+
+        struct IBContents {
+            indices : array<vec4<i32>, 2>
+        }
+        @group(0) @binding(1) var<storage, read_write> ibContents : IBContents;
+
+        struct ColorContents {
+            color : f32
+        }
+        @group(0) @binding(2) var<storage, read_write> uniformContents : ColorContents;
+        @group(0) @binding(3) var<storage, read_write> storageContents : ColorContents;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            vbContents.pos[0] = vec4<f32>(-1.0, 1.0, 0.0, 1.0);
+            vbContents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
+            vbContents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
+            vbContents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
+            let dummy : i32 = 0;
+            ibContents.indices[0] = vec4<i32>(0, 1, 2, 0);
+            ibContents.indices[1] = vec4<i32>(2, 3, dummy, dummy);
+            uniformContents.color = 1.0;
+            storageContents.color = 1.0;
+        })");
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.compute.module = csModule;
+    cpDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+    wgpu::Buffer vertexBuffer = CreateZeroedBuffer(
+        kVertexBufferStride * 4,
+        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+    wgpu::Buffer indexBuffer = CreateZeroedBuffer(
+        sizeof(int) * 4 * 2,
+        wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+    wgpu::Buffer uniformBuffer =
+        CreateZeroedBuffer(sizeof(float), wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
+                                              wgpu::BufferUsage::CopyDst);
+    wgpu::Buffer storageBuffer =
+        CreateZeroedBuffer(sizeof(float), wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+
+    wgpu::BindGroup bindGroup0 = utils::MakeBindGroup(
+        device, cp.GetBindGroupLayout(0),
+        {{0, vertexBuffer}, {1, indexBuffer}, {2, uniformBuffer}, {3, storageBuffer}});
+    // Write data into storage buffers in compute pass.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+    pass0.SetPipeline(cp);
+    pass0.SetBindGroup(0, bindGroup0);
+    pass0.Dispatch(1);
+    pass0.End();
+
+    // Create pipeline, bind group, and reuse buffers in render pass.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+            return pos;
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            color : f32
+        }
+
+        @group(0) @binding(0) var<uniform> uniformBuffer : Buf;
+        @group(0) @binding(1) var<storage, read> storageBuffer : Buf;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(uniformBuffer.color, storageBuffer.color, 0.0, 1.0);
+        })");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    rpDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].arrayStride = kVertexBufferStride;
+    rpDesc.cBuffers[0].attributeCount = 1;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    rpDesc.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline rp = device.CreateRenderPipeline(&rpDesc);
+
+    wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, rp.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer}, {1, storageBuffer}});
+
+    // Read data in buffers in render pass.
+    wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(rp);
+    pass1.SetVertexBuffer(0, vertexBuffer);
+    pass1.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+    pass1.SetBindGroup(0, bindGroup1);
+    pass1.DrawIndexed(6);
+    pass1.End();
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    // Verify the rendering result.
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, max, max);
+}
+
+// Write into a storage buffer in compute pass. Then read that data in buffer in a render pass. The
+// buffer is composed of vertices, indices, uniforms and readonly storage. Data to be read in the
+// buffer in render pass depend on the write operation in compute pass.
+TEST_P(MultipleWriteThenMultipleReadTests, OneBuffer) {
+    // TODO(crbug.com/dawn/646): diagnose and fix this OpenGL ES failure.
+    // "Push constant block cannot be expressed as neither std430 nor std140. ES-targets do not
+    // support GL_ARB_enhanced_layouts."
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+
+    // Create pipeline, bind group, and a complex buffer for compute pass.
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+        struct Contents {
+            @align(256) pos : array<vec4<f32>, 4>,
+            @align(256) indices : array<vec4<i32>, 2>,
+            @align(256) color0 : f32,
+            @align(256) color1 : f32,
+        }
+
+        @group(0) @binding(0) var<storage, read_write> contents : Contents;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            contents.pos[0] = vec4<f32>(-1.0, 1.0, 0.0, 1.0);
+            contents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
+            contents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
+            contents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
+            let dummy : i32 = 0;
+            contents.indices[0] = vec4<i32>(0, 1, 2, 0);
+            contents.indices[1] = vec4<i32>(2, 3, dummy, dummy);
+            contents.color0 = 1.0;
+            contents.color1 = 1.0;
+        })");
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.compute.module = csModule;
+    cpDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline cp = device.CreateComputePipeline(&cpDesc);
+    struct Data {
+        float pos[4][4];
+        char padding0[256 - sizeof(float) * 16];
+        int indices[2][4];
+        char padding1[256 - sizeof(int) * 8];
+        float color0;
+        char padding2[256 - sizeof(float)];
+        float color1;
+        char padding3[256 - sizeof(float)];
+    };
+    wgpu::Buffer buffer = CreateZeroedBuffer(
+        sizeof(Data), wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index |
+                          wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
+                          wgpu::BufferUsage::CopyDst);
+    wgpu::BindGroup bindGroup0 =
+        utils::MakeBindGroup(device, cp.GetBindGroupLayout(0), {{0, buffer}});
+
+    // Write various data (vertices, indices, and uniforms) into the buffer in compute pass.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+    pass0.SetPipeline(cp);
+    pass0.SetBindGroup(0, bindGroup0);
+    pass0.Dispatch(1);
+    pass0.End();
+
+    // Create pipeline, bind group, and reuse the buffer in render pass.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+            return pos;
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct Buf {
+            color : f32
+        }
+        @group(0) @binding(0) var<uniform> uniformBuffer : Buf;
+        @group(0) @binding(1) var<storage, read> storageBuffer : Buf;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(uniformBuffer.color, storageBuffer.color, 0.0, 1.0);
+        })");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    rpDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].arrayStride = kVertexBufferStride;
+    rpDesc.cBuffers[0].attributeCount = 1;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    rpDesc.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline rp = device.CreateRenderPipeline(&rpDesc);
+
+    wgpu::BindGroup bindGroup1 =
+        utils::MakeBindGroup(device, rp.GetBindGroupLayout(0),
+                             {{0, buffer, offsetof(Data, color0), sizeof(float)},
+                              {1, buffer, offsetof(Data, color1), sizeof(float)}});
+
+    // Read various data in the buffer in render pass.
+    wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(rp);
+    pass1.SetVertexBuffer(0, buffer);
+    pass1.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, offsetof(Data, indices));
+    pass1.SetBindGroup(0, bindGroup1);
+    pass1.DrawIndexed(6);
+    pass1.End();
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    // Verify the rendering result.
+    uint32_t min = 1, max = kRTSize - 3;
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, min, min);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, max, min);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, min, max);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kYellow, renderPass.color, max, max);
+}
+
+DAWN_INSTANTIATE_TEST(MultipleWriteThenMultipleReadTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
new file mode 100644
index 0000000..a8aa59b
--- /dev/null
+++ b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
@@ -0,0 +1,461 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/MetalBackend.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreVideo/CVPixelBuffer.h>
+#include <IOSurface/IOSurface.h>
+
+namespace {
+
+    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+        CFNumberRef number = CFNumberCreate(nullptr, kCFNumberSInt32Type, &value);
+        CFDictionaryAddValue(dictionary, key, number);
+        CFRelease(number);
+    }
+
+    class ScopedIOSurfaceRef {
+      public:
+        ScopedIOSurfaceRef() : mSurface(nullptr) {
+        }
+        explicit ScopedIOSurfaceRef(IOSurfaceRef surface) : mSurface(surface) {
+        }
+
+        ~ScopedIOSurfaceRef() {
+            if (mSurface != nullptr) {
+                CFRelease(mSurface);
+                mSurface = nullptr;
+            }
+        }
+
+        IOSurfaceRef get() const {
+            return mSurface;
+        }
+
+        ScopedIOSurfaceRef(ScopedIOSurfaceRef&& other) {
+            if (mSurface != nullptr) {
+                CFRelease(mSurface);
+            }
+            mSurface = other.mSurface;
+            other.mSurface = nullptr;
+        }
+
+        ScopedIOSurfaceRef& operator=(ScopedIOSurfaceRef&& other) {
+            if (mSurface != nullptr) {
+                CFRelease(mSurface);
+            }
+            mSurface = other.mSurface;
+            other.mSurface = nullptr;
+
+            return *this;
+        }
+
+        ScopedIOSurfaceRef(const ScopedIOSurfaceRef&) = delete;
+        ScopedIOSurfaceRef& operator=(const ScopedIOSurfaceRef&) = delete;
+
+      private:
+        IOSurfaceRef mSurface = nullptr;
+    };
+
+    ScopedIOSurfaceRef CreateSinglePlaneIOSurface(uint32_t width,
+                                                  uint32_t height,
+                                                  uint32_t format,
+                                                  uint32_t bytesPerElement) {
+        CFMutableDictionaryRef dict =
+            CFDictionaryCreateMutable(kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks,
+                                      &kCFTypeDictionaryValueCallBacks);
+        AddIntegerValue(dict, kIOSurfaceWidth, width);
+        AddIntegerValue(dict, kIOSurfaceHeight, height);
+        AddIntegerValue(dict, kIOSurfacePixelFormat, format);
+        AddIntegerValue(dict, kIOSurfaceBytesPerElement, bytesPerElement);
+
+        IOSurfaceRef ioSurface = IOSurfaceCreate(dict);
+        EXPECT_NE(nullptr, ioSurface);
+        CFRelease(dict);
+
+        return ScopedIOSurfaceRef(ioSurface);
+    }
+
+    class IOSurfaceTestBase : public DawnTest {
+      public:
+        wgpu::Texture WrapIOSurface(const wgpu::TextureDescriptor* descriptor,
+                                    IOSurfaceRef ioSurface,
+                                    bool isInitialized = true) {
+            dawn::native::metal::ExternalImageDescriptorIOSurface externDesc;
+            externDesc.cTextureDescriptor =
+                reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
+            externDesc.ioSurface = ioSurface;
+            externDesc.isInitialized = isInitialized;
+            WGPUTexture texture = dawn::native::metal::WrapIOSurface(device.Get(), &externDesc);
+            return wgpu::Texture::Acquire(texture);
+        }
+    };
+
+}  // anonymous namespace
+
+// A small fixture used to initialize default data for the IOSurface validation tests.
+// These tests are skipped if the harness is using the wire.
+class IOSurfaceValidationTests : public IOSurfaceTestBase {
+  public:
+    IOSurfaceValidationTests() {
+        defaultIOSurface = CreateSinglePlaneIOSurface(10, 10, kCVPixelFormatType_32BGRA, 4);
+
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = wgpu::TextureFormat::BGRA8Unorm;
+        descriptor.size = {10, 10, 1};
+        descriptor.sampleCount = 1;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    }
+
+  protected:
+    wgpu::TextureDescriptor descriptor;
+    ScopedIOSurfaceRef defaultIOSurface;
+};
+
+// Test a successful wrapping of an IOSurface in a texture
+TEST_P(IOSurfaceValidationTests, Success) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get());
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the texture descriptor is invalid
+TEST_P(IOSurfaceValidationTests, InvalidTextureDescriptor) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::ChainedStruct chainedDescriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor dimension isn't 2D
+// TODO(crbug.com/dawn/814): Test 1D textures when implemented
+TEST_P(IOSurfaceValidationTests, InvalidTextureDimension) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor mip level count isn't 1
+TEST_P(IOSurfaceValidationTests, InvalidMipLevelCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.mipLevelCount = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor depth isn't 1
+TEST_P(IOSurfaceValidationTests, InvalidDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.depthOrArrayLayers = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor sample count isn't 1
+TEST_P(IOSurfaceValidationTests, InvalidSampleCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.sampleCount = 4;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor width doesn't match the surface's
+TEST_P(IOSurfaceValidationTests, InvalidWidth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.width = 11;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor height doesn't match the surface's
+TEST_P(IOSurfaceValidationTests, InvalidHeight) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.height = 11;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor format isn't compatible with the IOSurface's
+TEST_P(IOSurfaceValidationTests, InvalidFormat) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.format = wgpu::TextureFormat::R8Unorm;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapIOSurface(&descriptor, defaultIOSurface.get()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Fixture to test using IOSurfaces through different usages.
+// These tests are skipped if the harness is using the wire.
+class IOSurfaceUsageTests : public IOSurfaceTestBase {
+  public:
+    // Test that sampling a 1x1 works.
+    void DoSampleTest(IOSurfaceRef ioSurface,
+                      wgpu::TextureFormat format,
+                      void* data,
+                      size_t dataSize,
+                      RGBA8 expectedColor) {
+        // Write the data to the IOSurface
+        IOSurfaceLock(ioSurface, 0, nullptr);
+        memcpy(IOSurfaceGetBaseAddress(ioSurface), data, dataSize);
+        IOSurfaceUnlock(ioSurface, 0, nullptr);
+
+        // The simplest texture sampling pipeline.
+        wgpu::RenderPipeline pipeline;
+        {
+            wgpu::ShaderModule vs = utils::CreateShaderModule(device, R"(
+                struct VertexOut {
+                    @location(0) texCoord : vec2<f32>,
+                    @builtin(position) position : vec4<f32>,
+                }
+
+                @stage(vertex)
+                fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+                    var pos = array<vec2<f32>, 6>(
+                        vec2<f32>(-2.0, -2.0),
+                        vec2<f32>(-2.0,  2.0),
+                        vec2<f32>( 2.0, -2.0),
+                        vec2<f32>(-2.0,  2.0),
+                        vec2<f32>( 2.0, -2.0),
+                        vec2<f32>( 2.0,  2.0));
+
+                    var texCoord = array<vec2<f32>, 6>(
+                        vec2<f32>(0.0, 0.0),
+                        vec2<f32>(0.0, 1.0),
+                        vec2<f32>(1.0, 0.0),
+                        vec2<f32>(0.0, 1.0),
+                        vec2<f32>(1.0, 0.0),
+                        vec2<f32>(1.0, 1.0));
+
+                    var output : VertexOut;
+                    output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+                    output.texCoord = texCoord[VertexIndex];
+                    return output;
+                }
+            )");
+            wgpu::ShaderModule fs = utils::CreateShaderModule(device, R"(
+                @group(0) @binding(0) var sampler0 : sampler;
+                @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+                @stage(fragment)
+                fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                    return textureSample(texture0, sampler0, texCoord);
+                }
+            )");
+
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vs;
+            descriptor.cFragment.module = fs;
+            descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+            pipeline = device.CreateRenderPipeline(&descriptor);
+        }
+
+        // The bindgroup containing the texture view for the ioSurface as well as the sampler.
+        wgpu::BindGroup bindGroup;
+        {
+            wgpu::TextureDescriptor textureDescriptor;
+            textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+            textureDescriptor.format = format;
+            textureDescriptor.size = {1, 1, 1};
+            textureDescriptor.sampleCount = 1;
+            textureDescriptor.mipLevelCount = 1;
+            textureDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+            wgpu::Texture wrappingTexture = WrapIOSurface(&textureDescriptor, ioSurface);
+
+            wgpu::TextureView textureView = wrappingTexture.CreateView();
+
+            wgpu::Sampler sampler = device.CreateSampler();
+
+            bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                             {{0, sampler}, {1, textureView}});
+        }
+
+        // Submit commands samping from the ioSurface and writing the result to renderPass.color
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expectedColor, renderPass.color, 0, 0);
+    }
+
+    // Test that clearing using BeginRenderPass writes correct data in the ioSurface.
+    void DoClearTest(IOSurfaceRef ioSurface,
+                     wgpu::TextureFormat format,
+                     void* data,
+                     size_t dataSize) {
+        // Get a texture view for the ioSurface
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+        textureDescriptor.format = format;
+        textureDescriptor.size = {1, 1, 1};
+        textureDescriptor.sampleCount = 1;
+        textureDescriptor.mipLevelCount = 1;
+        textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        wgpu::Texture ioSurfaceTexture = WrapIOSurface(&textureDescriptor, ioSurface);
+
+        wgpu::TextureView ioSurfaceView = ioSurfaceTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({ioSurfaceView}, {});
+        renderPassDescriptor.cColorAttachments[0].clearValue = {1 / 255.0f, 2 / 255.0f, 3 / 255.0f,
+                                                                4 / 255.0f};
+
+        // Execute commands to clear the ioSurface
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Wait for the commands touching the IOSurface to be scheduled
+        dawn::native::metal::WaitForCommandsToBeScheduled(device.Get());
+
+        // Check the correct data was written
+        IOSurfaceLock(ioSurface, kIOSurfaceLockReadOnly, nullptr);
+        ASSERT_EQ(0, memcmp(IOSurfaceGetBaseAddress(ioSurface), data, dataSize));
+        IOSurfaceUnlock(ioSurface, kIOSurfaceLockReadOnly, nullptr);
+    }
+};
+
+// Test sampling from a R8 IOSurface
+TEST_P(IOSurfaceUsageTests, SampleFromR8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface =
+        CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_OneComponent8, 1);
+
+    uint8_t data = 0x01;
+    DoSampleTest(ioSurface.get(), wgpu::TextureFormat::R8Unorm, &data, sizeof(data),
+                 RGBA8(1, 0, 0, 255));
+}
+
+// Test clearing a R8 IOSurface
+TEST_P(IOSurfaceUsageTests, ClearR8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface =
+        CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_OneComponent8, 1);
+
+    uint8_t data = 0x01;
+    DoClearTest(ioSurface.get(), wgpu::TextureFormat::R8Unorm, &data, sizeof(data));
+}
+
+// Test sampling from a RG8 IOSurface
+TEST_P(IOSurfaceUsageTests, SampleFromRG8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface =
+        CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_TwoComponent8, 2);
+
+    uint16_t data = 0x0102;  // Stored as (G, R)
+    DoSampleTest(ioSurface.get(), wgpu::TextureFormat::RG8Unorm, &data, sizeof(data),
+                 RGBA8(2, 1, 0, 255));
+}
+
+// Test clearing a RG8 IOSurface
+TEST_P(IOSurfaceUsageTests, ClearRG8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface =
+        CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_TwoComponent8, 2);
+
+    uint16_t data = 0x0201;
+    DoClearTest(ioSurface.get(), wgpu::TextureFormat::RG8Unorm, &data, sizeof(data));
+}
+
+// Test sampling from a BGRA8 IOSurface
+TEST_P(IOSurfaceUsageTests, SampleFromBGRA8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface = CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_32BGRA, 4);
+
+    uint32_t data = 0x01020304;  // Stored as (A, R, G, B)
+    DoSampleTest(ioSurface.get(), wgpu::TextureFormat::BGRA8Unorm, &data, sizeof(data),
+                 RGBA8(2, 3, 4, 1));
+}
+
+// Test clearing a BGRA8 IOSurface
+TEST_P(IOSurfaceUsageTests, ClearBGRA8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface = CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_32BGRA, 4);
+
+    uint32_t data = 0x04010203;
+    DoClearTest(ioSurface.get(), wgpu::TextureFormat::BGRA8Unorm, &data, sizeof(data));
+}
+
+// Test sampling from an RGBA8 IOSurface
+TEST_P(IOSurfaceUsageTests, SampleFromRGBA8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface = CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_32RGBA, 4);
+
+    uint32_t data = 0x01020304;  // Stored as (A, B, G, R)
+    DoSampleTest(ioSurface.get(), wgpu::TextureFormat::RGBA8Unorm, &data, sizeof(data),
+                 RGBA8(4, 3, 2, 1));
+}
+
+// Test clearing an RGBA8 IOSurface
+TEST_P(IOSurfaceUsageTests, ClearRGBA8IOSurface) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedIOSurfaceRef ioSurface = CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_32RGBA, 4);
+
+    uint32_t data = 0x04030201;
+    DoClearTest(ioSurface.get(), wgpu::TextureFormat::RGBA8Unorm, &data, sizeof(data));
+}
+
+// Test that texture with color is cleared when isInitialized = false
+TEST_P(IOSurfaceUsageTests, UninitializedTextureIsCleared) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    ScopedIOSurfaceRef ioSurface = CreateSinglePlaneIOSurface(1, 1, kCVPixelFormatType_32RGBA, 4);
+    uint32_t data = 0x04030201;
+
+    IOSurfaceLock(ioSurface.get(), 0, nullptr);
+    memcpy(IOSurfaceGetBaseAddress(ioSurface.get()), &data, sizeof(data));
+    IOSurfaceUnlock(ioSurface.get(), 0, nullptr);
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.sampleCount = 1;
+    textureDescriptor.mipLevelCount = 1;
+    textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+    // wrap ioSurface and ensure color is not visible when isInitialized set to false
+    wgpu::Texture ioSurfaceTexture = WrapIOSurface(&textureDescriptor, ioSurface.get(), false);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), ioSurfaceTexture, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(IOSurfaceValidationTests, MetalBackend());
+DAWN_INSTANTIATE_TEST(IOSurfaceUsageTests, MetalBackend());
diff --git a/src/dawn/tests/end2end/IndexFormatTests.cpp b/src/dawn/tests/end2end/IndexFormatTests.cpp
new file mode 100644
index 0000000..b563c64
--- /dev/null
+++ b/src/dawn/tests/end2end/IndexFormatTests.cpp
@@ -0,0 +1,492 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 400;
+
+class IndexFormatTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    }
+
+    utils::BasicRenderPass renderPass;
+
+    wgpu::RenderPipeline MakeTestPipeline(
+        wgpu::IndexFormat format,
+        wgpu::PrimitiveTopology primitiveTopology = wgpu::PrimitiveTopology::TriangleStrip) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            struct VertexIn {
+                @location(0) pos : vec4<f32>,
+                @builtin(vertex_index) idx : u32,
+            }
+
+            @stage(vertex) fn main(input : VertexIn) -> @builtin(position) vec4<f32> {
+                // 0xFFFFFFFE is a designated invalid index used by some tests.
+                if (input.idx == 0xFFFFFFFEu) {
+                    return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                }
+                return input.pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = primitiveTopology;
+        descriptor.primitive.stripIndexFormat = format;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+};
+
+// Test that the Uint32 index format is correctly interpreted
+TEST_P(IndexFormatTest, Uint32) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint32);
+
+    wgpu::Buffer vertexBuffer = utils::CreateBufferFromData<float>(
+        device, wgpu::BufferUsage::Vertex,
+        {-1.0f, -1.0f, 0.0f, 1.0f,  // Note Vertices[0] = Vertices[1]
+         -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, 0.0f, 1.0f});
+    // If this is interpreted as Uint16, then it would be 0, 1, 0, ... and would draw nothing.
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {1, 2, 3});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 100, 300);
+}
+
+// Test that the Uint16 index format is correctly interpreted
+TEST_P(IndexFormatTest, Uint16) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint16);
+
+    wgpu::Buffer vertexBuffer = utils::CreateBufferFromData<float>(
+        device, wgpu::BufferUsage::Vertex,
+        {-1.0f, -1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, 0.0f, 1.0f});
+    // If this is interpreted as uint32, it will have index 1 and 2 be both 0 and render nothing
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint16_t>(device, wgpu::BufferUsage::Index, {1, 2, 0, 0, 0, 0});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 100, 300);
+}
+
+// Test that the index format used is the format of the last set pipeline. This is to
+// prevent a case in D3D12 where the index format would be captured from the last
+// pipeline on SetIndexBuffer.
+TEST_P(IndexFormatTest, ChangePipelineAfterSetIndexBuffer) {
+    wgpu::RenderPipeline pipeline32 = MakeTestPipeline(wgpu::IndexFormat::Uint32);
+    wgpu::RenderPipeline pipeline16 = MakeTestPipeline(wgpu::IndexFormat::Uint16);
+
+    wgpu::Buffer vertexBuffer = utils::CreateBufferFromData<float>(
+        device, wgpu::BufferUsage::Vertex,
+        {-1.0f, -1.0f, 0.0f, 1.0f,  // Note Vertices[0] = Vertices[1]
+         -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, 0.0f, 1.0f});
+    // If this is interpreted as Uint16, then it would be 0, 1, 0, ... and would draw nothing.
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {1, 2, 3});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline16);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.SetPipeline(pipeline32);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 100, 300);
+}
+
+// Test that setting the index buffer before the pipeline works, this is important
+// for backends where the index format is passed inside the call to SetIndexBuffer
+// because it needs to be done lazily (to query the format from the last pipeline).
+TEST_P(IndexFormatTest, SetIndexBufferBeforeSetPipeline) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint32);
+
+    wgpu::Buffer vertexBuffer = utils::CreateBufferFromData<float>(
+        device, wgpu::BufferUsage::Vertex,
+        {-1.0f, -1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, 0.0f, 1.0f});
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), renderPass.color, 100, 300);
+}
+
+// Test that index buffers of multiple formats can be used with a pipeline that
+// doesn't use strip primitive topology.
+TEST_P(IndexFormatTest, SetIndexBufferDifferentFormats) {
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(wgpu::IndexFormat::Undefined, wgpu::PrimitiveTopology::TriangleList);
+
+    wgpu::Buffer vertexBuffer = utils::CreateBufferFromData<float>(
+        device, wgpu::BufferUsage::Vertex,
+        {-1.0f, -1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, 0.0f, 1.0f});
+    wgpu::Buffer indexBuffer32 =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+    wgpu::Buffer indexBuffer16 =
+        utils::CreateBufferFromData<uint16_t>(device, wgpu::BufferUsage::Index, {0, 1, 2, 0});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetIndexBuffer(indexBuffer32, wgpu::IndexFormat::Uint32);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), renderPass.color, 100, 300);
+
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetIndexBuffer(indexBuffer16, wgpu::IndexFormat::Uint16);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.DrawIndexed(3);
+        pass.End();
+    }
+
+    commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), renderPass.color, 100, 300);
+}
+
+// Tests for primitive restart use vertices like in the drawing and draw the following
+// indices: 0 1 2 PRIM_RESTART 3 4 5. Then A and B should be written but not C.
+//      |--------------|
+//      |      0---1   |
+//      |       \ B|   |
+//      |         \|   |
+//      |  3   C   2   |
+//      |  |\          |
+//      |  |A \        |
+//      |  4---5       |
+//      |--------------|
+
+class TriangleStripPrimitiveRestartTests : public IndexFormatTest {
+  protected:
+    wgpu::Buffer mVertexBuffer;
+
+    void SetUp() override {
+        IndexFormatTest::SetUp();
+        mVertexBuffer = utils::CreateBufferFromData<float>(device, wgpu::BufferUsage::Vertex,
+                                                           {
+                                                               0.0f,  1.0f,  0.0f, 1.0f,  // 0
+                                                               1.0f,  1.0f,  0.0f, 1.0f,  // 1
+                                                               1.0f,  0.0f,  0.0f, 1.0f,  // 2
+                                                               -1.0f, 0.0f,  0.0f, 1.0f,  // 3
+                                                               -1.0f, -1.0f, 0.0f, 1.0f,  // 4
+                                                               0.0f,  -1.0f, 0.0f, 1.0f,  // 5
+                                                           });
+    }
+};
+
+// Test use of primitive restart with an Uint32 index format
+TEST_P(TriangleStripPrimitiveRestartTests, Uint32PrimitiveRestart) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint32);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index,
+                                              {
+                                                  0,
+                                                  1,
+                                                  2,
+                                                  0xFFFFFFFFu,
+                                                  3,
+                                                  4,
+                                                  5,
+                                              });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(7);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 50, 350);  // A
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 350, 50);  // B
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 198, 200);  // C
+}
+
+// Same as the above test, but uses an OOB index to emulate primitive restart being disabled,
+// causing point C to be written to.
+TEST_P(TriangleStripPrimitiveRestartTests, Uint32WithoutPrimitiveRestart) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint32);
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index,
+                                              {
+                                                  0,
+                                                  1,
+                                                  2,
+                                                  // Not a valid index.
+                                                  0xFFFFFFFEu,
+                                                  3,
+                                                  4,
+                                                  5,
+                                              });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(7);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 50, 350);   // A
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 350, 50);   // B
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 198, 200);  // C
+}
+
+// Test use of primitive restart with an Uint16 index format
+TEST_P(TriangleStripPrimitiveRestartTests, Uint16PrimitiveRestart) {
+    wgpu::RenderPipeline pipeline = MakeTestPipeline(wgpu::IndexFormat::Uint16);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint16_t>(device, wgpu::BufferUsage::Index,
+                                              {
+                                                  0,
+                                                  1,
+                                                  2,
+                                                  0xFFFFu,
+                                                  3,
+                                                  4,
+                                                  5,
+                                                  // This value is for padding.
+                                                  0xFFFFu,
+                                              });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        pass.DrawIndexed(7);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 50, 350);  // A
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 350, 50);  // B
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 198, 200);  // C
+}
+
+// Tests for primitive restart use vertices like in the drawing and draw the following
+// indices: 0 1 PRIM_RESTART 2 3. Then 1 and 2 should be written but not A.
+//      |--------------|
+//      |      3      0|
+//      |      |      ||
+//      |      |      ||
+//      |      2  A   1|
+//      |              |
+//      |              |
+//      |              |
+//      |--------------|
+
+class LineStripPrimitiveRestartTests : public IndexFormatTest {
+  protected:
+  protected:
+    wgpu::Buffer mVertexBuffer;
+
+    void SetUp() override {
+        IndexFormatTest::SetUp();
+        mVertexBuffer = utils::CreateBufferFromData<float>(device, wgpu::BufferUsage::Vertex,
+                                                           {
+                                                               1.0f, 1.0f, 0.0f, 1.0f,  // 0
+                                                               1.0f, 0.0f, 0.0f, 1.0f,  // 1
+                                                               0.0f, 0.0f, 0.0f, 1.0f,  // 2
+                                                               0.0f, 1.0f, 0.0f, 1.0f   // 3
+                                                           });
+    }
+};
+
+TEST_P(LineStripPrimitiveRestartTests, Uint32PrimitiveRestart) {
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(wgpu::IndexFormat::Uint32, wgpu::PrimitiveTopology::LineStrip);
+
+    wgpu::Buffer indexBuffer = utils::CreateBufferFromData<uint32_t>(
+        device, wgpu::BufferUsage::Index, {0, 1, 0xFFFFFFFFu, 2, 3});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(5);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 399, 199);  // 1
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 199, 199);  // 2
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 300, 199);   // A
+}
+
+// Same as the above test, but uses an OOB index to emulate primitive restart being disabled,
+// causing point A to be written to.
+TEST_P(LineStripPrimitiveRestartTests, Uint32WithoutPrimitiveRestart) {
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(wgpu::IndexFormat::Uint32, wgpu::PrimitiveTopology::LineStrip);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index,
+                                              {0, 1,  // Not a valid index
+                                               0xFFFFFFFEu, 2, 3});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(5);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 399, 199);  // 1
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 199, 199);  // 2
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 300, 199);  // A
+}
+
+TEST_P(LineStripPrimitiveRestartTests, Uint16PrimitiveRestart) {
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(wgpu::IndexFormat::Uint16, wgpu::PrimitiveTopology::LineStrip);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint16_t>(device, wgpu::BufferUsage::Index,
+                                              {0, 1, 0xFFFFu, 2, 3,  // This value is for padding.
+                                               0xFFFFu});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, mVertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        pass.DrawIndexed(5);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 399, 199);  // 1
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 199, 199);  // 2
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 300, 199);   // A
+}
+
+DAWN_INSTANTIATE_TEST(IndexFormatTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+DAWN_INSTANTIATE_TEST(TriangleStripPrimitiveRestartTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+DAWN_INSTANTIATE_TEST(LineStripPrimitiveRestartTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/MaxLimitTests.cpp b/src/dawn/tests/end2end/MaxLimitTests.cpp
new file mode 100644
index 0000000..2d43a75
--- /dev/null
+++ b/src/dawn/tests/end2end/MaxLimitTests.cpp
@@ -0,0 +1,244 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class MaxLimitTests : public DawnTest {
+  public:
+    wgpu::RequiredLimits GetRequiredLimits(const wgpu::SupportedLimits& supported) override {
+        wgpu::RequiredLimits required = {};
+        required.limits = supported.limits;
+        return required;
+    }
+};
+
+// Test using the maximum amount of workgroup memory works
+TEST_P(MaxLimitTests, MaxComputeWorkgroupStorageSize) {
+    uint32_t maxComputeWorkgroupStorageSize =
+        GetSupportedLimits().limits.maxComputeWorkgroupStorageSize;
+
+    std::string shader = R"(
+        struct Dst {
+            value0 : u32,
+            value1 : u32,
+        }
+
+        @group(0) @binding(0) var<storage, write> dst : Dst;
+
+        struct WGData {
+          value0 : u32,
+          // padding such that value0 and value1 are the first and last bytes of the memory.
+          @size()" + std::to_string(maxComputeWorkgroupStorageSize / 4 - 2) +
+                         R"() padding : u32,
+          value1 : u32,
+        }
+        var<workgroup> wg_data : WGData;
+
+        @stage(compute) @workgroup_size(2,1,1)
+        fn main(@builtin(local_invocation_index) LocalInvocationIndex : u32) {
+            if (LocalInvocationIndex == 0u) {
+                // Put data into the first and last byte of workgroup memory.
+                wg_data.value0 = 79u;
+                wg_data.value1 = 42u;
+            }
+
+            workgroupBarrier();
+
+            if (LocalInvocationIndex == 1u) {
+                // Read data out of workgroup memory into a storage buffer.
+                dst.value0 = wg_data.value0;
+                dst.value1 = wg_data.value1;
+            }
+        }
+    )";
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, shader.c_str());
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Set up dst storage buffer
+    wgpu::BufferDescriptor dstDesc;
+    dstDesc.size = 8;
+    dstDesc.usage =
+        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
+
+    // Set up bind group and issue dispatch
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, dst},
+                                                     });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Dispatch(1);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_EQ(79, dst, 0);
+    EXPECT_BUFFER_U32_EQ(42, dst, 4);
+}
+
+// Test using the maximum uniform/storage buffer binding size works
+TEST_P(MaxLimitTests, MaxBufferBindingSize) {
+    // The uniform buffer layout used in this test is not supported on ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // TODO(crbug.com/dawn/1172)
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    // TODO(crbug.com/dawn/1217): Remove this suppression.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsNvidia());
+
+    for (wgpu::BufferUsage usage : {wgpu::BufferUsage::Storage, wgpu::BufferUsage::Uniform}) {
+        uint64_t maxBufferBindingSize;
+        std::string shader;
+        switch (usage) {
+            case wgpu::BufferUsage::Storage:
+                maxBufferBindingSize = GetSupportedLimits().limits.maxStorageBufferBindingSize;
+                // TODO(crbug.com/dawn/1160): Usually can't actually allocate a buffer this large
+                // because allocating the buffer for zero-initialization fails.
+                maxBufferBindingSize =
+                    std::min(maxBufferBindingSize, uint64_t(2) * 1024 * 1024 * 1024);
+                // With WARP or on 32-bit platforms, such large buffer allocations often fail.
+#ifdef DAWN_PLATFORM_32_BIT
+                if (IsWindows()) {
+                    continue;
+                }
+#endif
+                if (IsWARP()) {
+                    maxBufferBindingSize =
+                        std::min(maxBufferBindingSize, uint64_t(512) * 1024 * 1024);
+                }
+                shader = R"(
+                  struct Buf {
+                      values : array<u32>
+                  }
+
+                  struct Result {
+                      value0 : u32,
+                      value1 : u32,
+                  }
+
+                  @group(0) @binding(0) var<storage, read> buf : Buf;
+                  @group(0) @binding(1) var<storage, write> result : Result;
+
+                  @stage(compute) @workgroup_size(1,1,1)
+                  fn main() {
+                      result.value0 = buf.values[0];
+                      result.value1 = buf.values[arrayLength(&buf.values) - 1u];
+                  }
+              )";
+                break;
+            case wgpu::BufferUsage::Uniform:
+                maxBufferBindingSize = GetSupportedLimits().limits.maxUniformBufferBindingSize;
+
+                // Clamp to not exceed the maximum i32 value for the WGSL @size(x) annotation.
+                maxBufferBindingSize = std::min(maxBufferBindingSize,
+                                                uint64_t(std::numeric_limits<int32_t>::max()) + 8);
+
+                shader = R"(
+                  struct Buf {
+                      value0 : u32,
+                      // padding such that value0 and value1 are the first and last bytes of the memory.
+                      @size()" +
+                         std::to_string(maxBufferBindingSize - 8) + R"() padding : u32,
+                      value1 : u32,
+                  }
+
+                  struct Result {
+                      value0 : u32,
+                      value1 : u32,
+                  }
+
+                  @group(0) @binding(0) var<uniform> buf : Buf;
+                  @group(0) @binding(1) var<storage, write> result : Result;
+
+                  @stage(compute) @workgroup_size(1,1,1)
+                  fn main() {
+                      result.value0 = buf.value0;
+                      result.value1 = buf.value1;
+                  }
+              )";
+                break;
+            default:
+                UNREACHABLE();
+        }
+
+        device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+
+        wgpu::BufferDescriptor bufDesc;
+        bufDesc.size = Align(maxBufferBindingSize, 4);
+        bufDesc.usage = usage | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer buffer = device.CreateBuffer(&bufDesc);
+
+        WGPUErrorType oomResult;
+        device.PopErrorScope([](WGPUErrorType type, const char*,
+                                void* userdata) { *static_cast<WGPUErrorType*>(userdata) = type; },
+                             &oomResult);
+        FlushWire();
+        // Max buffer size is smaller than the max buffer binding size.
+        DAWN_TEST_UNSUPPORTED_IF(oomResult == WGPUErrorType_OutOfMemory);
+
+        wgpu::BufferDescriptor resultBufDesc;
+        resultBufDesc.size = 8;
+        resultBufDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer resultBuffer = device.CreateBuffer(&resultBufDesc);
+
+        uint32_t value0 = 89234;
+        queue.WriteBuffer(buffer, 0, &value0, sizeof(value0));
+
+        uint32_t value1 = 234;
+        uint64_t value1Offset = Align(maxBufferBindingSize - sizeof(value1), 4);
+        queue.WriteBuffer(buffer, value1Offset, &value1, sizeof(value1));
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = utils::CreateShaderModule(device, shader.c_str());
+        csDesc.compute.entryPoint = "main";
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, buffer}, {1, resultBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER_U32_EQ(value0, resultBuffer, 0)
+            << "maxBufferBindingSize=" << maxBufferBindingSize << "; offset=" << 0
+            << "; usage=" << usage;
+        EXPECT_BUFFER_U32_EQ(value1, resultBuffer, 4)
+            << "maxBufferBindingSize=" << maxBufferBindingSize << "; offset=" << value1Offset
+            << "; usage=" << usage;
+    }
+}
+
+DAWN_INSTANTIATE_TEST(MaxLimitTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/MemoryAllocationStressTests.cpp b/src/dawn/tests/end2end/MemoryAllocationStressTests.cpp
new file mode 100644
index 0000000..59fe068
--- /dev/null
+++ b/src/dawn/tests/end2end/MemoryAllocationStressTests.cpp
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+class MemoryAllocationStressTests : public DawnTest {};
+
+// Test memory allocation is freed correctly when creating and destroying large buffers.
+// It will consume a total of 100G of memory, 1G each time. Expect not to trigger out of memory on
+// devices with gpu memory less than 100G.
+TEST_P(MemoryAllocationStressTests, LargeBuffer) {
+    // TODO(crbug.com/dawn/957): Memory leak on D3D12, the memory of destroyed buffer cannot be
+    // released.
+    DAWN_TEST_UNSUPPORTED_IF(IsD3D12());
+
+    // TODO(crbug.com/dawn/957): Check whether it can be reproduced on each backend.
+    DAWN_TEST_UNSUPPORTED_IF(IsMetal() || IsOpenGL() || IsOpenGLES() || IsVulkan());
+
+    uint32_t count = 100;
+    for (uint32_t i = 0; i < count; i++) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 1024 * 1024 * 1024;  // 1G
+        descriptor.usage = wgpu::BufferUsage::Storage;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+        buffer.Destroy();
+    }
+}
+
+DAWN_INSTANTIATE_TEST(MemoryAllocationStressTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/MultisampledRenderingTests.cpp b/src/dawn/tests/end2end/MultisampledRenderingTests.cpp
new file mode 100644
index 0000000..bc6bb40
--- /dev/null
+++ b/src/dawn/tests/end2end/MultisampledRenderingTests.cpp
@@ -0,0 +1,1115 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class MultisampledRenderingTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // TODO(crbug.com/dawn/738): Test output is wrong with D3D12 + WARP.
+        DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+        InitTexturesForTest();
+    }
+
+    void InitTexturesForTest() {
+        mMultisampledColorTexture = CreateTextureForRenderAttachment(kColorFormat, kSampleCount);
+        mMultisampledColorView = mMultisampledColorTexture.CreateView();
+        mResolveTexture = CreateTextureForRenderAttachment(kColorFormat, 1);
+        mResolveView = mResolveTexture.CreateView();
+
+        mDepthStencilTexture = CreateTextureForRenderAttachment(kDepthStencilFormat, kSampleCount);
+        mDepthStencilView = mDepthStencilTexture.CreateView();
+    }
+
+    wgpu::RenderPipeline CreateRenderPipelineWithOneOutputForTest(
+        bool testDepth,
+        uint32_t sampleMask = 0xFFFFFFFF,
+        bool alphaToCoverageEnabled = false,
+        bool flipTriangle = false) {
+        const char* kFsOneOutputWithDepth = R"(
+            struct U {
+                color : vec4<f32>,
+                depth : f32,
+            }
+            @group(0) @binding(0) var<uniform> uBuffer : U;
+
+            struct FragmentOut {
+                @location(0) color : vec4<f32>,
+                @builtin(frag_depth) depth : f32,
+            }
+
+            @stage(fragment) fn main() -> FragmentOut {
+                var output : FragmentOut;
+                output.color = uBuffer.color;
+                output.depth = uBuffer.depth;
+                return output;
+            })";
+
+        const char* kFsOneOutputWithoutDepth = R"(
+            struct U {
+                color : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> uBuffer : U;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return uBuffer.color;
+            })";
+
+        const char* fs = testDepth ? kFsOneOutputWithDepth : kFsOneOutputWithoutDepth;
+
+        return CreateRenderPipelineForTest(fs, 1, testDepth, sampleMask, alphaToCoverageEnabled,
+                                           flipTriangle);
+    }
+
+    wgpu::RenderPipeline CreateRenderPipelineWithTwoOutputsForTest(
+        uint32_t sampleMask = 0xFFFFFFFF,
+        bool alphaToCoverageEnabled = false) {
+        const char* kFsTwoOutputs = R"(
+            struct U {
+                color0 : vec4<f32>,
+                color1 : vec4<f32>,
+            }
+            @group(0) @binding(0) var<uniform> uBuffer : U;
+
+            struct FragmentOut {
+                @location(0) color0 : vec4<f32>,
+                @location(1) color1 : vec4<f32>,
+            }
+
+            @stage(fragment) fn main() -> FragmentOut {
+                var output : FragmentOut;
+                output.color0 = uBuffer.color0;
+                output.color1 = uBuffer.color1;
+                return output;
+            })";
+
+        return CreateRenderPipelineForTest(kFsTwoOutputs, 2, false, sampleMask,
+                                           alphaToCoverageEnabled);
+    }
+
+    wgpu::Texture CreateTextureForRenderAttachment(wgpu::TextureFormat format,
+                                                   uint32_t sampleCount,
+                                                   uint32_t mipLevelCount = 1,
+                                                   uint32_t arrayLayerCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = kWidth << (mipLevelCount - 1);
+        descriptor.size.height = kHeight << (mipLevelCount - 1);
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        return device.CreateTexture(&descriptor);
+    }
+
+    void EncodeRenderPassForTest(wgpu::CommandEncoder commandEncoder,
+                                 const wgpu::RenderPassDescriptor& renderPass,
+                                 const wgpu::RenderPipeline& pipeline,
+                                 const float* uniformData,
+                                 uint32_t uniformDataSize) {
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, uniformData, uniformDataSize, wgpu::BufferUsage::Uniform);
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, uniformBuffer, 0, uniformDataSize}});
+
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.SetPipeline(pipeline);
+        renderPassEncoder.SetBindGroup(0, bindGroup);
+        renderPassEncoder.Draw(3);
+        renderPassEncoder.End();
+    }
+
+    void EncodeRenderPassForTest(wgpu::CommandEncoder commandEncoder,
+                                 const wgpu::RenderPassDescriptor& renderPass,
+                                 const wgpu::RenderPipeline& pipeline,
+                                 const wgpu::Color& color) {
+        const float uniformData[4] = {static_cast<float>(color.r), static_cast<float>(color.g),
+                                      static_cast<float>(color.b), static_cast<float>(color.a)};
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, uniformData,
+                                sizeof(float) * 4);
+    }
+
+    utils::ComboRenderPassDescriptor CreateComboRenderPassDescriptorForTest(
+        std::initializer_list<wgpu::TextureView> colorViews,
+        std::initializer_list<wgpu::TextureView> resolveTargetViews,
+        wgpu::LoadOp colorLoadOp,
+        wgpu::LoadOp depthStencilLoadOp,
+        bool hasDepthStencilAttachment) {
+        ASSERT(colorViews.size() == resolveTargetViews.size());
+
+        constexpr wgpu::Color kClearColor = {0.0f, 0.0f, 0.0f, 0.0f};
+        constexpr float kClearDepth = 1.0f;
+
+        utils::ComboRenderPassDescriptor renderPass(colorViews);
+        uint32_t i = 0;
+        for (const wgpu::TextureView& resolveTargetView : resolveTargetViews) {
+            renderPass.cColorAttachments[i].loadOp = colorLoadOp;
+            renderPass.cColorAttachments[i].clearValue = kClearColor;
+            renderPass.cColorAttachments[i].resolveTarget = resolveTargetView;
+            ++i;
+        }
+
+        renderPass.cDepthStencilAttachmentInfo.depthClearValue = kClearDepth;
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = depthStencilLoadOp;
+
+        if (hasDepthStencilAttachment) {
+            renderPass.cDepthStencilAttachmentInfo.view = mDepthStencilView;
+            renderPass.depthStencilAttachment = &renderPass.cDepthStencilAttachmentInfo;
+        }
+
+        return renderPass;
+    }
+
+    void VerifyResolveTarget(const wgpu::Color& inputColor,
+                             wgpu::Texture resolveTexture,
+                             uint32_t mipmapLevel = 0,
+                             uint32_t arrayLayer = 0,
+                             const float msaaCoverage = 0.5f) {
+        // In this test we only check the pixel in the middle of the texture.
+        constexpr uint32_t kMiddleX = (kWidth - 1) / 2;
+        constexpr uint32_t kMiddleY = (kHeight - 1) / 2;
+
+        RGBA8 expectedColor = ExpectedMSAAColor(inputColor, msaaCoverage);
+        EXPECT_TEXTURE_EQ(&expectedColor, resolveTexture, {kMiddleX, kMiddleY, arrayLayer}, {1, 1},
+                          mipmapLevel);
+    }
+
+    constexpr static uint32_t kWidth = 3;
+    constexpr static uint32_t kHeight = 3;
+    constexpr static uint32_t kSampleCount = 4;
+    constexpr static wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr static wgpu::TextureFormat kDepthStencilFormat =
+        wgpu::TextureFormat::Depth24PlusStencil8;
+
+    constexpr static uint32_t kFirstSampleMaskBit = 0x00000001;
+    constexpr static uint32_t kSecondSampleMaskBit = 0x00000002;
+    constexpr static uint32_t kThirdSampleMaskBit = 0x00000004;
+    constexpr static uint32_t kFourthSampleMaskBit = 0x00000008;
+
+    wgpu::Texture mMultisampledColorTexture;
+    wgpu::TextureView mMultisampledColorView;
+    wgpu::Texture mResolveTexture;
+    wgpu::TextureView mResolveView;
+    wgpu::Texture mDepthStencilTexture;
+    wgpu::TextureView mDepthStencilView;
+
+    wgpu::RenderPipeline CreateRenderPipelineForTest(const char* fs,
+                                                     uint32_t numColorAttachments,
+                                                     bool hasDepthStencilAttachment,
+                                                     uint32_t sampleMask = 0xFFFFFFFF,
+                                                     bool alphaToCoverageEnabled = false,
+                                                     bool flipTriangle = false) {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        // Draw a bottom-right triangle. In standard 4xMSAA pattern, for the pixels on diagonal,
+        // only two of the samples will be touched.
+        const char* vs = R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0)
+                );
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })";
+
+        // Draw a bottom-left triangle.
+        const char* vsFlipped = R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0)
+                );
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })";
+
+        if (flipTriangle) {
+            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, vsFlipped);
+        } else {
+            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, vs);
+        }
+
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, fs);
+
+        if (hasDepthStencilAttachment) {
+            wgpu::DepthStencilState* depthStencil =
+                pipelineDescriptor.EnableDepthStencil(kDepthStencilFormat);
+            depthStencil->depthWriteEnabled = true;
+            depthStencil->depthCompare = wgpu::CompareFunction::Less;
+        }
+
+        pipelineDescriptor.multisample.count = kSampleCount;
+        pipelineDescriptor.multisample.mask = sampleMask;
+        pipelineDescriptor.multisample.alphaToCoverageEnabled = alphaToCoverageEnabled;
+
+        pipelineDescriptor.cFragment.targetCount = numColorAttachments;
+        for (uint32_t i = 0; i < numColorAttachments; ++i) {
+            pipelineDescriptor.cTargets[i].format = kColorFormat;
+        }
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+        return pipeline;
+    }
+
+    RGBA8 ExpectedMSAAColor(const wgpu::Color color, const double msaaCoverage) {
+        RGBA8 result;
+        result.r = static_cast<uint8_t>(std::min(255.0, 256 * color.r * msaaCoverage));
+        result.g = static_cast<uint8_t>(std::min(255.0, 256 * color.g * msaaCoverage));
+        result.b = static_cast<uint8_t>(std::min(255.0, 256 * color.b * msaaCoverage));
+        result.a = static_cast<uint8_t>(std::min(255.0, 256 * color.a * msaaCoverage));
+        return result;
+    }
+};
+
+// Test using one multisampled color attachment with resolve target can render correctly.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTexture) {
+    constexpr bool kTestDepth = false;
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(kTestDepth);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    // storeOp should not affect the result in the resolve target.
+    for (wgpu::StoreOp storeOp : {wgpu::StoreOp::Store, wgpu::StoreOp::Discard}) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+        // Draw a green triangle.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+                {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+                kTestDepth);
+            renderPass.cColorAttachments[0].storeOp = storeOp;
+            std::array<float, 4> kUniformData = {kGreen.r, kGreen.g, kGreen.b, kGreen.a};
+            constexpr uint32_t kSize = sizeof(kUniformData);
+            EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(),
+                                    kSize);
+        }
+
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        VerifyResolveTarget(kGreen, mResolveTexture);
+    }
+}
+
+// Test multisampled rendering with depth test works correctly.
+TEST_P(MultisampledRenderingTest, MultisampledRenderingWithDepthTest) {
+    constexpr bool kTestDepth = true;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(kTestDepth);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+
+    // In first render pass we draw a green triangle with depth value == 0.2f.
+    {
+        utils::ComboRenderPassDescriptor renderPass =
+            CreateComboRenderPassDescriptorForTest({mMultisampledColorView}, {mResolveView},
+                                                   wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, true);
+        std::array<float, 5> kUniformData = {kGreen.r, kGreen.g, kGreen.b, kGreen.a,  // Color
+                                             0.2f};                                   // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    // In second render pass we draw a red triangle with depth value == 0.5f.
+    // This red triangle should not be displayed because it is behind the green one that is drawn in
+    // the last render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Load, wgpu::LoadOp::Load,
+            kTestDepth);
+
+        std::array<float, 5> kUniformData = {kRed.r, kRed.g, kRed.b, kRed.a,  // color
+                                             0.5f};                           // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    // The color of the pixel in the middle of mResolveTexture should be green if MSAA resolve runs
+    // correctly with depth test.
+    VerifyResolveTarget(kGreen, mResolveTexture);
+}
+
+// Test rendering into a multisampled color attachment and doing MSAA resolve in another render pass
+// works correctly.
+TEST_P(MultisampledRenderingTest, ResolveInAnotherRenderPass) {
+    constexpr bool kTestDepth = false;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(kTestDepth);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    // In first render pass we draw a green triangle and do not set the resolve target.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {nullptr}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    // In second render pass we ony do MSAA resolve with no draw call.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Load, wgpu::LoadOp::Load,
+            kTestDepth);
+
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.End();
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kGreen, mResolveTexture);
+}
+
+// Test doing MSAA resolve into multiple resolve targets works correctly.
+TEST_P(MultisampledRenderingTest, ResolveIntoMultipleResolveTargets) {
+    // TODO(dawn:462): Issue in the D3D12 validation layers.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia() && IsBackendValidationEnabled());
+
+    wgpu::TextureView multisampledColorView2 =
+        CreateTextureForRenderAttachment(kColorFormat, kSampleCount).CreateView();
+    wgpu::Texture resolveTexture2 = CreateTextureForRenderAttachment(kColorFormat, 1);
+    wgpu::TextureView resolveView2 = resolveTexture2.CreateView();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithTwoOutputsForTest();
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+    constexpr bool kTestDepth = false;
+
+    // Draw a red triangle to the first color attachment, and a blue triangle to the second color
+    // attachment, and do MSAA resolve on two render targets in one render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView, multisampledColorView2}, {mResolveView, resolveView2},
+            wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, kTestDepth);
+
+        std::array<float, 8> kUniformData = {
+            static_cast<float>(kRed.r),   static_cast<float>(kRed.g),
+            static_cast<float>(kRed.b),   static_cast<float>(kRed.a),
+            static_cast<float>(kGreen.r), static_cast<float>(kGreen.g),
+            static_cast<float>(kGreen.b), static_cast<float>(kGreen.a)};
+        constexpr uint32_t kSize = sizeof(kUniformData);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kRed, mResolveTexture);
+    VerifyResolveTarget(kGreen, resolveTexture2);
+}
+
+// Test doing MSAA resolve on one multisampled texture twice works correctly.
+TEST_P(MultisampledRenderingTest, ResolveOneMultisampledTextureTwice) {
+    constexpr bool kTestDepth = false;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(kTestDepth);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    wgpu::Texture resolveTexture2 = CreateTextureForRenderAttachment(kColorFormat, 1);
+
+    // In first render pass we draw a green triangle and specify mResolveView as the resolve target.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    // In second render pass we do MSAA resolve into resolveTexture2.
+    {
+        wgpu::TextureView resolveView2 = resolveTexture2.CreateView();
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {resolveView2}, wgpu::LoadOp::Load, wgpu::LoadOp::Load,
+            kTestDepth);
+
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.End();
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kGreen, mResolveTexture);
+    VerifyResolveTarget(kGreen, resolveTexture2);
+}
+
+// Test using a layer of a 2D texture as resolve target works correctly.
+TEST_P(MultisampledRenderingTest, ResolveIntoOneMipmapLevelOf2DTexture) {
+    // TODO(dawn:462): Issue in the D3D12 validation layers.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    constexpr uint32_t kBaseMipLevel = 2;
+
+    wgpu::TextureViewDescriptor textureViewDescriptor;
+    textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    textureViewDescriptor.format = kColorFormat;
+    textureViewDescriptor.baseArrayLayer = 0;
+    textureViewDescriptor.arrayLayerCount = 1;
+    textureViewDescriptor.mipLevelCount = 1;
+    textureViewDescriptor.baseMipLevel = kBaseMipLevel;
+
+    wgpu::Texture resolveTexture =
+        CreateTextureForRenderAttachment(kColorFormat, 1, kBaseMipLevel + 1, 1);
+    wgpu::TextureView resolveView = resolveTexture.CreateView(&textureViewDescriptor);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr bool kTestDepth = false;
+
+    // Draw a green triangle and do MSAA resolve.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {resolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+        wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(kTestDepth);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kGreen, resolveTexture, kBaseMipLevel, 0);
+}
+
+// Test using a level or a layer of a 2D array texture as resolve target works correctly.
+TEST_P(MultisampledRenderingTest, ResolveInto2DArrayTexture) {
+    // TODO(dawn:462): Issue in the D3D12 validation layers.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsBackendValidationEnabled());
+
+    wgpu::TextureView multisampledColorView2 =
+        CreateTextureForRenderAttachment(kColorFormat, kSampleCount).CreateView();
+
+    wgpu::TextureViewDescriptor baseTextureViewDescriptor;
+    baseTextureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    baseTextureViewDescriptor.format = kColorFormat;
+    baseTextureViewDescriptor.arrayLayerCount = 1;
+    baseTextureViewDescriptor.mipLevelCount = 1;
+
+    // Create resolveTexture1 with only 1 mipmap level.
+    constexpr uint32_t kBaseArrayLayer1 = 2;
+    constexpr uint32_t kBaseMipLevel1 = 0;
+    wgpu::Texture resolveTexture1 =
+        CreateTextureForRenderAttachment(kColorFormat, 1, kBaseMipLevel1 + 1, kBaseArrayLayer1 + 1);
+    wgpu::TextureViewDescriptor resolveViewDescriptor1 = baseTextureViewDescriptor;
+    resolveViewDescriptor1.baseArrayLayer = kBaseArrayLayer1;
+    resolveViewDescriptor1.baseMipLevel = kBaseMipLevel1;
+    wgpu::TextureView resolveView1 = resolveTexture1.CreateView(&resolveViewDescriptor1);
+
+    // Create resolveTexture2 with (kBaseMipLevel2 + 1) mipmap levels and resolve into its last
+    // mipmap level.
+    constexpr uint32_t kBaseArrayLayer2 = 5;
+    constexpr uint32_t kBaseMipLevel2 = 3;
+    wgpu::Texture resolveTexture2 =
+        CreateTextureForRenderAttachment(kColorFormat, 1, kBaseMipLevel2 + 1, kBaseArrayLayer2 + 1);
+    wgpu::TextureViewDescriptor resolveViewDescriptor2 = baseTextureViewDescriptor;
+    resolveViewDescriptor2.baseArrayLayer = kBaseArrayLayer2;
+    resolveViewDescriptor2.baseMipLevel = kBaseMipLevel2;
+    wgpu::TextureView resolveView2 = resolveTexture2.CreateView(&resolveViewDescriptor2);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithTwoOutputsForTest();
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+    constexpr bool kTestDepth = false;
+
+    // Draw a red triangle to the first color attachment, and a green triangle to the second color
+    // attachment, and do MSAA resolve on two render targets in one render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView, multisampledColorView2}, {resolveView1, resolveView2},
+            wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, kTestDepth);
+
+        std::array<float, 8> kUniformData = {kRed.r,   kRed.g,   kRed.b,   kRed.a,     // color1
+                                             kGreen.r, kGreen.g, kGreen.b, kGreen.a};  // color2
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kRed, resolveTexture1, kBaseMipLevel1, kBaseArrayLayer1);
+    VerifyResolveTarget(kGreen, resolveTexture2, kBaseMipLevel2, kBaseArrayLayer2);
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with a non-default sample mask.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithSampleMask) {
+    constexpr bool kTestDepth = false;
+    // The second and third samples are included,
+    // only the second one is covered by the triangle.
+    constexpr uint32_t kSampleMask = kSecondSampleMaskBit | kThirdSampleMaskBit;
+    constexpr float kMSAACoverage = 0.25f;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMask);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    // Draw a green triangle.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kGreen, mResolveTexture, 0, 0, kMSAACoverage);
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with the final sample mask empty.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithEmptyFinalSampleMask) {
+    constexpr bool kTestDepth = false;
+    // The third and fourth samples are included,
+    // none of which is covered by the triangle.
+    constexpr uint32_t kSampleMask = kThirdSampleMaskBit | kFourthSampleMaskBit;
+    constexpr float kMSAACoverage = 0.00f;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMask);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    // Draw a green triangle.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kGreen, mResolveTexture, 0, 0, kMSAACoverage);
+}
+
+// Test doing MSAA resolve into multiple resolve targets works correctly with a non-default sample
+// mask.
+TEST_P(MultisampledRenderingTest, ResolveIntoMultipleResolveTargetsWithSampleMask) {
+    wgpu::TextureView multisampledColorView2 =
+        CreateTextureForRenderAttachment(kColorFormat, kSampleCount).CreateView();
+    wgpu::Texture resolveTexture2 = CreateTextureForRenderAttachment(kColorFormat, 1);
+    wgpu::TextureView resolveView2 = resolveTexture2.CreateView();
+
+    // The first and fourth samples are included,
+    // only the first one is covered by the triangle.
+    constexpr uint32_t kSampleMask = kFirstSampleMaskBit | kFourthSampleMaskBit;
+    constexpr float kMSAACoverage = 0.25f;
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineWithTwoOutputsForTest(kSampleMask);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+    constexpr bool kTestDepth = false;
+
+    // Draw a red triangle to the first color attachment, and a blue triangle to the second color
+    // attachment, and do MSAA resolve on two render targets in one render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView, multisampledColorView2}, {mResolveView, resolveView2},
+            wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, kTestDepth);
+
+        std::array<float, 8> kUniformData = {kRed.r,   kRed.g,   kRed.b,   kRed.a,     // color1
+                                             kGreen.r, kGreen.g, kGreen.b, kGreen.a};  // color2
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kRed, mResolveTexture, 0, 0, kMSAACoverage);
+    VerifyResolveTarget(kGreen, resolveTexture2, 0, 0, kMSAACoverage);
+}
+
+// Test multisampled rendering with depth test works correctly with a non-default sample mask.
+TEST_P(MultisampledRenderingTest, MultisampledRenderingWithDepthTestAndSampleMask) {
+    constexpr bool kTestDepth = true;
+    // The second sample is included in the first render pass and it's covered by the triangle.
+    constexpr uint32_t kSampleMaskGreen = kSecondSampleMaskBit;
+    // The first and second samples are included in the second render pass,
+    // both are covered by the triangle.
+    constexpr uint32_t kSampleMaskRed = kFirstSampleMaskBit | kSecondSampleMaskBit;
+    constexpr float kMSAACoverage = 0.50f;
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipelineGreen =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMaskGreen);
+    wgpu::RenderPipeline pipelineRed =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMaskRed);
+
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+
+    // In first render pass we draw a green triangle with depth value == 0.2f.
+    // We will only write to the second sample.
+    {
+        utils::ComboRenderPassDescriptor renderPass =
+            CreateComboRenderPassDescriptorForTest({mMultisampledColorView}, {mResolveView},
+                                                   wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, true);
+        std::array<float, 5> kUniformData = {kGreen.r, kGreen.g, kGreen.b, kGreen.a,  // Color
+                                             0.2f};                                   // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipelineGreen, kUniformData.data(),
+                                kSize);
+    }
+
+    // In second render pass we draw a red triangle with depth value == 0.5f.
+    // We will only write to the first sample, since the second one is red with a smaller depth
+    // value.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Load, wgpu::LoadOp::Load,
+            kTestDepth);
+
+        std::array<float, 5> kUniformData = {kRed.r, kRed.g, kRed.b, kRed.a,  // color
+                                             0.5f};                           // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipelineRed, kUniformData.data(),
+                                kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    constexpr wgpu::Color kHalfGreenHalfRed = {(kGreen.r + kRed.r) / 2.0, (kGreen.g + kRed.g) / 2.0,
+                                               (kGreen.b + kRed.b) / 2.0,
+                                               (kGreen.a + kRed.a) / 2.0};
+
+    // The color of the pixel in the middle of mResolveTexture should be half green and half
+    // red if MSAA resolve runs correctly with depth test.
+    VerifyResolveTarget(kHalfGreenHalfRed, mResolveTexture, 0, 0, kMSAACoverage);
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with non-default sample mask and shader-output mask.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithSampleMaskAndShaderOutputMask) {
+    // TODO(github.com/KhronosGroup/SPIRV-Cross/issues/1626): SPIRV-Cross produces bad GLSL for
+    // unsigned SampleMask builtins
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    // TODO(crbug.com/dawn/673): Work around or enforce via validation that sample variables are not
+    // supported on some platforms.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_sample_variables"));
+
+    // TODO(crbug.com/dawn/571): Fails on Metal / D3D12 because SPIRV-Cross produces bad shaders
+    // for the SPIR-V outputted by Tint. Reenable once we use Tint's MSL / HLSL generators.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() || IsMetal());
+
+    constexpr bool kTestDepth = false;
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+    // The second and third samples are included in the shader-output mask.
+    // The first and third samples are included in the sample mask.
+    // Since we're now looking at a fully covered pixel, the rasterization mask
+    // includes all the samples.
+    // Thus the final mask includes only the third sample.
+    constexpr float kMSAACoverage = 0.25f;
+    constexpr uint32_t kSampleMask = kFirstSampleMaskBit | kThirdSampleMaskBit;
+    const char* fs = R"(
+        struct U {
+            color : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uBuffer : U;
+
+        struct FragmentOut {
+            @location(0) color : vec4<f32>,
+            @builtin(sample_mask) sampleMask : u32,
+        }
+
+        @stage(fragment) fn main() -> FragmentOut {
+            var output : FragmentOut;
+            output.color = uBuffer.color;
+            output.sampleMask = 6u;
+            return output;
+        })";
+
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineForTest(fs, 1, false, kSampleMask);
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+
+    // Draw a green triangle.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    RGBA8 expectedColor = ExpectedMSAAColor(kGreen, kMSAACoverage);
+    EXPECT_TEXTURE_EQ(&expectedColor, mResolveTexture, {1, 0}, {1, 1});
+}
+
+// Test doing MSAA resolve into multiple resolve targets works correctly with a non-default
+// shader-output mask.
+TEST_P(MultisampledRenderingTest, ResolveIntoMultipleResolveTargetsWithShaderOutputMask) {
+    // TODO(github.com/KhronosGroup/SPIRV-Cross/issues/1626): SPIRV-Cross produces bad GLSL for
+    // unsigned SampleMask builtins
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    // TODO(crbug.com/dawn/673): Work around or enforce via validation that sample variables are not
+    // supported on some platforms.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_sample_variables"));
+
+    // TODO(crbug.com/dawn/571): Fails on Metal / D3D12 because SPIRV-Cross produces bad shaders
+    // for the SPIR-V outputted by Tint. Reenable once we use Tint's MSL / HLSL generators.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() || IsMetal());
+
+    wgpu::TextureView multisampledColorView2 =
+        CreateTextureForRenderAttachment(kColorFormat, kSampleCount).CreateView();
+    wgpu::Texture resolveTexture2 = CreateTextureForRenderAttachment(kColorFormat, 1);
+    wgpu::TextureView resolveView2 = resolveTexture2.CreateView();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    // The second and third samples are included in the shader-output mask,
+    // only the first one is covered by the triangle.
+    constexpr float kMSAACoverage = 0.25f;
+    const char* fs = R"(
+        struct U {
+            color0 : vec4<f32>,
+            color1 : vec4<f32>,
+        }
+        @group(0) @binding(0) var<uniform> uBuffer : U;
+
+        struct FragmentOut {
+            @location(0) color0 : vec4<f32>,
+            @location(1) color1 : vec4<f32>,
+            @builtin(sample_mask) sampleMask : u32,
+        }
+
+        @stage(fragment) fn main() -> FragmentOut {
+            var output : FragmentOut;
+            output.color0 = uBuffer.color0;
+            output.color1 = uBuffer.color1;
+            output.sampleMask = 6u;
+            return output;
+        })";
+
+    wgpu::RenderPipeline pipeline = CreateRenderPipelineForTest(fs, 2, false);
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.8f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.8f};
+    constexpr bool kTestDepth = false;
+
+    // Draw a red triangle to the first color attachment, and a blue triangle to the second color
+    // attachment, and do MSAA resolve on two render targets in one render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView, multisampledColorView2}, {mResolveView, resolveView2},
+            wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, kTestDepth);
+
+        std::array<float, 8> kUniformData = {kRed.r,   kRed.g,   kRed.b,   kRed.a,     // color1
+                                             kGreen.r, kGreen.g, kGreen.b, kGreen.a};  // color2
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(), kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    VerifyResolveTarget(kRed, mResolveTexture, 0, 0, kMSAACoverage);
+    VerifyResolveTarget(kGreen, resolveTexture2, 0, 0, kMSAACoverage);
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with alphaToCoverageEnabled.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithAlphaToCoverage) {
+    constexpr bool kTestDepth = false;
+    constexpr uint32_t kSampleMask = 0xFFFFFFFF;
+    constexpr bool kAlphaToCoverageEnabled = true;
+
+    // Setting alpha <= 0 must result in alpha-to-coverage mask being empty.
+    // Setting alpha = 0.5f should result in alpha-to-coverage mask including half the samples,
+    // but this is not guaranteed by the spec. The Metal spec seems to guarantee that this is
+    // indeed the case.
+    // Setting alpha >= 1 must result in alpha-to-coverage mask being full.
+    for (float alpha : {-1.0f, 0.0f, 0.5f, 1.0f, 2.0f}) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(
+            kTestDepth, kSampleMask, kAlphaToCoverageEnabled);
+
+        const wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, alpha};
+
+        // Draw a green triangle.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+                {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+                kTestDepth);
+
+            EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+        }
+
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        // For alpha = {0, 0.5, 1} we expect msaaCoverage to correspond to the value of alpha.
+        float msaaCoverage = alpha;
+        if (alpha < 0.0f) {
+            msaaCoverage = 0.0f;
+        }
+        if (alpha > 1.0f) {
+            msaaCoverage = 1.0f;
+        }
+
+        RGBA8 expectedColor = ExpectedMSAAColor(kGreen, msaaCoverage);
+        EXPECT_TEXTURE_EQ(&expectedColor, mResolveTexture, {1, 0}, {1, 1});
+    }
+}
+
+// Test doing MSAA resolve into multiple resolve targets works correctly with
+// alphaToCoverage. The alphaToCoverage mask is computed based on the alpha
+// component of the first color render attachment.
+TEST_P(MultisampledRenderingTest, ResolveIntoMultipleResolveTargetsWithAlphaToCoverage) {
+    wgpu::TextureView multisampledColorView2 =
+        CreateTextureForRenderAttachment(kColorFormat, kSampleCount).CreateView();
+    wgpu::Texture resolveTexture2 = CreateTextureForRenderAttachment(kColorFormat, 1);
+    wgpu::TextureView resolveView2 = resolveTexture2.CreateView();
+    constexpr uint32_t kSampleMask = 0xFFFFFFFF;
+    constexpr float kMSAACoverage = 0.50f;
+    constexpr bool kAlphaToCoverageEnabled = true;
+
+    // The alpha-to-coverage mask should not depend on the alpha component of the
+    // second color render attachment.
+    // We test alpha = 0.51f and 0.99f instead of 0.50f and 1.00f because there are some rounding
+    // differences on QuadroP400 devices in that case.
+    for (float alpha : {0.0f, 0.51f, 0.99f}) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPipeline pipeline =
+            CreateRenderPipelineWithTwoOutputsForTest(kSampleMask, kAlphaToCoverageEnabled);
+
+        constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.51f};
+        const wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, alpha};
+        constexpr bool kTestDepth = false;
+
+        // Draw a red triangle to the first color attachment, and a blue triangle to the second
+        // color attachment, and do MSAA resolve on two render targets in one render pass.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+                {mMultisampledColorView, multisampledColorView2}, {mResolveView, resolveView2},
+                wgpu::LoadOp::Clear, wgpu::LoadOp::Clear, kTestDepth);
+
+            std::array<float, 8> kUniformData = {
+                static_cast<float>(kRed.r),   static_cast<float>(kRed.g),
+                static_cast<float>(kRed.b),   static_cast<float>(kRed.a),
+                static_cast<float>(kGreen.r), static_cast<float>(kGreen.g),
+                static_cast<float>(kGreen.b), static_cast<float>(kGreen.a)};
+            constexpr uint32_t kSize = sizeof(kUniformData);
+            EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kUniformData.data(),
+                                    kSize);
+        }
+
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        // Alpha to coverage affects both the color outputs, but the mask is computed
+        // using only the first one.
+        RGBA8 expectedRed = ExpectedMSAAColor(kRed, kMSAACoverage);
+        RGBA8 expectedGreen = ExpectedMSAAColor(kGreen, kMSAACoverage);
+        EXPECT_TEXTURE_EQ(&expectedRed, mResolveTexture, {1, 0}, {1, 1});
+        EXPECT_TEXTURE_EQ(&expectedGreen, resolveTexture2, {1, 0}, {1, 1});
+    }
+}
+
+// Test multisampled rendering with depth test works correctly with alphaToCoverage.
+TEST_P(MultisampledRenderingTest, MultisampledRenderingWithDepthTestAndAlphaToCoverage) {
+    // This test fails because Swiftshader is off-by-one with its ((a+b)/2 + (c+d)/2)/2 fast resolve
+    // algorithm.
+    DAWN_SUPPRESS_TEST_IF(IsSwiftshader() || IsANGLE());
+
+    constexpr bool kTestDepth = true;
+    constexpr uint32_t kSampleMask = 0xFFFFFFFF;
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPipeline pipelineGreen =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMask, true);
+    wgpu::RenderPipeline pipelineRed =
+        CreateRenderPipelineWithOneOutputForTest(kTestDepth, kSampleMask, false);
+
+    // We test alpha = 0.51f and 0.81f instead of 0.50f and 0.80f because there are some
+    // rounding differences on QuadroP400 devices in that case.
+    constexpr wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, 0.51f};
+    constexpr wgpu::Color kRed = {0.8f, 0.0f, 0.0f, 0.81f};
+
+    // In first render pass we draw a green triangle with depth value == 0.2f.
+    // We will only write to half the samples since the alphaToCoverage mode
+    // is enabled for that render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+            kTestDepth);
+        std::array<float, 5> kUniformData = {kGreen.r, kGreen.g, kGreen.b, kGreen.a,  // Color
+                                             0.2f};                                   // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipelineGreen, kUniformData.data(),
+                                kSize);
+    }
+
+    // In second render pass we draw a red triangle with depth value == 0.5f.
+    // We will write to all the samples since the alphaToCoverageMode is diabled for
+    // that render pass.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+            {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Load, wgpu::LoadOp::Load,
+            kTestDepth);
+
+        std::array<float, 5> kUniformData = {kRed.r, kRed.g, kRed.b, kRed.a,  // color
+                                             0.5f};                           // depth
+        constexpr uint32_t kSize = sizeof(kUniformData);
+        EncodeRenderPassForTest(commandEncoder, renderPass, pipelineRed, kUniformData.data(),
+                                kSize);
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    constexpr wgpu::Color kHalfGreenHalfRed = {(kGreen.r + kRed.r) / 2.0, (kGreen.g + kRed.g) / 2.0,
+                                               (kGreen.b + kRed.b) / 2.0,
+                                               (kGreen.a + kRed.a) / 2.0};
+    RGBA8 expectedColor = ExpectedMSAAColor(kHalfGreenHalfRed, 1.0f);
+
+    EXPECT_TEXTURE_EQ(&expectedColor, mResolveTexture, {1, 0}, {1, 1});
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with alphaToCoverageEnabled and a sample mask.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithAlphaToCoverageAndSampleMask) {
+    // This test fails because Swiftshader is off-by-one with its ((a+b)/2 + (c+d)/2)/2 fast resolve
+    // algorithm.
+    DAWN_SUPPRESS_TEST_IF(IsSwiftshader() || IsANGLE());
+
+    // TODO(dawn:491): This doesn't work on Metal, because we're using both the shader-output
+    // mask (emulting the sampleMask from RenderPipeline) and alpha-to-coverage at the same
+    // time. See the issue: https://github.com/gpuweb/gpuweb/issues/959.
+    DAWN_SUPPRESS_TEST_IF(IsMetal());
+
+    constexpr bool kTestDepth = false;
+    constexpr float kMSAACoverage = 0.50f;
+    constexpr uint32_t kSampleMask = kFirstSampleMaskBit | kThirdSampleMaskBit;
+    constexpr bool kAlphaToCoverageEnabled = true;
+
+    // For those values of alpha we expect the proportion of samples to be covered
+    // to correspond to the value of alpha.
+    // We're assuming in the case of alpha = 0.50f that the implementation
+    // dependendent algorithm will choose exactly one of the first and third samples.
+    for (float alpha : {0.0f, 0.50f, 1.00f}) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(
+            kTestDepth, kSampleMask, kAlphaToCoverageEnabled);
+
+        const wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, alpha - 0.01f};
+
+        // Draw a green triangle.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+                {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+                kTestDepth);
+
+            EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+        }
+
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        RGBA8 expectedColor = ExpectedMSAAColor(kGreen, kMSAACoverage * alpha);
+        EXPECT_TEXTURE_EQ(&expectedColor, mResolveTexture, {1, 0}, {1, 1});
+    }
+}
+
+// Test using one multisampled color attachment with resolve target can render correctly
+// with alphaToCoverageEnabled and a rasterization mask.
+TEST_P(MultisampledRenderingTest, ResolveInto2DTextureWithAlphaToCoverageAndRasterizationMask) {
+    // This test fails because Swiftshader is off-by-one with its ((a+b)/2 + (c+d)/2)/2 fast resolve
+    // algorithm.
+    DAWN_SUPPRESS_TEST_IF(IsSwiftshader() || IsANGLE());
+
+    constexpr bool kTestDepth = false;
+    constexpr float kMSAACoverage = 0.50f;
+    constexpr uint32_t kSampleMask = 0xFFFFFFFF;
+    constexpr bool kAlphaToCoverageEnabled = true;
+    constexpr bool kFlipTriangle = true;
+
+    // For those values of alpha we expect the proportion of samples to be covered
+    // to correspond to the value of alpha.
+    // We're assuming in the case of alpha = 0.50f that the implementation
+    // dependendent algorithm will choose exactly one of the samples covered by the
+    // triangle.
+    for (float alpha : {0.0f, 0.50f, 1.00f}) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPipeline pipeline = CreateRenderPipelineWithOneOutputForTest(
+            kTestDepth, kSampleMask, kAlphaToCoverageEnabled, kFlipTriangle);
+
+        const wgpu::Color kGreen = {0.0f, 0.8f, 0.0f, alpha - 0.01f};
+
+        // Draw a green triangle.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateComboRenderPassDescriptorForTest(
+                {mMultisampledColorView}, {mResolveView}, wgpu::LoadOp::Clear, wgpu::LoadOp::Clear,
+                kTestDepth);
+
+            EncodeRenderPassForTest(commandEncoder, renderPass, pipeline, kGreen);
+        }
+
+        wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        VerifyResolveTarget(kGreen, mResolveTexture, 0, 0, kMSAACoverage * alpha);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(MultisampledRenderingTest,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_resource_heap_tier2"}),
+                      D3D12Backend({}, {"use_d3d12_render_pass"}),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend(),
+                      MetalBackend({"emulate_store_and_msaa_resolve"}),
+                      MetalBackend({"always_resolve_into_zero_level_and_layer"}),
+                      MetalBackend({"always_resolve_into_zero_level_and_layer",
+                                    "emulate_store_and_msaa_resolve"}));
diff --git a/src/dawn/tests/end2end/MultisampledSamplingTests.cpp b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
new file mode 100644
index 0000000..25ede94
--- /dev/null
+++ b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
@@ -0,0 +1,268 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    // https://github.com/gpuweb/gpuweb/issues/108
+    // Vulkan, Metal, and D3D11 have the same standard multisample pattern. D3D12 is the same as
+    // D3D11 but it was left out of the documentation.
+    // {0.375, 0.125}, {0.875, 0.375}, {0.125 0.625}, {0.625, 0.875}
+    // In this test, we store them in -1 to 1 space because it makes it
+    // simpler to upload vertex data. Y is flipped because there is a flip between clip space and
+    // rasterization space.
+    static constexpr std::array<std::array<float, 2>, 4> kSamplePositions = {
+        {{0.375 * 2 - 1, 1 - 0.125 * 2},
+         {0.875 * 2 - 1, 1 - 0.375 * 2},
+         {0.125 * 2 - 1, 1 - 0.625 * 2},
+         {0.625 * 2 - 1, 1 - 0.875 * 2}}};
+}  // anonymous namespace
+
+class MultisampledSamplingTest : public DawnTest {
+  protected:
+    static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::R8Unorm;
+    static constexpr wgpu::TextureFormat kDepthFormat = wgpu::TextureFormat::Depth32Float;
+
+    static constexpr wgpu::TextureFormat kDepthOutFormat = wgpu::TextureFormat::R32Float;
+    static constexpr uint32_t kSampleCount = 4;
+
+    // Render pipeline for drawing to a multisampled color and depth attachment.
+    wgpu::RenderPipeline drawPipeline;
+
+    // A compute pipeline to texelFetch the sample locations and output the results to a buffer.
+    wgpu::ComputePipeline checkSamplePipeline;
+
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // TODO(crbug.com/dawn/1030): Compute pipeline compilation crashes.
+        DAWN_SUPPRESS_TEST_IF(IsLinux() && IsVulkan() && IsIntel());
+
+        {
+            utils::ComboRenderPipelineDescriptor desc;
+
+            desc.vertex.module = utils::CreateShaderModule(device, R"(
+                @stage(vertex)
+                fn main(@location(0) pos : vec2<f32>) -> @builtin(position) vec4<f32> {
+                    return vec4<f32>(pos, 0.0, 1.0);
+                })");
+
+            desc.cFragment.module = utils::CreateShaderModule(device, R"(
+                struct FragmentOut {
+                    @location(0) color : f32,
+                    @builtin(frag_depth) depth : f32,
+                }
+
+                @stage(fragment) fn main() -> FragmentOut {
+                    var output : FragmentOut;
+                    output.color = 1.0;
+                    output.depth = 0.7;
+                    return output;
+                })");
+
+            desc.primitive.stripIndexFormat = wgpu::IndexFormat::Uint32;
+            desc.vertex.bufferCount = 1;
+            desc.cBuffers[0].attributeCount = 1;
+            desc.cBuffers[0].arrayStride = 2 * sizeof(float);
+            desc.cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+
+            wgpu::DepthStencilState* depthStencil = desc.EnableDepthStencil(kDepthFormat);
+            depthStencil->depthWriteEnabled = true;
+
+            desc.multisample.count = kSampleCount;
+            desc.cFragment.targetCount = 1;
+            desc.cTargets[0].format = kColorFormat;
+
+            desc.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
+
+            drawPipeline = device.CreateRenderPipeline(&desc);
+        }
+        {
+            wgpu::ComputePipelineDescriptor desc = {};
+            desc.compute.entryPoint = "main";
+            desc.compute.module = utils::CreateShaderModule(device, R"(
+                @group(0) @binding(0) var texture0 : texture_multisampled_2d<f32>;
+                @group(0) @binding(1) var texture1 : texture_depth_multisampled_2d;
+
+                struct Results {
+                    colorSamples : array<f32, 4>,
+                    depthSamples : array<f32, 4>,
+                }
+                @group(0) @binding(2) var<storage, read_write> results : Results;
+
+                @stage(compute) @workgroup_size(1) fn main() {
+                    for (var i : i32 = 0; i < 4; i = i + 1) {
+                        results.colorSamples[i] = textureLoad(texture0, vec2<i32>(0, 0), i).x;
+                        results.depthSamples[i] = textureLoad(texture1, vec2<i32>(0, 0), i);
+                    }
+                })");
+
+            checkSamplePipeline = device.CreateComputePipeline(&desc);
+        }
+    }
+};
+
+// Test that the multisampling sample positions are correct. This test works by drawing a
+// thin quad multiple times from left to right and from top to bottom on a 1x1 canvas.
+// Each time, the quad should cover a single sample position.
+// After drawing, a compute shader fetches all of the samples (both color and depth),
+// and we check that only the one covered has data.
+// We "scan" the vertical and horizontal dimensions separately to check that the triangle
+// must cover both the X and Y coordinates of the sample position (no false positives if
+// it covers the X position but not the Y, or vice versa).
+TEST_P(MultisampledSamplingTest, SamplePositions) {
+    static constexpr wgpu::Extent3D kTextureSize = {1, 1, 1};
+
+    wgpu::Texture colorTexture;
+    {
+        wgpu::TextureDescriptor desc = {};
+        desc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+        desc.size = kTextureSize;
+        desc.format = kColorFormat;
+        desc.sampleCount = kSampleCount;
+        colorTexture = device.CreateTexture(&desc);
+    }
+
+    wgpu::Texture depthTexture;
+    {
+        wgpu::TextureDescriptor desc = {};
+        desc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+        desc.size = kTextureSize;
+        desc.format = kDepthFormat;
+        desc.sampleCount = kSampleCount;
+        depthTexture = device.CreateTexture(&desc);
+    }
+
+    static constexpr float kQuadWidth = 0.075;
+    std::vector<float> vBufferData;
+
+    // Add vertices for vertical quads
+    for (uint32_t s = 0; s < kSampleCount; ++s) {
+        // clang-format off
+        vBufferData.insert(vBufferData.end(), {
+            kSamplePositions[s][0] - kQuadWidth, -1.0,
+            kSamplePositions[s][0] - kQuadWidth,  1.0,
+            kSamplePositions[s][0] + kQuadWidth, -1.0,
+            kSamplePositions[s][0] + kQuadWidth,  1.0,
+        });
+        // clang-format on
+    }
+
+    // Add vertices for horizontal quads
+    for (uint32_t s = 0; s < kSampleCount; ++s) {
+        // clang-format off
+        vBufferData.insert(vBufferData.end(), {
+            -1.0, kSamplePositions[s][1] - kQuadWidth,
+            -1.0, kSamplePositions[s][1] + kQuadWidth,
+             1.0, kSamplePositions[s][1] - kQuadWidth,
+             1.0, kSamplePositions[s][1] + kQuadWidth,
+        });
+        // clang-format on
+    }
+
+    wgpu::Buffer vBuffer = utils::CreateBufferFromData(
+        device, vBufferData.data(), static_cast<uint32_t>(vBufferData.size() * sizeof(float)),
+        wgpu::BufferUsage::Vertex);
+
+    static constexpr uint32_t kQuadNumBytes = 8 * sizeof(float);
+
+    wgpu::TextureView colorView = colorTexture.CreateView();
+    wgpu::TextureView depthView = depthTexture.CreateView();
+
+    static constexpr uint64_t kResultSize = 4 * sizeof(float) + 4 * sizeof(float);
+    uint64_t alignedResultSize = Align(kResultSize, 256);
+
+    wgpu::BufferDescriptor outputBufferDesc = {};
+    outputBufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+    outputBufferDesc.size = alignedResultSize * 8;
+    wgpu::Buffer outputBuffer = device.CreateBuffer(&outputBufferDesc);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    for (uint32_t iter = 0; iter < 2; ++iter) {
+        for (uint32_t sample = 0; sample < kSampleCount; ++sample) {
+            uint32_t sampleOffset = (iter * kSampleCount + sample);
+
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthView);
+            renderPass.cDepthStencilAttachmentInfo.depthClearValue = 0.f;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+            wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+            renderPassEncoder.SetPipeline(drawPipeline);
+            renderPassEncoder.SetVertexBuffer(0, vBuffer, kQuadNumBytes * sampleOffset,
+                                              kQuadNumBytes);
+            renderPassEncoder.Draw(4);
+            renderPassEncoder.End();
+
+            wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+            computePassEncoder.SetPipeline(checkSamplePipeline);
+            computePassEncoder.SetBindGroup(
+                0, utils::MakeBindGroup(
+                       device, checkSamplePipeline.GetBindGroupLayout(0),
+                       {{0, colorView},
+                        {1, depthView},
+                        {2, outputBuffer, alignedResultSize * sampleOffset, kResultSize}}));
+            computePassEncoder.Dispatch(1);
+            computePassEncoder.End();
+        }
+    }
+
+    wgpu::CommandBuffer commandBuffer = commandEncoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    std::array<float, 8> expectedData;
+
+    expectedData = {1, 0, 0, 0, 0.7, 0, 0, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 0 * alignedResultSize, 8)
+        << "vertical sample 0";
+
+    expectedData = {0, 1, 0, 0, 0, 0.7, 0, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 1 * alignedResultSize, 8)
+        << "vertical sample 1";
+
+    expectedData = {0, 0, 1, 0, 0, 0, 0.7, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 2 * alignedResultSize, 8)
+        << "vertical sample 2";
+
+    expectedData = {0, 0, 0, 1, 0, 0, 0, 0.7};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 3 * alignedResultSize, 8)
+        << "vertical sample 3";
+
+    expectedData = {1, 0, 0, 0, 0.7, 0, 0, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 4 * alignedResultSize, 8)
+        << "horizontal sample 0";
+
+    expectedData = {0, 1, 0, 0, 0, 0.7, 0, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 5 * alignedResultSize, 8)
+        << "horizontal sample 1";
+
+    expectedData = {0, 0, 1, 0, 0, 0, 0.7, 0};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 6 * alignedResultSize, 8)
+        << "horizontal sample 2";
+
+    expectedData = {0, 0, 0, 1, 0, 0, 0, 0.7};
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expectedData.data(), outputBuffer, 7 * alignedResultSize, 8)
+        << "horizontal sample 3";
+}
+
+DAWN_INSTANTIATE_TEST(MultisampledSamplingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/NonzeroBufferCreationTests.cpp b/src/dawn/tests/end2end/NonzeroBufferCreationTests.cpp
new file mode 100644
index 0000000..55f238f
--- /dev/null
+++ b/src/dawn/tests/end2end/NonzeroBufferCreationTests.cpp
@@ -0,0 +1,141 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include <array>
+#include <vector>
+
+class NonzeroBufferCreationTests : public DawnTest {
+  public:
+    void MapReadAsyncAndWait(wgpu::Buffer buffer, uint64_t offset, uint64_t size) {
+        bool done = false;
+        buffer.MapAsync(
+            wgpu::MapMode::Read, offset, size,
+            [](WGPUBufferMapAsyncStatus status, void* userdata) {
+                ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+                *static_cast<bool*>(userdata) = true;
+            },
+            &done);
+
+        while (!done) {
+            WaitABit();
+        }
+    }
+};
+
+// Verify that each byte of the buffer has all been initialized to 1 with the toggle enabled when it
+// is created with CopyDst usage.
+TEST_P(NonzeroBufferCreationTests, BufferCreationWithCopyDstUsage) {
+    constexpr uint32_t kSize = 32u;
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = kSize;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
+    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), buffer, 0,
+                               kSize / sizeof(uint32_t));
+}
+
+// Verify that each byte of the buffer has all been initialized to 1 with the toggle enabled when it
+// is created with MapWrite without CopyDst usage.
+TEST_P(NonzeroBufferCreationTests, BufferCreationWithMapWriteWithoutCopyDstUsage) {
+    constexpr uint32_t kSize = 32u;
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = kSize;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
+    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), buffer, 0,
+                               kSize / sizeof(uint32_t));
+}
+
+// Verify that each byte of the buffer has all been initialized to 1 with the toggle enabled when
+// it is created with mappedAtCreation == true.
+TEST_P(NonzeroBufferCreationTests, BufferCreationWithMappedAtCreation) {
+    // When we use Dawn wire, the lazy initialization of the buffers with mappedAtCreation == true
+    // are done in the Dawn wire and we don't plan to get it work with the toggle
+    // "nonzero_clear_resources_on_creation_for_testing" (we will have more tests on it in the
+    // BufferZeroInitTests.
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    constexpr uint32_t kSize = 32u;
+
+    wgpu::BufferDescriptor defaultDescriptor;
+    defaultDescriptor.size = kSize;
+    defaultDescriptor.mappedAtCreation = true;
+
+    const std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
+    const uint32_t* expectedDataPtr = reinterpret_cast<const uint32_t*>(expectedData.data());
+
+    // Buffer with MapRead usage
+    {
+        wgpu::BufferDescriptor descriptor = defaultDescriptor;
+        descriptor.usage = wgpu::BufferUsage::MapRead;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
+        buffer.Unmap();
+
+        MapReadAsyncAndWait(buffer, 0, kSize);
+        mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
+        buffer.Unmap();
+    }
+
+    // Buffer with MapWrite usage
+    {
+        wgpu::BufferDescriptor descriptor = defaultDescriptor;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
+        buffer.Unmap();
+
+        EXPECT_BUFFER_U32_RANGE_EQ(expectedDataPtr, buffer, 0, kSize / sizeof(uint32_t));
+    }
+
+    // Buffer with neither MapRead nor MapWrite usage
+    {
+        wgpu::BufferDescriptor descriptor = defaultDescriptor;
+        descriptor.usage = wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
+        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
+        buffer.Unmap();
+
+        EXPECT_BUFFER_U32_RANGE_EQ(expectedDataPtr, buffer, 0, kSize / sizeof(uint32_t));
+    }
+}
+
+DAWN_INSTANTIATE_TEST(NonzeroBufferCreationTests,
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                                   {"lazy_clear_resource_on_first_use"}),
+                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                   {"lazy_clear_resource_on_first_use"}),
+                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                    {"lazy_clear_resource_on_first_use"}),
+                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                    {"lazy_clear_resource_on_first_use"}));
diff --git a/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
new file mode 100644
index 0000000..5327406
--- /dev/null
+++ b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
@@ -0,0 +1,425 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    using Format = wgpu::TextureFormat;
+    using Aspect = wgpu::TextureAspect;
+    using Usage = wgpu::TextureUsage;
+    using Dimension = wgpu::TextureDimension;
+    using DepthOrArrayLayers = uint32_t;
+    using MipCount = uint32_t;
+    using Mip = uint32_t;
+    using SampleCount = uint32_t;
+
+    DAWN_TEST_PARAM_STRUCT(Params,
+                           Format,
+                           Aspect,
+                           Usage,
+                           Dimension,
+                           DepthOrArrayLayers,
+                           MipCount,
+                           Mip,
+                           SampleCount);
+
+    template <typename T>
+    class ExpectNonZero : public detail::CustomTextureExpectation {
+      public:
+        uint32_t DataSize() override {
+            return sizeof(T);
+        }
+
+        testing::AssertionResult Check(const void* data, size_t size) override {
+            ASSERT(size % DataSize() == 0 && size > 0);
+            const T* actual = static_cast<const T*>(data);
+            T value = *actual;
+            if (value == T(0)) {
+                return testing::AssertionFailure()
+                       << "Expected data to be non-zero, was " << value << std::endl;
+            }
+            for (size_t i = 0; i < size / DataSize(); ++i) {
+                if (actual[i] != value) {
+                    return testing::AssertionFailure()
+                           << "Expected data[" << i << "] to match non-zero value " << value
+                           << ", actual " << actual[i] << std::endl;
+                }
+            }
+
+            return testing::AssertionSuccess();
+        }
+    };
+
+#define EXPECT_TEXTURE_NONZERO(T, ...) \
+    AddTextureExpectation(__FILE__, __LINE__, new ExpectNonZero<T>(), __VA_ARGS__)
+
+    class NonzeroTextureCreationTests : public DawnTestWithParams<Params> {
+      protected:
+        constexpr static uint32_t kSize = 128;
+
+        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+            if (GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
+                return {wgpu::FeatureName::TextureCompressionBC};
+            }
+            return {};
+        }
+
+        void Run() {
+            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                     !SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}));
+
+            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+            // reading from Snorm textures.
+            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::RGBA8Snorm &&
+                                     HasToggleEnabled("disable_snorm_read"));
+
+            // TODO(crbug.com/dawn/791): Determine Intel specific platforms this occurs on, and
+            // implement a workaround on all backends (happens on Windows too, but not on our test
+            // machines).
+            DAWN_SUPPRESS_TEST_IF(
+                (GetParam().mFormat == wgpu::TextureFormat::Depth32Float ||
+                 GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8) &&
+                IsMetal() && IsIntel() && GetParam().mMip != 0);
+
+            // TODO(crbug.com/dawn/1071): Implement a workaround on Intel/Metal backends.
+            DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::R8Unorm ||
+                                   GetParam().mFormat == wgpu::TextureFormat::RG8Unorm) &&
+                                  GetParam().mMipCount > 1 &&
+                                  HasToggleEnabled("disable_r8_rg8_mipmaps"));
+
+            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+            // reading from depth/stencil.
+            DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
+                                  GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
+                                  HasToggleEnabled("disable_depth_stencil_read"));
+
+            // TODO(crbug.com/dawn/593): Test depends on glTextureView which is unsupported on GLES.
+            DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
+                                  GetParam().mAspect == wgpu::TextureAspect::DepthOnly &&
+                                  IsOpenGLES());
+
+            // GL may support the feature, but reading data back is not implemented.
+            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                     (IsOpenGL() || IsOpenGLES()));
+
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = GetParam().mDimension;
+            descriptor.size.width = kSize;
+            descriptor.size.height = kSize;
+            descriptor.size.depthOrArrayLayers = GetParam().mDepthOrArrayLayers;
+            descriptor.sampleCount = GetParam().mSampleCount;
+            descriptor.format = GetParam().mFormat;
+            descriptor.usage = GetParam().mUsage;
+            descriptor.mipLevelCount = GetParam().mMipCount;
+
+            wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+            uint32_t mip = GetParam().mMip;
+            uint32_t mipSize = std::max(kSize >> mip, 1u);
+            uint32_t depthOrArrayLayers = GetParam().mDimension == wgpu::TextureDimension::e3D
+                                              ? std::max(GetParam().mDepthOrArrayLayers >> mip, 1u)
+                                              : GetParam().mDepthOrArrayLayers;
+            switch (GetParam().mFormat) {
+                case wgpu::TextureFormat::R8Unorm: {
+                    if (GetParam().mSampleCount > 1) {
+                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 1,
+                                                    GetParam().mSampleCount, 0, mip,
+                                                    new ExpectNonZero<float>());
+                    } else {
+                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint8_t>(), texture, {0, 0, 0},
+                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
+                    }
+                    break;
+                }
+                case wgpu::TextureFormat::RG8Unorm: {
+                    if (GetParam().mSampleCount > 1) {
+                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 2,
+                                                    GetParam().mSampleCount, 0, mip,
+                                                    new ExpectNonZero<float>());
+                    } else {
+                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint16_t>(), texture, {0, 0, 0},
+                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
+                    }
+                    break;
+                }
+                case wgpu::TextureFormat::RGBA8Unorm:
+                case wgpu::TextureFormat::RGBA8Snorm: {
+                    if (GetParam().mSampleCount > 1) {
+                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 4,
+                                                    GetParam().mSampleCount, 0, mip,
+                                                    new ExpectNonZero<float>());
+                    } else {
+                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint32_t>(), texture, {0, 0, 0},
+                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
+                    }
+                    break;
+                }
+                case wgpu::TextureFormat::Depth32Float: {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<float>(), texture, {0, 0, 0},
+                                      {mipSize, mipSize, depthOrArrayLayers}, mip);
+                    break;
+                }
+                case wgpu::TextureFormat::Depth24PlusStencil8: {
+                    switch (GetParam().mAspect) {
+                        case wgpu::TextureAspect::DepthOnly: {
+                            for (uint32_t arrayLayer = 0;
+                                 arrayLayer < GetParam().mDepthOrArrayLayers; ++arrayLayer) {
+                                ExpectSampledDepthData(texture, mipSize, mipSize, arrayLayer, mip,
+                                                       new ExpectNonZero<float>())
+                                    << "arrayLayer " << arrayLayer;
+                            }
+                            break;
+                        }
+                        case wgpu::TextureAspect::StencilOnly: {
+                            uint32_t texelCount = mipSize * mipSize * depthOrArrayLayers;
+                            std::vector<uint8_t> expectedStencil(texelCount, 1);
+                            EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0, 0},
+                                              {mipSize, mipSize, depthOrArrayLayers}, mip,
+                                              wgpu::TextureAspect::StencilOnly);
+
+                            break;
+                        }
+                        default:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+                case wgpu::TextureFormat::BC1RGBAUnorm: {
+                    // Set buffer with dirty data so we know it is cleared by the lazy cleared
+                    // texture copy
+                    uint32_t blockWidth = utils::GetTextureFormatBlockWidth(GetParam().mFormat);
+                    uint32_t blockHeight = utils::GetTextureFormatBlockHeight(GetParam().mFormat);
+                    wgpu::Extent3D copySize = {Align(mipSize, blockWidth),
+                                               Align(mipSize, blockHeight), depthOrArrayLayers};
+
+                    uint32_t bytesPerRow =
+                        utils::GetMinimumBytesPerRow(GetParam().mFormat, copySize.width);
+                    uint32_t rowsPerImage = copySize.height / blockHeight;
+
+                    uint64_t bufferSize = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
+                                                                     copySize, GetParam().mFormat);
+
+                    std::vector<uint8_t> data(bufferSize, 100);
+                    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+                        device, data.data(), bufferSize, wgpu::BufferUsage::CopySrc);
+
+                    wgpu::ImageCopyBuffer imageCopyBuffer =
+                        utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow, rowsPerImage);
+                    wgpu::ImageCopyTexture imageCopyTexture =
+                        utils::CreateImageCopyTexture(texture, mip, {0, 0, 0});
+
+                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+                    wgpu::CommandBuffer commands = encoder.Finish();
+                    queue.Submit(1, &commands);
+
+                    uint32_t copiedWidthInBytes =
+                        utils::GetTexelBlockSizeInBytes(GetParam().mFormat) * copySize.width /
+                        blockWidth;
+                    uint8_t* d = data.data();
+                    for (uint32_t z = 0; z < depthOrArrayLayers; ++z) {
+                        for (uint32_t row = 0; row < copySize.height / blockHeight; ++row) {
+                            std::fill_n(d, copiedWidthInBytes, 1);
+                            d += bytesPerRow;
+                        }
+                    }
+                    EXPECT_BUFFER_U8_RANGE_EQ(data.data(), bufferDst, 0, bufferSize);
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+            }
+        }
+    };
+
+    class NonzeroNonrenderableTextureCreationTests : public NonzeroTextureCreationTests {};
+    class NonzeroCompressedTextureCreationTests : public NonzeroTextureCreationTests {};
+    class NonzeroDepthTextureCreationTests : public NonzeroTextureCreationTests {};
+    class NonzeroDepthStencilTextureCreationTests : public NonzeroTextureCreationTests {};
+    class NonzeroMultisampledTextureCreationTests : public NonzeroTextureCreationTests {};
+
+}  // anonymous namespace
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroNonrenderableTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroCompressedTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroDepthTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroDepthStencilTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+// Test that texture clears to a non-zero value because toggle is enabled.
+TEST_P(NonzeroMultisampledTextureCreationTests, TextureCreationClears) {
+    Run();
+}
+
+DAWN_INSTANTIATE_TEST_P(
+    NonzeroTextureCreationTests,
+    {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"}),
+     OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                     {"lazy_clear_resource_on_first_use"}),
+     VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"})},
+    {wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, wgpu::TextureFormat::RGBA8Unorm},
+    {wgpu::TextureAspect::All},
+    {wgpu::TextureUsage(wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc),
+     wgpu::TextureUsage::CopySrc},
+    {wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D},
+    {1u, 7u},          // depth or array layers
+    {4u},              // mip count
+    {0u, 1u, 2u, 3u},  // mip
+    {1u}               // sample count
+);
+
+DAWN_INSTANTIATE_TEST_P(NonzeroNonrenderableTextureCreationTests,
+                        {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"}),
+                         OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                         {"lazy_clear_resource_on_first_use"}),
+                         VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"})},
+                        {wgpu::TextureFormat::RGBA8Snorm},
+                        {wgpu::TextureAspect::All},
+                        {wgpu::TextureUsage::CopySrc},
+                        {wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D},
+                        {1u, 7u},          // depth or array layers
+                        {4u},              // mip count
+                        {0u, 1u, 2u, 3u},  // mip
+                        {1u}               // sample count
+);
+
+DAWN_INSTANTIATE_TEST_P(NonzeroCompressedTextureCreationTests,
+                        {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"}),
+                         OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                         {"lazy_clear_resource_on_first_use"}),
+                         VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"})},
+                        {wgpu::TextureFormat::BC1RGBAUnorm},
+                        {wgpu::TextureAspect::All},
+                        {wgpu::TextureUsage::CopySrc},
+                        {wgpu::TextureDimension::e2D},
+                        {1u, 7u},          // depth or array layers
+                        {4u},              // mip count
+                        {0u, 1u, 2u, 3u},  // mip
+                        {1u}               // sample count
+);
+
+DAWN_INSTANTIATE_TEST_P(NonzeroDepthTextureCreationTests,
+                        {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                      {"lazy_clear_resource_on_first_use"}),
+                         OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"}),
+                         OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                         {"lazy_clear_resource_on_first_use"}),
+                         VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                                       {"lazy_clear_resource_on_first_use"})},
+                        {wgpu::TextureFormat::Depth32Float},
+                        {wgpu::TextureAspect::All},
+                        {wgpu::TextureUsage(wgpu::TextureUsage::RenderAttachment |
+                                            wgpu::TextureUsage::CopySrc),
+                         wgpu::TextureUsage::CopySrc},
+                        {wgpu::TextureDimension::e2D},
+                        {1u, 7u},          // depth or array layers
+                        {4u},              // mip count
+                        {0u, 1u, 2u, 3u},  // mip
+                        {1u}               // sample count
+);
+
+DAWN_INSTANTIATE_TEST_P(
+    NonzeroDepthStencilTextureCreationTests,
+    {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"}),
+     OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                     {"lazy_clear_resource_on_first_use"}),
+     VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"})},
+    {wgpu::TextureFormat::Depth24PlusStencil8},
+    {wgpu::TextureAspect::DepthOnly, wgpu::TextureAspect::StencilOnly},
+    {wgpu::TextureUsage(wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
+                        wgpu::TextureUsage::TextureBinding),
+     wgpu::TextureUsage(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc)},
+    {wgpu::TextureDimension::e2D},
+    {1u, 7u},          // depth or array layers
+    {4u},              // mip count
+    {0u, 1u, 2u, 3u},  // mip
+    {1u}               // sample count
+);
+
+DAWN_INSTANTIATE_TEST_P(
+    NonzeroMultisampledTextureCreationTests,
+    {D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     MetalBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                  {"lazy_clear_resource_on_first_use"}),
+     OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"}),
+     OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                     {"lazy_clear_resource_on_first_use"}),
+     VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
+                   {"lazy_clear_resource_on_first_use"})},
+    {wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, wgpu::TextureFormat::RGBA8Unorm},
+    {wgpu::TextureAspect::All},
+    {wgpu::TextureUsage(wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding),
+     wgpu::TextureUsage::TextureBinding},
+    {wgpu::TextureDimension::e2D},
+    {1u},  // depth or array layers
+    {1u},  // mip count
+    {0u},  // mip
+    {4u}   // sample count
+);
diff --git a/src/dawn/tests/end2end/ObjectCachingTests.cpp b/src/dawn/tests/end2end/ObjectCachingTests.cpp
new file mode 100644
index 0000000..85139c9
--- /dev/null
+++ b/src/dawn/tests/end2end/ObjectCachingTests.cpp
@@ -0,0 +1,368 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ObjectCachingTest : public DawnTest {};
+
+// Test that BindGroupLayouts are correctly deduplicated.
+TEST_P(ObjectCachingTest, BindGroupLayoutDeduplication) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout sameBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    EXPECT_NE(bgl.Get(), otherBgl.Get());
+    EXPECT_EQ(bgl.Get() == sameBgl.Get(), !UsesWire());
+}
+
+// Test that two similar bind group layouts won't refer to the same one if they differ by dynamic.
+TEST_P(ObjectCachingTest, BindGroupLayoutDynamic) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+    wgpu::BindGroupLayout sameBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, false}});
+
+    EXPECT_NE(bgl.Get(), otherBgl.Get());
+    EXPECT_EQ(bgl.Get() == sameBgl.Get(), !UsesWire());
+}
+
+// Test that two similar bind group layouts won't refer to the same one if they differ by
+// textureComponentType
+TEST_P(ObjectCachingTest, BindGroupLayoutTextureComponentType) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout sameBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Uint}});
+
+    EXPECT_NE(bgl.Get(), otherBgl.Get());
+    EXPECT_EQ(bgl.Get() == sameBgl.Get(), !UsesWire());
+}
+
+// Test that two similar bind group layouts won't refer to the same one if they differ by
+// viewDimension
+TEST_P(ObjectCachingTest, BindGroupLayoutViewDimension) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout sameBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                  wgpu::TextureViewDimension::e2DArray}});
+
+    EXPECT_NE(bgl.Get(), otherBgl.Get());
+    EXPECT_EQ(bgl.Get() == sameBgl.Get(), !UsesWire());
+}
+
+// Test that an error object doesn't try to uncache itself
+TEST_P(ObjectCachingTest, ErrorObjectDoesntUncache) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    ASSERT_DEVICE_ERROR(
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
+                     {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}}));
+}
+
+// Test that PipelineLayouts are correctly deduplicated.
+TEST_P(ObjectCachingTest, PipelineLayoutDeduplication) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout samePl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout otherPl1 = utils::MakeBasicPipelineLayout(device, nullptr);
+    wgpu::PipelineLayout otherPl2 = utils::MakeBasicPipelineLayout(device, &otherBgl);
+
+    EXPECT_NE(pl.Get(), otherPl1.Get());
+    EXPECT_NE(pl.Get(), otherPl2.Get());
+    EXPECT_EQ(pl.Get() == samePl.Get(), !UsesWire());
+}
+
+// Test that ShaderModules are correctly deduplicated.
+TEST_P(ObjectCachingTest, ShaderModuleDeduplication) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule sameModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+    wgpu::ShaderModule otherModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+
+    EXPECT_NE(module.Get(), otherModule.Get());
+    EXPECT_EQ(module.Get() == sameModule.Get(), !UsesWire());
+}
+
+// Test that ComputePipeline are correctly deduplicated wrt. their ShaderModule
+TEST_P(ObjectCachingTest, ComputePipelineDeduplicationOnShaderModule) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        var<workgroup> i : u32;
+        @stage(compute) @workgroup_size(1) fn main() {
+            i = 0u;
+        })");
+    wgpu::ShaderModule sameModule = utils::CreateShaderModule(device, R"(
+        var<workgroup> i : u32;
+        @stage(compute) @workgroup_size(1) fn main() {
+            i = 0u;
+        })");
+    wgpu::ShaderModule otherModule = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+        })");
+
+    EXPECT_NE(module.Get(), otherModule.Get());
+    EXPECT_EQ(module.Get() == sameModule.Get(), !UsesWire());
+
+    wgpu::PipelineLayout layout = utils::MakeBasicPipelineLayout(device, nullptr);
+
+    wgpu::ComputePipelineDescriptor desc;
+    desc.compute.entryPoint = "main";
+    desc.layout = layout;
+
+    desc.compute.module = module;
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&desc);
+
+    desc.compute.module = sameModule;
+    wgpu::ComputePipeline samePipeline = device.CreateComputePipeline(&desc);
+
+    desc.compute.module = otherModule;
+    wgpu::ComputePipeline otherPipeline = device.CreateComputePipeline(&desc);
+
+    EXPECT_NE(pipeline.Get(), otherPipeline.Get());
+    EXPECT_EQ(pipeline.Get() == samePipeline.Get(), !UsesWire());
+}
+
+// Test that ComputePipeline are correctly deduplicated wrt. their layout
+TEST_P(ObjectCachingTest, ComputePipelineDeduplicationOnLayout) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout samePl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout otherPl = utils::MakeBasicPipelineLayout(device, nullptr);
+
+    EXPECT_NE(pl.Get(), otherPl.Get());
+    EXPECT_EQ(pl.Get() == samePl.Get(), !UsesWire());
+
+    wgpu::ComputePipelineDescriptor desc;
+    desc.compute.entryPoint = "main";
+    desc.compute.module = utils::CreateShaderModule(device, R"(
+            var<workgroup> i : u32;
+            @stage(compute) @workgroup_size(1) fn main() {
+                i = 0u;
+            })");
+
+    desc.layout = pl;
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&desc);
+
+    desc.layout = samePl;
+    wgpu::ComputePipeline samePipeline = device.CreateComputePipeline(&desc);
+
+    desc.layout = otherPl;
+    wgpu::ComputePipeline otherPipeline = device.CreateComputePipeline(&desc);
+
+    EXPECT_NE(pipeline.Get(), otherPipeline.Get());
+    EXPECT_EQ(pipeline.Get() == samePipeline.Get(), !UsesWire());
+}
+
+// Test that RenderPipelines are correctly deduplicated wrt. their layout
+TEST_P(ObjectCachingTest, RenderPipelineDeduplicationOnLayout) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout otherBgl = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout samePl = utils::MakeBasicPipelineLayout(device, &bgl);
+    wgpu::PipelineLayout otherPl = utils::MakeBasicPipelineLayout(device, nullptr);
+
+    EXPECT_NE(pl.Get(), otherPl.Get());
+    EXPECT_EQ(pl.Get() == samePl.Get(), !UsesWire());
+
+    utils::ComboRenderPipelineDescriptor desc;
+    desc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    desc.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+    desc.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {
+        })");
+
+    desc.layout = pl;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+
+    desc.layout = samePl;
+    wgpu::RenderPipeline samePipeline = device.CreateRenderPipeline(&desc);
+
+    desc.layout = otherPl;
+    wgpu::RenderPipeline otherPipeline = device.CreateRenderPipeline(&desc);
+
+    EXPECT_NE(pipeline.Get(), otherPipeline.Get());
+    EXPECT_EQ(pipeline.Get() == samePipeline.Get(), !UsesWire());
+}
+
+// Test that RenderPipelines are correctly deduplicated wrt. their vertex module
+TEST_P(ObjectCachingTest, RenderPipelineDeduplicationOnVertexModule) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+    wgpu::ShaderModule sameModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+    wgpu::ShaderModule otherModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+        })");
+
+    EXPECT_NE(module.Get(), otherModule.Get());
+    EXPECT_EQ(module.Get() == sameModule.Get(), !UsesWire());
+
+    utils::ComboRenderPipelineDescriptor desc;
+    desc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    desc.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() {
+            })");
+
+    desc.vertex.module = module;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+
+    desc.vertex.module = sameModule;
+    wgpu::RenderPipeline samePipeline = device.CreateRenderPipeline(&desc);
+
+    desc.vertex.module = otherModule;
+    wgpu::RenderPipeline otherPipeline = device.CreateRenderPipeline(&desc);
+
+    EXPECT_NE(pipeline.Get(), otherPipeline.Get());
+    EXPECT_EQ(pipeline.Get() == samePipeline.Get(), !UsesWire());
+}
+
+// Test that RenderPipelines are correctly deduplicated wrt. their fragment module
+TEST_P(ObjectCachingTest, RenderPipelineDeduplicationOnFragmentModule) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {
+        })");
+    wgpu::ShaderModule sameModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {
+        })");
+    wgpu::ShaderModule otherModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+
+    EXPECT_NE(module.Get(), otherModule.Get());
+    EXPECT_EQ(module.Get() == sameModule.Get(), !UsesWire());
+
+    utils::ComboRenderPipelineDescriptor desc;
+    desc.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })");
+
+    desc.cFragment.module = module;
+    desc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+
+    desc.cFragment.module = sameModule;
+    wgpu::RenderPipeline samePipeline = device.CreateRenderPipeline(&desc);
+
+    desc.cFragment.module = otherModule;
+    wgpu::RenderPipeline otherPipeline = device.CreateRenderPipeline(&desc);
+
+    EXPECT_NE(pipeline.Get(), otherPipeline.Get());
+    EXPECT_EQ(pipeline.Get() == samePipeline.Get(), !UsesWire());
+}
+
+// Test that Samplers are correctly deduplicated.
+TEST_P(ObjectCachingTest, SamplerDeduplication) {
+    wgpu::SamplerDescriptor samplerDesc;
+    wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
+
+    wgpu::SamplerDescriptor sameSamplerDesc;
+    wgpu::Sampler sameSampler = device.CreateSampler(&sameSamplerDesc);
+
+    wgpu::SamplerDescriptor otherSamplerDescAddressModeU;
+    otherSamplerDescAddressModeU.addressModeU = wgpu::AddressMode::Repeat;
+    wgpu::Sampler otherSamplerAddressModeU = device.CreateSampler(&otherSamplerDescAddressModeU);
+
+    wgpu::SamplerDescriptor otherSamplerDescAddressModeV;
+    otherSamplerDescAddressModeV.addressModeV = wgpu::AddressMode::Repeat;
+    wgpu::Sampler otherSamplerAddressModeV = device.CreateSampler(&otherSamplerDescAddressModeV);
+
+    wgpu::SamplerDescriptor otherSamplerDescAddressModeW;
+    otherSamplerDescAddressModeW.addressModeW = wgpu::AddressMode::Repeat;
+    wgpu::Sampler otherSamplerAddressModeW = device.CreateSampler(&otherSamplerDescAddressModeW);
+
+    wgpu::SamplerDescriptor otherSamplerDescMagFilter;
+    otherSamplerDescMagFilter.magFilter = wgpu::FilterMode::Linear;
+    wgpu::Sampler otherSamplerMagFilter = device.CreateSampler(&otherSamplerDescMagFilter);
+
+    wgpu::SamplerDescriptor otherSamplerDescMinFilter;
+    otherSamplerDescMinFilter.minFilter = wgpu::FilterMode::Linear;
+    wgpu::Sampler otherSamplerMinFilter = device.CreateSampler(&otherSamplerDescMinFilter);
+
+    wgpu::SamplerDescriptor otherSamplerDescMipmapFilter;
+    otherSamplerDescMipmapFilter.mipmapFilter = wgpu::FilterMode::Linear;
+    wgpu::Sampler otherSamplerMipmapFilter = device.CreateSampler(&otherSamplerDescMipmapFilter);
+
+    wgpu::SamplerDescriptor otherSamplerDescLodMinClamp;
+    otherSamplerDescLodMinClamp.lodMinClamp += 1;
+    wgpu::Sampler otherSamplerLodMinClamp = device.CreateSampler(&otherSamplerDescLodMinClamp);
+
+    wgpu::SamplerDescriptor otherSamplerDescLodMaxClamp;
+    otherSamplerDescLodMaxClamp.lodMaxClamp += 1;
+    wgpu::Sampler otherSamplerLodMaxClamp = device.CreateSampler(&otherSamplerDescLodMaxClamp);
+
+    wgpu::SamplerDescriptor otherSamplerDescCompareFunction;
+    otherSamplerDescCompareFunction.compare = wgpu::CompareFunction::Always;
+    wgpu::Sampler otherSamplerCompareFunction =
+        device.CreateSampler(&otherSamplerDescCompareFunction);
+
+    EXPECT_NE(sampler.Get(), otherSamplerAddressModeU.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerAddressModeV.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerAddressModeW.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerMagFilter.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerMinFilter.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerMipmapFilter.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerLodMinClamp.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerLodMaxClamp.Get());
+    EXPECT_NE(sampler.Get(), otherSamplerCompareFunction.Get());
+    EXPECT_EQ(sampler.Get() == sameSampler.Get(), !UsesWire());
+}
+
+DAWN_INSTANTIATE_TEST(ObjectCachingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/OpArrayLengthTests.cpp b/src/dawn/tests/end2end/OpArrayLengthTests.cpp
new file mode 100644
index 0000000..b115e46
--- /dev/null
+++ b/src/dawn/tests/end2end/OpArrayLengthTests.cpp
@@ -0,0 +1,279 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class OpArrayLengthTest : public DawnTest {
+  protected:
+    void SetUp() {
+        DawnTest::SetUp();
+
+        // Create buffers of various size to check the length() implementation
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = 4;
+        bufferDesc.usage = wgpu::BufferUsage::Storage;
+        mStorageBuffer4 = device.CreateBuffer(&bufferDesc);
+
+        bufferDesc.size = 256;
+        mStorageBuffer256 = device.CreateBuffer(&bufferDesc);
+
+        bufferDesc.size = 512 + 256;
+        mStorageBuffer512 = device.CreateBuffer(&bufferDesc);
+
+        // Put them all in a bind group for tests to bind them easily.
+        wgpu::ShaderStage kAllStages =
+            wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Compute;
+        mBindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, kAllStages, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {1, kAllStages, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {2, kAllStages, wgpu::BufferBindingType::ReadOnlyStorage}});
+
+        mBindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                          {
+                                              {0, mStorageBuffer4, 0, 4},
+                                              {1, mStorageBuffer256, 0, wgpu::kWholeSize},
+                                              {2, mStorageBuffer512, 256, wgpu::kWholeSize},
+                                          });
+
+        // Common shader code to use these buffers in shaders, assuming they are in bindgroup index
+        // 0.
+        mShaderInterface = R"(
+            struct DataBuffer {
+                data : array<f32>
+            }
+
+            // The length should be 1 because the buffer is 4-byte long.
+            @group(0) @binding(0) var<storage, read> buffer1 : DataBuffer;
+
+            // The length should be 64 because the buffer is 256 bytes long.
+            @group(0) @binding(1) var<storage, read> buffer2 : DataBuffer;
+
+            // The length should be (512 - 16*4) / 8 = 56 because the buffer is 512 bytes long
+            // and the structure is 8 bytes big.
+            struct Buffer3Data {
+                a : f32,
+                b : i32,
+            }
+
+            struct Buffer3 {
+                @size(64) garbage : mat4x4<f32>,
+                data : array<Buffer3Data>,
+            }
+            @group(0) @binding(2) var<storage, read> buffer3 : Buffer3;
+        )";
+
+        // See comments in the shader for an explanation of these values
+        mExpectedLengths = {1, 64, 56};
+    }
+
+    wgpu::Buffer mStorageBuffer4;
+    wgpu::Buffer mStorageBuffer256;
+    wgpu::Buffer mStorageBuffer512;
+
+    wgpu::BindGroupLayout mBindGroupLayout;
+    wgpu::BindGroup mBindGroup;
+    std::string mShaderInterface;
+    std::array<uint32_t, 3> mExpectedLengths;
+};
+
+// Test OpArrayLength in the compute stage
+TEST_P(OpArrayLengthTest, Compute) {
+    // TODO(crbug.com/dawn/197): The computations for length() of unsized buffer is broken on
+    // Nvidia OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsNvidia() && (IsOpenGL() || IsOpenGLES()));
+
+    // TODO(crbug.com/dawn/1292): Some Intel drivers don't seem to like the
+    // (spurious but harmless) offset=64 that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL());
+
+    // Create a buffer to hold the result sizes and create a bindgroup for it.
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+    bufferDesc.size = sizeof(uint32_t) * mExpectedLengths.size();
+    wgpu::Buffer resultBuffer = device.CreateBuffer(&bufferDesc);
+
+    wgpu::BindGroupLayout resultLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+    wgpu::BindGroup resultBindGroup =
+        utils::MakeBindGroup(device, resultLayout, {{0, resultBuffer, 0, wgpu::kWholeSize}});
+
+    // Create the compute pipeline that stores the length()s in the result buffer.
+    wgpu::BindGroupLayout bgls[] = {mBindGroupLayout, resultLayout};
+    wgpu::PipelineLayoutDescriptor plDesc;
+    plDesc.bindGroupLayoutCount = 2;
+    plDesc.bindGroupLayouts = bgls;
+    wgpu::PipelineLayout pl = device.CreatePipelineLayout(&plDesc);
+
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.layout = pl;
+    pipelineDesc.compute.entryPoint = "main";
+    pipelineDesc.compute.module = utils::CreateShaderModule(device, (R"(
+        struct ResultBuffer {
+            data : array<u32, 3>
+        }
+        @group(1) @binding(0) var<storage, read_write> result : ResultBuffer;
+        )" + mShaderInterface + R"(
+        @stage(compute) @workgroup_size(1) fn main() {
+            result.data[0] = arrayLength(&buffer1.data);
+            result.data[1] = arrayLength(&buffer2.data);
+            result.data[2] = arrayLength(&buffer3.data);
+        })")
+                                                                        .c_str());
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    // Run a single instance of the compute shader
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, mBindGroup);
+    pass.SetBindGroup(1, resultBindGroup);
+    pass.Dispatch(1);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(mExpectedLengths.data(), resultBuffer, 0, 3);
+}
+
+// Test OpArrayLength in the fragment stage
+TEST_P(OpArrayLengthTest, Fragment) {
+    // TODO(crbug.com/dawn/197): The computations for length() of unsized buffer is broken on
+    // Nvidia OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsNvidia() && (IsOpenGL() || IsOpenGLES()));
+
+    // TODO(crbug.com/dawn/1292): Some Intel drivers don't seem to like the
+    // (spurious but harmless) offset=64 that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+
+    // Create the pipeline that computes the length of the buffers and writes it to the only render
+    // pass pixel.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, (mShaderInterface + R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            var fragColor : vec4<f32>;
+            fragColor.r = f32(arrayLength(&buffer1.data)) / 255.0;
+            fragColor.g = f32(arrayLength(&buffer2.data)) / 255.0;
+            fragColor.b = f32(arrayLength(&buffer3.data)) / 255.0;
+            fragColor.a = 0.0;
+            return fragColor;
+        })")
+                                                                        .c_str());
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    descriptor.cTargets[0].format = renderPass.colorFormat;
+    descriptor.layout = utils::MakeBasicPipelineLayout(device, &mBindGroupLayout);
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    // "Draw" the lengths to the texture.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, mBindGroup);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 expectedColor = RGBA8(mExpectedLengths[0], mExpectedLengths[1], mExpectedLengths[2], 0);
+    EXPECT_PIXEL_RGBA8_EQ(expectedColor, renderPass.color, 0, 0);
+}
+
+// Test OpArrayLength in the vertex stage
+TEST_P(OpArrayLengthTest, Vertex) {
+    // TODO(crbug.com/dawn/197): The computations for length() of unsized buffer is broken on
+    // Nvidia OpenGL. Also failing on all GLES (NV, Intel, SwANGLE).
+    DAWN_SUPPRESS_TEST_IF(IsNvidia() && IsOpenGL());
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+
+    // TODO(crbug.com/dawn/1292): Some Intel drivers don't seem to like the
+    // (spurious but harmless) offset=64 that Tint/GLSL produces.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsOpenGL());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+
+    // Create the pipeline that computes the length of the buffers and writes it to the only render
+    // pass pixel.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, (mShaderInterface + R"(
+        struct VertexOut {
+            @location(0) color : vec4<f32>,
+            @builtin(position) position : vec4<f32>,
+        }
+
+        @stage(vertex) fn main() -> VertexOut {
+            var output : VertexOut;
+            output.color.r = f32(arrayLength(&buffer1.data)) / 255.0;
+            output.color.g = f32(arrayLength(&buffer2.data)) / 255.0;
+            output.color.b = f32(arrayLength(&buffer3.data)) / 255.0;
+            output.color.a = 0.0;
+
+            output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            return output;
+        })")
+                                                                        .c_str());
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+            return color;
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    descriptor.cTargets[0].format = renderPass.colorFormat;
+    descriptor.layout = utils::MakeBasicPipelineLayout(device, &mBindGroupLayout);
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    // "Draw" the lengths to the texture.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, mBindGroup);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    RGBA8 expectedColor = RGBA8(mExpectedLengths[0], mExpectedLengths[1], mExpectedLengths[2], 0);
+    EXPECT_PIXEL_RGBA8_EQ(expectedColor, renderPass.color, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(OpArrayLengthTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/PipelineLayoutTests.cpp b/src/dawn/tests/end2end/PipelineLayoutTests.cpp
new file mode 100644
index 0000000..fef5f81
--- /dev/null
+++ b/src/dawn/tests/end2end/PipelineLayoutTests.cpp
@@ -0,0 +1,75 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Constants.h"
+#include "dawn/tests/DawnTest.h"
+
+#include <vector>
+
+class PipelineLayoutTests : public DawnTest {};
+
+// Test creating a PipelineLayout with multiple BGLs where the first BGL uses the max number of
+// dynamic buffers. This is a regression test for crbug.com/dawn/449 which would overflow when
+// dynamic offset bindings were at max. Test is successful if the pipeline layout is created
+// without error.
+TEST_P(PipelineLayoutTests, DynamicBuffersOverflow) {
+    // Create the first bind group layout which uses max number of dynamic buffers bindings.
+    wgpu::BindGroupLayout bglA;
+    {
+        std::vector<wgpu::BindGroupLayoutEntry> entries;
+        for (uint32_t i = 0; i < kMaxDynamicStorageBuffersPerPipelineLayout; i++) {
+            wgpu::BindGroupLayoutEntry entry;
+            entry.binding = i;
+            entry.visibility = wgpu::ShaderStage::Compute;
+            entry.buffer.type = wgpu::BufferBindingType::Storage;
+            entry.buffer.hasDynamicOffset = true;
+
+            entries.push_back(entry);
+        }
+
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = static_cast<uint32_t>(entries.size());
+        descriptor.entries = entries.data();
+        bglA = device.CreateBindGroupLayout(&descriptor);
+    }
+
+    // Create the second bind group layout that has one non-dynamic buffer binding.
+    // It is in the fragment stage to avoid the max per-stage storage buffer limit.
+    wgpu::BindGroupLayout bglB;
+    {
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        wgpu::BindGroupLayoutEntry entry;
+        entry.binding = 0;
+        entry.visibility = wgpu::ShaderStage::Fragment;
+        entry.buffer.type = wgpu::BufferBindingType::Storage;
+
+        descriptor.entryCount = 1;
+        descriptor.entries = &entry;
+        bglB = device.CreateBindGroupLayout(&descriptor);
+    }
+
+    // Create a pipeline layout using both bind group layouts.
+    wgpu::PipelineLayoutDescriptor descriptor;
+    std::vector<wgpu::BindGroupLayout> bindgroupLayouts = {bglA, bglB};
+    descriptor.bindGroupLayoutCount = bindgroupLayouts.size();
+    descriptor.bindGroupLayouts = bindgroupLayouts.data();
+    device.CreatePipelineLayout(&descriptor);
+}
+
+DAWN_INSTANTIATE_TEST(PipelineLayoutTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/PrimitiveStateTests.cpp b/src/dawn/tests/end2end/PrimitiveStateTests.cpp
new file mode 100644
index 0000000..d689c07
--- /dev/null
+++ b/src/dawn/tests/end2end/PrimitiveStateTests.cpp
@@ -0,0 +1,296 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 1;
+
+class DepthClampingTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::DepthClamping}));
+
+        wgpu::TextureDescriptor renderTargetDescriptor;
+        renderTargetDescriptor.size = {kRTSize, kRTSize};
+        renderTargetDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        renderTargetDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        renderTarget = device.CreateTexture(&renderTargetDescriptor);
+
+        renderTargetView = renderTarget.CreateView();
+
+        wgpu::TextureDescriptor depthDescriptor;
+        depthDescriptor.dimension = wgpu::TextureDimension::e2D;
+        depthDescriptor.size = {kRTSize, kRTSize};
+        depthDescriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        depthDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        depthTexture = device.CreateTexture(&depthDescriptor);
+
+        depthTextureView = depthTexture.CreateView();
+
+        vsModule = utils::CreateShaderModule(device, R"(
+            struct UBO {
+                color : vec3<f32>,
+                depth : f32,
+            }
+            @group(0) @binding(0) var<uniform> ubo : UBO;
+
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, ubo.depth, 1.0);
+            })");
+
+        fsModule = utils::CreateShaderModule(device, R"(
+            struct UBO {
+                color : vec3<f32>,
+                depth : f32,
+            }
+            @group(0) @binding(0) var<uniform> ubo : UBO;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(ubo.color, 1.0);
+            })");
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::DepthClamping})) {
+            requiredFeatures.push_back(wgpu::FeatureName::DepthClamping);
+        }
+        return requiredFeatures;
+    }
+
+    struct TestSpec {
+        wgpu::PrimitiveDepthClampingState* depthClampingState;
+        RGBA8 color;
+        float depth;
+        wgpu::CompareFunction depthCompareFunction;
+    };
+
+    // Each test param represents a pair of triangles with a color, depth, stencil value, and
+    // depthStencil state, one frontfacing, one backfacing Draw the triangles in order and check the
+    // expected colors for the frontfaces and backfaces
+    void DoTest(const std::vector<TestSpec>& testParams, const RGBA8& expected) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        struct TriangleData {
+            float color[3];
+            float depth;
+        };
+
+        utils::ComboRenderPassDescriptor renderPass({renderTargetView}, depthTextureView);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+        for (size_t i = 0; i < testParams.size(); ++i) {
+            const TestSpec& test = testParams[i];
+
+            TriangleData data = {
+                {static_cast<float>(test.color.r) / 255.f, static_cast<float>(test.color.g) / 255.f,
+                 static_cast<float>(test.color.b) / 255.f},
+                test.depth,
+            };
+            // Upload a buffer for each triangle's depth and color data
+            wgpu::Buffer buffer = utils::CreateBufferFromData(device, &data, sizeof(TriangleData),
+                                                              wgpu::BufferUsage::Uniform);
+
+            // Create a pipeline for the triangles with the test spec's params.
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.primitive.nextInChain = test.depthClampingState;
+            descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+            descriptor.vertex.module = vsModule;
+            descriptor.cFragment.module = fsModule;
+            wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+            depthStencil->depthWriteEnabled = true;
+            depthStencil->depthCompare = test.depthCompareFunction;
+            depthStencil->format = wgpu::TextureFormat::Depth24PlusStencil8;
+
+            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+            // Create a bind group for the data
+            wgpu::BindGroup bindGroup =
+                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(1);
+        }
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(expected, renderTarget, 0, 0) << "Pixel check failed";
+    }
+
+    wgpu::Texture renderTarget;
+    wgpu::Texture depthTexture;
+    wgpu::TextureView renderTargetView;
+    wgpu::TextureView depthTextureView;
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+};
+
+// Test that fragments beyond the far plane are clamped to 1.0 if depth clamping is enabled.
+TEST_P(DepthClampingTest, ClampOnBeyondFarPlane) {
+    wgpu::PrimitiveDepthClampingState clampingState;
+    clampingState.clampDepth = true;
+
+    DoTest(
+        {
+            // Draw a red triangle at depth 1.
+            {
+                nullptr,               /* depthClampingState */
+                RGBA8(255, 0, 0, 255), /* color */
+                1.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+            // Draw a green triangle at depth 2 which should get clamped to 1.
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                2.f,                   /* depth */
+                wgpu::CompareFunction::Equal,
+            },
+        },
+        // Since we draw the green triangle with an "equal" depth compare function, the resulting
+        // fragment should be green.
+        RGBA8(0, 255, 0, 255));
+}
+
+// Test that fragments beyond the near plane are clamped to 0.0 if depth clamping is enabled.
+TEST_P(DepthClampingTest, ClampOnBeyondNearPlane) {
+    wgpu::PrimitiveDepthClampingState clampingState;
+    clampingState.clampDepth = true;
+
+    DoTest(
+        {
+            // Draw a red triangle at depth 0.
+            {
+                nullptr,               /* depthClampingState */
+                RGBA8(255, 0, 0, 255), /* color */
+                0.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+            // Draw a green triangle at depth -1 which should get clamped to 0.
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                -1.f,                  /* depth */
+                wgpu::CompareFunction::Equal,
+            },
+        },
+        // Since we draw the green triangle with an "equal" depth compare function, the resulting
+        // fragment should be green.
+        RGBA8(0, 255, 0, 255));
+}
+
+// Test that fragments inside the view frustum are unaffected by depth clamping.
+TEST_P(DepthClampingTest, ClampOnInsideViewFrustum) {
+    wgpu::PrimitiveDepthClampingState clampingState;
+    clampingState.clampDepth = true;
+
+    DoTest(
+        {
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                0.5f,                  /* depth */
+                wgpu::CompareFunction::Always,
+            },
+        },
+        RGBA8(0, 255, 0, 255));
+}
+
+// Test that fragments outside the view frustum are clipped if depth clamping is disabled.
+TEST_P(DepthClampingTest, ClampOffOutsideViewFrustum) {
+    wgpu::PrimitiveDepthClampingState clampingState;
+    clampingState.clampDepth = false;
+
+    DoTest(
+        {
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                2.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                -1.f,                  /* depth */
+                wgpu::CompareFunction::Always,
+            },
+        },
+        RGBA8(0, 0, 0, 0));
+}
+
+// Test that fragments outside the view frustum are clipped if clampDepth is left unspecified.
+TEST_P(DepthClampingTest, ClampUnspecifiedOutsideViewFrustum) {
+    DoTest(
+        {
+            {
+                nullptr,               /* depthClampingState */
+                RGBA8(0, 255, 0, 255), /* color */
+                -1.f,                  /* depth */
+                wgpu::CompareFunction::Always,
+            },
+            {
+                nullptr,               /* depthClampingState */
+                RGBA8(0, 255, 0, 255), /* color */
+                2.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+        },
+        RGBA8(0, 0, 0, 0));
+}
+
+// Test that fragments are properly clipped or clamped if multiple render pipelines are used
+// within the same render pass with differing clampDepth values.
+TEST_P(DepthClampingTest, MultipleRenderPipelines) {
+    wgpu::PrimitiveDepthClampingState clampingState;
+    clampingState.clampDepth = true;
+
+    wgpu::PrimitiveDepthClampingState clippingState;
+    clippingState.clampDepth = false;
+
+    DoTest(
+        {
+            // Draw green with clamping
+            {
+                &clampingState,
+                RGBA8(0, 255, 0, 255), /* color */
+                2.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+            // Draw red with clipping
+            {
+                &clippingState,
+                RGBA8(255, 0, 0, 255), /* color */
+                2.f,                   /* depth */
+                wgpu::CompareFunction::Always,
+            },
+        },
+        RGBA8(0, 255, 0, 255));  // Result should be green
+}
+
+DAWN_INSTANTIATE_TEST(DepthClampingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/PrimitiveTopologyTests.cpp b/src/dawn/tests/end2end/PrimitiveTopologyTests.cpp
new file mode 100644
index 0000000..97043cf
--- /dev/null
+++ b/src/dawn/tests/end2end/PrimitiveTopologyTests.cpp
@@ -0,0 +1,303 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// Primitive topology tests work by drawing the following vertices with all the different primitive
+// topology states:
+// -------------------------------------
+// |                                   |
+// |        1        2        5        |
+// |                                   |
+// |                                   |
+// |                                   |
+// |                                   |
+// |        0        3        4        |
+// |                                   |
+// -------------------------------------
+//
+// Points: This case looks exactly like above
+//
+// Lines
+// -------------------------------------
+// |                                   |
+// |        1        2        5        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        0        3        4        |
+// |                                   |
+// -------------------------------------
+//
+// Line Strip
+// -------------------------------------
+// |                                   |
+// |        1--------2        5        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        |        |        |        |
+// |        0        3--------4        |
+// |                                   |
+// -------------------------------------
+//
+// Triangle
+// -------------------------------------
+// |                                   |
+// |        1--------2        5        |
+// |        |xxxxxxx         x|        |
+// |        |xxxxx         xxx|        |
+// |        |xxx         xxxxx|        |
+// |        |x         xxxxxxx|        |
+// |        0        3--------4        |
+// |                                   |
+// -------------------------------------
+//
+// Triangle Strip
+// -------------------------------------
+// |                                   |
+// |        1--------2        5        |
+// |        |xxxxxxxxx       x|        |
+// |        |xxxxxxxxxxx   xxx|        |
+// |        |xxx   xxxxxxxxxxx|        |
+// |        |x      xxxxxxxxxx|        |
+// |        0        3--------4        |
+// |                                   |
+// -------------------------------------
+//
+// Each of these different states is a superset of some of the previous states,
+// so for every state, we check any new added test locations that are not contained in previous
+// states We also check that the test locations of subsequent states are untouched
+
+constexpr static unsigned int kRTSize = 32;
+
+struct TestLocation {
+    unsigned int x, y;
+};
+
+constexpr TestLocation GetMidpoint(const TestLocation& a, const TestLocation& b) noexcept {
+    return {(a.x + b.x) / 2, (a.y + b.y) / 2};
+}
+
+constexpr TestLocation GetCentroid(const TestLocation& a,
+                                   const TestLocation& b,
+                                   const TestLocation& c) noexcept {
+    return {(a.x + b.x + c.x) / 3, (a.y + b.y + c.y) / 3};
+}
+
+// clang-format off
+// Offset towards one corner to avoid x or y symmetry false positives
+constexpr static unsigned int kOffset = kRTSize / 8;
+
+constexpr static TestLocation kPointTestLocations[] = {
+    { kRTSize * 1 / 4 + kOffset, kRTSize * 1 / 4 + kOffset },
+    { kRTSize * 1 / 4 + kOffset, kRTSize * 3 / 4 + kOffset },
+    { kRTSize * 2 / 4 + kOffset, kRTSize * 3 / 4 + kOffset },
+    { kRTSize * 2 / 4 + kOffset, kRTSize * 1 / 4 + kOffset },
+    { kRTSize * 3 / 4 + kOffset, kRTSize * 1 / 4 + kOffset },
+    { kRTSize * 3 / 4 + kOffset, kRTSize * 3 / 4 + kOffset },
+};
+
+constexpr static TestLocation kLineTestLocations[] = {
+    GetMidpoint(kPointTestLocations[0], kPointTestLocations[1]),
+    GetMidpoint(kPointTestLocations[2], kPointTestLocations[3]),
+    GetMidpoint(kPointTestLocations[4], kPointTestLocations[5]),
+};
+
+constexpr static TestLocation kLineStripTestLocations[] = {
+    GetMidpoint(kPointTestLocations[1], kPointTestLocations[2]),
+    GetMidpoint(kPointTestLocations[3], kPointTestLocations[4]),
+};
+
+constexpr static TestLocation kTriangleTestLocations[] = {
+    GetCentroid(kPointTestLocations[0], kPointTestLocations[1], kPointTestLocations[2]),
+    GetCentroid(kPointTestLocations[3], kPointTestLocations[4], kPointTestLocations[5]),
+};
+
+constexpr static TestLocation kTriangleStripTestLocations[] = {
+    GetCentroid(kPointTestLocations[1], kPointTestLocations[2], kPointTestLocations[3]),
+    GetCentroid(kPointTestLocations[2], kPointTestLocations[3], kPointTestLocations[4]),
+};
+
+constexpr static float kRTSizef = static_cast<float>(kRTSize);
+constexpr static float kVertices[] = {
+    2.f * (kPointTestLocations[0].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[0].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+    2.f * (kPointTestLocations[1].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[1].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+    2.f * (kPointTestLocations[2].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[2].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+    2.f * (kPointTestLocations[3].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[3].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+    2.f * (kPointTestLocations[4].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[4].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+    2.f * (kPointTestLocations[5].x + 0.5f) / kRTSizef - 1.f, -2.f * (kPointTestLocations[5].y + 0.5f) / kRTSizef + 1.0f, 0.f, 1.f,
+};
+// clang-format on
+
+class PrimitiveTopologyTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                                   wgpu::BufferUsage::Vertex);
+    }
+
+    struct LocationSpec {
+        const TestLocation* locations;
+        size_t count;
+        bool include;
+    };
+
+    template <std::size_t N>
+    constexpr LocationSpec TestPoints(TestLocation const (&points)[N], bool include) noexcept {
+        return {points, N, include};
+    }
+
+    // Draw the vertices with the given primitive topology and check the pixel values of the test
+    // locations
+    void DoTest(wgpu::PrimitiveTopology primitiveTopology,
+                const std::vector<LocationSpec>& locationSpecs) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        descriptor.primitive.topology = primitiveTopology;
+        if (primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip ||
+            primitiveTopology == wgpu::PrimitiveTopology::LineStrip) {
+            descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Uint32;
+        }
+
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        for (auto& locationSpec : locationSpecs) {
+            for (size_t i = 0; i < locationSpec.count; ++i) {
+                // If this pixel is included, check that it is green. Otherwise, check that it is
+                // black
+                RGBA8 color = locationSpec.include ? RGBA8::kGreen : RGBA8::kZero;
+                EXPECT_PIXEL_RGBA8_EQ(color, renderPass.color, locationSpec.locations[i].x,
+                                      locationSpec.locations[i].y)
+                    << "Expected (" << locationSpec.locations[i].x << ", "
+                    << locationSpec.locations[i].y << ") to be " << color;
+            }
+        }
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+    wgpu::Buffer vertexBuffer;
+};
+
+// Test Point primitive topology
+TEST_P(PrimitiveTopologyTest, PointList) {
+    DoTest(wgpu::PrimitiveTopology::PointList,
+           {
+               // Check that the points are drawn
+               TestPoints(kPointTestLocations, true),
+
+               // Check that line and triangle locations are untouched
+               TestPoints(kLineTestLocations, false),
+               TestPoints(kLineStripTestLocations, false),
+               TestPoints(kTriangleTestLocations, false),
+               TestPoints(kTriangleStripTestLocations, false),
+           });
+}
+
+// Test Line primitive topology
+TEST_P(PrimitiveTopologyTest, LineList) {
+    DoTest(wgpu::PrimitiveTopology::LineList,
+           {
+               // Check that lines are drawn
+               TestPoints(kLineTestLocations, true),
+
+               // Check that line strip and triangle locations are untouched
+               TestPoints(kLineStripTestLocations, false),
+               TestPoints(kTriangleTestLocations, false),
+               TestPoints(kTriangleStripTestLocations, false),
+           });
+}
+
+// Test LineStrip primitive topology
+TEST_P(PrimitiveTopologyTest, LineStrip) {
+    DoTest(wgpu::PrimitiveTopology::LineStrip, {
+                                                   // Check that lines are drawn
+                                                   TestPoints(kLineTestLocations, true),
+                                                   TestPoints(kLineStripTestLocations, true),
+
+                                                   // Check that triangle locations are untouched
+                                                   TestPoints(kTriangleTestLocations, false),
+                                                   TestPoints(kTriangleStripTestLocations, false),
+                                               });
+}
+
+// Test Triangle primitive topology
+TEST_P(PrimitiveTopologyTest, TriangleList) {
+    DoTest(wgpu::PrimitiveTopology::TriangleList,
+           {
+               // Check that triangles are drawn
+               TestPoints(kTriangleTestLocations, true),
+
+               // Check that triangle strip locations are untouched
+               TestPoints(kTriangleStripTestLocations, false),
+           });
+}
+
+// Test TriangleStrip primitive topology
+TEST_P(PrimitiveTopologyTest, TriangleStrip) {
+    DoTest(wgpu::PrimitiveTopology::TriangleStrip,
+           {
+               TestPoints(kTriangleTestLocations, true),
+               TestPoints(kTriangleStripTestLocations, true),
+           });
+}
+
+DAWN_INSTANTIATE_TEST(PrimitiveTopologyTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/QueryTests.cpp b/src/dawn/tests/end2end/QueryTests.cpp
new file mode 100644
index 0000000..0b8f4ac
--- /dev/null
+++ b/src/dawn/tests/end2end/QueryTests.cpp
@@ -0,0 +1,849 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class QueryTests : public DawnTest {
+  protected:
+    wgpu::Buffer CreateResolveBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
+                           wgpu::BufferUsage::CopyDst;
+        return device.CreateBuffer(&descriptor);
+    }
+};
+
+// Clear the content of the result buffer into 0xFFFFFFFF.
+constexpr static uint64_t kSentinelValue = ~uint64_t(0u);
+constexpr static uint64_t kZero = 0u;
+constexpr uint64_t kMinDestinationOffset = 256;
+constexpr uint64_t kMinCount = kMinDestinationOffset / sizeof(uint64_t);
+
+class OcclusionExpectation : public detail::Expectation {
+  public:
+    enum class Result { Zero, NonZero };
+
+    ~OcclusionExpectation() override = default;
+
+    OcclusionExpectation(Result expected) {
+        mExpected = expected;
+    }
+
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size % sizeof(uint64_t) == 0);
+        const uint64_t* actual = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < size / sizeof(uint64_t); i++) {
+            if (actual[i] == kSentinelValue) {
+                return testing::AssertionFailure()
+                       << "Data[" << i << "] was not written (it kept the sentinel value of "
+                       << kSentinelValue << ")." << std::endl;
+            }
+            if (mExpected == Result::Zero && actual[i] != 0) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be zero, actual: " << actual[i] << "."
+                       << std::endl;
+            }
+            if (mExpected == Result::NonZero && actual[i] == 0) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be non-zero." << std::endl;
+            }
+        }
+
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    Result mExpected;
+};
+
+class OcclusionQueryTests : public QueryTests {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // Create basic render pipeline
+        vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0, -1.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+    }
+
+    struct ScissorRect {
+        uint32_t x;
+        uint32_t y;
+        uint32_t width;
+        uint32_t height;
+    };
+
+    wgpu::QuerySet CreateOcclusionQuerySet(uint32_t count) {
+        wgpu::QuerySetDescriptor descriptor;
+        descriptor.count = count;
+        descriptor.type = wgpu::QueryType::Occlusion;
+        return device.CreateQuerySet(&descriptor);
+    }
+
+    wgpu::Texture CreateRenderTexture(wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {kRTSize, kRTSize, 1};
+        descriptor.format = format;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        return device.CreateTexture(&descriptor);
+    }
+
+    void TestOcclusionQueryWithDepthStencilTest(bool depthTestEnabled,
+                                                bool stencilTestEnabled,
+                                                OcclusionExpectation::Result expected) {
+        constexpr uint32_t kQueryCount = 1;
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        // Enable depth and stencil tests and set comparison tests never pass.
+        wgpu::DepthStencilState* depthStencil =
+            descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+        depthStencil->depthCompare =
+            depthTestEnabled ? wgpu::CompareFunction::Never : wgpu::CompareFunction::Always;
+        depthStencil->stencilFront.compare =
+            stencilTestEnabled ? wgpu::CompareFunction::Never : wgpu::CompareFunction::Always;
+        depthStencil->stencilBack.compare =
+            stencilTestEnabled ? wgpu::CompareFunction::Never : wgpu::CompareFunction::Always;
+
+        wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&descriptor);
+
+        wgpu::Texture renderTarget = CreateRenderTexture(wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::TextureView renderTargetView = renderTarget.CreateView();
+
+        wgpu::Texture depthTexture = CreateRenderTexture(wgpu::TextureFormat::Depth24PlusStencil8);
+        wgpu::TextureView depthTextureView = depthTexture.CreateView();
+
+        wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+        // Set all bits in buffer to check 0 is correctly written if there is no sample passed the
+        // occlusion testing
+        queue.WriteBuffer(destination, 0, &kSentinelValue, sizeof(kSentinelValue));
+
+        utils::ComboRenderPassDescriptor renderPass({renderTargetView}, depthTextureView);
+        renderPass.occlusionQuerySet = querySet;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(renderPipeline);
+        pass.SetStencilReference(0);
+        pass.BeginOcclusionQuery(0);
+        pass.Draw(3);
+        pass.EndOcclusionQuery();
+        pass.End();
+
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, sizeof(uint64_t), new OcclusionExpectation(expected));
+    }
+
+    void TestOcclusionQueryWithScissorTest(ScissorRect rect,
+                                           OcclusionExpectation::Result expected) {
+        constexpr uint32_t kQueryCount = 1;
+
+        wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+        // Set all bits in buffer to check 0 is correctly written if there is no sample passed the
+        // occlusion testing
+        queue.WriteBuffer(destination, 0, &kSentinelValue, sizeof(kSentinelValue));
+
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        renderPass.renderPassInfo.occlusionQuerySet = querySet;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetScissorRect(rect.x, rect.y, rect.width, rect.height);
+        pass.BeginOcclusionQuery(0);
+        pass.Draw(3);
+        pass.EndOcclusionQuery();
+        pass.End();
+
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, sizeof(uint64_t), new OcclusionExpectation(expected));
+    }
+
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+
+    wgpu::RenderPipeline pipeline;
+
+    constexpr static unsigned int kRTSize = 4;
+};
+
+// Test creating query set with the type of Occlusion
+TEST_P(OcclusionQueryTests, QuerySetCreation) {
+    // Zero-sized query set is allowed.
+    CreateOcclusionQuerySet(0);
+
+    CreateOcclusionQuerySet(1);
+}
+
+// Test destroying query set
+TEST_P(OcclusionQueryTests, QuerySetDestroy) {
+    wgpu::QuerySet querySet = CreateOcclusionQuerySet(1);
+    querySet.Destroy();
+}
+
+// Draw a bottom right triangle with depth/stencil testing enabled and check whether there is
+// sample passed the testing by non-precise occlusion query with the results:
+// zero indicates that no sample passed depth/stencil testing,
+// non-zero indicates that at least one sample passed depth/stencil testing.
+TEST_P(OcclusionQueryTests, QueryWithDepthStencilTest) {
+    // Disable depth/stencil testing, the samples always pass the testing, the expected occlusion
+    // result is non-zero.
+    TestOcclusionQueryWithDepthStencilTest(false, false, OcclusionExpectation::Result::NonZero);
+
+    // Only enable depth testing and set the samples never pass the testing, the expected occlusion
+    // result is zero.
+    TestOcclusionQueryWithDepthStencilTest(true, false, OcclusionExpectation::Result::Zero);
+
+    // Only enable stencil testing and set the samples never pass the testing, the expected
+    // occlusion result is zero.
+    TestOcclusionQueryWithDepthStencilTest(false, true, OcclusionExpectation::Result::Zero);
+}
+
+// Draw a bottom right triangle with scissor testing enabled and check whether there is
+// sample passed the testing by non-precise occlusion query with the results:
+// zero indicates that no sample passed scissor testing,
+// non-zero indicates that at least one sample passed scissor testing.
+TEST_P(OcclusionQueryTests, QueryWithScissorTest) {
+    // TODO(hao.x.li@intel.com): It's failed weirdly on Intel TGL(Window Vulkan) which says
+    // the destination buffer keep sentinel value in the second case, it cannot be reproduced with
+    // any debug actions including Vulkan validation layers enabled, and takes time to find out if
+    // the WriteBuffer and ResolveQuerySet are not executed in order or the ResolveQuerySet does not
+    // copy the result to the buffer. In order to integrate end2end tests to Intel driver CL without
+    // unknown issues, skip it until we find the root cause.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    // Test there are samples passed scissor testing, the expected occlusion result is non-zero.
+    TestOcclusionQueryWithScissorTest({2, 1, 2, 1}, OcclusionExpectation::Result::NonZero);
+
+    // Test there is no sample passed scissor testing, the expected occlusion result is zero.
+    TestOcclusionQueryWithScissorTest({0, 0, 2, 1}, OcclusionExpectation::Result::Zero);
+}
+
+// Test begin occlusion query with same query index on different render pass
+TEST_P(OcclusionQueryTests, Rewrite) {
+    constexpr uint32_t kQueryCount = 1;
+
+    wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+    // Set all bits in buffer to check 0 is correctly written if there is no sample passed the
+    // occlusion testing
+    queue.WriteBuffer(destination, 0, &kSentinelValue, sizeof(kSentinelValue));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    renderPass.renderPassInfo.occlusionQuerySet = querySet;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Begin occlusion without draw call
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.BeginOcclusionQuery(0);
+    pass.EndOcclusionQuery();
+    pass.End();
+
+    // Begin occlusion with same query index with draw call
+    wgpu::RenderPassEncoder rewritePass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    rewritePass.SetPipeline(pipeline);
+    rewritePass.BeginOcclusionQuery(0);
+    rewritePass.Draw(3);
+    rewritePass.EndOcclusionQuery();
+    rewritePass.End();
+
+    encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER(destination, 0, sizeof(uint64_t),
+                  new OcclusionExpectation(OcclusionExpectation::Result::NonZero));
+}
+
+// Test resolving occlusion query correctly if the queries are written sparsely, which also tests
+// the query resetting at the start of render passes on Vulkan backend.
+TEST_P(OcclusionQueryTests, ResolveSparseQueries) {
+    // TODO(hao.x.li@intel.com): Fails on Intel Windows Vulkan due to a driver issue that
+    // vkCmdFillBuffer and vkCmdCopyQueryPoolResults are not executed in order, skip it util
+    // the issue is fixed.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    // TODO(hao.x.li@intel.com): Investigate why it's failed on D3D12 on Nvidia when running with
+    // the previous occlusion tests. Expect resolve to 0 for these unwritten queries but the
+    // occlusion result of the previous tests is got.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+
+    constexpr uint32_t kQueryCount = 7;
+
+    wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+    // Set sentinel values to check the queries are resolved correctly if the queries are
+    // written sparsely.
+    std::vector<uint64_t> sentinelValues(kQueryCount, kSentinelValue);
+    queue.WriteBuffer(destination, 0, sentinelValues.data(), kQueryCount * sizeof(uint64_t));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    renderPass.renderPassInfo.occlusionQuerySet = querySet;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+
+    // Write queries sparsely for testing the query resetting on Vulkan and resolving unwritten
+    // queries to 0.
+    // 0 - not written (tests starting with not written).
+    // 1 - written (tests combing multiple written, although other tests already do it).
+    // 2 - written.
+    // 3 - not written (tests skipping over not written in the middle).
+    // 4 - not written.
+    // 5 - written (tests another written query in the middle).
+    // 6 - not written (tests the last query not being written).
+    pass.BeginOcclusionQuery(1);
+    pass.Draw(3);
+    pass.EndOcclusionQuery();
+    pass.BeginOcclusionQuery(2);
+    pass.Draw(3);
+    pass.EndOcclusionQuery();
+    pass.BeginOcclusionQuery(5);
+    pass.Draw(3);
+    pass.EndOcclusionQuery();
+    pass.End();
+
+    encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // The query at index 0 should be resolved to 0.
+    EXPECT_BUFFER_U64_RANGE_EQ(&kZero, destination, 0, 1);
+    EXPECT_BUFFER(destination, sizeof(uint64_t), 2 * sizeof(uint64_t),
+                  new OcclusionExpectation(OcclusionExpectation::Result::NonZero));
+    // The queries at index 3 and 4 should be resolved to 0.
+    std::vector<uint64_t> zeros(2, kZero);
+    EXPECT_BUFFER_U64_RANGE_EQ(zeros.data(), destination, 3 * sizeof(uint64_t), 2);
+    EXPECT_BUFFER(destination, 5 * sizeof(uint64_t), sizeof(uint64_t),
+                  new OcclusionExpectation(OcclusionExpectation::Result::NonZero));
+    // The query at index 6 should be resolved to 0.
+    EXPECT_BUFFER_U64_RANGE_EQ(&kZero, destination, 6 * sizeof(uint64_t), 1);
+}
+
+// Test resolving occlusion query to 0 if all queries are not written
+TEST_P(OcclusionQueryTests, ResolveWithoutWritten) {
+    // TODO(hao.x.li@intel.com): Investigate why it's failed on D3D12 on Nvidia when running with
+    // the previous occlusion tests. Expect resolve to 0 but the occlusion result of the previous
+    // tests is got.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsNvidia());
+
+    constexpr uint32_t kQueryCount = 1;
+
+    wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+    // Set sentinel values to check 0 is correctly written if resolving query set without
+    // any written.
+    queue.WriteBuffer(destination, 0, &kSentinelValue, sizeof(kSentinelValue));
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U64_RANGE_EQ(&kZero, destination, 0, 1);
+}
+
+// Test resolving occlusion query to the destination buffer with offset
+TEST_P(OcclusionQueryTests, ResolveToBufferWithOffset) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySet querySet = CreateOcclusionQuerySet(kQueryCount);
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    renderPass.renderPassInfo.occlusionQuerySet = querySet;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.BeginOcclusionQuery(0);
+    pass.Draw(3);
+    pass.EndOcclusionQuery();
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    constexpr uint64_t kBufferSize = kQueryCount * sizeof(uint64_t) + kMinDestinationOffset;
+    constexpr uint64_t kCount = kQueryCount + kMinCount;
+
+    // Resolve the query result to first slot in the buffer, other slots should not be written.
+    {
+        wgpu::Buffer destination = CreateResolveBuffer(kBufferSize);
+        // Set sentinel values to check the query is resolved to the correct slot of the buffer.
+        std::vector<uint64_t> sentinelValues(kCount, kSentinelValue);
+        queue.WriteBuffer(destination, 0, sentinelValues.data(), kBufferSize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, 1, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, sizeof(uint64_t),
+                      new OcclusionExpectation(OcclusionExpectation::Result::NonZero));
+        EXPECT_BUFFER_U64_RANGE_EQ(sentinelValues.data(), destination, sizeof(uint64_t),
+                                   kCount - 1);
+    }
+
+    // Resolve the query result to second slot in the buffer, the first one should not be written.
+    {
+        wgpu::Buffer destination = CreateResolveBuffer(kBufferSize);
+        // Set sentinel values to check the query is resolved to the correct slot of the buffer.
+        std::vector<uint64_t> sentinelValues(kCount, kSentinelValue);
+        queue.WriteBuffer(destination, 0, sentinelValues.data(), kBufferSize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, 1, destination, kMinDestinationOffset);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER_U64_RANGE_EQ(sentinelValues.data(), destination, 0, kMinCount);
+        EXPECT_BUFFER(destination, kMinDestinationOffset, sizeof(uint64_t),
+                      new OcclusionExpectation(OcclusionExpectation::Result::NonZero));
+    }
+}
+
+DAWN_INSTANTIATE_TEST(OcclusionQueryTests, D3D12Backend(), MetalBackend(), VulkanBackend());
+
+class PipelineStatisticsQueryTests : public QueryTests {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // Skip all tests if pipeline statistics feature is not supported
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::PipelineStatisticsQuery}));
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::PipelineStatisticsQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::PipelineStatisticsQuery);
+        }
+
+        return requiredFeatures;
+    }
+
+    wgpu::QuerySet CreateQuerySetForPipelineStatistics(
+        uint32_t queryCount,
+        std::vector<wgpu::PipelineStatisticName> pipelineStatistics = {}) {
+        wgpu::QuerySetDescriptor descriptor;
+        descriptor.count = queryCount;
+        descriptor.type = wgpu::QueryType::PipelineStatistics;
+
+        if (pipelineStatistics.size() > 0) {
+            descriptor.pipelineStatistics = pipelineStatistics.data();
+            descriptor.pipelineStatisticsCount = pipelineStatistics.size();
+        }
+        return device.CreateQuerySet(&descriptor);
+    }
+};
+
+// Test creating query set with the type of PipelineStatistics
+TEST_P(PipelineStatisticsQueryTests, QuerySetCreation) {
+    // Zero-sized query set is allowed.
+    CreateQuerySetForPipelineStatistics(0, {wgpu::PipelineStatisticName::ClipperInvocations,
+                                            wgpu::PipelineStatisticName::VertexShaderInvocations});
+
+    CreateQuerySetForPipelineStatistics(1, {wgpu::PipelineStatisticName::ClipperInvocations,
+                                            wgpu::PipelineStatisticName::VertexShaderInvocations});
+}
+
+DAWN_INSTANTIATE_TEST(PipelineStatisticsQueryTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class TimestampExpectation : public detail::Expectation {
+  public:
+    ~TimestampExpectation() override = default;
+
+    // Expect the timestamp results are greater than 0.
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size % sizeof(uint64_t) == 0);
+        const uint64_t* timestamps = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < size / sizeof(uint64_t); i++) {
+            if (timestamps[i] == 0) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be greater than 0." << std::endl;
+            }
+        }
+
+        return testing::AssertionSuccess();
+    }
+};
+
+class TimestampQueryTests : public QueryTests {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // Skip all tests if timestamp feature is not supported
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}));
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
+        }
+        return requiredFeatures;
+    }
+
+    wgpu::QuerySet CreateQuerySetForTimestamp(uint32_t queryCount) {
+        wgpu::QuerySetDescriptor descriptor;
+        descriptor.count = queryCount;
+        descriptor.type = wgpu::QueryType::Timestamp;
+        return device.CreateQuerySet(&descriptor);
+    }
+};
+
+// Test creating query set with the type of Timestamp
+TEST_P(TimestampQueryTests, QuerySetCreation) {
+    // Zero-sized query set is allowed.
+    CreateQuerySetForTimestamp(0);
+
+    CreateQuerySetForTimestamp(1);
+}
+
+// Test calling timestamp query from command encoder
+TEST_P(TimestampQueryTests, TimestampOnCommandEncoder) {
+    constexpr uint32_t kQueryCount = 2;
+
+    // Write timestamp with different query indexes
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+
+    // Write timestamp with same query index outside pass on same encoder
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+}
+
+// Test calling timestamp query from render pass encoder
+TEST_P(TimestampQueryTests, TimestampOnRenderPass) {
+    constexpr uint32_t kQueryCount = 2;
+
+    // Write timestamp with different query indexes
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.End();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+
+    // Write timestamp with same query index, not need test rewrite inside render pass due to it's
+    // not allowed
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.End();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+}
+
+// Test calling timestamp query from compute pass encoder
+TEST_P(TimestampQueryTests, TimestampOnComputePass) {
+    constexpr uint32_t kQueryCount = 2;
+
+    // Write timestamp with different query indexes
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.End();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+
+    // Write timestamp with same query index on both the outside and the inside of the compute pass
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.WriteTimestamp(querySet, 1);
+
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.End();
+
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+
+    // Write timestamp with same query index inside compute pass
+    {
+        wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+        wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.WriteTimestamp(querySet, 0);
+        pass.WriteTimestamp(querySet, 1);
+        pass.End();
+
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+    }
+}
+
+// Test resolving timestamp query from another different encoder
+TEST_P(TimestampQueryTests, ResolveFromAnotherEncoder) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+    wgpu::CommandEncoder timestampEncoder = device.CreateCommandEncoder();
+    timestampEncoder.WriteTimestamp(querySet, 0);
+    timestampEncoder.WriteTimestamp(querySet, 1);
+    wgpu::CommandBuffer timestampCommands = timestampEncoder.Finish();
+    queue.Submit(1, &timestampCommands);
+
+    wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
+    resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
+    queue.Submit(1, &resolveCommands);
+
+    EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+}
+
+// Test resolving timestamp query correctly if the queries are written sparsely
+TEST_P(TimestampQueryTests, ResolveSparseQueries) {
+    // TODO(hao.x.li@intel.com): Fails on Intel Windows Vulkan due to a driver issue that
+    // vkCmdFillBuffer and vkCmdCopyQueryPoolResults are not executed in order, skip it util
+    // the issue is fixed.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    constexpr uint32_t kQueryCount = 4;
+
+    wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+    // Set sentinel values to check the queries are resolved correctly if the queries are
+    // written sparsely
+    std::vector<uint64_t> sentinelValues{0, kSentinelValue, 0, kSentinelValue};
+    queue.WriteBuffer(destination, 0, sentinelValues.data(), kQueryCount * sizeof(uint64_t));
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.WriteTimestamp(querySet, 0);
+    encoder.WriteTimestamp(querySet, 2);
+    encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER(destination, 0, sizeof(uint64_t), new TimestampExpectation);
+    // The query with no value written should be resolved to 0.
+    EXPECT_BUFFER_U64_RANGE_EQ(&kZero, destination, sizeof(uint64_t), 1);
+    EXPECT_BUFFER(destination, 2 * sizeof(uint64_t), sizeof(uint64_t), new TimestampExpectation);
+    // The query with no value written should be resolved to 0.
+    EXPECT_BUFFER_U64_RANGE_EQ(&kZero, destination, 3 * sizeof(uint64_t), 1);
+}
+
+// Test resolving timestamp query to 0 if all queries are not written
+TEST_P(TimestampQueryTests, ResolveWithoutWritten) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+    // Set sentinel values to check 0 is correctly written if resolving query set with no
+    // query is written
+    std::vector<uint64_t> sentinelValues(kQueryCount, kSentinelValue);
+    queue.WriteBuffer(destination, 0, sentinelValues.data(), kQueryCount * sizeof(uint64_t));
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    std::vector<uint64_t> expectedZeros(kQueryCount);
+    EXPECT_BUFFER_U64_RANGE_EQ(expectedZeros.data(), destination, 0, kQueryCount);
+}
+
+// Test resolving timestamp query to one slot in the buffer
+TEST_P(TimestampQueryTests, ResolveToBufferWithOffset) {
+    // TODO(hao.x.li@intel.com): Fails on Intel Windows Vulkan due to a driver issue that
+    // vkCmdFillBuffer and vkCmdCopyQueryPoolResults are not executed in order, skip it util
+    // the issue is fixed.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    constexpr uint32_t kQueryCount = 2;
+    constexpr uint64_t kBufferSize = kQueryCount * sizeof(uint64_t) + kMinDestinationOffset;
+    constexpr uint64_t kCount = kQueryCount + kMinCount;
+
+    wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+
+    // Resolve the query result to first slot in the buffer, other slots should not be written
+    {
+        wgpu::Buffer destination = CreateResolveBuffer(kBufferSize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.ResolveQuerySet(querySet, 0, 1, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        std::vector<uint64_t> zeros(kCount - 1, kZero);
+        EXPECT_BUFFER(destination, 0, sizeof(uint64_t), new TimestampExpectation);
+        EXPECT_BUFFER_U64_RANGE_EQ(zeros.data(), destination, sizeof(uint64_t), kCount - 1);
+    }
+
+    // Resolve the query result to the buffer with offset, the slots before the offset
+    // should not be written
+    {
+        wgpu::Buffer destination = CreateResolveBuffer(kBufferSize);
+        // Set sentinel values to check the query is resolved to the correct slot of the buffer.
+        std::vector<uint64_t> sentinelValues(kCount, kZero);
+        queue.WriteBuffer(destination, 0, sentinelValues.data(), kBufferSize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(querySet, 0);
+        encoder.ResolveQuerySet(querySet, 0, 1, destination, kMinDestinationOffset);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        std::vector<uint64_t> zeros(kMinCount, kZero);
+        EXPECT_BUFFER_U64_RANGE_EQ(zeros.data(), destination, 0, kMinCount);
+        EXPECT_BUFFER(destination, kMinDestinationOffset, sizeof(uint64_t),
+                      new TimestampExpectation);
+    }
+}
+
+// Test resolving a query set twice into the same destination buffer with potentially overlapping
+// ranges
+TEST_P(TimestampQueryTests, ResolveTwiceToSameBuffer) {
+    // TODO(hao.x.li@intel.com): Fails on Intel Windows Vulkan due to a driver issue that
+    // vkCmdFillBuffer and vkCmdCopyQueryPoolResults are not executed in order, skip it util
+    // the issue is fixed.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsIntel());
+
+    constexpr uint32_t kQueryCount = kMinCount + 2;
+
+    wgpu::QuerySet querySet = CreateQuerySetForTimestamp(kQueryCount);
+    wgpu::Buffer destination = CreateResolveBuffer(kQueryCount * sizeof(uint64_t));
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    for (uint32_t i = 0; i < kQueryCount; i++) {
+        encoder.WriteTimestamp(querySet, i);
+    }
+    encoder.ResolveQuerySet(querySet, 0, kMinCount + 1, destination, 0);
+    encoder.ResolveQuerySet(querySet, kMinCount, 2, destination, kMinDestinationOffset);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), new TimestampExpectation);
+}
+
+DAWN_INSTANTIATE_TEST(TimestampQueryTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/QueueTests.cpp b/src/dawn/tests/end2end/QueueTests.cpp
new file mode 100644
index 0000000..f9dd8c8
--- /dev/null
+++ b/src/dawn/tests/end2end/QueueTests.cpp
@@ -0,0 +1,711 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class QueueTests : public DawnTest {};
+
+// Test that GetQueue always returns the same object.
+TEST_P(QueueTests, GetQueueSameObject) {
+    wgpu::Queue q1 = device.GetQueue();
+    wgpu::Queue q2 = device.GetQueue();
+    EXPECT_EQ(q1.Get(), q2.Get());
+}
+
+DAWN_INSTANTIATE_TEST(QueueTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      NullBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class QueueWriteBufferTests : public DawnTest {};
+
+// Test the simplest WriteBuffer setting one u32 at offset 0.
+TEST_P(QueueWriteBufferTests, SmallDataAtZero) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint32_t value = 0x01020304;
+    queue.WriteBuffer(buffer, 0, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
+}
+
+// Test an empty WriteBuffer
+TEST_P(QueueWriteBufferTests, ZeroSized) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint32_t initialValue = 0x42;
+    queue.WriteBuffer(buffer, 0, &initialValue, sizeof(initialValue));
+
+    queue.WriteBuffer(buffer, 0, nullptr, 0);
+
+    // The content of the buffer isn't changed
+    EXPECT_BUFFER_U32_EQ(initialValue, buffer, 0);
+}
+
+// Call WriteBuffer at offset 0 via a u32 twice. Test that data is updated accoordingly.
+TEST_P(QueueWriteBufferTests, SetTwice) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint32_t value = 0x01020304;
+    queue.WriteBuffer(buffer, 0, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
+
+    value = 0x05060708;
+    queue.WriteBuffer(buffer, 0, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
+}
+
+// Test that WriteBuffer offset works.
+TEST_P(QueueWriteBufferTests, SmallDataAtOffset) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4000;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    constexpr uint64_t kOffset = 2000;
+    uint32_t value = 0x01020304;
+    queue.WriteBuffer(buffer, kOffset, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, kOffset);
+}
+
+// Stress test for many calls to WriteBuffer
+TEST_P(QueueWriteBufferTests, ManyWriteBuffer) {
+    // Note: Increasing the size of the buffer will likely cause timeout issues.
+    // In D3D12, timeout detection occurs when the GPU scheduler tries but cannot preempt the task
+    // executing these commands in-flight. If this takes longer than ~2s, a device reset occurs and
+    // fails the test. Since GPUs may or may not complete by then, this test must be disabled OR
+    // modified to be well-below the timeout limit.
+
+    // TODO(crbug.com/dawn/228): Re-enable once the issue with Metal on 10.14.6 is fixed.
+    DAWN_SUPPRESS_TEST_IF(IsMacOS() && IsIntel() && IsMetal());
+
+    // The Vulkan Validation Layers' memory barrier validation keeps track of every range written
+    // to independently which causes validation of each WriteBuffer to take increasing time, and
+    // this test to take forever. Skip it when VVLs are enabled.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsBackendValidationEnabled());
+
+    constexpr uint64_t kSize = 4000 * 1000;
+    constexpr uint32_t kElements = 250 * 250;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = kSize;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    std::vector<uint32_t> expectedData;
+    for (uint32_t i = 0; i < kElements; ++i) {
+        queue.WriteBuffer(buffer, i * sizeof(uint32_t), &i, sizeof(i));
+        expectedData.push_back(i);
+    }
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
+}
+
+// Test using WriteBuffer for lots of data
+TEST_P(QueueWriteBufferTests, LargeWriteBuffer) {
+    constexpr uint64_t kSize = 4000 * 1000;
+    constexpr uint32_t kElements = 1000 * 1000;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = kSize;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    std::vector<uint32_t> expectedData;
+    for (uint32_t i = 0; i < kElements; ++i) {
+        expectedData.push_back(i);
+    }
+
+    queue.WriteBuffer(buffer, 0, expectedData.data(), kElements * sizeof(uint32_t));
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
+}
+
+// Test using WriteBuffer for super large data block
+TEST_P(QueueWriteBufferTests, SuperLargeWriteBuffer) {
+    constexpr uint64_t kSize = 12000 * 1000;
+    constexpr uint64_t kElements = 3000 * 1000;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = kSize;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    std::vector<uint32_t> expectedData;
+    for (uint32_t i = 0; i < kElements; ++i) {
+        expectedData.push_back(i);
+    }
+
+    queue.WriteBuffer(buffer, 0, expectedData.data(), kElements * sizeof(uint32_t));
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
+}
+
+// Test a special code path: writing when dynamic uploader already contatins some unaligned
+// data, it might be necessary to use a ring buffer with properly aligned offset.
+TEST_P(QueueWriteBufferTests, UnalignedDynamicUploader) {
+    utils::UnalignDynamicUploader(device);
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    uint32_t value = 0x01020304;
+    queue.WriteBuffer(buffer, 0, &value, sizeof(value));
+
+    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
+}
+
+DAWN_INSTANTIATE_TEST(QueueWriteBufferTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// For MinimumDataSpec bytesPerRow and rowsPerImage, compute a default from the copy extent.
+constexpr uint32_t kStrideComputeDefault = 0xFFFF'FFFEul;
+
+class QueueWriteTextureTests : public DawnTest {
+  protected:
+    static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    struct TextureSpec {
+        wgpu::Origin3D copyOrigin;
+        wgpu::Extent3D textureSize;
+        uint32_t level;
+    };
+
+    struct DataSpec {
+        uint64_t size;
+        uint64_t offset;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+    };
+
+    static DataSpec MinimumDataSpec(wgpu::Extent3D writeSize,
+                                    uint32_t overrideBytesPerRow = kStrideComputeDefault,
+                                    uint32_t overrideRowsPerImage = kStrideComputeDefault) {
+        uint32_t bytesPerRow = writeSize.width * utils::GetTexelBlockSizeInBytes(kTextureFormat);
+        if (overrideBytesPerRow != kStrideComputeDefault) {
+            bytesPerRow = overrideBytesPerRow;
+        }
+        uint32_t rowsPerImage = writeSize.height;
+        if (overrideRowsPerImage != kStrideComputeDefault) {
+            rowsPerImage = overrideRowsPerImage;
+        }
+
+        uint32_t totalDataSize =
+            utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, writeSize, kTextureFormat);
+        return {totalDataSize, 0, bytesPerRow, rowsPerImage};
+    }
+
+    static void PackTextureData(const uint8_t* srcData,
+                                uint32_t width,
+                                uint32_t height,
+                                uint32_t srcBytesPerRow,
+                                RGBA8* dstData,
+                                uint32_t dstTexelPerRow,
+                                uint32_t texelBlockSize) {
+        for (uint64_t y = 0; y < height; ++y) {
+            for (uint64_t x = 0; x < width; ++x) {
+                uint64_t src = x * texelBlockSize + y * srcBytesPerRow;
+                uint64_t dst = x + y * dstTexelPerRow;
+
+                dstData[dst] = {srcData[src], srcData[src + 1], srcData[src + 2], srcData[src + 3]};
+            }
+        }
+    }
+
+    static void FillData(uint8_t* data, size_t count) {
+        for (size_t i = 0; i < count; ++i) {
+            data[i] = static_cast<uint8_t>(i % 253);
+        }
+    }
+
+    void DoTest(const TextureSpec& textureSpec,
+                const DataSpec& dataSpec,
+                const wgpu::Extent3D& copySize) {
+        // Create data of size `size` and populate it
+        std::vector<uint8_t> data(dataSpec.size);
+        FillData(data.data(), data.size());
+
+        // Create a texture that is `width` x `height` with (`level` + 1) mip levels.
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size = textureSpec.textureSize;
+        descriptor.format = kTextureFormat;
+        descriptor.mipLevelCount = textureSpec.level + 1;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(
+            dataSpec.offset, dataSpec.bytesPerRow, dataSpec.rowsPerImage);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, textureSpec.level, textureSpec.copyOrigin);
+
+        queue.WriteTexture(&imageCopyTexture, data.data(), dataSpec.size, &textureDataLayout,
+                           &copySize);
+
+        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(kTextureFormat);
+        wgpu::Extent3D mipSize = {textureSpec.textureSize.width >> textureSpec.level,
+                                  textureSpec.textureSize.height >> textureSpec.level,
+                                  textureSpec.textureSize.depthOrArrayLayers};
+        uint32_t bytesPerRow = dataSpec.bytesPerRow;
+        if (bytesPerRow == wgpu::kCopyStrideUndefined) {
+            bytesPerRow = mipSize.width * bytesPerTexel;
+        }
+        uint32_t alignedBytesPerRow = Align(bytesPerRow, bytesPerTexel);
+        uint32_t appliedRowsPerImage =
+            dataSpec.rowsPerImage > 0 ? dataSpec.rowsPerImage : mipSize.height;
+        uint32_t bytesPerImage = bytesPerRow * appliedRowsPerImage;
+
+        const uint32_t maxArrayLayer = textureSpec.copyOrigin.z + copySize.depthOrArrayLayers;
+
+        uint64_t dataOffset = dataSpec.offset;
+        const uint32_t texelCountLastLayer =
+            (alignedBytesPerRow / bytesPerTexel) * (mipSize.height - 1) + mipSize.width;
+        for (uint32_t slice = textureSpec.copyOrigin.z; slice < maxArrayLayer; ++slice) {
+            // Pack the data in the specified copy region to have the same
+            // format as the expected texture data.
+            std::vector<RGBA8> expected(texelCountLastLayer);
+            PackTextureData(data.data() + dataOffset, copySize.width, copySize.height,
+                            dataSpec.bytesPerRow, expected.data(), copySize.width, bytesPerTexel);
+
+            EXPECT_TEXTURE_EQ(expected.data(), texture,
+                              {textureSpec.copyOrigin.x, textureSpec.copyOrigin.y, slice},
+                              {copySize.width, copySize.height}, textureSpec.level)
+                << "Write to texture failed copying " << dataSpec.size << "-byte data with offset "
+                << dataSpec.offset << " and bytes per row " << dataSpec.bytesPerRow << " to [("
+                << textureSpec.copyOrigin.x << ", " << textureSpec.copyOrigin.y << "), ("
+                << textureSpec.copyOrigin.x + copySize.width << ", "
+                << textureSpec.copyOrigin.y + copySize.height << ")) region of "
+                << textureSpec.textureSize.width << " x " << textureSpec.textureSize.height
+                << " texture at mip level " << textureSpec.level << " layer " << slice << std::endl;
+
+            dataOffset += bytesPerImage;
+        }
+    }
+
+    void DoSimpleWriteTextureTest(uint32_t width, uint32_t height) {
+        constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr uint32_t kPixelSize = 4;
+
+        std::vector<uint32_t> data(width * height);
+        for (size_t i = 0; i < data.size(); i++) {
+            data[i] = 0xFFFFFFFF;
+        }
+
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.size = {width, height, 1};
+        descriptor.format = kFormat;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(0, width * kPixelSize);
+        wgpu::Extent3D copyExtent = {width, height, 1};
+        device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), width * height * kPixelSize,
+                                       &textureDataLayout, &copyExtent);
+
+        EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0}, {width, height});
+    }
+};
+
+// Test writing the whole texture for varying texture sizes.
+TEST_P(QueueWriteTextureTests, VaryingTextureSize) {
+    for (unsigned int w : {127, 128}) {
+        for (unsigned int h : {63, 64}) {
+            for (unsigned int d : {1, 3, 4}) {
+                TextureSpec textureSpec;
+                textureSpec.textureSize = {w, h, d};
+                textureSpec.copyOrigin = {0, 0, 0};
+                textureSpec.level = 0;
+
+                DoTest(textureSpec, MinimumDataSpec({w, h, d}), {w, h, d});
+            }
+        }
+    }
+}
+
+// Test uploading a large amount of data with writeTexture.
+TEST_P(QueueWriteTextureTests, LargeWriteTexture) {
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {2048, 2048, 2};
+    textureSpec.copyOrigin = {0, 0, 0};
+    textureSpec.level = 0;
+
+    DoTest(textureSpec, MinimumDataSpec(textureSpec.textureSize), textureSpec.textureSize);
+}
+
+// Test writing a pixel with an offset.
+TEST_P(QueueWriteTextureTests, VaryingTextureOffset) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    DataSpec pixelData = MinimumDataSpec({1, 1, 1});
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, 1};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+    defaultTextureSpec.level = 0;
+
+    for (unsigned int w : {0u, kWidth / 7, kWidth / 3, kWidth - 1}) {
+        for (unsigned int h : {0u, kHeight / 7, kHeight / 3, kHeight - 1}) {
+            TextureSpec textureSpec = defaultTextureSpec;
+            textureSpec.copyOrigin = {w, h, 0};
+            DoTest(textureSpec, pixelData, kCopySize);
+        }
+    }
+}
+
+// Test writing a pixel with an offset to a texture array
+TEST_P(QueueWriteTextureTests, VaryingTextureArrayOffset) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+    constexpr uint32_t kDepth = 62;
+    DataSpec pixelData = MinimumDataSpec({1, 1, 1});
+
+    constexpr wgpu::Extent3D kCopySize = {1, 1, 1};
+    constexpr wgpu::Extent3D kTextureSize = {kWidth, kHeight, kDepth};
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.textureSize = kTextureSize;
+    defaultTextureSpec.level = 0;
+
+    for (unsigned int w : {0u, kWidth / 7, kWidth / 3, kWidth - 1}) {
+        for (unsigned int h : {0u, kHeight / 7, kHeight / 3, kHeight - 1}) {
+            for (unsigned int d : {0u, kDepth / 7, kDepth / 3, kDepth - 1}) {
+                TextureSpec textureSpec = defaultTextureSpec;
+                textureSpec.copyOrigin = {w, h, d};
+                DoTest(textureSpec, pixelData, kCopySize);
+            }
+        }
+    }
+}
+
+// Test writing with varying write sizes.
+TEST_P(QueueWriteTextureTests, VaryingWriteSize) {
+    constexpr uint32_t kWidth = 257;
+    constexpr uint32_t kHeight = 127;
+    for (unsigned int w : {13, 63, 128, 256}) {
+        for (unsigned int h : {16, 19, 32, 63}) {
+            TextureSpec textureSpec;
+            textureSpec.copyOrigin = {0, 0, 0};
+            textureSpec.level = 0;
+            textureSpec.textureSize = {kWidth, kHeight, 1};
+            DoTest(textureSpec, MinimumDataSpec({w, h, 1}), {w, h, 1});
+        }
+    }
+}
+
+// Test writing with varying write sizes to texture arrays.
+TEST_P(QueueWriteTextureTests, VaryingArrayWriteSize) {
+    constexpr uint32_t kWidth = 257;
+    constexpr uint32_t kHeight = 127;
+    constexpr uint32_t kDepth = 65;
+    for (unsigned int w : {13, 63, 128, 256}) {
+        for (unsigned int h : {16, 19, 32, 63}) {
+            for (unsigned int d : {3, 6}) {
+                TextureSpec textureSpec;
+                textureSpec.copyOrigin = {0, 0, 0};
+                textureSpec.level = 0;
+                textureSpec.textureSize = {kWidth, kHeight, kDepth};
+                DoTest(textureSpec, MinimumDataSpec({w, h, d}), {w, h, d});
+            }
+        }
+    }
+}
+
+// Test writing to varying mips
+TEST_P(QueueWriteTextureTests, TextureWriteToMip) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec defaultTextureSpec;
+    defaultTextureSpec.copyOrigin = {0, 0, 0};
+    defaultTextureSpec.textureSize = {kWidth, kHeight, 1};
+
+    for (unsigned int i = 1; i < 4; ++i) {
+        TextureSpec textureSpec = defaultTextureSpec;
+        textureSpec.level = i;
+        DoTest(textureSpec, MinimumDataSpec({kWidth >> i, kHeight >> i, 1}),
+               {kWidth >> i, kHeight >> i, 1});
+    }
+}
+
+// Test writing with different multiples of texel block size as data offset
+TEST_P(QueueWriteTextureTests, VaryingDataOffset) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, 0};
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    textureSpec.level = 0;
+
+    for (uint64_t offset : {1, 2, 4, 17, 64, 128, 300}) {
+        DataSpec dataSpec = MinimumDataSpec({kWidth, kHeight, 1});
+        dataSpec.size += offset;
+        dataSpec.offset += offset;
+        DoTest(textureSpec, dataSpec, {kWidth, kHeight, 1});
+    }
+}
+
+// Test writing with rowsPerImage greater than needed.
+TEST_P(QueueWriteTextureTests, VaryingRowsPerImage) {
+    constexpr uint32_t kWidth = 65;
+    constexpr uint32_t kHeight = 31;
+    constexpr uint32_t kDepth = 17;
+
+    constexpr wgpu::Extent3D copySize = {kWidth - 1, kHeight - 1, kDepth - 1};
+
+    for (unsigned int r : {1, 2, 3, 64, 200}) {
+        TextureSpec textureSpec;
+        textureSpec.copyOrigin = {1, 1, 1};
+        textureSpec.textureSize = {kWidth, kHeight, kDepth};
+        textureSpec.level = 0;
+
+        DataSpec dataSpec = MinimumDataSpec(copySize, kStrideComputeDefault, copySize.height + r);
+        DoTest(textureSpec, dataSpec, copySize);
+    }
+}
+
+// Test with bytesPerRow greater than needed
+TEST_P(QueueWriteTextureTests, VaryingBytesPerRow) {
+    constexpr uint32_t kWidth = 257;
+    constexpr uint32_t kHeight = 129;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    textureSpec.copyOrigin = {1, 2, 0};
+    textureSpec.level = 0;
+
+    constexpr wgpu::Extent3D copyExtent = {17, 19, 1};
+
+    for (unsigned int b : {1, 2, 3, 4}) {
+        uint32_t bytesPerRow =
+            copyExtent.width * utils::GetTexelBlockSizeInBytes(kTextureFormat) + b;
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, bytesPerRow), copyExtent);
+    }
+}
+
+// Test that writing with bytesPerRow = 0 and bytesPerRow < bytesInACompleteRow works
+// when we're copying one row only
+TEST_P(QueueWriteTextureTests, BytesPerRowWithOneRowCopy) {
+    constexpr uint32_t kWidth = 259;
+    constexpr uint32_t kHeight = 127;
+
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, 0};
+    textureSpec.textureSize = {kWidth, kHeight, 1};
+    textureSpec.level = 0;
+
+    {
+        constexpr wgpu::Extent3D copyExtent = {5, 1, 1};
+        DataSpec dataSpec = MinimumDataSpec(copyExtent);
+
+        // bytesPerRow undefined
+        dataSpec.bytesPerRow = wgpu::kCopyStrideUndefined;
+        DoTest(textureSpec, dataSpec, copyExtent);
+    }
+}
+
+// Test with bytesPerRow greater than needed in a write to a texture array.
+TEST_P(QueueWriteTextureTests, VaryingArrayBytesPerRow) {
+    constexpr uint32_t kWidth = 257;
+    constexpr uint32_t kHeight = 129;
+    constexpr uint32_t kLayers = 65;
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = {kWidth, kHeight, kLayers};
+    textureSpec.copyOrigin = {1, 2, 3};
+    textureSpec.level = 0;
+
+    constexpr wgpu::Extent3D copyExtent = {17, 19, 21};
+
+    // Test with bytesPerRow divisible by blockWidth
+    for (unsigned int b : {1, 2, 3, 65, 300}) {
+        uint32_t bytesPerRow =
+            (copyExtent.width + b) * utils::GetTexelBlockSizeInBytes(kTextureFormat);
+        uint32_t rowsPerImage = 23;
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, bytesPerRow, rowsPerImage), copyExtent);
+    }
+
+    // Test with bytesPerRow not divisible by blockWidth
+    for (unsigned int b : {1, 2, 3, 19, 301}) {
+        uint32_t bytesPerRow =
+            copyExtent.width * utils::GetTexelBlockSizeInBytes(kTextureFormat) + b;
+        uint32_t rowsPerImage = 23;
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, bytesPerRow, rowsPerImage), copyExtent);
+    }
+}
+
+// Test valid special cases of bytesPerRow and rowsPerImage (0 or undefined).
+TEST_P(QueueWriteTextureTests, StrideSpecialCases) {
+    TextureSpec textureSpec;
+    textureSpec.copyOrigin = {0, 0, 0};
+    textureSpec.textureSize = {4, 4, 4};
+    textureSpec.level = 0;
+
+    // bytesPerRow 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{0, 2, 2}, {0, 0, 2}, {0, 2, 0}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, 0, 2), copyExtent);
+    }
+
+    // bytesPerRow undefined
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 1, 1}, {2, 0, 1}, {2, 1, 0}, {2, 0, 0}}) {
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, wgpu::kCopyStrideUndefined, 2), copyExtent);
+    }
+
+    // rowsPerImage 0
+    for (const wgpu::Extent3D copyExtent :
+         {wgpu::Extent3D{2, 0, 2}, {2, 0, 0}, {0, 0, 2}, {0, 0, 0}}) {
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, 256, 0), copyExtent);
+    }
+
+    // rowsPerImage undefined
+    for (const wgpu::Extent3D copyExtent : {wgpu::Extent3D{2, 2, 1}, {2, 2, 0}}) {
+        DoTest(textureSpec, MinimumDataSpec(copyExtent, 256, wgpu::kCopyStrideUndefined),
+               copyExtent);
+    }
+}
+
+// Testing a special code path: writing when dynamic uploader already contatins some unaligned
+// data, it might be necessary to use a ring buffer with properly aligned offset.
+TEST_P(QueueWriteTextureTests, UnalignedDynamicUploader) {
+    utils::UnalignDynamicUploader(device);
+
+    constexpr wgpu::Extent3D size = {10, 10, 1};
+
+    TextureSpec textureSpec;
+    textureSpec.textureSize = size;
+    textureSpec.copyOrigin = {0, 0, 0};
+    textureSpec.level = 0;
+
+    DoTest(textureSpec, MinimumDataSpec(size), size);
+}
+
+// This tests for a bug that occurred within the D3D12 CopyTextureSplitter, which incorrectly copied
+// data when the internal offset was larger than 256, but less than 512 and the copy size was 64
+// width or less with a height of 1.
+TEST_P(QueueWriteTextureTests, WriteTo64x1TextureFromUnalignedDynamicUploader) {
+    // First, WriteTexture with 96 pixels, or 384 bytes to create an offset in the dynamic uploader.
+    DoSimpleWriteTextureTest(96, 1);
+
+    // Now test writing to a 64x1 texture. Because a 64x1 texture's row pitch is equal to its slice
+    // pitch, the texture copy offset could be calculated incorrectly inside the internal D3D12
+    // TextureCopySplitter.
+    DoSimpleWriteTextureTest(64, 1);
+}
+
+// This tests for a bug in the allocation of internal staging buffer, which incorrectly copied depth
+// stencil data to the internal offset that is not a multiple of 4.
+TEST_P(QueueWriteTextureTests, WriteStencilAspectWithSourceOffsetUnalignedTo4) {
+    // Copies to a single aspect are unsupported on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+    textureDescriptor.size = {1, 1, 1};
+    wgpu::Texture dstTexture1 = device.CreateTexture(&textureDescriptor);
+    wgpu::Texture dstTexture2 = device.CreateTexture(&textureDescriptor);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 8u;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer outputBuffer = device.CreateBuffer(&bufferDescriptor);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    constexpr wgpu::Extent3D kWriteSize = {1, 1, 1};
+    constexpr uint8_t kData[] = {1, 2};
+    constexpr uint32_t kBytesPerRowForWriteTexture = 1u;
+
+    std::vector<uint8_t> expectedData(8, 0);
+
+    // In the first call of queue.writeTexture(), Dawn will allocate a new staging buffer in its
+    // internal ring buffer and write the user data into it at the offset 0.
+    {
+        constexpr uint32_t kDataOffset1 = 0u;
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(kDataOffset1, kBytesPerRowForWriteTexture);
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+            dstTexture1, 0, {0, 0, 0}, wgpu::TextureAspect::StencilOnly);
+        queue.WriteTexture(&imageCopyTexture, kData, sizeof(kData), &textureDataLayout,
+                           &kWriteSize);
+
+        constexpr uint32_t kOutputBufferOffset1 = 0u;
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            outputBuffer, kOutputBufferOffset1, kTextureBytesPerRowAlignment);
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &kWriteSize);
+
+        expectedData[kOutputBufferOffset1] = kData[kDataOffset1];
+    }
+
+    // In the second call of queue.writeTexture(), Dawn will still use the same staging buffer
+    // allocated in the first call, whose first 2 bytes have been used in the first call of
+    // queue.writeTexture(). Dawn should write the user data at the offset 4 bytes since the
+    // destination texture aspect is stencil.
+    {
+        constexpr uint32_t kDataOffset2 = 1u;
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(kDataOffset2, kBytesPerRowForWriteTexture);
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+            dstTexture2, 0, {0, 0, 0}, wgpu::TextureAspect::StencilOnly);
+        queue.WriteTexture(&imageCopyTexture, kData, sizeof(kData), &textureDataLayout,
+                           &kWriteSize);
+
+        constexpr uint32_t kOutputBufferOffset2 = 4u;
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+            outputBuffer, kOutputBufferOffset2, kTextureBytesPerRowAlignment);
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &kWriteSize);
+
+        expectedData[kOutputBufferOffset2] = kData[kDataOffset2];
+    }
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    queue.Submit(1, &commandBuffer);
+
+    EXPECT_BUFFER_U8_RANGE_EQ(expectedData.data(), outputBuffer, 0, 8);
+}
+
+DAWN_INSTANTIATE_TEST(QueueWriteTextureTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/QueueTimelineTests.cpp b/src/dawn/tests/end2end/QueueTimelineTests.cpp
new file mode 100644
index 0000000..96f4a03
--- /dev/null
+++ b/src/dawn/tests/end2end/QueueTimelineTests.cpp
@@ -0,0 +1,101 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+#include "dawn/tests/DawnTest.h"
+
+using namespace testing;
+
+class MockMapCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockMapCallback> mockMapCallback;
+static void ToMockMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    EXPECT_EQ(status, WGPUBufferMapAsyncStatus_Success);
+    mockMapCallback->Call(status, userdata);
+}
+
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class QueueTimelineTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        mockMapCallback = std::make_unique<MockMapCallback>();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
+
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapRead;
+        mMapReadBuffer = device.CreateBuffer(&descriptor);
+    }
+
+    void TearDown() override {
+        mockMapCallback = nullptr;
+        mockQueueWorkDoneCallback = nullptr;
+        DawnTest::TearDown();
+    }
+
+    wgpu::Buffer mMapReadBuffer;
+};
+
+// Test that mMapReadBuffer.MapAsync callback happens before queue.OnWorkDone callback
+// when queue.OnSubmittedWorkDone is called after mMapReadBuffer.MapAsync. The callback order should
+// happen in the order the functions are called.
+TEST_P(QueueTimelineTests, MapRead_OnWorkDone) {
+    testing::InSequence sequence;
+    EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+
+    mMapReadBuffer.MapAsync(wgpu::MapMode::Read, 0, wgpu::kWholeMapSize, ToMockMapCallback, this);
+
+    queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);
+
+    WaitForAllOperations();
+    mMapReadBuffer.Unmap();
+}
+
+// Test that queue.OnWorkDone callback happens before mMapReadBuffer.MapAsync callback when
+// queue.Signal is called before mMapReadBuffer.MapAsync. The callback order should
+// happen in the order the functions are called.
+TEST_P(QueueTimelineTests, OnWorkDone_MapRead) {
+    testing::InSequence sequence;
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+    EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);
+
+    queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);
+
+    mMapReadBuffer.MapAsync(wgpu::MapMode::Read, 0, wgpu::kWholeMapSize, ToMockMapCallback, this);
+
+    WaitForAllOperations();
+    mMapReadBuffer.Unmap();
+}
+
+DAWN_INSTANTIATE_TEST(QueueTimelineTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
new file mode 100644
index 0000000..fd6690e
--- /dev/null
+++ b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
@@ -0,0 +1,332 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static uint32_t kSize = 4;
+
+namespace {
+    using TextureFormat = wgpu::TextureFormat;
+    DAWN_TEST_PARAM_STRUCT(ReadOnlyDepthStencilAttachmentTestsParams, TextureFormat);
+}  // namespace
+
+class ReadOnlyDepthStencilAttachmentTests
+    : public DawnTestWithParams<ReadOnlyDepthStencilAttachmentTestsParams> {
+  protected:
+    struct DepthStencilValues {
+        float depthInitValue;
+        uint32_t stencilInitValue;
+        uint32_t stencilRefValue;
+    };
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        switch (GetParam().mTextureFormat) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth24UnormStencil8};
+                }
+
+                return {};
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth32FloatStencil8};
+                }
+
+                return {};
+            default:
+                mIsFormatSupported = true;
+                return {};
+        }
+    }
+
+    bool IsFormatSupported() const {
+        return mIsFormatSupported;
+    }
+
+    wgpu::RenderPipeline CreateRenderPipeline(wgpu::TextureAspect aspect,
+                                              wgpu::TextureFormat format,
+                                              bool sampleFromAttachment) {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        // Draw a rectangle via two triangles. The depth value of the top of the rectangle is 0.4.
+        // The depth value of the bottom is 0.0. The depth value gradually change from 0.4 to 0.0
+        // from the top to the bottom. The top part will compare with the depth values and fail to
+        // pass the depth test. The bottom part will compare with the depth values in depth buffer
+        // and pass the depth test, and sample from the depth buffer in fragment shader in the same
+        // pipeline.
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec3<f32>, 6>(
+                    vec3<f32>(-1.0,  1.0, 0.4),
+                    vec3<f32>(-1.0, -1.0, 0.0),
+                    vec3<f32>( 1.0,  1.0, 0.4),
+                    vec3<f32>( 1.0,  1.0, 0.4),
+                    vec3<f32>(-1.0, -1.0, 0.0),
+                    vec3<f32>( 1.0, -1.0, 0.0));
+                return vec4<f32>(pos[VertexIndex], 1.0);
+            })");
+
+        if (!sampleFromAttachment) {
+            // Draw a solid blue into color buffer if not sample from depth/stencil attachment.
+            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 1.0, 0.0);
+            })");
+        } else {
+            // Sample from depth/stencil attachment and draw that sampled texel into color buffer.
+            if (aspect == wgpu::TextureAspect::DepthOnly) {
+                pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+                @group(0) @binding(0) var samp : sampler;
+                @group(0) @binding(1) var tex : texture_depth_2d;
+
+                @stage(fragment)
+                fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+                    return vec4<f32>(textureSample(tex, samp, FragCoord.xy), 0.0, 0.0, 0.0);
+                })");
+            } else {
+                ASSERT(aspect == wgpu::TextureAspect::StencilOnly);
+                pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+                @group(0) @binding(0) var tex : texture_2d<u32>;
+
+                @stage(fragment)
+                fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+                    var texel = textureLoad(tex, vec2<i32>(FragCoord.xy), 0);
+                    return vec4<f32>(f32(texel[0]) / 255.0, 0.0, 0.0, 0.0);
+                })");
+            }
+        }
+
+        // Enable depth or stencil test. But depth/stencil write is not enabled.
+        wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
+        if (aspect == wgpu::TextureAspect::DepthOnly) {
+            depthStencil->depthCompare = wgpu::CompareFunction::LessEqual;
+        } else {
+            depthStencil->stencilFront.compare = wgpu::CompareFunction::LessEqual;
+        }
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::Texture CreateTexture(wgpu::TextureFormat format, wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.size = {kSize, kSize, 1};
+        descriptor.format = format;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    void DoTest(wgpu::TextureAspect aspect,
+                wgpu::TextureFormat format,
+                wgpu::Texture colorTexture,
+                DepthStencilValues* values,
+                bool sampleFromAttachment) {
+        wgpu::TextureUsage dsTextureUsage = wgpu::TextureUsage::RenderAttachment;
+        if (sampleFromAttachment) {
+            dsTextureUsage |= wgpu::TextureUsage::TextureBinding;
+        }
+        wgpu::Texture depthStencilTexture = CreateTexture(format, dsTextureUsage);
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+        // Note that we must encompass all aspects for texture view used in attachment.
+        wgpu::TextureView depthStencilViewInAttachment = depthStencilTexture.CreateView();
+        utils::ComboRenderPassDescriptor passDescriptorInit({}, depthStencilViewInAttachment);
+        passDescriptorInit.UnsetDepthStencilLoadStoreOpsForFormat(format);
+        if (aspect == wgpu::TextureAspect::DepthOnly) {
+            passDescriptorInit.cDepthStencilAttachmentInfo.depthClearValue = values->depthInitValue;
+        } else {
+            ASSERT(aspect == wgpu::TextureAspect::StencilOnly);
+            passDescriptorInit.cDepthStencilAttachmentInfo.stencilClearValue =
+                values->stencilInitValue;
+        }
+        wgpu::RenderPassEncoder passInit = commandEncoder.BeginRenderPass(&passDescriptorInit);
+        passInit.End();
+
+        // Note that we can only select one single aspect for texture view used in bind group.
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.aspect = aspect;
+        wgpu::TextureView depthStencilViewInBindGroup = depthStencilTexture.CreateView(&viewDesc);
+
+        // Create a render pass to initialize the depth/stencil attachment.
+        utils::ComboRenderPassDescriptor passDescriptor({colorTexture.CreateView()},
+                                                        depthStencilViewInAttachment);
+        // Set both aspects to readonly. We have to do this if the format has both aspects, or
+        // it doesn't impact anything if the format has only one aspect.
+        passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        // Create a render pass with readonly depth/stencil attachment. The attachment has already
+        // been initialized. The pipeline in this render pass will sample from the attachment.
+        // The pipeline will read from the attachment to do depth/stencil test too.
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&passDescriptor);
+        wgpu::RenderPipeline pipeline = CreateRenderPipeline(aspect, format, sampleFromAttachment);
+        pass.SetPipeline(pipeline);
+        if (aspect == wgpu::TextureAspect::DepthOnly) {
+            if (sampleFromAttachment) {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+                    device, pipeline.GetBindGroupLayout(0),
+                    {{0, device.CreateSampler()}, {1, depthStencilViewInBindGroup}});
+                pass.SetBindGroup(0, bindGroup);
+            }
+        } else {
+            ASSERT(aspect == wgpu::TextureAspect::StencilOnly);
+            if (sampleFromAttachment) {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+                    device, pipeline.GetBindGroupLayout(0), {{0, depthStencilViewInBindGroup}});
+                pass.SetBindGroup(0, bindGroup);
+            }
+            pass.SetStencilReference(values->stencilRefValue);
+        }
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+  private:
+    bool mIsFormatSupported = false;
+};
+
+class ReadOnlyDepthAttachmentTests : public ReadOnlyDepthStencilAttachmentTests {
+  protected:
+    void SetUp() override {
+        ReadOnlyDepthStencilAttachmentTests::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+    }
+};
+
+TEST_P(ReadOnlyDepthAttachmentTests, SampleFromAttachment) {
+    wgpu::Texture colorTexture =
+        CreateTexture(wgpu::TextureFormat::RGBA8Unorm,
+                      wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    wgpu::TextureFormat depthFormat = GetParam().mTextureFormat;
+
+    DepthStencilValues values;
+    values.depthInitValue = 0.2;
+
+    DoTest(wgpu::TextureAspect::DepthOnly, depthFormat, colorTexture, &values, true);
+
+    // The top part is not rendered by the pipeline. Its color is the default clear color for
+    // color attachment.
+    const std::vector<RGBA8> kExpectedTopColors(kSize * kSize / 2, {0, 0, 0, 0});
+    // The bottom part is rendered, whose red channel is sampled from depth attachment, which
+    // is initialized into 0.2.
+    const std::vector<RGBA8> kExpectedBottomColors(kSize * kSize / 2,
+                                                   {static_cast<uint8_t>(0.2 * 255), 0, 0, 0});
+    EXPECT_TEXTURE_EQ(kExpectedTopColors.data(), colorTexture, {0, 0}, {kSize, kSize / 2});
+    EXPECT_TEXTURE_EQ(kExpectedBottomColors.data(), colorTexture, {0, kSize / 2},
+                      {kSize, kSize / 2});
+}
+
+TEST_P(ReadOnlyDepthAttachmentTests, NotSampleFromAttachment) {
+    wgpu::Texture colorTexture =
+        CreateTexture(wgpu::TextureFormat::RGBA8Unorm,
+                      wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    wgpu::TextureFormat depthFormat = GetParam().mTextureFormat;
+
+    DepthStencilValues values;
+    values.depthInitValue = 0.2;
+
+    DoTest(wgpu::TextureAspect::DepthOnly, depthFormat, colorTexture, &values, false);
+
+    // The top part is not rendered by the pipeline. Its color is the default clear color for
+    // color attachment.
+    const std::vector<RGBA8> kExpectedTopColors(kSize * kSize / 2, {0, 0, 0, 0});
+    // The bottom part is rendered. Its color is set to blue.
+    const std::vector<RGBA8> kExpectedBottomColors(kSize * kSize / 2, {0, 0, 255, 0});
+    EXPECT_TEXTURE_EQ(kExpectedTopColors.data(), colorTexture, {0, 0}, {kSize, kSize / 2});
+    EXPECT_TEXTURE_EQ(kExpectedBottomColors.data(), colorTexture, {0, kSize / 2},
+                      {kSize, kSize / 2});
+}
+
+class ReadOnlyStencilAttachmentTests : public ReadOnlyDepthStencilAttachmentTests {
+  protected:
+    void SetUp() override {
+        ReadOnlyDepthStencilAttachmentTests::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(!IsFormatSupported());
+    }
+};
+
+TEST_P(ReadOnlyStencilAttachmentTests, SampleFromAttachment) {
+    wgpu::Texture colorTexture =
+        CreateTexture(wgpu::TextureFormat::RGBA8Unorm,
+                      wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    wgpu::TextureFormat stencilFormat = GetParam().mTextureFormat;
+
+    DepthStencilValues values;
+    values.stencilInitValue = 3;
+    values.stencilRefValue = 2;
+    // stencilRefValue < stencilValue (stencilInitValue), so stencil test passes. The pipeline
+    // samples from stencil buffer and writes into color buffer.
+    DoTest(wgpu::TextureAspect::StencilOnly, stencilFormat, colorTexture, &values, true);
+    const std::vector<RGBA8> kSampledColors(kSize * kSize, {3, 0, 0, 0});
+    EXPECT_TEXTURE_EQ(kSampledColors.data(), colorTexture, {0, 0}, {kSize, kSize});
+
+    values.stencilInitValue = 1;
+    // stencilRefValue > stencilValue (stencilInitValue), so stencil test fails. The pipeline
+    // doesn't change color buffer. Sampled data from stencil buffer is discarded.
+    DoTest(wgpu::TextureAspect::StencilOnly, stencilFormat, colorTexture, &values, true);
+    const std::vector<RGBA8> kInitColors(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_TEXTURE_EQ(kInitColors.data(), colorTexture, {0, 0}, {kSize, kSize});
+}
+
+TEST_P(ReadOnlyStencilAttachmentTests, NotSampleFromAttachment) {
+    wgpu::Texture colorTexture =
+        CreateTexture(wgpu::TextureFormat::RGBA8Unorm,
+                      wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc);
+
+    wgpu::TextureFormat stencilFormat = GetParam().mTextureFormat;
+
+    DepthStencilValues values;
+    values.stencilInitValue = 3;
+    values.stencilRefValue = 2;
+    // stencilRefValue < stencilValue (stencilInitValue), so stencil test passes. The pipeline
+    // draw solid blue into color buffer.
+    DoTest(wgpu::TextureAspect::StencilOnly, stencilFormat, colorTexture, &values, false);
+    const std::vector<RGBA8> kSampledColors(kSize * kSize, {0, 0, 255, 0});
+    EXPECT_TEXTURE_EQ(kSampledColors.data(), colorTexture, {0, 0}, {kSize, kSize});
+
+    values.stencilInitValue = 1;
+    // stencilRefValue > stencilValue (stencilInitValue), so stencil test fails. The pipeline
+    // doesn't change color buffer. drawing data is discarded.
+    DoTest(wgpu::TextureAspect::StencilOnly, stencilFormat, colorTexture, &values, false);
+    const std::vector<RGBA8> kInitColors(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_TEXTURE_EQ(kInitColors.data(), colorTexture, {0, 0}, {kSize, kSize});
+}
+
+DAWN_INSTANTIATE_TEST_P(ReadOnlyDepthAttachmentTests,
+                        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}),
+                         MetalBackend(), VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kDepthFormats.begin(),
+                                                         utils::kDepthFormats.end()));
+DAWN_INSTANTIATE_TEST_P(ReadOnlyStencilAttachmentTests,
+                        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}),
+                         MetalBackend(), VulkanBackend()},
+                        std::vector<wgpu::TextureFormat>(utils::kStencilFormats.begin(),
+                                                         utils::kStencilFormats.end()));
diff --git a/src/dawn/tests/end2end/RenderAttachmentTests.cpp b/src/dawn/tests/end2end/RenderAttachmentTests.cpp
new file mode 100644
index 0000000..eacc666
--- /dev/null
+++ b/src/dawn/tests/end2end/RenderAttachmentTests.cpp
@@ -0,0 +1,84 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class RenderAttachmentTest : public DawnTest {};
+
+// Test that it is ok to have more fragment outputs than color attachments.
+// There should be no backend validation errors or indexing out-of-bounds.
+TEST_P(RenderAttachmentTest, MoreFragmentOutputsThanAttachments) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct Output {
+            @location(0) color0 : vec4<f32>,
+            @location(1) color1 : vec4<f32>,
+            @location(2) color2 : vec4<f32>,
+            @location(3) color3 : vec4<f32>,
+        }
+
+        @stage(fragment)
+        fn main() -> Output {
+            var output : Output;
+            output.color0 = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            output.color1 = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            output.color2 = vec4<f32>(0.0, 0.0, 1.0, 1.0);
+            output.color3 = vec4<f32>(1.0, 1.0, 0.0, 1.0);
+            return output;
+        })");
+
+    // Fragment outputs 1, 2, 3 are written in the shader, but unused by the pipeline.
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = vsModule;
+    pipelineDesc.cFragment.module = fsModule;
+    pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    pipelineDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    pipelineDesc.cFragment.targetCount = 1;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::TextureDescriptor textureDesc;
+    textureDesc.size = {1, 1, 1};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+    wgpu::Texture renderTarget = device.CreateTexture(&textureDesc);
+    utils::ComboRenderPassDescriptor renderPass({renderTarget.CreateView()});
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+    pass.SetPipeline(pipeline);
+    pass.Draw(1);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderTarget, 0, 0);
+}
+
+DAWN_INSTANTIATE_TEST(RenderAttachmentTest,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_render_pass"}),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/RenderBundleTests.cpp b/src/dawn/tests/end2end/RenderBundleTests.cpp
new file mode 100644
index 0000000..98ccce8
--- /dev/null
+++ b/src/dawn/tests/end2end/RenderBundleTests.cpp
@@ -0,0 +1,201 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+const RGBA8 kColors[2] = {RGBA8::kGreen, RGBA8::kBlue};
+
+// RenderBundleTest tests simple usage of RenderBundles to draw. The implementaiton
+// of RenderBundle is shared significantly with render pass execution which is
+// tested in all other rendering tests.
+class RenderBundleTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            struct Ubo {
+                color : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> fragmentUniformBuffer : Ubo;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return fragmentUniformBuffer.color;
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+
+        float colors0[] = {kColors[0].r / 255.f, kColors[0].g / 255.f, kColors[0].b / 255.f,
+                           kColors[0].a / 255.f};
+        float colors1[] = {kColors[1].r / 255.f, kColors[1].g / 255.f, kColors[1].b / 255.f,
+                           kColors[1].a / 255.f};
+
+        wgpu::Buffer buffer0 = utils::CreateBufferFromData(device, colors0, 4 * sizeof(float),
+                                                           wgpu::BufferUsage::Uniform);
+        wgpu::Buffer buffer1 = utils::CreateBufferFromData(device, colors1, 4 * sizeof(float),
+                                                           wgpu::BufferUsage::Uniform);
+
+        bindGroups[0] = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                             {{0, buffer0, 0, 4 * sizeof(float)}});
+        bindGroups[1] = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                             {{0, buffer1, 0, 4 * sizeof(float)}});
+
+        vertexBuffer = utils::CreateBufferFromData<float>(
+            device, wgpu::BufferUsage::Vertex,
+            {// The bottom left triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 1.0f,
+
+             // The top right triangle
+             -1.0f, 1.0f, 0.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
+    }
+
+    utils::BasicRenderPass renderPass;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+    wgpu::BindGroup bindGroups[2];
+};
+
+// Basic test of RenderBundle.
+TEST_P(RenderBundleTest, Basic) {
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.colorFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+    renderBundleEncoder.SetPipeline(pipeline);
+    renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder.SetBindGroup(0, bindGroups[0]);
+    renderBundleEncoder.Draw(6);
+
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(kColors[0], renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(kColors[0], renderPass.color, 3, 1);
+}
+
+// Test execution of multiple render bundles
+TEST_P(RenderBundleTest, MultipleBundles) {
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.colorFormat;
+
+    wgpu::RenderBundle renderBundles[2];
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.SetBindGroup(0, bindGroups[0]);
+        renderBundleEncoder.Draw(3);
+
+        renderBundles[0] = renderBundleEncoder.Finish();
+    }
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.SetBindGroup(0, bindGroups[1]);
+        renderBundleEncoder.Draw(3, 1, 3);
+
+        renderBundles[1] = renderBundleEncoder.Finish();
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.ExecuteBundles(2, renderBundles);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(kColors[0], renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(kColors[1], renderPass.color, 3, 1);
+}
+
+// Test execution of a bundle along with render pass commands.
+TEST_P(RenderBundleTest, BundleAndRenderPassCommands) {
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.colorFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+    renderBundleEncoder.SetPipeline(pipeline);
+    renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder.SetBindGroup(0, bindGroups[0]);
+    renderBundleEncoder.Draw(3);
+
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.ExecuteBundles(1, &renderBundle);
+
+    pass.SetPipeline(pipeline);
+    pass.SetVertexBuffer(0, vertexBuffer);
+    pass.SetBindGroup(0, bindGroups[1]);
+    pass.Draw(3, 1, 3);
+
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(kColors[0], renderPass.color, 1, 3);
+    EXPECT_PIXEL_RGBA8_EQ(kColors[1], renderPass.color, 3, 1);
+}
+
+DAWN_INSTANTIATE_TEST(RenderBundleTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
new file mode 100644
index 0000000..1ec75b7
--- /dev/null
+++ b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
@@ -0,0 +1,294 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+
+constexpr static unsigned int kRTSize = 16;
+
+class DrawQuad {
+  public:
+    DrawQuad() {
+    }
+    DrawQuad(wgpu::Device device, const char* vsSource, const char* fsSource) : device(device) {
+        vsModule = utils::CreateShaderModule(device, vsSource);
+        fsModule = utils::CreateShaderModule(device, fsSource);
+
+        pipelineLayout = utils::MakeBasicPipelineLayout(device, nullptr);
+    }
+
+    void Draw(wgpu::RenderPassEncoder* pass) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.layout = pipelineLayout;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        auto renderPipeline = device.CreateRenderPipeline(&descriptor);
+
+        pass->SetPipeline(renderPipeline);
+        pass->Draw(6, 1, 0, 0);
+    }
+
+  private:
+    wgpu::Device device;
+    wgpu::ShaderModule vsModule = {};
+    wgpu::ShaderModule fsModule = {};
+    wgpu::PipelineLayout pipelineLayout = {};
+};
+
+class RenderPassLoadOpTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = kRTSize;
+        descriptor.size.height = kRTSize;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        renderTarget = device.CreateTexture(&descriptor);
+
+        renderTargetView = renderTarget.CreateView();
+
+        std::fill(expectZero.begin(), expectZero.end(), RGBA8::kZero);
+
+        std::fill(expectGreen.begin(), expectGreen.end(), RGBA8::kGreen);
+
+        std::fill(expectBlue.begin(), expectBlue.end(), RGBA8::kBlue);
+
+        // draws a blue quad on the right half of the screen
+        const char* vsSource = R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>( 0.0, -1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>( 0.0,  1.0),
+                    vec2<f32>( 0.0,  1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0));
+
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })";
+
+        const char* fsSource = R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 1.0, 1.0);
+            })";
+        blueQuad = DrawQuad(device, vsSource, fsSource);
+    }
+
+    template <class T>
+    void TestIntegerClearColor(wgpu::TextureFormat format,
+                               const wgpu::Color& clearColor,
+                               const std::array<T, 4>& expectedPixelValue) {
+        constexpr wgpu::Extent3D kTextureSize = {1, 1, 1};
+
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+        textureDescriptor.size = kTextureSize;
+        textureDescriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        textureDescriptor.format = format;
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({texture.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.End();
+
+        const uint64_t bufferSize = sizeof(T) * expectedPixelValue.size();
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = bufferSize;
+        bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(buffer, 0, kTextureBytesPerRowAlignment);
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &kTextureSize);
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(expectedPixelValue.data()),
+                                   buffer, 0, bufferSize / sizeof(uint32_t));
+    }
+
+    wgpu::Texture renderTarget;
+    wgpu::TextureView renderTargetView;
+
+    std::array<RGBA8, kRTSize * kRTSize> expectZero;
+    std::array<RGBA8, kRTSize * kRTSize> expectGreen;
+    std::array<RGBA8, kRTSize * kRTSize> expectBlue;
+
+    DrawQuad blueQuad = {};
+};
+
+// Tests clearing, loading, and drawing into color attachments
+TEST_P(RenderPassLoadOpTests, ColorClearThenLoadAndDraw) {
+    // Part 1: clear once, check to make sure it's cleared
+    utils::ComboRenderPassDescriptor renderPassClearZero({renderTargetView});
+    auto commandsClearZeroEncoder = device.CreateCommandEncoder();
+    auto clearZeroPass = commandsClearZeroEncoder.BeginRenderPass(&renderPassClearZero);
+    clearZeroPass.End();
+    auto commandsClearZero = commandsClearZeroEncoder.Finish();
+
+    utils::ComboRenderPassDescriptor renderPassClearGreen({renderTargetView});
+    renderPassClearGreen.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+    auto commandsClearGreenEncoder = device.CreateCommandEncoder();
+    auto clearGreenPass = commandsClearGreenEncoder.BeginRenderPass(&renderPassClearGreen);
+    clearGreenPass.End();
+    auto commandsClearGreen = commandsClearGreenEncoder.Finish();
+
+    queue.Submit(1, &commandsClearZero);
+    EXPECT_TEXTURE_EQ(expectZero.data(), renderTarget, {0, 0}, {kRTSize, kRTSize});
+
+    queue.Submit(1, &commandsClearGreen);
+    EXPECT_TEXTURE_EQ(expectGreen.data(), renderTarget, {0, 0}, {kRTSize, kRTSize});
+
+    // Part 2: draw a blue quad into the right half of the render target, and check result
+    utils::ComboRenderPassDescriptor renderPassLoad({renderTargetView});
+    renderPassLoad.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+    wgpu::CommandBuffer commandsLoad;
+    {
+        auto encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassLoad);
+        blueQuad.Draw(&pass);
+        pass.End();
+        commandsLoad = encoder.Finish();
+    }
+
+    queue.Submit(1, &commandsLoad);
+    // Left half should still be green
+    EXPECT_TEXTURE_EQ(expectGreen.data(), renderTarget, {0, 0}, {kRTSize / 2, kRTSize});
+    // Right half should now be blue
+    EXPECT_TEXTURE_EQ(expectBlue.data(), renderTarget, {kRTSize / 2, 0}, {kRTSize / 2, kRTSize});
+}
+
+// Test clearing a color attachment with signed and unsigned integer formats.
+TEST_P(RenderPassLoadOpTests, LoadOpClearOnIntegerFormats) {
+    // RGBA8Uint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, 3.3f, 254.8f, 255.0f};
+        constexpr std::array<uint8_t, 4> kExpectedPixelValue = {2, 3, 254, 255};
+        TestIntegerClearColor<uint8_t>(wgpu::TextureFormat::RGBA8Uint, kClearColor,
+                                       kExpectedPixelValue);
+    }
+
+    // RGBA8Sint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, -3.3f, 126.8f, -128.0f};
+        constexpr std::array<int8_t, 4> kExpectedPixelValue = {2, -3, 126, -128};
+        TestIntegerClearColor<int8_t>(wgpu::TextureFormat::RGBA8Sint, kClearColor,
+                                      kExpectedPixelValue);
+    }
+
+    // RGBA16Uint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, 3.3f, 512.7f, 65535.f};
+        constexpr std::array<uint16_t, 4> kExpectedPixelValue = {2, 3, 512, 65535u};
+        TestIntegerClearColor<uint16_t>(wgpu::TextureFormat::RGBA16Uint, kClearColor,
+                                        kExpectedPixelValue);
+    }
+
+    // RGBA16Sint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, -3.3f, 32767.8f, -32768.0f};
+        constexpr std::array<int16_t, 4> kExpectedPixelValue = {2, -3, 32767, -32768};
+        TestIntegerClearColor<int16_t>(wgpu::TextureFormat::RGBA16Sint, kClearColor,
+                                       kExpectedPixelValue);
+    }
+
+    // RGBA32Uint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, 3.3f, 65534.8f, 65537.f};
+        constexpr std::array<uint32_t, 4> kExpectedPixelValue = {2, 3, 65534, 65537};
+        TestIntegerClearColor<uint32_t>(wgpu::TextureFormat::RGBA32Uint, kClearColor,
+                                        kExpectedPixelValue);
+    }
+
+    // RGBA32Sint
+    {
+        constexpr wgpu::Color kClearColor = {2.f, -3.3f, 65534.8f, -65537.f};
+        constexpr std::array<int32_t, 4> kExpectedPixelValue = {2, -3, 65534, -65537};
+        TestIntegerClearColor<int32_t>(wgpu::TextureFormat::RGBA32Sint, kClearColor,
+                                       kExpectedPixelValue);
+    }
+}
+
+// This test verifies that input double values are being rendered correctly when clearing.
+TEST_P(RenderPassLoadOpTests, LoadOpClearIntegerFormatsToLargeValues) {
+    // TODO(http://crbug.com/dawn/537): Implemement a workaround to enable clearing integer formats
+    // to large values on D3D12.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12());
+
+    // TODO(crbug.com/dawn/1109): Re-enable once fixed on Mac Mini 8,1s w/ 11.5.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel() && IsMacOS(11, 5));
+
+    constexpr double kUint32MaxDouble = 4294967295.0;
+    constexpr uint32_t kUint32Max = static_cast<uint32_t>(kUint32MaxDouble);
+    // RGBA32Uint for UINT32_MAX
+    {
+        constexpr wgpu::Color kClearColor = {kUint32MaxDouble, kUint32MaxDouble, kUint32MaxDouble,
+                                             kUint32MaxDouble};
+        constexpr std::array<uint32_t, 4> kExpectedPixelValue = {kUint32Max, kUint32Max, kUint32Max,
+                                                                 kUint32Max};
+        TestIntegerClearColor<uint32_t>(wgpu::TextureFormat::RGBA32Uint, kClearColor,
+                                        kExpectedPixelValue);
+    }
+
+    constexpr double kSint32MaxDouble = 2147483647.0;
+    constexpr int32_t kSint32Max = static_cast<int32_t>(kSint32MaxDouble);
+
+    constexpr double kSint32MinDouble = -2147483648.0;
+    constexpr int32_t kSint32Min = static_cast<int32_t>(kSint32MinDouble);
+
+    // RGBA32Sint for SINT32 upper bound.
+    {
+        constexpr wgpu::Color kClearColor = {kSint32MaxDouble, kSint32MaxDouble, kSint32MaxDouble,
+                                             kSint32MaxDouble};
+        constexpr std::array<int32_t, 4> kExpectedPixelValue = {kSint32Max, kSint32Max, kSint32Max,
+                                                                kSint32Max};
+        TestIntegerClearColor<int32_t>(wgpu::TextureFormat::RGBA32Sint, kClearColor,
+                                       kExpectedPixelValue);
+    }
+
+    // RGBA32Sint for SINT32 lower bound.
+    {
+        constexpr wgpu::Color kClearColor = {kSint32MinDouble, kSint32MinDouble, kSint32MinDouble,
+                                             kSint32MinDouble};
+        constexpr std::array<int32_t, 4> kExpectedPixelValue = {kSint32Min, kSint32Min, kSint32Min,
+                                                                kSint32Min};
+        TestIntegerClearColor<int32_t>(wgpu::TextureFormat::RGBA32Sint, kClearColor,
+                                       kExpectedPixelValue);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(RenderPassLoadOpTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/RenderPassTests.cpp b/src/dawn/tests/end2end/RenderPassTests.cpp
new file mode 100644
index 0000000..85d39c0
--- /dev/null
+++ b/src/dawn/tests/end2end/RenderPassTests.cpp
@@ -0,0 +1,172 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 16;
+constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+class RenderPassTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // Shaders to draw a bottom-left triangle in blue.
+        mVSModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>(-1.0, -1.0));
+
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 1.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = mVSModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.cTargets[0].format = kFormat;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+    }
+
+    wgpu::Texture CreateDefault2DTexture() {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = kRTSize;
+        descriptor.size.height = kRTSize;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = kFormat;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::ShaderModule mVSModule;
+    wgpu::RenderPipeline pipeline;
+};
+
+// Test using two different render passes in one commandBuffer works correctly.
+TEST_P(RenderPassTest, TwoRenderPassesInOneCommandBuffer) {
+    if (IsOpenGL() || IsMetal()) {
+        // crbug.com/950768
+        // This test is consistently failing on OpenGL and flaky on Metal.
+        return;
+    }
+
+    wgpu::Texture renderTarget1 = CreateDefault2DTexture();
+    wgpu::Texture renderTarget2 = CreateDefault2DTexture();
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    {
+        // In the first render pass we clear renderTarget1 to red and draw a blue triangle in the
+        // bottom left of renderTarget1.
+        utils::ComboRenderPassDescriptor renderPass({renderTarget1.CreateView()});
+        renderPass.cColorAttachments[0].clearValue = {1.0f, 0.0f, 0.0f, 1.0f};
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline);
+        pass.Draw(3);
+        pass.End();
+    }
+
+    {
+        // In the second render pass we clear renderTarget2 to green and draw a blue triangle in the
+        // bottom left of renderTarget2.
+        utils::ComboRenderPassDescriptor renderPass({renderTarget2.CreateView()});
+        renderPass.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline);
+        pass.Draw(3);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kBlue, renderTarget1, 1, kRTSize - 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderTarget1, kRTSize - 1, 1);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kBlue, renderTarget2, 1, kRTSize - 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderTarget2, kRTSize - 1, 1);
+}
+
+// Verify that the content in the color attachment will not be changed if there is no corresponding
+// fragment shader outputs in the render pipeline, the load operation is LoadOp::Load and the store
+// operation is StoreOp::Store.
+TEST_P(RenderPassTest, NoCorrespondingFragmentShaderOutputs) {
+    wgpu::Texture renderTarget = CreateDefault2DTexture();
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::TextureView renderTargetView = renderTarget.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderTargetView});
+    renderPass.cColorAttachments[0].clearValue = {1.0f, 0.0f, 0.0f, 1.0f};
+    renderPass.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPass.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+
+    {
+        // First we draw a blue triangle in the bottom left of renderTarget.
+        pass.SetPipeline(pipeline);
+        pass.Draw(3);
+    }
+
+    {
+        // Next we use a pipeline whose fragment shader has no outputs.
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() {
+            })");
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = mVSModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.cTargets[0].format = kFormat;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+        wgpu::RenderPipeline pipelineWithNoFragmentOutput =
+            device.CreateRenderPipeline(&descriptor);
+
+        pass.SetPipeline(pipelineWithNoFragmentOutput);
+        pass.Draw(3);
+    }
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kBlue, renderTarget, 1, kRTSize - 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderTarget, kRTSize - 1, 1);
+}
+
+DAWN_INSTANTIATE_TEST(RenderPassTest,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_render_pass"}),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
new file mode 100644
index 0000000..78856a8
--- /dev/null
+++ b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
@@ -0,0 +1,127 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+enum class Type { B2TCopy, T2BCopy };
+
+constexpr static wgpu::Extent3D kCopySize = {1, 1, 2};
+constexpr static uint64_t kOffset = 0;
+constexpr static uint64_t kBytesPerRow = 256;
+constexpr static uint64_t kRowsPerImagePadding = 1;
+constexpr static uint64_t kRowsPerImage = kRowsPerImagePadding + kCopySize.height;
+constexpr static wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+// Tests in this file are used to expose an error on D3D12 about required minimum buffer size.
+// See detailed bug reports at crbug.com/dawn/1278, 1288, 1289.
+
+// When we do B2T or T2B copy from/to a buffer with paddings, it may wrongly calculate
+// the required buffer size on D3D12.
+
+// Using the data in this test as an example, in which copySize = {1, 1, 2}, offset = 0, bytesPerRow
+// = 256, and rowsPerImage = 2 (there is 1-row padding for every image), and assuming we are copying
+// a non-compressed format like rgba8unorm, the required minimum buffer size should be:
+//   offset + bytesPerRow * rowsPerImage * (copySize.depthOrArrayLayers - 1)
+//     + bytesPerRow * (copySize.height - 1) + bytesPerBlock * copySize.width.
+// It is 0 + 256 * 2 * (2 - 1) + 256 * (1 - 1) + 4 * 1 = 516.
+
+// However, the required minimum buffer size on D3D12 (including WARP) is:
+//   offset + bytesPerRow * rowsPerImage * (copySize.depthOrArrayLayers - 1)
+//     + bytesPerRow * (rowsPerImage - 1) + bytesPerBlock * copySize.width.
+// Or
+//   offset + bytesPerRow * rowsPerImage * copySize.depthOrArrayLayers
+//     + bytesPerBlock * copySize.width - bytesPerRow.
+// It is 0 + 256 * 2 * (2 - 1) + 256 * (2 - 1) + 4 * 1 = 772.
+
+// It looks like D3D12 requires unnecessary buffer storage for rowsPerImagePadding in the last
+// image. It does respect bytesPerRowPadding in the last row and doesn't require storage for
+// that part, though.
+
+class RequiredBufferSizeInCopyTests : public DawnTest {
+  protected:
+    void DoTest(const uint64_t bufferSize, Type copyType) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = bufferSize;
+        descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        wgpu::TextureDescriptor texDesc = {};
+        texDesc.dimension = wgpu::TextureDimension::e3D;
+        texDesc.size = kCopySize;
+        texDesc.format = kFormat;
+        texDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&texDesc);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(buffer, kOffset, kBytesPerRow, kRowsPerImage);
+
+        wgpu::CommandEncoder encoder = this->device.CreateCommandEncoder();
+        switch (copyType) {
+            case Type::T2BCopy: {
+                std::vector<uint32_t> expectedData(bufferSize / 4, 1);
+                wgpu::TextureDataLayout textureDataLayout =
+                    utils::CreateTextureDataLayout(kOffset, kBytesPerRow, kRowsPerImage);
+
+                queue.WriteTexture(&imageCopyTexture, expectedData.data(), bufferSize,
+                                   &textureDataLayout, &kCopySize);
+
+                encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &kCopySize);
+                break;
+            }
+            case Type::B2TCopy:
+                encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &kCopySize);
+                break;
+        }
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+};
+
+TEST_P(RequiredBufferSizeInCopyTests, T2BCopyWithAbundantBufferSize) {
+    uint64_t size = kOffset + kBytesPerRow * kRowsPerImage * kCopySize.depthOrArrayLayers;
+    DoTest(size, Type::T2BCopy);
+}
+
+TEST_P(RequiredBufferSizeInCopyTests, B2TCopyWithAbundantBufferSize) {
+    uint64_t size = kOffset + kBytesPerRow * kRowsPerImage * kCopySize.depthOrArrayLayers;
+    DoTest(size, Type::B2TCopy);
+}
+
+TEST_P(RequiredBufferSizeInCopyTests, T2BCopyWithMininumBufferSize) {
+    // TODO(crbug.com/dawn/1278, 1288, 1289): Required buffer size for copy is wrong on D3D12.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12());
+    uint64_t size =
+        kOffset + utils::RequiredBytesInCopy(kBytesPerRow, kRowsPerImage, kCopySize, kFormat);
+    DoTest(size, Type::T2BCopy);
+}
+
+TEST_P(RequiredBufferSizeInCopyTests, B2TCopyWithMininumBufferSize) {
+    // TODO(crbug.com/dawn/1278, 1288, 1289): Required buffer size for copy is wrong on D3D12.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12());
+    uint64_t size =
+        kOffset + utils::RequiredBytesInCopy(kBytesPerRow, kRowsPerImage, kCopySize, kFormat);
+    DoTest(size, Type::B2TCopy);
+}
+
+DAWN_INSTANTIATE_TEST(RequiredBufferSizeInCopyTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
new file mode 100644
index 0000000..86d94ca
--- /dev/null
+++ b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
@@ -0,0 +1,293 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cmath>
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 16;
+
+namespace {
+    // MipLevel colors, ordering from base level to high level
+    // each mipmap of the texture is having a different color
+    // so we can check if the sampler anisotropic filtering is fetching
+    // from the correct miplevel
+    const std::array<RGBA8, 3> colors = {RGBA8::kRed, RGBA8::kGreen, RGBA8::kBlue};
+}  // namespace
+
+class SamplerFilterAnisotropicTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        mRenderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            struct Uniforms {
+                matrix : mat4x4<f32>
+            }
+
+            struct VertexIn {
+                @location(0) position : vec4<f32>,
+                @location(1) uv : vec2<f32>,
+            }
+
+            @group(0) @binding(2) var<uniform> uniforms : Uniforms;
+
+            struct VertexOut {
+                @location(0) uv : vec2<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex)
+            fn main(input : VertexIn) -> VertexOut {
+                var output : VertexOut;
+                output.uv = input.uv;
+                output.position = uniforms.matrix * input.position;
+                return output;
+            }
+        )");
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+            struct FragmentIn {
+                @location(0) uv: vec2<f32>,
+                @builtin(position) fragCoord : vec4<f32>,
+            }
+
+            @stage(fragment)
+            fn main(input : FragmentIn) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, input.uv);
+            })");
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cBuffers[0].attributeCount = 2;
+        pipelineDescriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+        pipelineDescriptor.cAttributes[1].shaderLocation = 1;
+        pipelineDescriptor.cAttributes[1].offset = 4 * sizeof(float);
+        pipelineDescriptor.cAttributes[1].format = wgpu::VertexFormat::Float32x2;
+        pipelineDescriptor.vertex.bufferCount = 1;
+        pipelineDescriptor.cBuffers[0].arrayStride = 6 * sizeof(float);
+        pipelineDescriptor.cTargets[0].format = mRenderPass.colorFormat;
+
+        mPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+        mBindGroupLayout = mPipeline.GetBindGroupLayout(0);
+
+        InitTexture();
+    }
+
+    void InitTexture() {
+        const uint32_t mipLevelCount = colors.size();
+
+        const uint32_t textureWidthLevel0 = 1 << (mipLevelCount - 1);
+        const uint32_t textureHeightLevel0 = 1 << (mipLevelCount - 1);
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = textureWidthLevel0;
+        descriptor.size.height = textureHeightLevel0;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        const uint32_t rowPixels = kTextureBytesPerRowAlignment / sizeof(RGBA8);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        // Populate each mip level with a different color
+        for (uint32_t level = 0; level < mipLevelCount; ++level) {
+            const uint32_t texWidth = textureWidthLevel0 >> level;
+            const uint32_t texHeight = textureHeightLevel0 >> level;
+
+            const RGBA8 color = colors[level];
+
+            std::vector<RGBA8> data(rowPixels * texHeight, color);
+            wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+                device, data.data(), data.size() * sizeof(RGBA8), wgpu::BufferUsage::CopySrc);
+            wgpu::ImageCopyBuffer imageCopyBuffer =
+                utils::CreateImageCopyBuffer(stagingBuffer, 0, kTextureBytesPerRowAlignment);
+            wgpu::ImageCopyTexture imageCopyTexture =
+                utils::CreateImageCopyTexture(texture, level, {0, 0, 0});
+            wgpu::Extent3D copySize = {texWidth, texHeight, 1};
+            encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+        }
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+
+        mTextureView = texture.CreateView();
+    }
+
+    // void TestFilterAnisotropic(const FilterAnisotropicTestCase& testCase) {
+    void TestFilterAnisotropic(const uint16_t maxAnisotropy) {
+        wgpu::Sampler sampler;
+        {
+            wgpu::SamplerDescriptor descriptor = {};
+            descriptor.minFilter = wgpu::FilterMode::Linear;
+            descriptor.magFilter = wgpu::FilterMode::Linear;
+            descriptor.mipmapFilter = wgpu::FilterMode::Linear;
+            descriptor.maxAnisotropy = maxAnisotropy;
+            sampler = device.CreateSampler(&descriptor);
+        }
+
+        // The transform matrix gives us a slanted plane
+        // Tweaking happens at: https://jsfiddle.net/t8k7c95o/5/
+        // You can get an idea of what the test looks like at the url rendered by webgl
+        std::array<float, 16> transform = {-1.7320507764816284,
+                                           1.8322050568049563e-16,
+                                           -6.176817699518044e-17,
+                                           -6.170640314703498e-17,
+                                           -2.1211504944260596e-16,
+                                           -1.496108889579773,
+                                           0.5043753981590271,
+                                           0.5038710236549377,
+                                           0,
+                                           -43.63650894165039,
+                                           -43.232173919677734,
+                                           -43.18894577026367,
+                                           0,
+                                           21.693578720092773,
+                                           21.789791107177734,
+                                           21.86800193786621};
+        wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
+            device, transform.data(), sizeof(transform), wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, mBindGroupLayout,
+            {{0, sampler}, {1, mTextureView}, {2, transformBuffer, 0, sizeof(transform)}});
+
+        // The plane is scaled on z axis in the transform matrix
+        // so uv here is also scaled
+        // vertex attribute layout:
+        // position : vec4, uv : vec2
+        const float vertexData[] = {
+            -0.5, 0.5, -0.5, 1, 0, 0,  0.5, 0.5, -0.5, 1, 1, 0, -0.5, 0.5, 0.5, 1, 0, 50,
+            -0.5, 0.5, 0.5,  1, 0, 50, 0.5, 0.5, -0.5, 1, 1, 0, 0.5,  0.5, 0.5, 1, 1, 50,
+        };
+        wgpu::Buffer vertexBuffer = utils::CreateBufferFromData(
+            device, vertexData, sizeof(vertexData), wgpu::BufferUsage::Vertex);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&mRenderPass.renderPassInfo);
+            pass.SetPipeline(mPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // https://jsfiddle.net/t8k7c95o/5/
+        // (x, y) -> (8, [0,15)) full readpixels result on Win10 Nvidia D3D12 GPU
+        // maxAnisotropy: 1
+        //  0 - 00 00 00
+        //  1 - 00 00 ff
+        //  2 - 00 00 ff
+        //  3 - 00 00 ff
+        //  4 - 00 f9 06
+        //  5 - 00 f9 06
+        //  6 - f2 0d 00
+        //  7 - f2 0d 00
+        //  8 - ff 00 00
+        //  9 - ff 00 00
+        // 10 - ff 00 00
+        // 11 - ff 00 00
+        // 12 - ff 00 00
+        // 13 - ff 00 00
+        // 14 - ff 00 00
+        // 15 - ff 00 00
+
+        // maxAnisotropy: 2
+        //  0 - 00 00 00
+        //  1 - 00 00 ff
+        //  2 - 00 7e 81
+        //  3 - 00 7e 81
+        //  4 - ff 00 00
+        //  5 - ff 00 00
+        //  6 - ff 00 00
+        //  7 - ff 00 00
+        //  8 - ff 00 00
+        //  9 - ff 00 00
+        // 10 - ff 00 00
+        // 11 - ff 00 00
+        // 12 - ff 00 00
+        // 13 - ff 00 00
+        // 14 - ff 00 00
+        // 15 - ff 00 00
+
+        // maxAnisotropy: 16
+        //  0 - 00 00 00
+        //  1 - 00 00 ff
+        //  2 - dd 22 00
+        //  3 - dd 22 00
+        //  4 - ff 00 00
+        //  5 - ff 00 00
+        //  6 - ff 00 00
+        //  7 - ff 00 00
+        //  8 - ff 00 00
+        //  9 - ff 00 00
+        // 10 - ff 00 00
+        // 11 - ff 00 00
+        // 12 - ff 00 00
+        // 13 - ff 00 00
+        // 14 - ff 00 00
+        // 15 - ff 00 00
+
+        if (maxAnisotropy >= 16) {
+            EXPECT_PIXEL_RGBA8_BETWEEN(colors[0], colors[1], mRenderPass.color, 8, 2);
+            EXPECT_PIXEL_RGBA8_EQ(colors[0], mRenderPass.color, 8, 6);
+        } else if (maxAnisotropy == 2) {
+            EXPECT_PIXEL_RGBA8_BETWEEN(colors[1], colors[2], mRenderPass.color, 8, 2);
+            EXPECT_PIXEL_RGBA8_EQ(colors[0], mRenderPass.color, 8, 6);
+        } else if (maxAnisotropy <= 1) {
+            EXPECT_PIXEL_RGBA8_EQ(colors[2], mRenderPass.color, 8, 2);
+            EXPECT_PIXEL_RGBA8_BETWEEN(colors[0], colors[1], mRenderPass.color, 8, 6);
+        }
+    }
+
+    utils::BasicRenderPass mRenderPass;
+    wgpu::BindGroupLayout mBindGroupLayout;
+    wgpu::RenderPipeline mPipeline;
+    wgpu::TextureView mTextureView;
+};
+
+TEST_P(SamplerFilterAnisotropicTest, SlantedPlaneMipmap) {
+    // TODO(crbug.com/dawn/740): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+    const uint16_t maxAnisotropyLists[] = {1, 2, 16, 128};
+    for (uint16_t t : maxAnisotropyLists) {
+        TestFilterAnisotropic(t);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(SamplerFilterAnisotropicTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/SamplerTests.cpp b/src/dawn/tests/end2end/SamplerTests.cpp
new file mode 100644
index 0000000..491cae0
--- /dev/null
+++ b/src/dawn/tests/end2end/SamplerTests.cpp
@@ -0,0 +1,185 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cmath>
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static unsigned int kRTSize = 64;
+
+namespace {
+    struct AddressModeTestCase {
+        wgpu::AddressMode mMode;
+        uint8_t mExpected2;
+        uint8_t mExpected3;
+    };
+    AddressModeTestCase addressModes[] = {
+        {
+            wgpu::AddressMode::Repeat,
+            0,
+            255,
+        },
+        {
+            wgpu::AddressMode::MirrorRepeat,
+            255,
+            0,
+        },
+        {
+            wgpu::AddressMode::ClampToEdge,
+            255,
+            255,
+        },
+    };
+}  // namespace
+
+class SamplerTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        mRenderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        auto vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-2.0, -2.0),
+                    vec2<f32>(-2.0,  2.0),
+                    vec2<f32>( 2.0, -2.0),
+                    vec2<f32>(-2.0,  2.0),
+                    vec2<f32>( 2.0, -2.0),
+                    vec2<f32>( 2.0,  2.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            }
+        )");
+        auto fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, FragCoord.xy / vec2<f32>(2.0, 2.0));
+            })");
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].format = mRenderPass.colorFormat;
+
+        mPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+        mBindGroupLayout = mPipeline.GetBindGroupLayout(0);
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = 2;
+        descriptor.size.height = 2;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        // Create a 2x2 checkerboard texture, with black in the top left and bottom right corners.
+        const uint32_t rowPixels = kTextureBytesPerRowAlignment / sizeof(RGBA8);
+        RGBA8 data[rowPixels * 2];
+        data[0] = data[rowPixels + 1] = RGBA8::kBlack;
+        data[1] = data[rowPixels] = RGBA8::kWhite;
+
+        wgpu::Buffer stagingBuffer =
+            utils::CreateBufferFromData(device, data, sizeof(data), wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(stagingBuffer, 0, 256);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::Extent3D copySize = {2, 2, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+
+        mTextureView = texture.CreateView();
+    }
+
+    void TestAddressModes(AddressModeTestCase u, AddressModeTestCase v, AddressModeTestCase w) {
+        wgpu::Sampler sampler;
+        {
+            wgpu::SamplerDescriptor descriptor = {};
+            descriptor.minFilter = wgpu::FilterMode::Nearest;
+            descriptor.magFilter = wgpu::FilterMode::Nearest;
+            descriptor.mipmapFilter = wgpu::FilterMode::Nearest;
+            descriptor.addressModeU = u.mMode;
+            descriptor.addressModeV = v.mMode;
+            descriptor.addressModeW = w.mMode;
+            sampler = device.CreateSampler(&descriptor);
+        }
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, mBindGroupLayout, {{0, sampler}, {1, mTextureView}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&mRenderPass.renderPassInfo);
+            pass.SetPipeline(mPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        RGBA8 expectedU2(u.mExpected2, u.mExpected2, u.mExpected2, 255);
+        RGBA8 expectedU3(u.mExpected3, u.mExpected3, u.mExpected3, 255);
+        RGBA8 expectedV2(v.mExpected2, v.mExpected2, v.mExpected2, 255);
+        RGBA8 expectedV3(v.mExpected3, v.mExpected3, v.mExpected3, 255);
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kBlack, mRenderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kWhite, mRenderPass.color, 0, 1);
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kWhite, mRenderPass.color, 1, 0);
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kBlack, mRenderPass.color, 1, 1);
+        EXPECT_PIXEL_RGBA8_EQ(expectedU2, mRenderPass.color, 2, 0);
+        EXPECT_PIXEL_RGBA8_EQ(expectedU3, mRenderPass.color, 3, 0);
+        EXPECT_PIXEL_RGBA8_EQ(expectedV2, mRenderPass.color, 0, 2);
+        EXPECT_PIXEL_RGBA8_EQ(expectedV3, mRenderPass.color, 0, 3);
+        // TODO: add tests for W address mode, once Dawn supports 3D textures
+    }
+
+    utils::BasicRenderPass mRenderPass;
+    wgpu::BindGroupLayout mBindGroupLayout;
+    wgpu::RenderPipeline mPipeline;
+    wgpu::TextureView mTextureView;
+};
+
+// Test drawing a rect with a checkerboard texture with different address modes.
+TEST_P(SamplerTest, AddressMode) {
+    for (auto u : addressModes) {
+        for (auto v : addressModes) {
+            for (auto w : addressModes) {
+                TestAddressModes(u, v, w);
+            }
+        }
+    }
+}
+
+DAWN_INSTANTIATE_TEST(SamplerTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ScissorTests.cpp b/src/dawn/tests/end2end/ScissorTests.cpp
new file mode 100644
index 0000000..a660440
--- /dev/null
+++ b/src/dawn/tests/end2end/ScissorTests.cpp
@@ -0,0 +1,159 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ScissorTest : public DawnTest {
+  protected:
+    wgpu::RenderPipeline CreateQuadPipeline(wgpu::TextureFormat format) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0));
+                return vec4<f32>(pos[VertexIndex], 0.5, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = format;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+};
+
+// Test that by default the scissor test is disabled and the whole attachment can be drawn to.
+TEST_P(ScissorTest, DefaultsToWholeRenderTarget) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 100, 100);
+    wgpu::RenderPipeline pipeline = CreateQuadPipeline(renderPass.colorFormat);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 99);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 99, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 99, 99);
+}
+
+// Test setting a partial scissor (not empty, not full attachment)
+TEST_P(ScissorTest, PartialRect) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 100, 100);
+    wgpu::RenderPipeline pipeline = CreateQuadPipeline(renderPass.colorFormat);
+
+    constexpr uint32_t kX = 3;
+    constexpr uint32_t kY = 7;
+    constexpr uint32_t kW = 5;
+    constexpr uint32_t kH = 13;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetScissorRect(kX, kY, kW, kH);
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Test the two opposite corners of the scissor box. With one pixel inside and on outside
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, kX - 1, kY - 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, kX, kY);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, kX + kW, kY + kH);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, kX + kW - 1, kY + kH - 1);
+}
+
+// Test setting an empty scissor
+TEST_P(ScissorTest, EmptyRect) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 2, 2);
+    wgpu::RenderPipeline pipeline = CreateQuadPipeline(renderPass.colorFormat);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetScissorRect(1, 1, 0, 0);
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Test that no pixel was written.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 0, 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 1, 1);
+}
+// Test that the scissor setting doesn't get inherited between renderpasses
+TEST_P(ScissorTest, NoInheritanceBetweenRenderPass) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 100, 100);
+    wgpu::RenderPipeline pipeline = CreateQuadPipeline(renderPass.colorFormat);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    // RenderPass 1 set the scissor
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetScissorRect(1, 1, 1, 1);
+        pass.End();
+    }
+    // RenderPass 2 draw a full quad, it shouldn't be scissored
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 99);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 99, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 99, 99);
+}
+
+DAWN_INSTANTIATE_TEST(ScissorTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ShaderFloat16Tests.cpp b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
new file mode 100644
index 0000000..e2d95f3
--- /dev/null
+++ b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
@@ -0,0 +1,179 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class ShaderFloat16Tests : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsShaderFloat16Supported = SupportsFeatures({wgpu::FeatureName::DawnShaderFloat16});
+        if (!mIsShaderFloat16Supported) {
+            return {};
+        }
+
+        return {wgpu::FeatureName::DawnShaderFloat16};
+    }
+
+    bool IsShaderFloat16Supported() const {
+        return mIsShaderFloat16Supported;
+    }
+
+    bool mIsShaderFloat16Supported = false;
+};
+
+// Test basic 16bit float arithmetic and 16bit storage features.
+// TODO(crbug.com/tint/404): Implement float16 in Tint.
+TEST_P(ShaderFloat16Tests, DISABLED_Basic16BitFloatFeaturesTest) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsShaderFloat16Supported());
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsIntel());  // Flaky crashes. crbug.com/dawn/586
+
+    uint16_t uniformData[] = {Float32ToFloat16(1.23), Float32ToFloat16(0.0)};  // 0.0 is a padding.
+    wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+        device, &uniformData, sizeof(uniformData), wgpu::BufferUsage::Uniform);
+
+    uint16_t bufferInData[] = {Float32ToFloat16(2.34), Float32ToFloat16(0.0)};  // 0.0 is a padding.
+    wgpu::Buffer bufferIn = utils::CreateBufferFromData(device, &bufferInData, sizeof(bufferInData),
+                                                        wgpu::BufferUsage::Storage);
+
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 2 * sizeof(uint16_t);
+    bufferDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+    wgpu::Buffer bufferOut = device.CreateBuffer(&bufferDesc);
+
+    // SPIR-V ASM produced by glslang for the following fragment shader:
+    //
+    //   #version 450
+    //   #extension GL_AMD_gpu_shader_half_float : require
+    //
+    //   struct S {
+    //       float16_t f;
+    //       float16_t padding;
+    //   };
+    //   layout(std140, set = 0, binding = 0) uniform uniformBuf { S c; };
+    //   layout(std140, set = 0, binding = 1) readonly buffer bufA { S a; };
+    //   layout(std140, set = 0, binding = 2) buffer bufB { S b; };
+    //
+    //   void main() {
+    //       b.f = a.f + c.f;
+    //   }
+
+    wgpu::ShaderModule module = utils::CreateShaderModuleFromASM(device, R"(
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 10
+; Bound: 26
+; Schema: 0
+               OpCapability Shader
+               OpCapability Float16
+               OpCapability StorageBuffer16BitAccess
+               OpCapability UniformAndStorageBuffer16BitAccess
+               OpExtension "SPV_KHR_16bit_storage"
+          %1 = OpExtInstImport "GLSL.std.450"
+               OpMemoryModel Logical GLSL450
+               OpEntryPoint GLCompute %main "main"
+               OpExecutionMode %main LocalSize 1 1 1
+               OpSource GLSL 450
+               OpSourceExtension "GL_AMD_gpu_shader_half_float"
+               OpName %main "main"
+               OpName %S "S"
+               OpMemberName %S 0 "f"
+               OpMemberName %S 1 "padding"
+               OpName %bufB "bufB"
+               OpMemberName %bufB 0 "b"
+               OpName %_ ""
+               OpName %bufA "bufA"
+               OpMemberName %bufA 0 "a"
+               OpName %__0 ""
+               OpName %uniformBuf "uniformBuf"
+               OpMemberName %uniformBuf 0 "c"
+               OpName %__1 ""
+               OpMemberDecorate %S 0 Offset 0
+               OpMemberDecorate %S 1 Offset 2
+               OpMemberDecorate %bufB 0 Offset 0
+               OpDecorate %bufB BufferBlock
+               OpDecorate %_ DescriptorSet 0
+               OpDecorate %_ Binding 2
+               OpMemberDecorate %bufA 0 NonWritable
+               OpMemberDecorate %bufA 0 Offset 0
+               OpDecorate %bufA BufferBlock
+               OpDecorate %__0 DescriptorSet 0
+               OpDecorate %__0 Binding 1
+               OpMemberDecorate %uniformBuf 0 Offset 0
+               OpDecorate %uniformBuf Block
+               OpDecorate %__1 DescriptorSet 0
+               OpDecorate %__1 Binding 0
+       %void = OpTypeVoid
+          %3 = OpTypeFunction %void
+       %half = OpTypeFloat 16
+          %S = OpTypeStruct %half %half
+       %bufB = OpTypeStruct %S
+%_ptr_Uniform_bufB = OpTypePointer Uniform %bufB
+          %_ = OpVariable %_ptr_Uniform_bufB Uniform
+        %int = OpTypeInt 32 1
+      %int_0 = OpConstant %int 0
+       %bufA = OpTypeStruct %S
+%_ptr_Uniform_bufA = OpTypePointer Uniform %bufA
+        %__0 = OpVariable %_ptr_Uniform_bufA Uniform
+%_ptr_Uniform_half = OpTypePointer Uniform %half
+ %uniformBuf = OpTypeStruct %S
+%_ptr_Uniform_uniformBuf = OpTypePointer Uniform %uniformBuf
+        %__1 = OpVariable %_ptr_Uniform_uniformBuf Uniform
+       %main = OpFunction %void None %3
+          %5 = OpLabel
+         %17 = OpAccessChain %_ptr_Uniform_half %__0 %int_0 %int_0
+         %18 = OpLoad %half %17
+         %22 = OpAccessChain %_ptr_Uniform_half %__1 %int_0 %int_0
+         %23 = OpLoad %half %22
+         %24 = OpFAdd %half %18 %23
+         %25 = OpAccessChain %_ptr_Uniform_half %_ %int_0 %int_0
+               OpStore %25 %24
+               OpReturn
+               OpFunctionEnd
+    )");
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {
+                                                         {0, uniformBuffer, 0, sizeof(uniformData)},
+                                                         {1, bufferIn, 0, sizeof(bufferInData)},
+                                                         {2, bufferOut},
+                                                     });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Dispatch(1);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    uint16_t expected[] = {Float32ToFloat16(3.57), Float32ToFloat16(0.0)};  // 0.0 is a padding.
+
+    EXPECT_BUFFER_U16_RANGE_EQ(expected, bufferOut, 0, 2);
+}
+
+DAWN_INSTANTIATE_TEST(ShaderFloat16Tests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ShaderTests.cpp b/src/dawn/tests/end2end/ShaderTests.cpp
new file mode 100644
index 0000000..8951d9a
--- /dev/null
+++ b/src/dawn/tests/end2end/ShaderTests.cpp
@@ -0,0 +1,742 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <numeric>
+#include <vector>
+
+class ShaderTests : public DawnTest {
+  public:
+    wgpu::Buffer CreateBuffer(const uint32_t count) {
+        std::vector<uint32_t> data(count, 0);
+        uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+        return utils::CreateBufferFromData(device, data.data(), bufferSize,
+                                           wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+    }
+    wgpu::ComputePipeline CreateComputePipeline(
+        const std::string& shader,
+        const char* entryPoint,
+        const std::vector<wgpu::ConstantEntry>* constants = nullptr) {
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = utils::CreateShaderModule(device, shader.c_str());
+        csDesc.compute.entryPoint = entryPoint;
+        if (constants) {
+            csDesc.compute.constants = constants->data();
+            csDesc.compute.constantCount = constants->size();
+        }
+        return device.CreateComputePipeline(&csDesc);
+    }
+};
+
+// Test that log2 is being properly calculated, base on crbug.com/1046622
+TEST_P(ShaderTests, ComputeLog2) {
+    uint32_t const kSteps = 19;
+    std::vector<uint32_t> expected{0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 32};
+    wgpu::Buffer buffer = CreateBuffer(kSteps);
+
+    std::string shader = R"(
+struct Buf {
+    data : array<u32, 19>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    let factor : f32 = 1.0001;
+
+    buf.data[0] = u32(log2(1.0 * factor));
+    buf.data[1] = u32(log2(2.0 * factor));
+    buf.data[2] = u32(log2(3.0 * factor));
+    buf.data[3] = u32(log2(4.0 * factor));
+    buf.data[4] = u32(log2(7.0 * factor));
+    buf.data[5] = u32(log2(8.0 * factor));
+    buf.data[6] = u32(log2(15.0 * factor));
+    buf.data[7] = u32(log2(16.0 * factor));
+    buf.data[8] = u32(log2(31.0 * factor));
+    buf.data[9] = u32(log2(32.0 * factor));
+    buf.data[10] = u32(log2(63.0 * factor));
+    buf.data[11] = u32(log2(64.0 * factor));
+    buf.data[12] = u32(log2(127.0 * factor));
+    buf.data[13] = u32(log2(128.0 * factor));
+    buf.data[14] = u32(log2(255.0 * factor));
+    buf.data[15] = u32(log2(256.0 * factor));
+    buf.data[16] = u32(log2(511.0 * factor));
+    buf.data[17] = u32(log2(512.0 * factor));
+    buf.data[18] = u32(log2(4294967295.0 * factor));
+})";
+
+    wgpu::ComputePipeline pipeline = CreateComputePipeline(shader, "main");
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, kSteps);
+}
+
+TEST_P(ShaderTests, BadWGSL) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    std::string shader = R"(
+I am an invalid shader and should never pass validation!
+})";
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, shader.c_str()));
+}
+
+// Tests that shaders using non-struct function parameters and return values for shader stage I/O
+// can compile and link successfully.
+TEST_P(ShaderTests, WGSLParamIO) {
+    std::string vertexShader = R"(
+@stage(vertex)
+fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+    var pos = array<vec2<f32>, 3>(
+        vec2<f32>(-1.0,  1.0),
+        vec2<f32>( 1.0,  1.0),
+        vec2<f32>( 0.0, -1.0));
+    return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+})";
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader.c_str());
+
+    std::string fragmentShader = R"(
+@stage(fragment)
+fn main(@builtin(position) fragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+    return vec4<f32>(fragCoord.xy, 0.0, 1.0);
+})";
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader.c_str());
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+}
+
+// Tests that a vertex shader using struct function parameters and return values for shader stage
+// I/O can compile and link successfully against a fragement shader using compatible non-struct I/O.
+TEST_P(ShaderTests, WGSLMixedStructParamIO) {
+    std::string vertexShader = R"(
+struct VertexIn {
+    @location(0) position : vec3<f32>,
+    @location(1) color : vec4<f32>,
+}
+
+struct VertexOut {
+    @location(0) color : vec4<f32>,
+    @builtin(position) position : vec4<f32>,
+}
+
+@stage(vertex)
+fn main(input : VertexIn) -> VertexOut {
+    var output : VertexOut;
+    output.position = vec4<f32>(input.position, 1.0);
+    output.color = input.color;
+    return output;
+})";
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader.c_str());
+
+    std::string fragmentShader = R"(
+@stage(fragment)
+fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+    return color;
+})";
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader.c_str());
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].attributeCount = 2;
+    rpDesc.cBuffers[0].arrayStride = 28;
+    rpDesc.cAttributes[0].shaderLocation = 0;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x3;
+    rpDesc.cAttributes[1].shaderLocation = 1;
+    rpDesc.cAttributes[1].format = wgpu::VertexFormat::Float32x4;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+}
+
+// Tests that shaders using struct function parameters and return values for shader stage I/O
+// can compile and link successfully.
+TEST_P(ShaderTests, WGSLStructIO) {
+    std::string vertexShader = R"(
+struct VertexIn {
+    @location(0) position : vec3<f32>,
+    @location(1) color : vec4<f32>,
+}
+
+struct VertexOut {
+    @location(0) color : vec4<f32>,
+    @builtin(position) position : vec4<f32>,
+}
+
+@stage(vertex)
+fn main(input : VertexIn) -> VertexOut {
+    var output : VertexOut;
+    output.position = vec4<f32>(input.position, 1.0);
+    output.color = input.color;
+    return output;
+})";
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader.c_str());
+
+    std::string fragmentShader = R"(
+struct FragmentIn {
+    @location(0) color : vec4<f32>,
+    @builtin(position) fragCoord : vec4<f32>,
+}
+
+@stage(fragment)
+fn main(input : FragmentIn) -> @location(0) vec4<f32> {
+    return input.color * input.fragCoord;
+})";
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader.c_str());
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].attributeCount = 2;
+    rpDesc.cBuffers[0].arrayStride = 28;
+    rpDesc.cAttributes[0].shaderLocation = 0;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x3;
+    rpDesc.cAttributes[1].shaderLocation = 1;
+    rpDesc.cAttributes[1].format = wgpu::VertexFormat::Float32x4;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+}
+
+// Tests that shaders I/O structs that us compatible locations but are not sorted by hand can link.
+TEST_P(ShaderTests, WGSLUnsortedStructIO) {
+    std::string vertexShader = R"(
+struct VertexIn {
+    @location(0) position : vec3<f32>,
+    @location(1) color : vec4<f32>,
+}
+
+struct VertexOut {
+    @builtin(position) position : vec4<f32>,
+    @location(0) color : vec4<f32>,
+}
+
+@stage(vertex)
+fn main(input : VertexIn) -> VertexOut {
+    var output : VertexOut;
+    output.position = vec4<f32>(input.position, 1.0);
+    output.color = input.color;
+    return output;
+})";
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader.c_str());
+
+    std::string fragmentShader = R"(
+struct FragmentIn {
+    @location(0) color : vec4<f32>,
+    @builtin(position) fragCoord : vec4<f32>,
+}
+
+@stage(fragment)
+fn main(input : FragmentIn) -> @location(0) vec4<f32> {
+    return input.color * input.fragCoord;
+})";
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader.c_str());
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = vsModule;
+    rpDesc.cFragment.module = fsModule;
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].attributeCount = 2;
+    rpDesc.cBuffers[0].arrayStride = 28;
+    rpDesc.cAttributes[0].shaderLocation = 0;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x3;
+    rpDesc.cAttributes[1].shaderLocation = 1;
+    rpDesc.cAttributes[1].format = wgpu::VertexFormat::Float32x4;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+}
+
+// Tests that shaders I/O structs can be shared between vertex and fragment shaders.
+TEST_P(ShaderTests, WGSLSharedStructIO) {
+    std::string shader = R"(
+struct VertexIn {
+    @location(0) position : vec3<f32>,
+    @location(1) color : vec4<f32>,
+}
+
+struct VertexOut {
+    @location(0) color : vec4<f32>,
+    @builtin(position) position : vec4<f32>,
+}
+
+@stage(vertex)
+fn vertexMain(input : VertexIn) -> VertexOut {
+    var output : VertexOut;
+    output.position = vec4<f32>(input.position, 1.0);
+    output.color = input.color;
+    return output;
+}
+
+@stage(fragment)
+fn fragmentMain(input : VertexOut) -> @location(0) vec4<f32> {
+    return input.color;
+})";
+    wgpu::ShaderModule shaderModule = utils::CreateShaderModule(device, shader.c_str());
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = shaderModule;
+    rpDesc.vertex.entryPoint = "vertexMain";
+    rpDesc.cFragment.module = shaderModule;
+    rpDesc.cFragment.entryPoint = "fragmentMain";
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].attributeCount = 2;
+    rpDesc.cBuffers[0].arrayStride = 28;
+    rpDesc.cAttributes[0].shaderLocation = 0;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x3;
+    rpDesc.cAttributes[1].shaderLocation = 1;
+    rpDesc.cAttributes[1].format = wgpu::VertexFormat::Float32x4;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
+}
+
+// This is a regression test for an issue caused by the FirstIndexOffset transfrom being done before
+// the BindingRemapper, causing an intermediate AST to be invalid (and fail the overall
+// compilation).
+TEST_P(ShaderTests, FirstIndexOffsetRegisterConflictInHLSLTransforms) {
+    // TODO(crbug.com/dawn/658): Crashes on bots because there are two entrypoints in the shader.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    const char* shader = R"(
+// Dumped WGSL:
+
+struct Inputs {
+  @location(1) attrib1 : u32,
+  // The extra register added to handle base_vertex for vertex_index conflicts with [1]
+  @builtin(vertex_index) vertexIndex: u32,
+}
+
+// [1] a binding point that conflicts with the regitster
+struct S1 { data : array<vec4<u32>, 20> }
+@group(0) @binding(1) var<uniform> providedData1 : S1;
+
+@stage(vertex) fn vsMain(input : Inputs) -> @builtin(position) vec4<f32> {
+  _ = providedData1.data[input.vertexIndex][0];
+  return vec4<f32>();
+}
+
+@stage(fragment) fn fsMain() -> @location(0) vec4<f32> {
+  return vec4<f32>();
+}
+    )";
+    auto module = utils::CreateShaderModule(device, shader);
+
+    utils::ComboRenderPipelineDescriptor rpDesc;
+    rpDesc.vertex.module = module;
+    rpDesc.vertex.entryPoint = "vsMain";
+    rpDesc.cFragment.module = module;
+    rpDesc.cFragment.entryPoint = "fsMain";
+    rpDesc.vertex.bufferCount = 1;
+    rpDesc.cBuffers[0].attributeCount = 1;
+    rpDesc.cBuffers[0].arrayStride = 16;
+    rpDesc.cAttributes[0].shaderLocation = 1;
+    rpDesc.cAttributes[0].format = wgpu::VertexFormat::Uint8x2;
+    device.CreateRenderPipeline(&rpDesc);
+}
+
+// Test that WGSL built-in variable @sample_index can be used in fragment shaders.
+TEST_P(ShaderTests, SampleIndex) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+@stage(vertex)
+fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+    return pos;
+})");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+@stage(fragment) fn main(@builtin(sample_index) sampleIndex : u32)
+    -> @location(0) vec4<f32> {
+    return vec4<f32>(f32(sampleIndex), 1.0, 0.0, 1.0);
+})");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+    descriptor.vertex.bufferCount = 1;
+    descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+    descriptor.cBuffers[0].attributeCount = 1;
+    descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+    device.CreateRenderPipeline(&descriptor);
+}
+
+// Test overridable constants without numeric identifiers
+TEST_P(ShaderTests, OverridableConstants) {
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    uint32_t const kCount = 11;
+    std::vector<uint32_t> expected(kCount);
+    std::iota(expected.begin(), expected.end(), 0);
+    wgpu::Buffer buffer = CreateBuffer(kCount);
+
+    std::string shader = R"(
+override c0: bool;              // type: bool
+override c1: bool = false;      // default override
+override c2: f32;               // type: float32
+override c3: f32 = 0.0;         // default override
+override c4: f32 = 4.0;         // default
+override c5: i32;               // type: int32
+override c6: i32 = 0;           // default override
+override c7: i32 = 7;           // default
+override c8: u32;               // type: uint32
+override c9: u32 = 0u;          // default override
+override c10: u32 = 10u;        // default
+
+struct Buf {
+    data : array<u32, 11>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    buf.data[0] = u32(c0);
+    buf.data[1] = u32(c1);
+    buf.data[2] = u32(c2);
+    buf.data[3] = u32(c3);
+    buf.data[4] = u32(c4);
+    buf.data[5] = u32(c5);
+    buf.data[6] = u32(c6);
+    buf.data[7] = u32(c7);
+    buf.data[8] = u32(c8);
+    buf.data[9] = u32(c9);
+    buf.data[10] = u32(c10);
+})";
+
+    std::vector<wgpu::ConstantEntry> constants;
+    constants.push_back({nullptr, "c0", 0});
+    constants.push_back({nullptr, "c1", 1});
+    constants.push_back({nullptr, "c2", 2});
+    constants.push_back({nullptr, "c3", 3});
+    // c4 is not assigned, testing default value
+    constants.push_back({nullptr, "c5", 5});
+    constants.push_back({nullptr, "c6", 6});
+    // c7 is not assigned, testing default value
+    constants.push_back({nullptr, "c8", 8});
+    constants.push_back({nullptr, "c9", 9});
+    // c10 is not assigned, testing default value
+
+    wgpu::ComputePipeline pipeline = CreateComputePipeline(shader, "main", &constants);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, kCount);
+}
+
+// Test overridable constants with numeric identifiers
+TEST_P(ShaderTests, OverridableConstantsNumericIdentifiers) {
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    uint32_t const kCount = 4;
+    std::vector<uint32_t> expected{1u, 2u, 3u, 0u};
+    wgpu::Buffer buffer = CreateBuffer(kCount);
+
+    std::string shader = R"(
+@id(1001) override c1: u32;            // some big numeric id
+@id(1) override c2: u32 = 0u;          // id == 1 might collide with some generated constant id
+@id(1003) override c3: u32 = 3u;       // default
+@id(1004) override c4: u32;            // default unspecified
+
+struct Buf {
+    data : array<u32, 4>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    buf.data[0] = c1;
+    buf.data[1] = c2;
+    buf.data[2] = c3;
+    buf.data[3] = c4;
+})";
+
+    std::vector<wgpu::ConstantEntry> constants;
+    constants.push_back({nullptr, "1001", 1});
+    constants.push_back({nullptr, "1", 2});
+    // c3 is not assigned, testing default value
+    constants.push_back({nullptr, "1004", 0});
+
+    wgpu::ComputePipeline pipeline = CreateComputePipeline(shader, "main", &constants);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, kCount);
+}
+
+// Test overridable constants precision
+// D3D12 HLSL shader uses defines so we want float number to have enough precision
+TEST_P(ShaderTests, OverridableConstantsPrecision) {
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    uint32_t const kCount = 2;
+    float const kValue1 = 3.14159;
+    float const kValue2 = 3.141592653589793238;
+    std::vector<float> expected{kValue1, kValue2};
+    wgpu::Buffer buffer = CreateBuffer(kCount);
+
+    std::string shader = R"(
+@id(1001) override c1: f32;
+@id(1002) override c2: f32;
+
+struct Buf {
+    data : array<f32, 2>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    buf.data[0] = c1;
+    buf.data[1] = c2;
+})";
+
+    std::vector<wgpu::ConstantEntry> constants;
+    constants.push_back({nullptr, "1001", kValue1});
+    constants.push_back({nullptr, "1002", kValue2});
+    wgpu::ComputePipeline pipeline = CreateComputePipeline(shader, "main", &constants);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer}});
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1);
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_FLOAT_RANGE_EQ(expected.data(), buffer, 0, kCount);
+}
+
+// Test overridable constants for different entry points
+TEST_P(ShaderTests, OverridableConstantsMultipleEntryPoints) {
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    uint32_t const kCount = 1;
+    std::vector<uint32_t> expected1{1u};
+    std::vector<uint32_t> expected2{2u};
+    std::vector<uint32_t> expected3{3u};
+
+    wgpu::Buffer buffer1 = CreateBuffer(kCount);
+    wgpu::Buffer buffer2 = CreateBuffer(kCount);
+    wgpu::Buffer buffer3 = CreateBuffer(kCount);
+
+    std::string shader = R"(
+@id(1001) override c1: u32;
+@id(1002) override c2: u32;
+
+struct Buf {
+    data : array<u32, 1>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main1() {
+    buf.data[0] = c1;
+}
+
+@stage(compute) @workgroup_size(1) fn main2() {
+    buf.data[0] = c2;
+}
+
+@stage(compute) @workgroup_size(1) fn main3() {
+    buf.data[0] = 3u;
+}
+)";
+
+    std::vector<wgpu::ConstantEntry> constants1;
+    constants1.push_back({nullptr, "1001", 1});
+    std::vector<wgpu::ConstantEntry> constants2;
+    constants2.push_back({nullptr, "1002", 2});
+
+    wgpu::ShaderModule shaderModule = utils::CreateShaderModule(device, shader.c_str());
+
+    wgpu::ComputePipelineDescriptor csDesc1;
+    csDesc1.compute.module = shaderModule;
+    csDesc1.compute.entryPoint = "main1";
+    csDesc1.compute.constants = constants1.data();
+    csDesc1.compute.constantCount = constants1.size();
+    wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc1);
+
+    wgpu::ComputePipelineDescriptor csDesc2;
+    csDesc2.compute.module = shaderModule;
+    csDesc2.compute.entryPoint = "main2";
+    csDesc2.compute.constants = constants2.data();
+    csDesc2.compute.constantCount = constants2.size();
+    wgpu::ComputePipeline pipeline2 = device.CreateComputePipeline(&csDesc2);
+
+    wgpu::ComputePipelineDescriptor csDesc3;
+    csDesc3.compute.module = shaderModule;
+    csDesc3.compute.entryPoint = "main3";
+    wgpu::ComputePipeline pipeline3 = device.CreateComputePipeline(&csDesc3);
+
+    wgpu::BindGroup bindGroup1 =
+        utils::MakeBindGroup(device, pipeline1.GetBindGroupLayout(0), {{0, buffer1}});
+    wgpu::BindGroup bindGroup2 =
+        utils::MakeBindGroup(device, pipeline2.GetBindGroupLayout(0), {{0, buffer2}});
+    wgpu::BindGroup bindGroup3 =
+        utils::MakeBindGroup(device, pipeline3.GetBindGroupLayout(0), {{0, buffer3}});
+
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline1);
+        pass.SetBindGroup(0, bindGroup1);
+        pass.Dispatch(1);
+
+        pass.SetPipeline(pipeline2);
+        pass.SetBindGroup(0, bindGroup2);
+        pass.Dispatch(1);
+
+        pass.SetPipeline(pipeline3);
+        pass.SetBindGroup(0, bindGroup3);
+        pass.Dispatch(1);
+
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected1.data(), buffer1, 0, kCount);
+    EXPECT_BUFFER_U32_RANGE_EQ(expected2.data(), buffer2, 0, kCount);
+    EXPECT_BUFFER_U32_RANGE_EQ(expected3.data(), buffer3, 0, kCount);
+}
+
+// Test overridable constants with render pipeline
+// Draw a triangle covering the render target, with vertex position and color values from
+// overridable constants
+TEST_P(ShaderTests, OverridableConstantsRenderPipeline) {
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL());
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+@id(1111) override xright: f32;
+@id(2222) override ytop: f32;
+@stage(vertex)
+fn main(@builtin(vertex_index) VertexIndex : u32)
+     -> @builtin(position) vec4<f32> {
+  var pos = array<vec2<f32>, 3>(
+      vec2<f32>(-1.0, ytop),
+      vec2<f32>(-1.0, -ytop),
+      vec2<f32>(xright, 0.0));
+
+  return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+})");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+@id(1000) override intensity: f32 = 0.0;
+@stage(fragment) fn main()
+    -> @location(0) vec4<f32> {
+    return vec4<f32>(intensity, intensity, intensity, 1.0);
+})");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+    descriptor.cTargets[0].format = renderPass.colorFormat;
+
+    std::vector<wgpu::ConstantEntry> vertexConstants;
+    vertexConstants.push_back({nullptr, "1111", 3.0});  // x right
+    vertexConstants.push_back({nullptr, "2222", 3.0});  // y top
+    descriptor.vertex.constants = vertexConstants.data();
+    descriptor.vertex.constantCount = vertexConstants.size();
+    std::vector<wgpu::ConstantEntry> fragmentConstants;
+    fragmentConstants.push_back({nullptr, "1000", 1.0});  // color intensity
+    descriptor.cFragment.constants = fragmentConstants.data();
+    descriptor.cFragment.constantCount = fragmentConstants.size();
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.Draw(3);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(255, 255, 255, 255), renderPass.color, 0, 0);
+}
+
+// TODO(tint:1155): Test overridable constants used for workgroup size
+
+DAWN_INSTANTIATE_TEST(ShaderTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/StorageTextureTests.cpp b/src/dawn/tests/end2end/StorageTextureTests.cpp
new file mode 100644
index 0000000..ca29686
--- /dev/null
+++ b/src/dawn/tests/end2end/StorageTextureTests.cpp
@@ -0,0 +1,943 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    bool OpenGLESSupportsStorageTexture(wgpu::TextureFormat format) {
+        // TODO(crbug.com/dawn/595): 32-bit RG* formats are unsupported on OpenGL ES.
+        return format != wgpu::TextureFormat::RG32Float &&
+               format != wgpu::TextureFormat::RG32Sint && format != wgpu::TextureFormat::RG32Uint;
+    }
+}  // namespace
+
+class StorageTextureTests : public DawnTest {
+  public:
+    static void FillExpectedData(void* pixelValuePtr,
+                                 wgpu::TextureFormat format,
+                                 uint32_t x,
+                                 uint32_t y,
+                                 uint32_t depthOrArrayLayer) {
+        const uint32_t pixelValue = 1 + x + kWidth * (y + kHeight * depthOrArrayLayer);
+        ASSERT(pixelValue <= 255u / 4);
+
+        switch (format) {
+            // 32-bit unsigned integer formats
+            case wgpu::TextureFormat::R32Uint: {
+                uint32_t* valuePtr = static_cast<uint32_t*>(pixelValuePtr);
+                *valuePtr = pixelValue;
+                break;
+            }
+
+            case wgpu::TextureFormat::RG32Uint: {
+                uint32_t* valuePtr = static_cast<uint32_t*>(pixelValuePtr);
+                valuePtr[0] = pixelValue;
+                valuePtr[1] = pixelValue * 2;
+                break;
+            }
+
+            case wgpu::TextureFormat::RGBA32Uint: {
+                uint32_t* valuePtr = static_cast<uint32_t*>(pixelValuePtr);
+                valuePtr[0] = pixelValue;
+                valuePtr[1] = pixelValue * 2;
+                valuePtr[2] = pixelValue * 3;
+                valuePtr[3] = pixelValue * 4;
+                break;
+            }
+
+            // 32-bit signed integer formats
+            case wgpu::TextureFormat::R32Sint: {
+                int32_t* valuePtr = static_cast<int32_t*>(pixelValuePtr);
+                *valuePtr = static_cast<int32_t>(pixelValue);
+                break;
+            }
+
+            case wgpu::TextureFormat::RG32Sint: {
+                int32_t* valuePtr = static_cast<int32_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<int32_t>(pixelValue);
+                valuePtr[1] = -static_cast<int32_t>(pixelValue);
+                break;
+            }
+
+            case wgpu::TextureFormat::RGBA32Sint: {
+                int32_t* valuePtr = static_cast<int32_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<int32_t>(pixelValue);
+                valuePtr[1] = -static_cast<int32_t>(pixelValue);
+                valuePtr[2] = static_cast<int32_t>(pixelValue * 2);
+                valuePtr[3] = -static_cast<int32_t>(pixelValue * 2);
+                break;
+            }
+
+            // 32-bit float formats
+            case wgpu::TextureFormat::R32Float: {
+                float_t* valuePtr = static_cast<float_t*>(pixelValuePtr);
+                *valuePtr = static_cast<float_t>(pixelValue * 1.1f);
+                break;
+            }
+
+            case wgpu::TextureFormat::RG32Float: {
+                float_t* valuePtr = static_cast<float_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<float_t>(pixelValue * 1.1f);
+                valuePtr[1] = -static_cast<float_t>(pixelValue * 2.2f);
+                break;
+            }
+
+            case wgpu::TextureFormat::RGBA32Float: {
+                float_t* valuePtr = static_cast<float_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<float_t>(pixelValue * 1.1f);
+                valuePtr[1] = -static_cast<float_t>(pixelValue * 1.1f);
+                valuePtr[2] = static_cast<float_t>(pixelValue * 2.2f);
+                valuePtr[3] = -static_cast<float_t>(pixelValue * 2.2f);
+                break;
+            }
+
+            // 16-bit (unsigned integer, signed integer and float) 4-component formats
+            case wgpu::TextureFormat::RGBA16Uint: {
+                uint16_t* valuePtr = static_cast<uint16_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<uint16_t>(pixelValue);
+                valuePtr[1] = static_cast<uint16_t>(pixelValue * 2);
+                valuePtr[2] = static_cast<uint16_t>(pixelValue * 3);
+                valuePtr[3] = static_cast<uint16_t>(pixelValue * 4);
+                break;
+            }
+            case wgpu::TextureFormat::RGBA16Sint: {
+                int16_t* valuePtr = static_cast<int16_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<int16_t>(pixelValue);
+                valuePtr[1] = -static_cast<int16_t>(pixelValue);
+                valuePtr[2] = static_cast<int16_t>(pixelValue * 2);
+                valuePtr[3] = -static_cast<int16_t>(pixelValue * 2);
+                break;
+            }
+
+            case wgpu::TextureFormat::RGBA16Float: {
+                uint16_t* valuePtr = static_cast<uint16_t*>(pixelValuePtr);
+                valuePtr[0] = Float32ToFloat16(static_cast<float_t>(pixelValue));
+                valuePtr[1] = Float32ToFloat16(-static_cast<float_t>(pixelValue));
+                valuePtr[2] = Float32ToFloat16(static_cast<float_t>(pixelValue * 2));
+                valuePtr[3] = Float32ToFloat16(-static_cast<float_t>(pixelValue * 2));
+                break;
+            }
+
+            // 8-bit (normalized/non-normalized signed/unsigned integer) 4-component formats
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Uint: {
+                RGBA8* valuePtr = static_cast<RGBA8*>(pixelValuePtr);
+                *valuePtr = RGBA8(pixelValue, pixelValue * 2, pixelValue * 3, pixelValue * 4);
+                break;
+            }
+
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGBA8Sint: {
+                int8_t* valuePtr = static_cast<int8_t*>(pixelValuePtr);
+                valuePtr[0] = static_cast<int8_t>(pixelValue);
+                valuePtr[1] = -static_cast<int8_t>(pixelValue);
+                valuePtr[2] = static_cast<int8_t>(pixelValue) * 2;
+                valuePtr[3] = -static_cast<int8_t>(pixelValue) * 2;
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+    }
+
+    std::string GetImageDeclaration(wgpu::TextureFormat format,
+                                    std::string accessQualifier,
+                                    wgpu::TextureViewDimension dimension,
+                                    uint32_t binding) {
+        std::ostringstream ostream;
+        ostream << "@group(0) @binding(" << binding << ") "
+                << "var storageImage" << binding << " : ";
+        switch (dimension) {
+            case wgpu::TextureViewDimension::e1D:
+                ostream << "texture_storage_1d";
+                break;
+            case wgpu::TextureViewDimension::e2D:
+                ostream << "texture_storage_2d";
+                break;
+            case wgpu::TextureViewDimension::e2DArray:
+                ostream << "texture_storage_2d_array";
+                break;
+            case wgpu::TextureViewDimension::e3D:
+                ostream << "texture_storage_3d";
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+        ostream << "<" << utils::GetWGSLImageFormatQualifier(format) << ", ";
+        ostream << accessQualifier << ">;";
+        return ostream.str();
+    }
+
+    const char* GetExpectedPixelValue(wgpu::TextureFormat format) {
+        switch (format) {
+            // non-normalized unsigned integer formats
+            case wgpu::TextureFormat::R32Uint:
+                return "vec4<u32>(u32(value), 0u, 0u, 1u)";
+
+            case wgpu::TextureFormat::RG32Uint:
+                return "vec4<u32>(u32(value), u32(value) * 2u, 0u, 1u)";
+
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA32Uint:
+                return "vec4<u32>(u32(value), u32(value) * 2u, "
+                       "u32(value) * 3u, u32(value) * 4u)";
+
+            // non-normalized signed integer formats
+            case wgpu::TextureFormat::R32Sint:
+                return "vec4<i32>(i32(value), 0, 0, 1)";
+
+            case wgpu::TextureFormat::RG32Sint:
+                return "vec4<i32>(i32(value), -i32(value), 0, 1)";
+
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA32Sint:
+                return "vec4<i32>(i32(value), -i32(value), i32(value) * 2, -i32(value) * 2)";
+
+            // float formats
+            case wgpu::TextureFormat::R32Float:
+                return "vec4<f32>(f32(value) * 1.1, 0.0, 0.0, 1.0)";
+
+            case wgpu::TextureFormat::RG32Float:
+                return "vec4<f32>(f32(value) * 1.1, -f32(value) * 2.2, 0.0, 1.0)";
+
+            case wgpu::TextureFormat::RGBA16Float:
+                return "vec4<f32>(f32(value), -f32(value), "
+                       "f32(value) * 2.0, -f32(value) * 2.0)";
+
+            case wgpu::TextureFormat::RGBA32Float:
+                return "vec4<f32>(f32(value) * 1.1, -f32(value) * 1.1, "
+                       "f32(value) * 2.2, -f32(value) * 2.2)";
+
+            // normalized signed/unsigned integer formats
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return "vec4<f32>(f32(value) / 255.0, f32(value) / 255.0 * 2.0, "
+                       "f32(value) / 255.0 * 3.0, f32(value) / 255.0 * 4.0)";
+
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return "vec4<f32>(f32(value) / 127.0, -f32(value) / 127.0, "
+                       "f32(value) * 2.0 / 127.0, -f32(value) * 2.0 / 127.0)";
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+    }
+
+    const char* GetComparisonFunction(wgpu::TextureFormat format) {
+        switch (format) {
+            // non-normalized unsigned integer formats
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA32Uint:
+                return R"(
+fn IsEqualTo(pixel : vec4<u32>, expected : vec4<u32>) -> bool {
+  return all(pixel == expected);
+})";
+
+            // non-normalized signed integer formats
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA32Sint:
+                return R"(
+fn IsEqualTo(pixel : vec4<i32>, expected : vec4<i32>) -> bool {
+  return all(pixel == expected);
+})";
+
+            // float formats
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+                return R"(
+fn IsEqualTo(pixel : vec4<f32>, expected : vec4<f32>) -> bool {
+  return all(pixel == expected);
+})";
+
+            // normalized signed/unsigned integer formats
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Snorm:
+                // On Windows Intel drivers the tests will fail if tolerance <= 0.00000001f.
+                return R"(
+fn IsEqualTo(pixel : vec4<f32>, expected : vec4<f32>) -> bool {
+  let tolerance : f32 = 0.0000001;
+  return all(abs(pixel - expected) < vec4<f32>(tolerance, tolerance, tolerance, tolerance));
+})";
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        return "";
+    }
+
+    std::string CommonWriteOnlyTestCode(
+        const char* stage,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension dimension = wgpu::TextureViewDimension::e2D) {
+        std::string componentFmt = utils::GetWGSLColorTextureComponentType(format);
+        auto texelType = "vec4<" + componentFmt + ">";
+        std::string sliceCount;
+        std::string textureStore;
+        std::string textureSize = "textureDimensions(storageImage0).xy";
+        switch (dimension) {
+            case wgpu::TextureViewDimension::e1D:
+                sliceCount = "1";
+                textureStore = "textureStore(storageImage0, x, expected)";
+                textureSize = "vec2<i32>(textureDimensions(storageImage0), 1)";
+                break;
+            case wgpu::TextureViewDimension::e2D:
+                sliceCount = "1";
+                textureStore = "textureStore(storageImage0, vec2<i32>(x, y), expected)";
+                break;
+            case wgpu::TextureViewDimension::e2DArray:
+                sliceCount = "textureNumLayers(storageImage0)";
+                textureStore = "textureStore(storageImage0, vec2<i32>(x, y), slice, expected)";
+                break;
+            case wgpu::TextureViewDimension::e3D:
+                sliceCount = "textureDimensions(storageImage0).z";
+                textureStore = "textureStore(storageImage0, vec3<i32>(x, y, slice), expected)";
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+        const char* workgroupSize = !strcmp(stage, "compute") ? " @workgroup_size(1)" : "";
+        const bool isFragment = strcmp(stage, "fragment") == 0;
+
+        std::ostringstream ostream;
+        ostream << GetImageDeclaration(format, "write", dimension, 0) << "\n";
+        ostream << "@stage(" << stage << ")" << workgroupSize << "\n";
+        ostream << "fn main() ";
+        if (isFragment) {
+            ostream << "-> @location(0) vec4<f32> ";
+        }
+        ostream << "{\n";
+        ostream << "  let size : vec2<i32> = " << textureSize << ";\n";
+        ostream << "  let sliceCount : i32 = " << sliceCount << ";\n";
+        ostream << "  for (var slice : i32 = 0; slice < sliceCount; slice = slice + 1) {\n";
+        ostream << "    for (var y : i32 = 0; y < size.y; y = y + 1) {\n";
+        ostream << "      for (var x : i32 = 0; x < size.x; x = x + 1) {\n";
+        ostream << "        var value : i32 = " << kComputeExpectedValue << ";\n";
+        ostream << "        var expected : " << texelType << " = " << GetExpectedPixelValue(format)
+                << ";\n";
+        ostream << "        " << textureStore << ";\n";
+        ostream << "      }\n";
+        ostream << "    }\n";
+        ostream << "  }\n";
+        if (isFragment) {
+            ostream << "return vec4<f32>();\n";
+        }
+        ostream << "}\n";
+
+        return ostream.str();
+    }
+
+    static std::vector<uint8_t> GetExpectedData(wgpu::TextureFormat format,
+                                                uint32_t sliceCount = 1) {
+        const uint32_t texelSizeInBytes = utils::GetTexelBlockSizeInBytes(format);
+
+        std::vector<uint8_t> outputData(texelSizeInBytes * kWidth * kHeight * sliceCount);
+
+        for (uint32_t i = 0; i < outputData.size() / texelSizeInBytes; ++i) {
+            uint8_t* pixelValuePtr = &outputData[i * texelSizeInBytes];
+            const uint32_t x = i % kWidth;
+            const uint32_t y = (i % (kWidth * kHeight)) / kWidth;
+            const uint32_t slice = i / (kWidth * kHeight);
+            FillExpectedData(pixelValuePtr, format, x, y, slice);
+        }
+
+        return outputData;
+    }
+
+    wgpu::Texture CreateTexture(wgpu::TextureFormat format,
+                                wgpu::TextureUsage usage,
+                                const wgpu::Extent3D& size,
+                                wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.dimension = dimension;
+        descriptor.format = format;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::Texture CreateTextureWithTestData(
+        const std::vector<uint8_t>& initialTextureData,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension dimension = wgpu::TextureViewDimension::e2D) {
+        uint32_t texelSize = utils::GetTexelBlockSizeInBytes(format);
+        ASSERT(kWidth * texelSize <= kTextureBytesPerRowAlignment);
+
+        const uint32_t bytesPerTextureRow = texelSize * kWidth;
+        const uint32_t sliceCount =
+            static_cast<uint32_t>(initialTextureData.size() / texelSize / (kWidth * kHeight));
+        const size_t uploadBufferSize =
+            kTextureBytesPerRowAlignment * (kHeight * sliceCount - 1) + kWidth * bytesPerTextureRow;
+
+        std::vector<uint8_t> uploadBufferData(uploadBufferSize);
+        for (uint32_t slice = 0; slice < sliceCount; ++slice) {
+            const size_t initialDataOffset = bytesPerTextureRow * kHeight * slice;
+            for (size_t y = 0; y < kHeight; ++y) {
+                for (size_t x = 0; x < bytesPerTextureRow; ++x) {
+                    uint8_t data =
+                        initialTextureData[initialDataOffset + bytesPerTextureRow * y + x];
+                    size_t indexInUploadBuffer =
+                        (kHeight * slice + y) * kTextureBytesPerRowAlignment + x;
+                    uploadBufferData[indexInUploadBuffer] = data;
+                }
+            }
+        }
+        wgpu::Buffer uploadBuffer =
+            utils::CreateBufferFromData(device, uploadBufferData.data(), uploadBufferSize,
+                                        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+        wgpu::Texture outputTexture = CreateTexture(
+            format, wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopyDst,
+            {kWidth, kHeight, sliceCount}, utils::ViewDimensionToTextureDimension(dimension));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        const wgpu::Extent3D copyExtent = {kWidth, kHeight, sliceCount};
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(uploadBuffer, 0, kTextureBytesPerRowAlignment, kHeight);
+        wgpu::ImageCopyTexture imageCopyTexture;
+        imageCopyTexture.texture = outputTexture;
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copyExtent);
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        return outputTexture;
+    }
+
+    wgpu::ComputePipeline CreateComputePipeline(const char* computeShader) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, computeShader);
+        wgpu::ComputePipelineDescriptor computeDescriptor;
+        computeDescriptor.layout = nullptr;
+        computeDescriptor.compute.module = csModule;
+        computeDescriptor.compute.entryPoint = "main";
+        return device.CreateComputePipeline(&computeDescriptor);
+    }
+
+    wgpu::RenderPipeline CreateRenderPipeline(const char* vertexShader,
+                                              const char* fragmentShader) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader);
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader);
+
+        utils::ComboRenderPipelineDescriptor desc;
+        desc.vertex.module = vsModule;
+        desc.cFragment.module = fsModule;
+        desc.cTargets[0].format = kRenderAttachmentFormat;
+        desc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        return device.CreateRenderPipeline(&desc);
+    }
+
+    void CheckDrawsGreen(const char* vertexShader,
+                         const char* fragmentShader,
+                         wgpu::Texture readonlyStorageTexture) {
+        wgpu::RenderPipeline pipeline = CreateRenderPipeline(vertexShader, fragmentShader);
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, pipeline.GetBindGroupLayout(0), {{0, readonlyStorageTexture.CreateView()}});
+
+        // Clear the render attachment to red at the beginning of the render pass.
+        wgpu::Texture outputTexture = CreateTexture(
+            kRenderAttachmentFormat,
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, {1, 1});
+        utils::ComboRenderPassDescriptor renderPassDescriptor({outputTexture.CreateView()});
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        renderPassDescriptor.cColorAttachments[0].clearValue = {1.f, 0.f, 0.f, 1.f};
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPassEncoder.SetBindGroup(0, bindGroup);
+        renderPassEncoder.SetPipeline(pipeline);
+        renderPassEncoder.Draw(1);
+        renderPassEncoder.End();
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        // Check if the contents in the output texture are all as expected (green).
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, outputTexture, 0, 0)
+            << "\nVertex Shader:\n"
+            << vertexShader << "\n\nFragment Shader:\n"
+            << fragmentShader;
+    }
+
+    void CheckResultInStorageBuffer(
+        wgpu::Texture readonlyStorageTexture,
+        const std::string& computeShader,
+        wgpu::TextureViewDimension dimension = wgpu::TextureViewDimension::e2D) {
+        wgpu::ComputePipeline pipeline = CreateComputePipeline(computeShader.c_str());
+
+        // Clear the content of the result buffer into 0.
+        constexpr uint32_t kInitialValue = 0;
+        wgpu::Buffer resultBuffer =
+            utils::CreateBufferFromData(device, &kInitialValue, sizeof(kInitialValue),
+                                        wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = dimension;
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, pipeline.GetBindGroupLayout(0),
+            {{0, readonlyStorageTexture.CreateView(&descriptor)}, {1, resultBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computeEncoder = encoder.BeginComputePass();
+        computeEncoder.SetBindGroup(0, bindGroup);
+        computeEncoder.SetPipeline(pipeline);
+        computeEncoder.Dispatch(1);
+        computeEncoder.End();
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        // Check if the contents in the result buffer are what we expect.
+        constexpr uint32_t kExpectedValue = 1u;
+        EXPECT_BUFFER_U32_RANGE_EQ(&kExpectedValue, resultBuffer, 0, 1u);
+    }
+
+    void WriteIntoStorageTextureInRenderPass(wgpu::Texture writeonlyStorageTexture,
+                                             const char* vertexShader,
+                                             const char* fragmentShader) {
+        // Create a render pipeline that writes the expected pixel values into the storage texture
+        // without fragment shader outputs.
+        wgpu::RenderPipeline pipeline = CreateRenderPipeline(vertexShader, fragmentShader);
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, pipeline.GetBindGroupLayout(0), {{0, writeonlyStorageTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::Texture dummyOutputTexture = CreateTexture(
+            kRenderAttachmentFormat,
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, {1, 1});
+        utils::ComboRenderPassDescriptor renderPassDescriptor({dummyOutputTexture.CreateView()});
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPassEncoder.SetBindGroup(0, bindGroup);
+        renderPassEncoder.SetPipeline(pipeline);
+        renderPassEncoder.Draw(1);
+        renderPassEncoder.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+    }
+
+    void WriteIntoStorageTextureInComputePass(
+        wgpu::Texture writeonlyStorageTexture,
+        const char* computeShader,
+        wgpu::TextureViewDimension dimension = wgpu::TextureViewDimension::e2D) {
+        // Create a compute pipeline that writes the expected pixel values into the storage texture.
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = dimension;
+        wgpu::ComputePipeline pipeline = CreateComputePipeline(computeShader);
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, writeonlyStorageTexture.CreateView(&descriptor)}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = encoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup);
+        computePassEncoder.SetPipeline(pipeline);
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+    }
+
+    void ReadWriteIntoStorageTextureInComputePass(
+        wgpu::Texture readonlyStorageTexture,
+        wgpu::Texture writeonlyStorageTexture,
+        const char* computeShader,
+        wgpu::TextureViewDimension dimension = wgpu::TextureViewDimension::e2D) {
+        // Create a compute pipeline that writes the expected pixel values into the storage texture.
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = dimension;
+        wgpu::ComputePipeline pipeline = CreateComputePipeline(computeShader);
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                 {{0, writeonlyStorageTexture.CreateView(&descriptor)},
+                                  {1, readonlyStorageTexture.CreateView(&descriptor)}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = encoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup);
+        computePassEncoder.SetPipeline(pipeline);
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+    }
+
+    void CheckOutputStorageTexture(wgpu::Texture writeonlyStorageTexture,
+                                   wgpu::TextureFormat format,
+                                   const wgpu::Extent3D& size) {
+        const std::vector<uint8_t>& expectedData = GetExpectedData(format, size.depthOrArrayLayers);
+        CheckOutputStorageTexture(writeonlyStorageTexture, format, size, expectedData);
+    }
+
+    void CheckOutputStorageTexture(wgpu::Texture writeonlyStorageTexture,
+                                   wgpu::TextureFormat format,
+                                   const wgpu::Extent3D& size,
+                                   const std::vector<uint8_t>& expectedData) {
+        // Copy the content from the write-only storage texture to the result buffer.
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size =
+            utils::RequiredBytesInCopy(kTextureBytesPerRowAlignment, size.height, size, format);
+        descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer resultBuffer = device.CreateBuffer(&descriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::ImageCopyTexture imageCopyTexture =
+                utils::CreateImageCopyTexture(writeonlyStorageTexture, 0, {0, 0, 0});
+            wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(
+                resultBuffer, 0, kTextureBytesPerRowAlignment, size.height);
+            encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &size);
+        }
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        // Check if the contents in the result buffer are what we expect.
+        uint32_t texelSize = utils::GetTexelBlockSizeInBytes(format);
+        ASSERT(size.width * texelSize <= kTextureBytesPerRowAlignment);
+
+        for (size_t z = 0; z < size.depthOrArrayLayers; ++z) {
+            for (size_t y = 0; y < size.height; ++y) {
+                const size_t resultBufferOffset =
+                    kTextureBytesPerRowAlignment * (size.height * z + y);
+                const size_t expectedDataOffset = texelSize * size.width * (size.height * z + y);
+                EXPECT_BUFFER_U32_RANGE_EQ(
+                    reinterpret_cast<const uint32_t*>(expectedData.data() + expectedDataOffset),
+                    resultBuffer, resultBufferOffset, texelSize);
+            }
+        }
+    }
+
+    static constexpr size_t kWidth = 4u;
+    static constexpr size_t kHeight = 4u;
+    static constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    const char* kSimpleVertexShader = R"(
+;
+@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+  return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+})";
+
+    const char* kComputeExpectedValue = "1 + x + size.x * (y + size.y * slice)";
+};
+
+// Test that write-only storage textures are supported in compute shader.
+TEST_P(StorageTextureTests, WriteonlyStorageTextureInComputeShader) {
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        if (!utils::TextureFormatSupportsStorageTexture(format)) {
+            continue;
+        }
+        if (IsOpenGLES() && !OpenGLESSupportsStorageTexture(format)) {
+            continue;
+        }
+
+        if (format == wgpu::TextureFormat::RGBA8Snorm && HasToggleEnabled("disable_snorm_read")) {
+            continue;
+        }
+
+        // TODO(crbug.com/dawn/676): investigate why this test fails with RGBA8Snorm on Linux
+        // Intel OpenGL and OpenGLES drivers.
+        if (format == wgpu::TextureFormat::RGBA8Snorm && IsIntel() &&
+            (IsOpenGL() || IsOpenGLES()) && IsLinux()) {
+            continue;
+        }
+
+        // Prepare the write-only storage texture.
+        wgpu::Texture writeonlyStorageTexture =
+            CreateTexture(format, wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc,
+                          {kWidth, kHeight});
+
+        // Write the expected pixel values into the write-only storage texture.
+        const std::string computeShader = CommonWriteOnlyTestCode("compute", format);
+        WriteIntoStorageTextureInComputePass(writeonlyStorageTexture, computeShader.c_str());
+
+        // Verify the pixel data in the write-only storage texture is expected.
+        CheckOutputStorageTexture(writeonlyStorageTexture, format, {kWidth, kHeight});
+    }
+}
+
+// Test that write-only storage textures are supported in fragment shader.
+TEST_P(StorageTextureTests, WriteonlyStorageTextureInFragmentShader) {
+    // TODO(crbug.com/dawn/672): Investigate why this test fails on Linux
+    // NVidia OpenGLES drivers.
+    DAWN_SUPPRESS_TEST_IF(IsNvidia() && IsLinux() && IsOpenGLES());
+
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        if (!utils::TextureFormatSupportsStorageTexture(format)) {
+            continue;
+        }
+        if (IsOpenGLES() && !OpenGLESSupportsStorageTexture(format)) {
+            continue;
+        }
+
+        if (format == wgpu::TextureFormat::RGBA8Snorm && HasToggleEnabled("disable_snorm_read")) {
+            continue;
+        }
+
+        // TODO(crbug.com/dawn/676): investigate why this test fails with RGBA8Snorm on Linux
+        // Intel OpenGL and OpenGLES drivers.
+        if (format == wgpu::TextureFormat::RGBA8Snorm && IsIntel() &&
+            (IsOpenGL() || IsOpenGLES()) && IsLinux()) {
+            continue;
+        }
+
+        // Prepare the write-only storage texture.
+        wgpu::Texture writeonlyStorageTexture =
+            CreateTexture(format, wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc,
+                          {kWidth, kHeight});
+
+        // Write the expected pixel values into the write-only storage texture.
+        const std::string fragmentShader = CommonWriteOnlyTestCode("fragment", format);
+        WriteIntoStorageTextureInRenderPass(writeonlyStorageTexture, kSimpleVertexShader,
+                                            fragmentShader.c_str());
+
+        // Verify the pixel data in the write-only storage texture is expected.
+        CheckOutputStorageTexture(writeonlyStorageTexture, format, {kWidth, kHeight});
+    }
+}
+
+// Verify 2D array and 3D write-only storage textures work correctly.
+TEST_P(StorageTextureTests, Writeonly2DArrayOr3DStorageTexture) {
+    // TODO(crbug.com/dawn/547): implement 3D storage texture on OpenGL and OpenGLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL() || IsOpenGLES());
+
+    constexpr uint32_t kSliceCount = 3u;
+
+    constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Uint;
+
+    wgpu::TextureViewDimension dimensions[] = {
+        wgpu::TextureViewDimension::e2DArray,
+        wgpu::TextureViewDimension::e3D,
+    };
+
+    // Prepare the write-only storage texture.
+    for (wgpu::TextureViewDimension dimension : dimensions) {
+        wgpu::Texture writeonlyStorageTexture = CreateTexture(
+            kTextureFormat, wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc,
+            {kWidth, kHeight, kSliceCount}, utils::ViewDimensionToTextureDimension(dimension));
+
+        // Write the expected pixel values into the write-only storage texture.
+        const std::string computeShader =
+            CommonWriteOnlyTestCode("compute", kTextureFormat, dimension);
+        WriteIntoStorageTextureInComputePass(writeonlyStorageTexture, computeShader.c_str(),
+                                             dimension);
+
+        // Verify the pixel data in the write-only storage texture is expected.
+        CheckOutputStorageTexture(writeonlyStorageTexture, kTextureFormat,
+                                  {kWidth, kHeight, kSliceCount});
+    }
+}
+
+// Verify 1D write-only storage textures work correctly.
+TEST_P(StorageTextureTests, Writeonly1DStorageTexture) {
+    // TODO(crbug.com/dawn/547): implement 1D storage texture on OpenGL and OpenGLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGL() || IsOpenGLES());
+
+    constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Uint;
+
+    // Prepare the write-only storage texture.
+    wgpu::Texture writeonlyStorageTexture = CreateTexture(
+        kTextureFormat, wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc,
+        {kWidth, 1, 1}, wgpu::TextureDimension::e1D);
+
+    // Write the expected pixel values into the write-only storage texture.
+    const std::string computeShader =
+        CommonWriteOnlyTestCode("compute", kTextureFormat, wgpu::TextureViewDimension::e1D);
+    WriteIntoStorageTextureInComputePass(writeonlyStorageTexture, computeShader.c_str(),
+                                         wgpu::TextureViewDimension::e1D);
+
+    // Verify the pixel data in the write-only storage texture is expected.
+    CheckOutputStorageTexture(writeonlyStorageTexture, kTextureFormat, {kWidth, 1, 1});
+}
+
+// Test that multiple dispatches to increment values by ping-ponging between a sampled texture and
+// a write-only storage texture are synchronized in one pass.
+TEST_P(StorageTextureTests, SampledAndWriteonlyStorageTexturePingPong) {
+    constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Uint;
+    wgpu::Texture storageTexture1 =
+        CreateTexture(kTextureFormat,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
+                          wgpu::TextureUsage::CopySrc,
+                      {1u, 1u});
+    wgpu::Texture storageTexture2 = CreateTexture(
+        kTextureFormat, wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding,
+        {1u, 1u});
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+@group(0) @binding(0) var Src : texture_2d<u32>;
+@group(0) @binding(1) var Dst : texture_storage_2d<r32uint, write>;
+@stage(compute) @workgroup_size(1) fn main() {
+  var srcValue : vec4<u32> = textureLoad(Src, vec2<i32>(0, 0), 0);
+  srcValue.x = srcValue.x + 1u;
+  textureStore(Dst, vec2<i32>(0, 0), srcValue);
+}
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc = {};
+    pipelineDesc.compute.module = module;
+    pipelineDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&pipelineDesc);
+
+    // In bindGroupA storageTexture1 is bound as read-only storage texture and storageTexture2 is
+    // bound as write-only storage texture.
+    wgpu::BindGroup bindGroupA = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, storageTexture1.CreateView()},
+                                                          {1, storageTexture2.CreateView()},
+                                                      });
+
+    // In bindGroupA storageTexture2 is bound as read-only storage texture and storageTexture1 is
+    // bound as write-only storage texture.
+    wgpu::BindGroup bindGroupB = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {
+                                                          {0, storageTexture2.CreateView()},
+                                                          {1, storageTexture1.CreateView()},
+                                                      });
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+
+    // After the first dispatch the value in storageTexture2 should be 1u.
+    pass.SetBindGroup(0, bindGroupA);
+    pass.Dispatch(1);
+
+    // After the second dispatch the value in storageTexture1 should be 2u;
+    pass.SetBindGroup(0, bindGroupB);
+    pass.Dispatch(1);
+
+    pass.End();
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = sizeof(uint32_t);
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer resultBuffer = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::ImageCopyTexture imageCopyTexture;
+    imageCopyTexture.texture = storageTexture1;
+
+    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(resultBuffer, 0, 256, 1);
+    wgpu::Extent3D extent3D = {1, 1, 1};
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &extent3D);
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    constexpr uint32_t kFinalPixelValueInTexture1 = 2u;
+    EXPECT_BUFFER_U32_EQ(kFinalPixelValueInTexture1, resultBuffer, 0);
+}
+
+DAWN_INSTANTIATE_TEST(StorageTextureTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class StorageTextureZeroInitTests : public StorageTextureTests {
+  public:
+    static std::vector<uint8_t> GetExpectedData() {
+        constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R32Uint;
+
+        const uint32_t texelSizeInBytes = utils::GetTexelBlockSizeInBytes(kTextureFormat);
+        const size_t kDataCount = texelSizeInBytes * kWidth * kHeight;
+        std::vector<uint8_t> outputData(kDataCount, 0);
+
+        uint32_t* outputDataPtr = reinterpret_cast<uint32_t*>(&outputData[0]);
+        *outputDataPtr = 1u;
+
+        return outputData;
+    }
+
+    const char* kCommonReadOnlyZeroInitTestCode = R"(
+fn doTest() -> bool {
+  for (var y : i32 = 0; y < 4; y = y + 1) {
+    for (var x : i32 = 0; x < 4; x = x + 1) {
+      var pixel : vec4<u32> = textureLoad(srcImage, vec2<i32>(x, y));
+      if (any(pixel != vec4<u32>(0u, 0u, 0u, 1u))) {
+        return false;
+      }
+    }
+  }
+  return true;
+})";
+
+    const char* kCommonWriteOnlyZeroInitTestCodeFragment = R"(
+@group(0) @binding(0) var dstImage : texture_storage_2d<r32uint, write>;
+
+@stage(fragment) fn main() -> @location(0) vec4<f32> {
+  textureStore(dstImage, vec2<i32>(0, 0), vec4<u32>(1u, 0u, 0u, 1u));
+  return vec4<f32>();
+})";
+    const char* kCommonWriteOnlyZeroInitTestCodeCompute = R"(
+@group(0) @binding(0) var dstImage : texture_storage_2d<r32uint, write>;
+
+@stage(compute) @workgroup_size(1) fn main() {
+  textureStore(dstImage, vec2<i32>(0, 0), vec4<u32>(1u, 0u, 0u, 1u));
+})";
+};
+
+// Verify that the texture is correctly cleared to 0 before its first usage as a write-only storage
+// storage texture in a render pass.
+TEST_P(StorageTextureZeroInitTests, WriteonlyStorageTextureClearsToZeroInRenderPass) {
+    // Prepare the write-only storage texture.
+    wgpu::Texture writeonlyStorageTexture = CreateTexture(
+        wgpu::TextureFormat::R32Uint,
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc, {kWidth, kHeight});
+
+    WriteIntoStorageTextureInRenderPass(writeonlyStorageTexture, kSimpleVertexShader,
+                                        kCommonWriteOnlyZeroInitTestCodeFragment);
+    CheckOutputStorageTexture(writeonlyStorageTexture, wgpu::TextureFormat::R32Uint,
+                              {kWidth, kHeight}, GetExpectedData());
+}
+
+// Verify that the texture is correctly cleared to 0 before its first usage as a write-only storage
+// texture in a compute pass.
+TEST_P(StorageTextureZeroInitTests, WriteonlyStorageTextureClearsToZeroInComputePass) {
+    // Prepare the write-only storage texture.
+    wgpu::Texture writeonlyStorageTexture = CreateTexture(
+        wgpu::TextureFormat::R32Uint,
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::CopySrc, {kWidth, kHeight});
+
+    WriteIntoStorageTextureInComputePass(writeonlyStorageTexture,
+                                         kCommonWriteOnlyZeroInitTestCodeCompute);
+    CheckOutputStorageTexture(writeonlyStorageTexture, wgpu::TextureFormat::R32Uint,
+                              {kWidth, kHeight}, GetExpectedData());
+}
+
+DAWN_INSTANTIATE_TEST(StorageTextureZeroInitTests,
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"}));
diff --git a/src/dawn/tests/end2end/SubresourceRenderAttachmentTests.cpp b/src/dawn/tests/end2end/SubresourceRenderAttachmentTests.cpp
new file mode 100644
index 0000000..b351c25
--- /dev/null
+++ b/src/dawn/tests/end2end/SubresourceRenderAttachmentTests.cpp
@@ -0,0 +1,175 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// Test that rendering to a subresource of a texture works.
+class SubresourceRenderAttachmentTest : public DawnTest {
+    constexpr static uint32_t kRTSize = 2;
+
+  protected:
+    enum class Type { Color, Depth, Stencil };
+
+    void DoSingleTest(Type type,
+                      wgpu::TextureFormat format,
+                      wgpu::Texture renderTarget,
+                      uint32_t textureSize,
+                      uint32_t baseArrayLayer,
+                      uint32_t baseMipLevel) {
+        wgpu::TextureViewDescriptor renderTargetViewDesc;
+        renderTargetViewDesc.baseArrayLayer = baseArrayLayer;
+        renderTargetViewDesc.arrayLayerCount = 1;
+        renderTargetViewDesc.baseMipLevel = baseMipLevel;
+        renderTargetViewDesc.mipLevelCount = 1;
+        wgpu::TextureView renderTargetView = renderTarget.CreateView(&renderTargetViewDesc);
+
+        RGBA8 expectedColor(0, 255, 0, 255);
+        float expectedDepth = 0.3f;
+        uint8_t expectedStencil = 7;
+
+        utils::ComboRenderPassDescriptor renderPass = [&]() {
+            switch (type) {
+                case Type::Color: {
+                    utils::ComboRenderPassDescriptor renderPass({renderTargetView});
+                    renderPass.cColorAttachments[0].clearValue = {
+                        static_cast<float>(expectedColor.r) / 255.f,
+                        static_cast<float>(expectedColor.g) / 255.f,
+                        static_cast<float>(expectedColor.b) / 255.f,
+                        static_cast<float>(expectedColor.a) / 255.f,
+                    };
+                    return renderPass;
+                }
+                case Type::Depth: {
+                    utils::ComboRenderPassDescriptor renderPass({}, renderTargetView);
+                    renderPass.UnsetDepthStencilLoadStoreOpsForFormat(format);
+                    renderPass.cDepthStencilAttachmentInfo.depthClearValue = expectedDepth;
+                    return renderPass;
+                }
+                case Type::Stencil: {
+                    utils::ComboRenderPassDescriptor renderPass({}, renderTargetView);
+                    renderPass.UnsetDepthStencilLoadStoreOpsForFormat(format);
+                    renderPass.cDepthStencilAttachmentInfo.stencilClearValue = expectedStencil;
+                    return renderPass;
+                }
+                default:
+                    UNREACHABLE();
+            }
+        }();
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder passEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        passEncoder.End();
+        wgpu::CommandBuffer commands = commandEncoder.Finish();
+        queue.Submit(1, &commands);
+
+        const uint32_t renderTargetSize = textureSize >> baseMipLevel;
+        switch (type) {
+            case Type::Color: {
+                std::vector<RGBA8> expected(renderTargetSize * renderTargetSize, expectedColor);
+                EXPECT_TEXTURE_EQ(expected.data(), renderTarget, {0, 0, baseArrayLayer},
+                                  {renderTargetSize, renderTargetSize}, baseMipLevel);
+                break;
+            }
+            case Type::Depth: {
+                std::vector<float> expected(renderTargetSize * renderTargetSize, expectedDepth);
+                EXPECT_TEXTURE_EQ(expected.data(), renderTarget, {0, 0, baseArrayLayer},
+                                  {renderTargetSize, renderTargetSize}, baseMipLevel);
+                break;
+            }
+            case Type::Stencil: {
+                std::vector<uint8_t> expected(renderTargetSize * renderTargetSize, expectedStencil);
+                EXPECT_TEXTURE_EQ(expected.data(), renderTarget, {0, 0, baseArrayLayer},
+                                  {renderTargetSize, renderTargetSize}, baseMipLevel,
+                                  wgpu::TextureAspect::StencilOnly);
+                break;
+            }
+        }
+    }
+
+    void DoTest(Type type) {
+        constexpr uint32_t kArrayLayerCount = 5;
+        constexpr uint32_t kMipLevelCount = 4;
+
+        wgpu::TextureFormat format;
+        switch (type) {
+            case Type::Color:
+                format = wgpu::TextureFormat::RGBA8Unorm;
+                break;
+            case Type::Depth:
+                format = wgpu::TextureFormat::Depth32Float;
+                break;
+            case Type::Stencil:
+                format = wgpu::TextureFormat::Depth24PlusStencil8;
+                break;
+            default:
+                UNREACHABLE();
+        }
+
+        constexpr uint32_t kTextureSize = kRTSize << (kMipLevelCount - 1);
+
+        wgpu::TextureDescriptor renderTargetDesc;
+        renderTargetDesc.dimension = wgpu::TextureDimension::e2D;
+        renderTargetDesc.size.width = kTextureSize;
+        renderTargetDesc.size.height = kTextureSize;
+        renderTargetDesc.size.depthOrArrayLayers = kArrayLayerCount;
+        renderTargetDesc.sampleCount = 1;
+        renderTargetDesc.format = format;
+        renderTargetDesc.mipLevelCount = kMipLevelCount;
+        renderTargetDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+
+        wgpu::Texture renderTarget = device.CreateTexture(&renderTargetDesc);
+
+        // Test rendering into the first, middle, and last of each of array layer and mip level.
+        for (uint32_t arrayLayer : {0u, kArrayLayerCount / 2, kArrayLayerCount - 1u}) {
+            for (uint32_t mipLevel : {0u, kMipLevelCount / 2, kMipLevelCount - 1u}) {
+                DoSingleTest(type, format, renderTarget, kTextureSize, arrayLayer, mipLevel);
+            }
+        }
+    }
+};
+
+// Test rendering into a subresource of a color texture
+TEST_P(SubresourceRenderAttachmentTest, ColorTexture) {
+    DoTest(Type::Color);
+}
+
+// Test rendering into a subresource of a depth texture
+TEST_P(SubresourceRenderAttachmentTest, DepthTexture) {
+    DoTest(Type::Depth);
+}
+
+// Test rendering into a subresource of a stencil texture
+TEST_P(SubresourceRenderAttachmentTest, StencilTexture) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms are unable to read
+    // stencil.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_depth_stencil_read"));
+
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    DoTest(Type::Stencil);
+}
+
+DAWN_INSTANTIATE_TEST(SubresourceRenderAttachmentTest,
+                      D3D12Backend(),
+                      D3D12Backend({}, {"use_d3d12_render_pass"}),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/SwapChainTests.cpp b/src/dawn/tests/end2end/SwapChainTests.cpp
new file mode 100644
index 0000000..3218549
--- /dev/null
+++ b/src/dawn/tests/end2end/SwapChainTests.cpp
@@ -0,0 +1,241 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+#include "dawn/utils/GLFWUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "GLFW/glfw3.h"
+
+class SwapChainTests : public DawnTest {
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+        glfwSetErrorCallback([](int code, const char* message) {
+            dawn::ErrorLog() << "GLFW error " << code << " " << message;
+        });
+
+        // GLFW can fail to start in headless environments, in which SwapChainTests are
+        // inapplicable. Skip this cases without producing a test failure.
+        if (glfwInit() == GLFW_FALSE) {
+            GTEST_SKIP();
+        }
+
+        // The SwapChainTests don't create OpenGL contexts so we don't need to call
+        // SetupGLFWWindowHintsForBackend. Set GLFW_NO_API anyway to avoid GLFW bringing up a GL
+        // context that we won't use.
+        ASSERT_TRUE(!IsOpenGL());
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+        window = glfwCreateWindow(400, 400, "SwapChainValidationTests window", nullptr, nullptr);
+
+        int width;
+        int height;
+        glfwGetFramebufferSize(window, &width, &height);
+
+        surface = utils::CreateSurfaceForWindow(GetInstance(), window);
+        ASSERT_NE(surface, nullptr);
+
+        baseDescriptor.width = width;
+        baseDescriptor.height = height;
+        baseDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        baseDescriptor.format = wgpu::TextureFormat::BGRA8Unorm;
+        baseDescriptor.presentMode = wgpu::PresentMode::Mailbox;
+    }
+
+    void TearDown() override {
+        // Destroy the surface before the window as required by webgpu-native.
+        surface = wgpu::Surface();
+        if (window != nullptr) {
+            glfwDestroyWindow(window);
+        }
+        DawnTest::TearDown();
+    }
+
+    void ClearTexture(wgpu::TextureView view, wgpu::Color color) {
+        utils::ComboRenderPassDescriptor desc({view});
+        desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        desc.cColorAttachments[0].clearValue = color;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+  protected:
+    GLFWwindow* window = nullptr;
+    wgpu::Surface surface;
+
+    wgpu::SwapChainDescriptor baseDescriptor;
+};
+
+// Basic test for creating a swapchain and presenting one frame.
+TEST_P(SwapChainTests, Basic) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain.GetCurrentTextureView(), {1.0, 0.0, 0.0, 1.0});
+    swapchain.Present();
+}
+
+// Test replacing the swapchain
+TEST_P(SwapChainTests, ReplaceBasic) {
+    wgpu::SwapChain swapchain1 = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain1.GetCurrentTextureView(), {1.0, 0.0, 0.0, 1.0});
+    swapchain1.Present();
+
+    wgpu::SwapChain swapchain2 = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain2.GetCurrentTextureView(), {0.0, 1.0, 0.0, 1.0});
+    swapchain2.Present();
+}
+
+// Test replacing the swapchain after GetCurrentTextureView
+TEST_P(SwapChainTests, ReplaceAfterGet) {
+    wgpu::SwapChain swapchain1 = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain1.GetCurrentTextureView(), {1.0, 0.0, 0.0, 1.0});
+
+    wgpu::SwapChain swapchain2 = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain2.GetCurrentTextureView(), {0.0, 1.0, 0.0, 1.0});
+    swapchain2.Present();
+}
+
+// Test destroying the swapchain after GetCurrentTextureView
+TEST_P(SwapChainTests, DestroyAfterGet) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain.GetCurrentTextureView(), {1.0, 0.0, 0.0, 1.0});
+}
+
+// Test destroying the surface before the swapchain
+TEST_P(SwapChainTests, DestroySurface) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &baseDescriptor);
+    surface = nullptr;
+}
+
+// Test destroying the surface before the swapchain but after GetCurrentTextureView
+TEST_P(SwapChainTests, DestroySurfaceAfterGet) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &baseDescriptor);
+    ClearTexture(swapchain.GetCurrentTextureView(), {1.0, 0.0, 0.0, 1.0});
+    surface = nullptr;
+}
+
+// Test switching between present modes.
+TEST_P(SwapChainTests, SwitchPresentMode) {
+    // Fails with "internal drawable creation failed" on the Windows NVIDIA CQ builders but not
+    // locally.
+    DAWN_SUPPRESS_TEST_IF(IsWindows() && IsVulkan() && IsNvidia());
+
+    // TODO(jiawei.shao@intel.com): find out why this test sometimes hangs on the latest Linux Intel
+    // Vulkan drivers.
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsVulkan() && IsIntel());
+
+    constexpr wgpu::PresentMode kAllPresentModes[] = {
+        wgpu::PresentMode::Immediate,
+        wgpu::PresentMode::Fifo,
+        wgpu::PresentMode::Mailbox,
+    };
+
+    for (wgpu::PresentMode mode1 : kAllPresentModes) {
+        for (wgpu::PresentMode mode2 : kAllPresentModes) {
+            wgpu::SwapChainDescriptor desc = baseDescriptor;
+
+            desc.presentMode = mode1;
+            wgpu::SwapChain swapchain1 = device.CreateSwapChain(surface, &desc);
+            ClearTexture(swapchain1.GetCurrentTextureView(), {0.0, 0.0, 0.0, 1.0});
+            swapchain1.Present();
+
+            desc.presentMode = mode2;
+            wgpu::SwapChain swapchain2 = device.CreateSwapChain(surface, &desc);
+            ClearTexture(swapchain2.GetCurrentTextureView(), {0.0, 0.0, 0.0, 1.0});
+            swapchain2.Present();
+        }
+    }
+}
+
+// Test resizing the swapchain and without resizing the window.
+TEST_P(SwapChainTests, ResizingSwapChainOnly) {
+    for (int i = 0; i < 10; i++) {
+        wgpu::SwapChainDescriptor desc = baseDescriptor;
+        desc.width += i * 10;
+        desc.height -= i * 10;
+
+        wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &desc);
+        ClearTexture(swapchain.GetCurrentTextureView(), {0.05f * i, 0.0f, 0.0f, 1.0f});
+        swapchain.Present();
+    }
+}
+
+// Test resizing the window but not the swapchain.
+TEST_P(SwapChainTests, ResizingWindowOnly) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &baseDescriptor);
+
+    for (int i = 0; i < 10; i++) {
+        glfwSetWindowSize(window, 400 - 10 * i, 400 + 10 * i);
+        glfwPollEvents();
+
+        ClearTexture(swapchain.GetCurrentTextureView(), {0.05f * i, 0.0f, 0.0f, 1.0f});
+        swapchain.Present();
+    }
+}
+
+// Test resizing both the window and the swapchain at the same time.
+TEST_P(SwapChainTests, ResizingWindowAndSwapChain) {
+    // TODO(crbug.com/dawn/1205) Currently failing on new NVIDIA GTX 1660s on Linux/Vulkan.
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsVulkan() && IsNvidia());
+    for (int i = 0; i < 10; i++) {
+        glfwSetWindowSize(window, 400 - 10 * i, 400 + 10 * i);
+        glfwPollEvents();
+
+        int width;
+        int height;
+        glfwGetFramebufferSize(window, &width, &height);
+
+        wgpu::SwapChainDescriptor desc = baseDescriptor;
+        desc.width = width;
+        desc.height = height;
+
+        wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &desc);
+        ClearTexture(swapchain.GetCurrentTextureView(), {0.05f * i, 0.0f, 0.0f, 1.0f});
+        swapchain.Present();
+    }
+}
+
+// Test switching devices on the same adapter.
+TEST_P(SwapChainTests, SwitchingDevice) {
+    // The Vulkan Validation Layers incorrectly disallow gracefully passing a swapchain between two
+    // VkDevices using "vkSwapchainCreateInfoKHR::oldSwapchain".
+    // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2256
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsBackendValidationEnabled());
+
+    wgpu::Device device2 = wgpu::Device::Acquire(GetAdapter().CreateDevice());
+
+    for (int i = 0; i < 3; i++) {
+        wgpu::Device deviceToUse;
+        if (i % 2 == 0) {
+            deviceToUse = device;
+        } else {
+            deviceToUse = device2;
+        }
+
+        wgpu::SwapChain swapchain = deviceToUse.CreateSwapChain(surface, &baseDescriptor);
+        swapchain.GetCurrentTextureView();
+        swapchain.Present();
+    }
+}
+
+DAWN_INSTANTIATE_TEST(SwapChainTests, MetalBackend(), VulkanBackend());
diff --git a/src/dawn/tests/end2end/SwapChainValidationTests.cpp b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
new file mode 100644
index 0000000..3af4c88
--- /dev/null
+++ b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
@@ -0,0 +1,357 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/GLFWUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "GLFW/glfw3.h"
+
+class SwapChainValidationTests : public DawnTest {
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+        glfwSetErrorCallback([](int code, const char* message) {
+            dawn::ErrorLog() << "GLFW error " << code << " " << message;
+        });
+        DAWN_TEST_UNSUPPORTED_IF(!glfwInit());
+
+        // The SwapChainValidationTests don't create devices so we don't need to call
+        // SetupGLFWWindowHintsForBackend. Set GLFW_NO_API anyway to avoid GLFW bringing up a GL
+        // context that we won't use.
+        ASSERT_TRUE(!IsOpenGL());
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+        window = glfwCreateWindow(400, 400, "SwapChainValidationTests window", nullptr, nullptr);
+
+        surface = utils::CreateSurfaceForWindow(GetInstance(), window);
+        ASSERT_NE(surface, nullptr);
+
+        goodDescriptor.width = 1;
+        goodDescriptor.height = 1;
+        goodDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        goodDescriptor.format = wgpu::TextureFormat::BGRA8Unorm;
+        goodDescriptor.presentMode = wgpu::PresentMode::Mailbox;
+
+        badDescriptor = goodDescriptor;
+        badDescriptor.width = 0;
+    }
+
+    void TearDown() override {
+        // Destroy the surface before the window as required by webgpu-native.
+        surface = wgpu::Surface();
+        if (window != nullptr) {
+            glfwDestroyWindow(window);
+        }
+        DawnTest::TearDown();
+    }
+
+  protected:
+    GLFWwindow* window = nullptr;
+    wgpu::Surface surface;
+    wgpu::SwapChainDescriptor goodDescriptor;
+    wgpu::SwapChainDescriptor badDescriptor;
+
+    // Checks that a RenderAttachment view is an error by trying to create a render pass on it.
+    void CheckTextureViewIsError(wgpu::TextureView view) {
+        CheckTextureView(view, true, false);
+    }
+
+    // Checks that a RenderAttachment view is an error by trying to submit a render pass on it.
+    void CheckTextureViewIsDestroyed(wgpu::TextureView view) {
+        CheckTextureView(view, false, true);
+    }
+
+    // Checks that a RenderAttachment view is valid by submitting a render pass on it.
+    void CheckTextureViewIsValid(wgpu::TextureView view) {
+        CheckTextureView(view, false, false);
+    }
+
+  private:
+    void CheckTextureView(wgpu::TextureView view, bool errorAtFinish, bool errorAtSubmit) {
+        utils::ComboRenderPassDescriptor renderPassDesc({view});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.End();
+
+        if (errorAtFinish) {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        } else {
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            if (errorAtSubmit) {
+                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+            } else {
+                queue.Submit(1, &commands);
+            }
+        }
+    }
+};
+
+// Control case for a successful swapchain creation and presenting.
+TEST_P(SwapChainValidationTests, CreationSuccess) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+    swapchain.Present();
+}
+
+// Checks that the creation size must be a valid 2D texture size.
+TEST_P(SwapChainValidationTests, InvalidCreationSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // A width of 0 is invalid.
+    {
+        wgpu::SwapChainDescriptor desc = goodDescriptor;
+        desc.width = 0;
+        ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+    }
+    // A height of 0 is invalid.
+    {
+        wgpu::SwapChainDescriptor desc = goodDescriptor;
+        desc.height = 0;
+        ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+    }
+
+    // A width of maxTextureDimension2D is valid but maxTextureDimension2D + 1 isn't.
+    {
+        wgpu::SwapChainDescriptor desc = goodDescriptor;
+        desc.width = supportedLimits.maxTextureDimension2D;
+        device.CreateSwapChain(surface, &desc);
+
+        desc.width = supportedLimits.maxTextureDimension2D + 1;
+        ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+    }
+
+    // A height of maxTextureDimension2D is valid but maxTextureDimension2D + 1 isn't.
+    {
+        wgpu::SwapChainDescriptor desc = goodDescriptor;
+        desc.height = supportedLimits.maxTextureDimension2D;
+        device.CreateSwapChain(surface, &desc);
+
+        desc.height = supportedLimits.maxTextureDimension2D + 1;
+        ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+    }
+}
+
+// Checks that the creation usage must be RenderAttachment
+TEST_P(SwapChainValidationTests, InvalidCreationUsage) {
+    wgpu::SwapChainDescriptor desc = goodDescriptor;
+    desc.usage = wgpu::TextureUsage::TextureBinding;
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+}
+
+// Checks that the creation format must (currently) be BGRA8Unorm
+TEST_P(SwapChainValidationTests, InvalidCreationFormat) {
+    wgpu::SwapChainDescriptor desc = goodDescriptor;
+    desc.format = wgpu::TextureFormat::RGBA8Unorm;
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+}
+
+// Checks that the implementation must be zero.
+TEST_P(SwapChainValidationTests, InvalidWithImplementation) {
+    wgpu::SwapChainDescriptor desc = goodDescriptor;
+    desc.implementation = 1;
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
+}
+
+// Check swapchain operations with an error swapchain are errors
+TEST_P(SwapChainValidationTests, OperationsOnErrorSwapChain) {
+    wgpu::SwapChain swapchain;
+    ASSERT_DEVICE_ERROR(swapchain = device.CreateSwapChain(surface, &badDescriptor));
+
+    wgpu::TextureView view;
+    ASSERT_DEVICE_ERROR(view = swapchain.GetCurrentTextureView());
+    CheckTextureViewIsError(view);
+
+    ASSERT_DEVICE_ERROR(swapchain.Present());
+}
+
+// Check it is invalid to call present without getting a current view.
+TEST_P(SwapChainValidationTests, PresentWithoutCurrentView) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+
+    // Check it is invalid if we never called GetCurrentTextureView
+    ASSERT_DEVICE_ERROR(swapchain.Present());
+
+    // Check it is invalid if we never called since the last present.
+    swapchain.GetCurrentTextureView();
+    swapchain.Present();
+    ASSERT_DEVICE_ERROR(swapchain.Present());
+}
+
+// Check that the current view isn't destroyed when the ref to the swapchain is lost because the
+// swapchain is kept alive by the surface. Also check after we lose all refs to the surface, the
+// texture is destroyed.
+TEST_P(SwapChainValidationTests, ViewValidAfterSwapChainRefLost) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+
+    swapchain = nullptr;
+    CheckTextureViewIsValid(view);
+
+    surface = nullptr;
+    CheckTextureViewIsDestroyed(view);
+}
+
+// Check that the current view is the destroyed state after present.
+TEST_P(SwapChainValidationTests, ViewDestroyedAfterPresent) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+    swapchain.Present();
+
+    CheckTextureViewIsDestroyed(view);
+}
+
+// Check that returned view is of the current format / usage / dimension / size / sample count
+TEST_P(SwapChainValidationTests, ReturnedViewCharacteristics) {
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+        struct FragmentOut {
+            @location(0) target0 : vec4<f32>,
+            @location(1) target1 : f32,
+        }
+        @stage(fragment) fn main() -> FragmentOut {
+            var out : FragmentOut;
+            out.target0 = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            out.target1 = 0.5;
+            return out;
+        })");
+    // Validation will check that the sample count of the view matches this format.
+    pipelineDesc.multisample.count = 1;
+    pipelineDesc.cFragment.targetCount = 2;
+    // Validation will check that the format of the view matches this format.
+    pipelineDesc.cTargets[0].format = wgpu::TextureFormat::BGRA8Unorm;
+    pipelineDesc.cTargets[1].format = wgpu::TextureFormat::R8Unorm;
+    device.CreateRenderPipeline(&pipelineDesc);
+
+    // Create a second texture to be used as render pass attachment. Validation will check that the
+    // size of the view matches the size of this texture.
+    wgpu::TextureDescriptor textureDesc;
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    textureDesc.dimension = wgpu::TextureDimension::e2D;
+    textureDesc.size = {1, 1, 1};
+    textureDesc.format = wgpu::TextureFormat::R8Unorm;
+    textureDesc.sampleCount = 1;
+    wgpu::Texture secondTexture = device.CreateTexture(&textureDesc);
+
+    // Get the swapchain view and try to use it in the render pass to trigger all the validation.
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+
+    // Validation will also check the dimension of the view is 2D, and it's usage contains
+    // RenderAttachment
+    utils::ComboRenderPassDescriptor renderPassDesc({view, secondTexture.CreateView()});
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    queue.Submit(1, &commands);
+
+    // Check that view doesn't have extra formats like Sampled.
+    // TODO(cwallez@chromium.org): also check for [Readonly]Storage once that's implemented.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, view}}));
+}
+
+// Check that failing to create a new swapchain doesn't replace the previous one.
+TEST_P(SwapChainValidationTests, ErrorSwapChainDoesntReplacePreviousOne) {
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &badDescriptor));
+
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+    swapchain.Present();
+}
+
+// Check that after replacement, all swapchain operations are errors and the view is destroyed.
+TEST_P(SwapChainValidationTests, ReplacedSwapChainIsInvalid) {
+    {
+        wgpu::SwapChain replacedSwapChain = device.CreateSwapChain(surface, &goodDescriptor);
+        device.CreateSwapChain(surface, &goodDescriptor);
+        ASSERT_DEVICE_ERROR(replacedSwapChain.GetCurrentTextureView());
+    }
+
+    {
+        wgpu::SwapChain replacedSwapChain = device.CreateSwapChain(surface, &goodDescriptor);
+        wgpu::TextureView view = replacedSwapChain.GetCurrentTextureView();
+        device.CreateSwapChain(surface, &goodDescriptor);
+
+        CheckTextureViewIsDestroyed(view);
+        ASSERT_DEVICE_ERROR(replacedSwapChain.Present());
+    }
+}
+
+// Check that after surface destruction, all swapchain operations are errors and the view is
+// destroyed. The test is split in two to reset the wgpu::Surface in the middle.
+TEST_P(SwapChainValidationTests, SwapChainIsInvalidAfterSurfaceDestruction_GetView) {
+    wgpu::SwapChain replacedSwapChain = device.CreateSwapChain(surface, &goodDescriptor);
+    surface = nullptr;
+    ASSERT_DEVICE_ERROR(replacedSwapChain.GetCurrentTextureView());
+}
+TEST_P(SwapChainValidationTests, SwapChainIsInvalidAfterSurfaceDestruction_AfterGetView) {
+    wgpu::SwapChain replacedSwapChain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = replacedSwapChain.GetCurrentTextureView();
+    surface = nullptr;
+
+    CheckTextureViewIsDestroyed(view);
+    ASSERT_DEVICE_ERROR(replacedSwapChain.Present());
+}
+
+// Test that after Device is Lost, all swap chain operations fail
+static void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
+                                     const char* message,
+                                     void* userdata) {
+    DawnTest* self = static_cast<DawnTest*>(userdata);
+    self->StartExpectDeviceError();
+}
+
+// Test that new swap chain present fails after device is lost
+TEST_P(SwapChainValidationTests, NewSwapChainPresentFailsAfterDeviceLost) {
+    device.SetDeviceLostCallback(ToMockDeviceLostCallback, this);
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+    wgpu::TextureView view = swapchain.GetCurrentTextureView();
+
+    device.LoseForTesting();
+    ASSERT_DEVICE_ERROR(swapchain.Present());
+}
+
+// Test that new swap chain get current texture view fails after device is lost
+TEST_P(SwapChainValidationTests, NewSwapChainGetCurrentTextureViewFailsAfterDevLost) {
+    device.SetDeviceLostCallback(ToMockDeviceLostCallback, this);
+    wgpu::SwapChain swapchain = device.CreateSwapChain(surface, &goodDescriptor);
+
+    device.LoseForTesting();
+    ASSERT_DEVICE_ERROR(swapchain.GetCurrentTextureView());
+}
+
+// Test that creation of a new swapchain fails after device is lost
+TEST_P(SwapChainValidationTests, CreateNewSwapChainFailsAfterDevLost) {
+    device.SetDeviceLostCallback(ToMockDeviceLostCallback, this);
+    device.LoseForTesting();
+
+    ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &goodDescriptor));
+}
+
+DAWN_INSTANTIATE_TEST(SwapChainValidationTests, MetalBackend(), NullBackend());
diff --git a/src/dawn/tests/end2end/Texture3DTests.cpp b/src/dawn/tests/end2end/Texture3DTests.cpp
new file mode 100644
index 0000000..fc4b0c6
--- /dev/null
+++ b/src/dawn/tests/end2end/Texture3DTests.cpp
@@ -0,0 +1,124 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr static uint32_t kRTSize = 4;
+constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+class Texture3DTests : public DawnTest {};
+
+TEST_P(Texture3DTests, Sampling) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    // Set up pipeline. Two triangles will be drawn via the pipeline. They will fill the entire
+    // color attachment with data sampled from 3D texture.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 6>(
+                vec2<f32>(-1.0, 1.0),
+                vec2<f32>( -1.0, -1.0),
+                vec2<f32>(1.0, 1.0),
+                vec2<f32>(1.0, 1.0),
+                vec2<f32>(-1.0, -1.0),
+                vec2<f32>(1.0, -1.0));
+
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var samp : sampler;
+        @group(0) @binding(1) var tex : texture_3d<f32>;
+
+        @stage(fragment)
+        fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+            return textureSample(tex, samp, vec3<f32>(FragCoord.xy / 4.0, 1.5 / 4.0));
+        })");
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.cFragment.module = fsModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    wgpu::Extent3D copySize = {kRTSize, kRTSize, kRTSize};
+
+    // Create a 3D texture, fill the texture via a B2T copy with well-designed data.
+    // The 3D texture will be used as the data source of a sampler in shader.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+    descriptor.size = copySize;
+    descriptor.format = kFormat;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    uint32_t bytesPerRow = utils::GetMinimumBytesPerRow(kFormat, copySize.width);
+    uint32_t sizeInBytes =
+        utils::RequiredBytesInCopy(bytesPerRow, copySize.height, copySize, kFormat);
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(kFormat);
+    uint32_t size = sizeInBytes / bytesPerTexel;
+    std::vector<RGBA8> data = std::vector<RGBA8>(size);
+    for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
+        for (uint32_t y = 0; y < copySize.height; ++y) {
+            for (uint32_t x = 0; x < copySize.width; ++x) {
+                uint32_t i = (z * copySize.height + y) * bytesPerRow / bytesPerTexel + x;
+                data[i] = RGBA8(x, y, z, 255);
+            }
+        }
+    }
+    wgpu::Buffer buffer =
+        utils::CreateBufferFromData(device, data.data(), sizeInBytes, wgpu::BufferUsage::CopySrc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(buffer, 0, bytesPerRow, copySize.height);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                     {{0, sampler}, {1, textureView}});
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // We sample data from the 3D texture at depth slice 1: 1.5 / 4.0 for z axis in textureSampler()
+    // in shader, so the expected color at coordinate(x, y) should be (x, y, 1, 255).
+    for (uint32_t i = 0; i < kRTSize; ++i) {
+        for (uint32_t j = 0; j < kRTSize; ++j) {
+            EXPECT_PIXEL_RGBA8_EQ(RGBA8(i, j, 1, 255), renderPass.color, i, j);
+        }
+    }
+}
+
+DAWN_INSTANTIATE_TEST(Texture3DTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/TextureFormatTests.cpp b/src/dawn/tests/end2end/TextureFormatTests.cpp
new file mode 100644
index 0000000..6f2299b
--- /dev/null
+++ b/src/dawn/tests/end2end/TextureFormatTests.cpp
@@ -0,0 +1,794 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+#include <type_traits>
+
+// An expectation for float buffer content that can correctly compare different NaN values and
+// supports a basic tolerance for comparison of finite values.
+class ExpectFloatWithTolerance : public detail::Expectation {
+  public:
+    ExpectFloatWithTolerance(std::vector<float> expected, float tolerance)
+        : mExpected(std::move(expected)), mTolerance(tolerance) {
+    }
+
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size == sizeof(float) * mExpected.size());
+
+        const float* actual = static_cast<const float*>(data);
+
+        for (size_t i = 0; i < mExpected.size(); ++i) {
+            float expectedValue = mExpected[i];
+            float actualValue = actual[i];
+
+            if (!FloatsMatch(expectedValue, actualValue)) {
+                testing::AssertionResult result = testing::AssertionFailure()
+                                                  << "Expected data[" << i << "] to be close to "
+                                                  << expectedValue << ", actual " << actualValue
+                                                  << std::endl;
+                return result;
+            }
+        }
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    bool FloatsMatch(float expected, float actual) {
+        if (std::isnan(expected)) {
+            return std::isnan(actual);
+        }
+
+        if (std::isinf(expected)) {
+            return std::isinf(actual) && std::signbit(expected) == std::signbit(actual);
+        }
+
+        if (mTolerance == 0.0f) {
+            return expected == actual;
+        }
+
+        float error = std::abs(expected - actual);
+        return error < mTolerance;
+    }
+
+    std::vector<float> mExpected;
+    float mTolerance;
+};
+
+// An expectation for float16 buffers that can correctly compare NaNs (all NaNs are equivalent).
+class ExpectFloat16 : public detail::Expectation {
+  public:
+    ExpectFloat16(std::vector<uint16_t> expected) : mExpected(std::move(expected)) {
+    }
+
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size == sizeof(uint16_t) * mExpected.size());
+
+        const uint16_t* actual = static_cast<const uint16_t*>(data);
+
+        for (size_t i = 0; i < mExpected.size(); ++i) {
+            uint16_t expectedValue = mExpected[i];
+            uint16_t actualValue = actual[i];
+
+            if (!Floats16Match(expectedValue, actualValue)) {
+                testing::AssertionResult result = testing::AssertionFailure()
+                                                  << "Expected data[" << i << "] to be "
+                                                  << expectedValue << ", actual " << actualValue
+                                                  << std::endl;
+                return result;
+            }
+        }
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    bool Floats16Match(float expected, float actual) {
+        if (IsFloat16NaN(expected)) {
+            return IsFloat16NaN(actual);
+        }
+
+        return expected == actual;
+    }
+
+    std::vector<uint16_t> mExpected;
+};
+
+class TextureFormatTest : public DawnTest {
+  protected:
+    // Structure containing all the information that tests need to know about the format.
+    struct FormatTestInfo {
+        wgpu::TextureFormat format;
+        uint32_t texelByteSize;
+        wgpu::TextureComponentType type;
+        uint32_t componentCount;
+    };
+
+    // Returns a reprensentation of a format that can be used to contain the "uncompressed" values
+    // of the format. That the equivalent format with all channels 32bit-sized.
+    FormatTestInfo GetUncompressedFormatInfo(FormatTestInfo formatInfo) {
+        switch (formatInfo.type) {
+            case wgpu::TextureComponentType::Float:
+                return {wgpu::TextureFormat::RGBA32Float, 16, formatInfo.type, 4};
+            case wgpu::TextureComponentType::Sint:
+                return {wgpu::TextureFormat::RGBA32Sint, 16, formatInfo.type, 4};
+            case wgpu::TextureComponentType::Uint:
+                return {wgpu::TextureFormat::RGBA32Uint, 16, formatInfo.type, 4};
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // Return a pipeline that can be used in a full-texture draw to sample from the texture in the
+    // bindgroup and output its decompressed values to the render target.
+    wgpu::RenderPipeline CreateSamplePipeline(FormatTestInfo sampleFormatInfo,
+                                              FormatTestInfo renderFormatInfo) {
+        utils::ComboRenderPipelineDescriptor desc;
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-3.0, -1.0),
+                    vec2<f32>( 3.0, -1.0),
+                    vec2<f32>( 0.0,  2.0));
+
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        // Compute the WGSL type of the texture's data.
+        const char* type = utils::GetWGSLColorTextureComponentType(sampleFormatInfo.format);
+
+        std::ostringstream fsSource;
+        fsSource << "@group(0) @binding(0) var myTexture : texture_2d<" << type << ">;\n";
+        fsSource << "struct FragmentOut {\n";
+        fsSource << "   @location(0) color : vec4<" << type << ">\n";
+        fsSource << R"(}
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> FragmentOut {
+                var output : FragmentOut;
+                output.color = textureLoad(myTexture, vec2<i32>(FragCoord.xy), 0);
+                return output;
+            })";
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fsSource.str().c_str());
+
+        desc.vertex.module = vsModule;
+        desc.cFragment.module = fsModule;
+        desc.cTargets[0].format = renderFormatInfo.format;
+
+        return device.CreateRenderPipeline(&desc);
+    }
+
+    // The sampling test uploads the sample data in a texture with the sampleFormatInfo.format.
+    // It then samples from it and renders the results in a texture with the
+    // renderFormatInfo.format format. Finally it checks that the data rendered matches
+    // expectedRenderData, using the cutom expectation if present.
+    void DoSampleTest(FormatTestInfo sampleFormatInfo,
+                      const void* sampleData,
+                      size_t sampleDataSize,
+                      FormatTestInfo renderFormatInfo,
+                      const void* expectedRenderData,
+                      size_t expectedRenderDataSize,
+                      detail::Expectation* customExpectation) {
+        // The input data should contain an exact number of texels
+        ASSERT(sampleDataSize % sampleFormatInfo.texelByteSize == 0);
+        uint32_t width = sampleDataSize / sampleFormatInfo.texelByteSize;
+
+        // The input data must be a multiple of 4 byte in length for WriteBuffer
+        ASSERT(sampleDataSize % 4 == 0);
+        ASSERT(expectedRenderDataSize % 4 == 0);
+
+        // Create the texture we will sample from
+        wgpu::TextureDescriptor sampleTextureDesc;
+        sampleTextureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        sampleTextureDesc.size = {width, 1, 1};
+        sampleTextureDesc.format = sampleFormatInfo.format;
+        wgpu::Texture sampleTexture = device.CreateTexture(&sampleTextureDesc);
+
+        wgpu::Buffer uploadBuffer = utils::CreateBufferFromData(device, sampleData, sampleDataSize,
+                                                                wgpu::BufferUsage::CopySrc);
+
+        // Create the texture that we will render results to
+        ASSERT(expectedRenderDataSize == width * renderFormatInfo.texelByteSize);
+
+        wgpu::TextureDescriptor renderTargetDesc;
+        renderTargetDesc.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment;
+        renderTargetDesc.size = {width, 1, 1};
+        renderTargetDesc.format = renderFormatInfo.format;
+
+        wgpu::Texture renderTarget = device.CreateTexture(&renderTargetDesc);
+
+        // Create the readback buffer for the data in renderTarget
+        wgpu::BufferDescriptor readbackBufferDesc;
+        readbackBufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+        readbackBufferDesc.size = expectedRenderDataSize;
+        wgpu::Buffer readbackBuffer = device.CreateBuffer(&readbackBufferDesc);
+
+        // Prepare objects needed to sample from texture in the renderpass
+        wgpu::RenderPipeline pipeline = CreateSamplePipeline(sampleFormatInfo, renderFormatInfo);
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, sampleTexture.CreateView()}});
+
+        // Encode commands for the test that fill texture, sample it to render to renderTarget then
+        // copy renderTarget in a buffer so we can read it easily.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        {
+            wgpu::ImageCopyBuffer bufferView = utils::CreateImageCopyBuffer(uploadBuffer, 0, 256);
+            wgpu::ImageCopyTexture textureView =
+                utils::CreateImageCopyTexture(sampleTexture, 0, {0, 0, 0});
+            wgpu::Extent3D extent{width, 1, 1};
+            encoder.CopyBufferToTexture(&bufferView, &textureView, &extent);
+        }
+
+        utils::ComboRenderPassDescriptor renderPassDesc({renderTarget.CreateView()});
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDesc);
+        renderPass.SetPipeline(pipeline);
+        renderPass.SetBindGroup(0, bindGroup);
+        renderPass.Draw(3);
+        renderPass.End();
+
+        {
+            wgpu::ImageCopyBuffer bufferView = utils::CreateImageCopyBuffer(readbackBuffer, 0, 256);
+            wgpu::ImageCopyTexture textureView =
+                utils::CreateImageCopyTexture(renderTarget, 0, {0, 0, 0});
+            wgpu::Extent3D extent{width, 1, 1};
+            encoder.CopyTextureToBuffer(&textureView, &bufferView, &extent);
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // For floats use a special expectation that understands how to compare NaNs and support a
+        // tolerance.
+        if (customExpectation != nullptr) {
+            AddBufferExpectation(__FILE__, __LINE__, readbackBuffer, 0, expectedRenderDataSize,
+                                 customExpectation);
+        } else {
+            EXPECT_BUFFER_U32_RANGE_EQ(static_cast<const uint32_t*>(expectedRenderData),
+                                       readbackBuffer, 0,
+                                       expectedRenderDataSize / sizeof(uint32_t));
+        }
+    }
+
+    template <typename Data>
+    std::vector<Data> ExpandDataTo4Component(const std::vector<Data>& originalData,
+                                             uint32_t originalComponentCount,
+                                             const std::array<Data, 4>& defaultValues) {
+        std::vector<Data> result;
+
+        for (size_t i = 0; i < originalData.size() / originalComponentCount; i++) {
+            for (size_t component = 0; component < 4; component++) {
+                if (component < originalComponentCount) {
+                    result.push_back(originalData[i * originalComponentCount + component]);
+                } else {
+                    result.push_back(defaultValues[component]);
+                }
+            }
+        }
+
+        return result;
+    }
+
+    // Helper functions used to run tests that convert the typeful test objects to typeless void*
+
+    template <typename TextureData, typename RenderData>
+    void DoFormatSamplingTest(FormatTestInfo formatInfo,
+                              const std::vector<TextureData>& textureData,
+                              const std::vector<RenderData>& expectedRenderData,
+                              detail::Expectation* customExpectation = nullptr) {
+        FormatTestInfo renderFormatInfo = GetUncompressedFormatInfo(formatInfo);
+
+        // Expand the expected data to be 4 component wide with the default sampling values of
+        // (0, 0, 0, 1)
+        std::array<RenderData, 4> defaultValues = {RenderData(0), RenderData(0), RenderData(0),
+                                                   RenderData(1)};
+        std::vector<RenderData> expandedRenderData =
+            ExpandDataTo4Component(expectedRenderData, formatInfo.componentCount, defaultValues);
+
+        DoSampleTest(formatInfo, textureData.data(), textureData.size() * sizeof(TextureData),
+                     renderFormatInfo, expandedRenderData.data(),
+                     expandedRenderData.size() * sizeof(RenderData), customExpectation);
+    }
+
+    template <typename TextureData>
+    void DoFloatFormatSamplingTest(FormatTestInfo formatInfo,
+                                   const std::vector<TextureData>& textureData,
+                                   const std::vector<float>& expectedRenderData,
+                                   float floatTolerance = 0.0f) {
+        // Expand the expected data to be 4 component wide with the default sampling values of
+        // (0, 0, 0, 1)
+        std::array<float, 4> defaultValues = {0.0f, 0.0f, 0.0f, 1.0f};
+        std::vector<float> expandedRenderData =
+            ExpandDataTo4Component(expectedRenderData, formatInfo.componentCount, defaultValues);
+
+        // Use a special expectation that understands how to compare NaNs and supports a tolerance.
+        DoFormatSamplingTest(formatInfo, textureData, expectedRenderData,
+                             new ExpectFloatWithTolerance(expandedRenderData, floatTolerance));
+    }
+
+    template <typename TextureData, typename RenderData>
+    void DoFormatRenderingTest(FormatTestInfo formatInfo,
+                               const std::vector<TextureData>& textureData,
+                               const std::vector<RenderData>& expectedRenderData,
+                               detail::Expectation* customExpectation = nullptr) {
+        FormatTestInfo sampleFormatInfo = GetUncompressedFormatInfo(formatInfo);
+
+        // Expand the sampling texture data to contain garbage data for unused components to check
+        // that they don't influence the rendering result.
+        std::array<TextureData, 4> garbageValues;
+        garbageValues.fill(13);
+        std::vector<TextureData> expandedTextureData =
+            ExpandDataTo4Component(textureData, formatInfo.componentCount, garbageValues);
+
+        DoSampleTest(sampleFormatInfo, expandedTextureData.data(),
+                     expandedTextureData.size() * sizeof(TextureData), formatInfo,
+                     expectedRenderData.data(), expectedRenderData.size() * sizeof(RenderData),
+                     customExpectation);
+    }
+
+    // Below are helper functions for types that are very similar to one another so the logic is
+    // shared.
+
+    template <typename T>
+    void DoUnormTest(FormatTestInfo formatInfo) {
+        static_assert(!std::is_signed<T>::value && std::is_integral<T>::value);
+        ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
+
+        T maxValue = std::numeric_limits<T>::max();
+        std::vector<T> textureData = {0, 1, maxValue, maxValue};
+        std::vector<float> uncompressedData = {0.0f, 1.0f / maxValue, 1.0f, 1.0f};
+
+        DoFormatSamplingTest(formatInfo, textureData, uncompressedData);
+        DoFormatRenderingTest(formatInfo, uncompressedData, textureData);
+    }
+
+    template <typename T>
+    void DoSnormTest(FormatTestInfo formatInfo) {
+        static_assert(std::is_signed<T>::value && std::is_integral<T>::value);
+        ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
+
+        T maxValue = std::numeric_limits<T>::max();
+        T minValue = std::numeric_limits<T>::min();
+        std::vector<T> textureData = {0, 1, -1, maxValue, minValue, T(minValue + 1), 0, 0};
+        std::vector<float> uncompressedData = {
+            0.0f, 1.0f / maxValue, -1.0f / maxValue, 1.0f, -1.0f, -1.0f, 0.0f, 0.0f};
+
+        DoFloatFormatSamplingTest(formatInfo, textureData, uncompressedData, 0.0001f / maxValue);
+        // Snorm formats aren't renderable because they are not guaranteed renderable in Vulkan
+    }
+
+    template <typename T>
+    void DoUintTest(FormatTestInfo formatInfo) {
+        static_assert(!std::is_signed<T>::value && std::is_integral<T>::value);
+        ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Uint);
+
+        T maxValue = std::numeric_limits<T>::max();
+        std::vector<T> textureData = {0, 1, maxValue, maxValue};
+        std::vector<uint32_t> uncompressedData = {0, 1, maxValue, maxValue};
+
+        DoFormatSamplingTest(formatInfo, textureData, uncompressedData);
+        DoFormatRenderingTest(formatInfo, uncompressedData, textureData);
+    }
+
+    template <typename T>
+    void DoSintTest(FormatTestInfo formatInfo) {
+        static_assert(std::is_signed<T>::value && std::is_integral<T>::value);
+        ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Sint);
+
+        T maxValue = std::numeric_limits<T>::max();
+        T minValue = std::numeric_limits<T>::min();
+        std::vector<T> textureData = {0, 1, maxValue, minValue};
+        std::vector<int32_t> uncompressedData = {0, 1, maxValue, minValue};
+
+        DoFormatSamplingTest(formatInfo, textureData, uncompressedData);
+        DoFormatRenderingTest(formatInfo, uncompressedData, textureData);
+    }
+
+    void DoFloat32Test(FormatTestInfo formatInfo) {
+        ASSERT(sizeof(float) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
+
+        std::vector<float> textureData = {+0.0f,   -0.0f, 1.0f,     1.0e-29f,
+                                          1.0e29f, NAN,   INFINITY, -INFINITY};
+
+        DoFloatFormatSamplingTest(formatInfo, textureData, textureData);
+        DoFormatRenderingTest(formatInfo, textureData, textureData,
+                              new ExpectFloatWithTolerance(textureData, 0.0f));
+    }
+
+    void DoFloat16Test(FormatTestInfo formatInfo) {
+        ASSERT(sizeof(int16_t) * formatInfo.componentCount == formatInfo.texelByteSize);
+        ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
+
+        std::vector<float> uncompressedData = {+0.0f,  -0.0f, 1.0f,     1.01e-4f,
+                                               1.0e4f, NAN,   INFINITY, -INFINITY};
+        std::vector<uint16_t> textureData;
+        for (float value : uncompressedData) {
+            textureData.push_back(Float32ToFloat16(value));
+        }
+
+        DoFloatFormatSamplingTest(formatInfo, textureData, uncompressedData, 1.0e-5f);
+
+        // Use a special expectation that knows that all Float16 NaNs are equivalent.
+        DoFormatRenderingTest(formatInfo, uncompressedData, textureData,
+                              new ExpectFloat16(textureData));
+    }
+};
+
+// Test the R8Unorm format
+TEST_P(TextureFormatTest, R8Unorm) {
+    DoUnormTest<uint8_t>({wgpu::TextureFormat::R8Unorm, 1, wgpu::TextureComponentType::Float, 1});
+}
+
+// Test the RG8Unorm format
+TEST_P(TextureFormatTest, RG8Unorm) {
+    DoUnormTest<uint8_t>({wgpu::TextureFormat::RG8Unorm, 2, wgpu::TextureComponentType::Float, 2});
+}
+
+// Test the RGBA8Unorm format
+TEST_P(TextureFormatTest, RGBA8Unorm) {
+    DoUnormTest<uint8_t>(
+        {wgpu::TextureFormat::RGBA8Unorm, 4, wgpu::TextureComponentType::Float, 4});
+}
+
+// Test the BGRA8Unorm format
+TEST_P(TextureFormatTest, BGRA8Unorm) {
+    // TODO(crbug.com/dawn/596): BGRA is unsupported on OpenGL ES; add workaround or validation
+    DAWN_SUPPRESS_TEST_IF(IsOpenGLES());
+    uint8_t maxValue = std::numeric_limits<uint8_t>::max();
+    std::vector<uint8_t> textureData = {maxValue, 1, 0, maxValue};
+    std::vector<float> uncompressedData = {0.0f, 1.0f / maxValue, 1.0f, 1.0f};
+    DoFormatSamplingTest({wgpu::TextureFormat::BGRA8Unorm, 4, wgpu::TextureComponentType::Float, 4},
+                         textureData, uncompressedData);
+    DoFormatRenderingTest(
+        {wgpu::TextureFormat::BGRA8Unorm, 4, wgpu::TextureComponentType::Float, 4},
+        uncompressedData, textureData);
+}
+
+// Test the R8Snorm format
+TEST_P(TextureFormatTest, R8Snorm) {
+    DoSnormTest<int8_t>({wgpu::TextureFormat::R8Snorm, 1, wgpu::TextureComponentType::Float, 1});
+}
+
+// Test the RG8Snorm format
+TEST_P(TextureFormatTest, RG8Snorm) {
+    DoSnormTest<int8_t>({wgpu::TextureFormat::RG8Snorm, 2, wgpu::TextureComponentType::Float, 2});
+}
+
+// Test the RGBA8Snorm format
+TEST_P(TextureFormatTest, RGBA8Snorm) {
+    DoSnormTest<int8_t>({wgpu::TextureFormat::RGBA8Snorm, 4, wgpu::TextureComponentType::Float, 4});
+}
+
+// Test the R8Uint format
+TEST_P(TextureFormatTest, R8Uint) {
+    DoUintTest<uint8_t>({wgpu::TextureFormat::R8Uint, 1, wgpu::TextureComponentType::Uint, 1});
+}
+
+// Test the RG8Uint format
+TEST_P(TextureFormatTest, RG8Uint) {
+    DoUintTest<uint8_t>({wgpu::TextureFormat::RG8Uint, 2, wgpu::TextureComponentType::Uint, 2});
+}
+
+// Test the RGBA8Uint format
+TEST_P(TextureFormatTest, RGBA8Uint) {
+    DoUintTest<uint8_t>({wgpu::TextureFormat::RGBA8Uint, 4, wgpu::TextureComponentType::Uint, 4});
+}
+
+// Test the R16Uint format
+TEST_P(TextureFormatTest, R16Uint) {
+    DoUintTest<uint16_t>({wgpu::TextureFormat::R16Uint, 2, wgpu::TextureComponentType::Uint, 1});
+}
+
+// Test the RG16Uint format
+TEST_P(TextureFormatTest, RG16Uint) {
+    DoUintTest<uint16_t>({wgpu::TextureFormat::RG16Uint, 4, wgpu::TextureComponentType::Uint, 2});
+}
+
+// Test the RGBA16Uint format
+TEST_P(TextureFormatTest, RGBA16Uint) {
+    DoUintTest<uint16_t>({wgpu::TextureFormat::RGBA16Uint, 8, wgpu::TextureComponentType::Uint, 4});
+}
+
+// Test the R32Uint format
+TEST_P(TextureFormatTest, R32Uint) {
+    DoUintTest<uint32_t>({wgpu::TextureFormat::R32Uint, 4, wgpu::TextureComponentType::Uint, 1});
+}
+
+// Test the RG32Uint format
+TEST_P(TextureFormatTest, RG32Uint) {
+    DoUintTest<uint32_t>({wgpu::TextureFormat::RG32Uint, 8, wgpu::TextureComponentType::Uint, 2});
+}
+
+// Test the RGBA32Uint format
+TEST_P(TextureFormatTest, RGBA32Uint) {
+    DoUintTest<uint32_t>(
+        {wgpu::TextureFormat::RGBA32Uint, 16, wgpu::TextureComponentType::Uint, 4});
+}
+
+// Test the R8Sint format
+TEST_P(TextureFormatTest, R8Sint) {
+    DoSintTest<int8_t>({wgpu::TextureFormat::R8Sint, 1, wgpu::TextureComponentType::Sint, 1});
+}
+
+// Test the RG8Sint format
+TEST_P(TextureFormatTest, RG8Sint) {
+    DoSintTest<int8_t>({wgpu::TextureFormat::RG8Sint, 2, wgpu::TextureComponentType::Sint, 2});
+}
+
+// Test the RGBA8Sint format
+TEST_P(TextureFormatTest, RGBA8Sint) {
+    DoSintTest<int8_t>({wgpu::TextureFormat::RGBA8Sint, 4, wgpu::TextureComponentType::Sint, 4});
+}
+
+// Test the R16Sint format
+TEST_P(TextureFormatTest, R16Sint) {
+    DoSintTest<int16_t>({wgpu::TextureFormat::R16Sint, 2, wgpu::TextureComponentType::Sint, 1});
+}
+
+// Test the RG16Sint format
+TEST_P(TextureFormatTest, RG16Sint) {
+    DoSintTest<int16_t>({wgpu::TextureFormat::RG16Sint, 4, wgpu::TextureComponentType::Sint, 2});
+}
+
+// Test the RGBA16Sint format
+TEST_P(TextureFormatTest, RGBA16Sint) {
+    DoSintTest<int16_t>({wgpu::TextureFormat::RGBA16Sint, 8, wgpu::TextureComponentType::Sint, 4});
+}
+
+// Test the R32Sint format
+TEST_P(TextureFormatTest, R32Sint) {
+    DoSintTest<int32_t>({wgpu::TextureFormat::R32Sint, 4, wgpu::TextureComponentType::Sint, 1});
+}
+
+// Test the RG32Sint format
+TEST_P(TextureFormatTest, RG32Sint) {
+    DoSintTest<int32_t>({wgpu::TextureFormat::RG32Sint, 8, wgpu::TextureComponentType::Sint, 2});
+}
+
+// Test the RGBA32Sint format
+TEST_P(TextureFormatTest, RGBA32Sint) {
+    DoSintTest<int32_t>({wgpu::TextureFormat::RGBA32Sint, 16, wgpu::TextureComponentType::Sint, 4});
+}
+
+// Test the R32Float format
+TEST_P(TextureFormatTest, R32Float) {
+    DoFloat32Test({wgpu::TextureFormat::R32Float, 4, wgpu::TextureComponentType::Float, 1});
+}
+
+// Test the RG32Float format
+TEST_P(TextureFormatTest, RG32Float) {
+    DoFloat32Test({wgpu::TextureFormat::RG32Float, 8, wgpu::TextureComponentType::Float, 2});
+}
+
+// Test the RGBA32Float format
+TEST_P(TextureFormatTest, RGBA32Float) {
+    DoFloat32Test({wgpu::TextureFormat::RGBA32Float, 16, wgpu::TextureComponentType::Float, 4});
+}
+
+// Test the R16Float format
+TEST_P(TextureFormatTest, R16Float) {
+    // TODO(https://crbug.com/swiftshader/147) Rendering INFINITY isn't handled correctly by
+    // swiftshader
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() || IsANGLE());
+
+    DoFloat16Test({wgpu::TextureFormat::R16Float, 2, wgpu::TextureComponentType::Float, 1});
+}
+
+// Test the RG16Float format
+TEST_P(TextureFormatTest, RG16Float) {
+    // TODO(https://crbug.com/swiftshader/147) Rendering INFINITY isn't handled correctly by
+    // swiftshader
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() || IsANGLE());
+
+    DoFloat16Test({wgpu::TextureFormat::RG16Float, 4, wgpu::TextureComponentType::Float, 2});
+}
+
+// Test the RGBA16Float format
+TEST_P(TextureFormatTest, RGBA16Float) {
+    // TODO(https://crbug.com/swiftshader/147) Rendering INFINITY isn't handled correctly by
+    // swiftshader
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsSwiftshader() || IsANGLE());
+
+    DoFloat16Test({wgpu::TextureFormat::RGBA16Float, 8, wgpu::TextureComponentType::Float, 4});
+}
+
+// Test the RGBA8Unorm format
+TEST_P(TextureFormatTest, RGBA8UnormSrgb) {
+    uint8_t maxValue = std::numeric_limits<uint8_t>::max();
+    std::vector<uint8_t> textureData = {0, 1, maxValue, 64, 35, 68, 152, 168};
+
+    std::vector<float> uncompressedData;
+    for (size_t i = 0; i < textureData.size(); i += 4) {
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 0] / float(maxValue)));
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 1] / float(maxValue)));
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 2] / float(maxValue)));
+        // Alpha is linear for sRGB formats
+        uncompressedData.push_back(textureData[i + 3] / float(maxValue));
+    }
+
+    DoFloatFormatSamplingTest(
+        {wgpu::TextureFormat::RGBA8UnormSrgb, 4, wgpu::TextureComponentType::Float, 4}, textureData,
+        uncompressedData, 1.0e-3);
+    DoFormatRenderingTest(
+        {wgpu::TextureFormat::RGBA8UnormSrgb, 4, wgpu::TextureComponentType::Float, 4},
+        uncompressedData, textureData);
+}
+
+// Test the BGRA8UnormSrgb format
+TEST_P(TextureFormatTest, BGRA8UnormSrgb) {
+    // TODO(cwallez@chromium.org): This format doesn't exist in OpenGL, emulate it using
+    // RGBA8UnormSrgb and swizzling / shader twiddling
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    uint8_t maxValue = std::numeric_limits<uint8_t>::max();
+    std::vector<uint8_t> textureData = {0, 1, maxValue, 64, 35, 68, 152, 168};
+
+    std::vector<float> uncompressedData;
+    for (size_t i = 0; i < textureData.size(); i += 4) {
+        // Note that R and B are swapped
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 2] / float(maxValue)));
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 1] / float(maxValue)));
+        uncompressedData.push_back(SRGBToLinear(textureData[i + 0] / float(maxValue)));
+        // Alpha is linear for sRGB formats
+        uncompressedData.push_back(textureData[i + 3] / float(maxValue));
+    }
+
+    DoFloatFormatSamplingTest(
+        {wgpu::TextureFormat::BGRA8UnormSrgb, 4, wgpu::TextureComponentType::Float, 4}, textureData,
+        uncompressedData, 1.0e-3);
+    DoFormatRenderingTest(
+        {wgpu::TextureFormat::BGRA8UnormSrgb, 4, wgpu::TextureComponentType::Float, 4},
+        uncompressedData, textureData);
+}
+
+// Test the RGB10A2Unorm format
+TEST_P(TextureFormatTest, RGB10A2Unorm) {
+    auto MakeRGB10A2 = [](uint32_t r, uint32_t g, uint32_t b, uint32_t a) -> uint32_t {
+        ASSERT((r & 0x3FF) == r);
+        ASSERT((g & 0x3FF) == g);
+        ASSERT((b & 0x3FF) == b);
+        ASSERT((a & 0x3) == a);
+        return r | g << 10 | b << 20 | a << 30;
+    };
+
+    std::vector<uint32_t> textureData = {MakeRGB10A2(0, 0, 0, 0), MakeRGB10A2(1023, 1023, 1023, 1),
+                                         MakeRGB10A2(243, 576, 765, 2), MakeRGB10A2(0, 0, 0, 3)};
+    // clang-format off
+    std::vector<float> uncompressedData = {
+       0.0f, 0.0f, 0.0f, 0.0f,
+       1.0f, 1.0f, 1.0f, 1 / 3.0f,
+        243 / 1023.0f, 576 / 1023.0f, 765 / 1023.0f, 2 / 3.0f,
+       0.0f, 0.0f, 0.0f, 1.0f
+    };
+    // clang-format on
+
+    DoFloatFormatSamplingTest(
+        {wgpu::TextureFormat::RGB10A2Unorm, 4, wgpu::TextureComponentType::Float, 4}, textureData,
+        uncompressedData, 1.0e-5);
+    DoFormatRenderingTest(
+        {wgpu::TextureFormat::RGB10A2Unorm, 4, wgpu::TextureComponentType::Float, 4},
+        uncompressedData, textureData);
+}
+
+// Test the RG11B10Ufloat format
+TEST_P(TextureFormatTest, RG11B10Ufloat) {
+    constexpr uint32_t kFloat11Zero = 0;
+    constexpr uint32_t kFloat11Infinity = 0x7C0;
+    constexpr uint32_t kFloat11Nan = 0x7C1;
+    constexpr uint32_t kFloat11One = 0x3C0;
+
+    constexpr uint32_t kFloat10Zero = 0;
+    constexpr uint32_t kFloat10Infinity = 0x3E0;
+    constexpr uint32_t kFloat10Nan = 0x3E1;
+    constexpr uint32_t kFloat10One = 0x1E0;
+
+    auto MakeRG11B10 = [](uint32_t r, uint32_t g, uint32_t b) {
+        ASSERT((r & 0x7FF) == r);
+        ASSERT((g & 0x7FF) == g);
+        ASSERT((b & 0x3FF) == b);
+        return r | g << 11 | b << 22;
+    };
+
+    // Test each of (0, 1, INFINITY, NaN) for each component but never two with the same value at a
+    // time.
+    std::vector<uint32_t> textureData = {
+        MakeRG11B10(kFloat11Zero, kFloat11Infinity, kFloat10Nan),
+        MakeRG11B10(kFloat11Infinity, kFloat11Nan, kFloat10One),
+        MakeRG11B10(kFloat11Nan, kFloat11One, kFloat10Zero),
+        MakeRG11B10(kFloat11One, kFloat11Zero, kFloat10Infinity),
+    };
+
+    // This is one of the only 3-channel formats, so we don't have specific testing for them. Alpha
+    // should always be sampled as 1
+    // clang-format off
+    std::vector<float> uncompressedData = {
+        0.0f,     INFINITY, NAN,      1.0f,
+        INFINITY, NAN,      1.0f,     1.0f,
+        NAN,      1.0f,     0.0f,     1.0f,
+        1.0f,     0.0f,     INFINITY, 1.0f
+    };
+    // clang-format on
+
+    DoFloatFormatSamplingTest(
+        {wgpu::TextureFormat::RG11B10Ufloat, 4, wgpu::TextureComponentType::Float, 4}, textureData,
+        uncompressedData);
+    // This format is not renderable.
+}
+
+// Test the RGB9E5Ufloat format
+TEST_P(TextureFormatTest, RGB9E5Ufloat) {
+    // RGB9E5 is different from other floating point formats because the mantissa doesn't index in
+    // the window defined by the exponent but is instead treated as a pure multiplier. There is
+    // also no Infinity or NaN. The OpenGL 4.6 spec has the best explanation I've found in section
+    // 8.25 "Shared Exponent Texture Color Conversion":
+    //
+    //   red = reduint * 2^(expuint - B - N) = reduint * 2^(expuint - 24)
+    //
+    // Where reduint and expuint are the integer values when considering the E5 as a 5bit uint, and
+    // the r9 as a 9bit uint. B the number of bits of the mantissa (9), and N the offset for the
+    // exponent (15).
+
+    float smallestExponent = std::pow(2.0f, -24.0f);
+    float largestExponent = std::pow(2.0f, float(31 - 24));
+
+    auto MakeRGB9E5 = [](uint32_t r, uint32_t g, uint32_t b, uint32_t e) {
+        ASSERT((r & 0x1FF) == r);
+        ASSERT((g & 0x1FF) == g);
+        ASSERT((b & 0x1FF) == b);
+        ASSERT((e & 0x1F) == e);
+        return r | g << 9 | b << 18 | e << 27;
+    };
+
+    // Test the smallest largest, and "1" exponents
+    std::vector<uint32_t> textureData = {
+        MakeRGB9E5(0, 1, 2, 0b00000),
+        MakeRGB9E5(2, 1, 0, 0b11111),
+        MakeRGB9E5(0, 1, 2, 0b11000),
+    };
+
+    // This is one of the only 3-channel formats, so we don't have specific testing for them. Alpha
+    // should always be sampled as 1
+    // clang-format off
+    std::vector<float> uncompressedData = {
+        0.0f, smallestExponent, 2.0f * smallestExponent, 1.0f,
+        2.0f * largestExponent, largestExponent, 0.0f, 1.0f,
+        0.0f, 1.0f, 2.0f, 1.0f,
+    };
+    // clang-format on
+
+    DoFloatFormatSamplingTest(
+        {wgpu::TextureFormat::RGB9E5Ufloat, 4, wgpu::TextureComponentType::Float, 4}, textureData,
+        uncompressedData);
+    // This format is not renderable.
+}
+
+DAWN_INSTANTIATE_TEST(TextureFormatTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/TextureSubresourceTests.cpp b/src/dawn/tests/end2end/TextureSubresourceTests.cpp
new file mode 100644
index 0000000..fac0885
--- /dev/null
+++ b/src/dawn/tests/end2end/TextureSubresourceTests.cpp
@@ -0,0 +1,209 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class TextureSubresourceTest : public DawnTest {
+  public:
+    static constexpr uint32_t kSize = 4u;
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture CreateTexture(uint32_t mipLevelCount,
+                                uint32_t arrayLayerCount,
+                                wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.dimension = wgpu::TextureDimension::e2D;
+        texDesc.size = {kSize, kSize, arrayLayerCount};
+        texDesc.sampleCount = 1;
+        texDesc.mipLevelCount = mipLevelCount;
+        texDesc.usage = usage;
+        texDesc.format = kFormat;
+        return device.CreateTexture(&texDesc);
+    }
+
+    wgpu::TextureView CreateTextureView(wgpu::Texture texture,
+                                        uint32_t baseMipLevel,
+                                        uint32_t baseArrayLayer) {
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.format = kFormat;
+        viewDesc.baseArrayLayer = baseArrayLayer;
+        viewDesc.arrayLayerCount = 1;
+        viewDesc.baseMipLevel = baseMipLevel;
+        viewDesc.mipLevelCount = 1;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        return texture.CreateView(&viewDesc);
+    }
+
+    void DrawTriangle(const wgpu::TextureView& view) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0, -1.0));
+
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.cTargets[0].format = kFormat;
+
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&descriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        utils::ComboRenderPassDescriptor renderPassDesc({view});
+        renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 0.0f, 0.0f, 1.0f};
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.SetPipeline(rp);
+        pass.Draw(3);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    void SampleAndDraw(const wgpu::TextureView& samplerView, const wgpu::TextureView& renderView) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0));
+
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var samp : sampler;
+            @group(0) @binding(1) var tex : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> @location(0) vec4<f32> {
+                return textureSample(tex, samp, FragCoord.xy / vec2<f32>(4.0, 4.0));
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.cTargets[0].format = kFormat;
+
+        wgpu::Sampler sampler = device.CreateSampler();
+
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&descriptor);
+        wgpu::BindGroupLayout bgl = rp.GetBindGroupLayout(0);
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, bgl, {{0, sampler}, {1, samplerView}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        utils::ComboRenderPassDescriptor renderPassDesc({renderView});
+        renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 0.0f, 0.0f, 1.0f};
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(6);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+};
+
+// Test different mipmap levels
+TEST_P(TextureSubresourceTest, MipmapLevelsTest) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Create a texture with 2 mipmap levels and 1 layer
+    wgpu::Texture texture =
+        CreateTexture(2, 1,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::CopySrc);
+
+    // Create two views on different mipmap levels.
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
+
+    // Draw a red triangle at the bottom-left half
+    DrawTriangle(samplerView);
+
+    // Sample from one subresource and draw into another subresource in the same texture
+    SampleAndDraw(samplerView, renderView);
+
+    // Verify that pixel at bottom-left corner is red, while pixel at top-right corner is background
+    // black in render view (mip level 1).
+    RGBA8 topRight = RGBA8::kBlack;
+    RGBA8 bottomLeft = RGBA8::kRed;
+    EXPECT_TEXTURE_EQ(&topRight, texture, {kSize / 2 - 1, 0}, {1, 1}, 1);
+    EXPECT_TEXTURE_EQ(&bottomLeft, texture, {0, kSize / 2 - 1}, {1, 1}, 1);
+}
+
+// Test different array layers
+TEST_P(TextureSubresourceTest, ArrayLayersTest) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    // Create a texture with 1 mipmap level and 2 layers
+    wgpu::Texture texture =
+        CreateTexture(1, 2,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::CopySrc);
+
+    // Create two views on different layers
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
+
+    // Draw a red triangle at the bottom-left half
+    DrawTriangle(samplerView);
+
+    // Sample from one subresource and draw into another subresource in the same texture
+    SampleAndDraw(samplerView, renderView);
+
+    // Verify that pixel at bottom-left corner is red, while pixel at top-right corner is background
+    // black in render view (array layer 1).
+    RGBA8 topRight = RGBA8::kBlack;
+    RGBA8 bottomLeft = RGBA8::kRed;
+    EXPECT_TEXTURE_EQ(&topRight, texture, {kSize - 1, 0, 1}, {1, 1});
+    EXPECT_TEXTURE_EQ(&bottomLeft, texture, {0, kSize - 1, 1}, {1, 1});
+}
+
+// TODO (yunchao.he@intel.com):
+// * add tests for storage texture and sampler across miplevel or
+// arraylayer dimensions in the same texture
+//
+// * add tests for copy operation upon texture subresource if needed
+//
+// * add tests for clear operation upon texture subresource if needed
+
+DAWN_INSTANTIATE_TEST(TextureSubresourceTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/TextureViewTests.cpp b/src/dawn/tests/end2end/TextureViewTests.cpp
new file mode 100644
index 0000000..9c2a868
--- /dev/null
+++ b/src/dawn/tests/end2end/TextureViewTests.cpp
@@ -0,0 +1,909 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <array>
+
+constexpr static unsigned int kRTSize = 64;
+constexpr wgpu::TextureFormat kDefaultFormat = wgpu::TextureFormat::RGBA8Unorm;
+constexpr uint32_t kBytesPerTexel = 4;
+
+namespace {
+    wgpu::Texture Create2DTexture(wgpu::Device device,
+                                  uint32_t width,
+                                  uint32_t height,
+                                  uint32_t arrayLayerCount,
+                                  uint32_t mipLevelCount,
+                                  wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = 1;
+        descriptor.format = kDefaultFormat;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::Texture Create3DTexture(wgpu::Device device,
+                                  wgpu::Extent3D size,
+                                  uint32_t mipLevelCount,
+                                  wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        descriptor.size = size;
+        descriptor.sampleCount = 1;
+        descriptor.format = kDefaultFormat;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::ShaderModule CreateDefaultVertexShaderModule(wgpu::Device device) {
+        return utils::CreateShaderModule(device, R"(
+            struct VertexOut {
+                @location(0) texCoord : vec2<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+                var output : VertexOut;
+                var pos = array<vec2<f32>, 6>(
+                                            vec2<f32>(-2., -2.),
+                                            vec2<f32>(-2.,  2.),
+                                            vec2<f32>( 2., -2.),
+                                            vec2<f32>(-2.,  2.),
+                                            vec2<f32>( 2., -2.),
+                                            vec2<f32>( 2.,  2.));
+                var texCoord = array<vec2<f32>, 6>(
+                                                 vec2<f32>(0., 0.),
+                                                 vec2<f32>(0., 1.),
+                                                 vec2<f32>(1., 0.),
+                                                 vec2<f32>(0., 1.),
+                                                 vec2<f32>(1., 0.),
+                                                 vec2<f32>(1., 1.));
+                output.position = vec4<f32>(pos[VertexIndex], 0., 1.);
+                output.texCoord = texCoord[VertexIndex];
+                return output;
+            }
+        )");
+    }
+}  // anonymous namespace
+
+class TextureViewSamplingTest : public DawnTest {
+  protected:
+    // Generates an arbitrary pixel value per-layer-per-level, used for the "actual" uploaded
+    // textures and the "expected" results.
+    static int GenerateTestPixelValue(uint32_t layer, uint32_t level) {
+        return static_cast<int>(level * 10) + static_cast<int>(layer + 1);
+    }
+
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        mRenderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+        wgpu::FilterMode kFilterMode = wgpu::FilterMode::Nearest;
+        wgpu::AddressMode kAddressMode = wgpu::AddressMode::ClampToEdge;
+
+        wgpu::SamplerDescriptor samplerDescriptor = {};
+        samplerDescriptor.minFilter = kFilterMode;
+        samplerDescriptor.magFilter = kFilterMode;
+        samplerDescriptor.mipmapFilter = kFilterMode;
+        samplerDescriptor.addressModeU = kAddressMode;
+        samplerDescriptor.addressModeV = kAddressMode;
+        samplerDescriptor.addressModeW = kAddressMode;
+        mSampler = device.CreateSampler(&samplerDescriptor);
+
+        mVSModule = CreateDefaultVertexShaderModule(device);
+    }
+
+    void InitTexture(uint32_t arrayLayerCount, uint32_t mipLevelCount) {
+        ASSERT(arrayLayerCount > 0 && mipLevelCount > 0);
+
+        const uint32_t textureWidthLevel0 = 1 << mipLevelCount;
+        const uint32_t textureHeightLevel0 = 1 << mipLevelCount;
+        constexpr wgpu::TextureUsage kUsage =
+            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        mTexture = Create2DTexture(device, textureWidthLevel0, textureHeightLevel0, arrayLayerCount,
+                                   mipLevelCount, kUsage);
+
+        mDefaultTextureViewDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        mDefaultTextureViewDescriptor.format = kDefaultFormat;
+        mDefaultTextureViewDescriptor.baseMipLevel = 0;
+        mDefaultTextureViewDescriptor.mipLevelCount = mipLevelCount;
+        mDefaultTextureViewDescriptor.baseArrayLayer = 0;
+        mDefaultTextureViewDescriptor.arrayLayerCount = arrayLayerCount;
+
+        // Create a texture with pixel = (0, 0, 0, level * 10 + layer + 1) at level `level` and
+        // layer `layer`.
+        static_assert((kTextureBytesPerRowAlignment % sizeof(RGBA8)) == 0,
+                      "Texture bytes per row alignment must be a multiple of sizeof(RGBA8).");
+        constexpr uint32_t kPixelsPerRowPitch = kTextureBytesPerRowAlignment / sizeof(RGBA8);
+        ASSERT_LE(textureWidthLevel0, kPixelsPerRowPitch);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        for (uint32_t layer = 0; layer < arrayLayerCount; ++layer) {
+            for (uint32_t level = 0; level < mipLevelCount; ++level) {
+                const uint32_t texWidth = textureWidthLevel0 >> level;
+                const uint32_t texHeight = textureHeightLevel0 >> level;
+
+                const int pixelValue = GenerateTestPixelValue(layer, level);
+
+                constexpr uint32_t kPaddedTexWidth = kPixelsPerRowPitch;
+                std::vector<RGBA8> data(kPaddedTexWidth * texHeight, RGBA8(0, 0, 0, pixelValue));
+                wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+                    device, data.data(), data.size() * sizeof(RGBA8), wgpu::BufferUsage::CopySrc);
+                wgpu::ImageCopyBuffer imageCopyBuffer =
+                    utils::CreateImageCopyBuffer(stagingBuffer, 0, kTextureBytesPerRowAlignment);
+                wgpu::ImageCopyTexture imageCopyTexture =
+                    utils::CreateImageCopyTexture(mTexture, level, {0, 0, layer});
+                wgpu::Extent3D copySize = {texWidth, texHeight, 1};
+                encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+            }
+        }
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+    }
+
+    void Verify(const wgpu::TextureView& textureView, const char* fragmentShader, int expected) {
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentShader);
+
+        utils::ComboRenderPipelineDescriptor textureDescriptor;
+        textureDescriptor.vertex.module = mVSModule;
+        textureDescriptor.cFragment.module = fsModule;
+        textureDescriptor.cTargets[0].format = mRenderPass.colorFormat;
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&textureDescriptor);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                         {{0, mSampler}, {1, textureView}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&mRenderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        RGBA8 expectedPixel(0, 0, 0, expected);
+        EXPECT_PIXEL_RGBA8_EQ(expectedPixel, mRenderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(expectedPixel, mRenderPass.color, mRenderPass.width - 1,
+                              mRenderPass.height - 1);
+        // TODO(jiawei.shao@intel.com): add tests for 3D textures once Dawn supports 3D textures
+    }
+
+    void Texture2DViewTest(uint32_t textureArrayLayers,
+                           uint32_t textureMipLevels,
+                           uint32_t textureViewBaseLayer,
+                           uint32_t textureViewBaseMipLevel) {
+        // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+        DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+        ASSERT(textureViewBaseLayer < textureArrayLayers);
+        ASSERT(textureViewBaseMipLevel < textureMipLevels);
+
+        InitTexture(textureArrayLayers, textureMipLevels);
+
+        wgpu::TextureViewDescriptor descriptor = mDefaultTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.baseArrayLayer = textureViewBaseLayer;
+        descriptor.arrayLayerCount = 1;
+        descriptor.baseMipLevel = textureViewBaseMipLevel;
+        descriptor.mipLevelCount = 1;
+        wgpu::TextureView textureView = mTexture.CreateView(&descriptor);
+
+        const char* fragmentShader = R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, texCoord);
+            }
+        )";
+
+        const int expected = GenerateTestPixelValue(textureViewBaseLayer, textureViewBaseMipLevel);
+        Verify(textureView, fragmentShader, expected);
+    }
+
+    void Texture2DArrayViewTest(uint32_t textureArrayLayers,
+                                uint32_t textureMipLevels,
+                                uint32_t textureViewBaseLayer,
+                                uint32_t textureViewBaseMipLevel) {
+        // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+        DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+        ASSERT(textureViewBaseLayer < textureArrayLayers);
+        ASSERT(textureViewBaseMipLevel < textureMipLevels);
+
+        // We always set the layer count of the texture view to be 3 to match the fragment shader in
+        // this test.
+        constexpr uint32_t kTextureViewLayerCount = 3;
+        ASSERT(textureArrayLayers >= textureViewBaseLayer + kTextureViewLayerCount);
+
+        InitTexture(textureArrayLayers, textureMipLevels);
+
+        wgpu::TextureViewDescriptor descriptor = mDefaultTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        descriptor.baseArrayLayer = textureViewBaseLayer;
+        descriptor.arrayLayerCount = kTextureViewLayerCount;
+        descriptor.baseMipLevel = textureViewBaseMipLevel;
+        descriptor.mipLevelCount = 1;
+        wgpu::TextureView textureView = mTexture.CreateView(&descriptor);
+
+        const char* fragmentShader = R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d_array<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, texCoord, 0) +
+                       textureSample(texture0, sampler0, texCoord, 1) +
+                       textureSample(texture0, sampler0, texCoord, 2);
+            }
+        )";
+
+        int expected = 0;
+        for (int i = 0; i < static_cast<int>(kTextureViewLayerCount); ++i) {
+            expected += GenerateTestPixelValue(textureViewBaseLayer + i, textureViewBaseMipLevel);
+        }
+        Verify(textureView, fragmentShader, expected);
+    }
+
+    std::string CreateFragmentShaderForCubeMapFace(uint32_t layer, bool isCubeMapArray) {
+        // Reference: https://en.wikipedia.org/wiki/Cube_mapping
+        const std::array<std::string, 6> kCoordsToCubeMapFace = {{
+            " 1.,  tc, -sc",  // Positive X
+            "-1.,  tc,  sc",  // Negative X
+            " sc,  1., -tc",  // Positive Y
+            " sc, -1.,  tc",  // Negative Y
+            " sc,  tc,  1.",  // Positive Z
+            "-sc,  tc, -1.",  // Negative Z
+        }};
+
+        const std::string textureType = isCubeMapArray ? "texture_cube_array" : "texture_cube";
+        const uint32_t cubeMapArrayIndex = layer / 6;
+        const std::string coordToCubeMapFace = kCoordsToCubeMapFace[layer % 6];
+
+        std::ostringstream stream;
+        stream << R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : )"
+               << textureType << R"(<f32>;
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                var sc : f32 = 2.0 * texCoord.x - 1.0;
+                var tc : f32 = 2.0 * texCoord.y - 1.0;
+                return textureSample(texture0, sampler0, vec3<f32>()"
+               << coordToCubeMapFace << ")";
+
+        if (isCubeMapArray) {
+            stream << ", " << cubeMapArrayIndex;
+        }
+
+        stream << R"();
+            })";
+
+        return stream.str();
+    }
+
+    void TextureCubeMapTest(uint32_t textureArrayLayers,
+                            uint32_t textureViewBaseLayer,
+                            uint32_t textureViewLayerCount,
+                            bool isCubeMapArray) {
+        // TODO(crbug.com/dawn/600): In OpenGL ES, cube map textures cannot be treated as arrays
+        // of 2D textures. Find a workaround.
+        DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+        constexpr uint32_t kMipLevels = 1u;
+        InitTexture(textureArrayLayers, kMipLevels);
+
+        ASSERT_TRUE((textureViewLayerCount == 6) ||
+                    (isCubeMapArray && textureViewLayerCount % 6 == 0));
+        wgpu::TextureViewDimension dimension = (isCubeMapArray)
+                                                   ? wgpu::TextureViewDimension::CubeArray
+                                                   : wgpu::TextureViewDimension::Cube;
+
+        wgpu::TextureViewDescriptor descriptor = mDefaultTextureViewDescriptor;
+        descriptor.dimension = dimension;
+        descriptor.baseArrayLayer = textureViewBaseLayer;
+        descriptor.arrayLayerCount = textureViewLayerCount;
+
+        wgpu::TextureView cubeMapTextureView = mTexture.CreateView(&descriptor);
+
+        // Check the data in the every face of the cube map (array) texture view.
+        for (uint32_t layer = 0; layer < textureViewLayerCount; ++layer) {
+            const std::string& fragmentShader =
+                CreateFragmentShaderForCubeMapFace(layer, isCubeMapArray);
+
+            int expected = GenerateTestPixelValue(textureViewBaseLayer + layer, 0);
+            Verify(cubeMapTextureView, fragmentShader.c_str(), expected);
+        }
+    }
+
+    wgpu::Sampler mSampler;
+    wgpu::Texture mTexture;
+    wgpu::TextureViewDescriptor mDefaultTextureViewDescriptor;
+    wgpu::ShaderModule mVSModule;
+    utils::BasicRenderPass mRenderPass;
+};
+
+// Test drawing a rect with a 2D array texture.
+TEST_P(TextureViewSamplingTest, Default2DArrayTexture) {
+    // TODO(cwallez@chromium.org) understand what the issue is
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    constexpr uint32_t kLayers = 3;
+    constexpr uint32_t kMipLevels = 1;
+    InitTexture(kLayers, kMipLevels);
+
+    wgpu::TextureViewDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+    wgpu::TextureView textureView = mTexture.CreateView(&descriptor);
+
+    const char* fragmentShader = R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture0 : texture_2d_array<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, texCoord, 0) +
+                       textureSample(texture0, sampler0, texCoord, 1) +
+                       textureSample(texture0, sampler0, texCoord, 2);
+            }
+        )";
+
+    const int expected =
+        GenerateTestPixelValue(0, 0) + GenerateTestPixelValue(1, 0) + GenerateTestPixelValue(2, 0);
+    Verify(textureView, fragmentShader, expected);
+}
+
+// Test sampling from a 2D texture view created on a 2D array texture.
+TEST_P(TextureViewSamplingTest, Texture2DViewOn2DArrayTexture) {
+    Texture2DViewTest(6, 1, 4, 0);
+}
+
+// Test sampling from a 2D array texture view created on a 2D array texture.
+TEST_P(TextureViewSamplingTest, Texture2DArrayViewOn2DArrayTexture) {
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+    Texture2DArrayViewTest(6, 1, 2, 0);
+}
+
+// Test sampling from a 2D array texture view created on a 2D texture with one layer.
+// Regression test for crbug.com/dawn/1309.
+TEST_P(TextureViewSamplingTest, Texture2DArrayViewOnSingleLayer2DTexture) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+    InitTexture(1 /* array layer count */, 1 /* mip level count */);
+
+    wgpu::TextureViewDescriptor descriptor = mDefaultTextureViewDescriptor;
+    descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+    descriptor.baseArrayLayer = 0;
+    descriptor.arrayLayerCount = 1;
+    descriptor.baseMipLevel = 0;
+    descriptor.mipLevelCount = 1;
+    wgpu::TextureView textureView = mTexture.CreateView(&descriptor);
+
+    const char* fragmentShader = R"(
+        @group(0) @binding(0) var sampler0 : sampler;
+        @group(0) @binding(1) var texture0 : texture_2d_array<f32>;
+
+        @stage(fragment)
+        fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+            return textureSample(texture0, sampler0, texCoord, 0);
+        }
+    )";
+
+    int expected = GenerateTestPixelValue(0, 0);
+    Verify(textureView, fragmentShader, expected);
+}
+
+// Test sampling from a 2D texture view created on a mipmap level of a 2D texture.
+TEST_P(TextureViewSamplingTest, Texture2DViewOnOneLevelOf2DTexture) {
+    Texture2DViewTest(1, 6, 0, 4);
+}
+
+// Test sampling from a 2D texture view created on a mipmap level of a 2D array texture layer.
+TEST_P(TextureViewSamplingTest, Texture2DViewOnOneLevelOf2DArrayTexture) {
+    Texture2DViewTest(6, 6, 3, 4);
+}
+
+// Test sampling from a 2D array texture view created on a mipmap level of a 2D array texture.
+TEST_P(TextureViewSamplingTest, Texture2DArrayViewOnOneLevelOf2DArrayTexture) {
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+    Texture2DArrayViewTest(6, 6, 2, 4);
+}
+
+// Test that an RGBA8 texture may be interpreted as RGBA8UnormSrgb
+// More extensive color value checks and format tests are left for the CTS.
+TEST_P(TextureViewSamplingTest, SRGBReinterpretation) {
+    // TODO(crbug.com/dawn/593): This test requires glTextureView, which is unsupported on GLES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {2, 2, 1};
+    textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.viewFormats = &viewDesc.format;
+    textureDesc.viewFormatCount = 1;
+    wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+    wgpu::ImageCopyTexture dst = {};
+    dst.texture = texture;
+    std::array<RGBA8, 4> rgbaTextureData = {
+        RGBA8(180, 0, 0, 255),
+        RGBA8(0, 84, 0, 127),
+        RGBA8(0, 0, 62, 100),
+        RGBA8(62, 180, 84, 90),
+    };
+
+    wgpu::TextureDataLayout dataLayout = {};
+    dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
+
+    queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
+                       &dataLayout, &textureDesc.size);
+
+    wgpu::TextureView textureView = texture.CreateView(&viewDesc);
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec2<f32>, 6>(
+                                        vec2<f32>(-1.0, -1.0),
+                                        vec2<f32>(-1.0,  1.0),
+                                        vec2<f32>( 1.0, -1.0),
+                                        vec2<f32>(-1.0,  1.0),
+                                        vec2<f32>( 1.0, -1.0),
+                                        vec2<f32>( 1.0,  1.0));
+            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        }
+    )");
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var texture : texture_2d<f32>;
+
+        @stage(fragment)
+        fn main(@builtin(position) coord: vec4<f32>) -> @location(0) vec4<f32> {
+            return textureLoad(texture, vec2<i32>(coord.xy), 0);
+        }
+    )");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, textureDesc.size.width, textureDesc.size.height, wgpu::TextureFormat::RGBA8Unorm);
+    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(116, 0, 0, 255),   //
+        RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 23, 0, 127),    //
+        RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 0, 12, 100),    //
+        RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(12, 116, 23, 90),  //
+        RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
+}
+
+// Test sampling from a cube map texture view that covers a whole 2D array texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapOnWholeTexture) {
+    constexpr uint32_t kTotalLayers = 6;
+    TextureCubeMapTest(kTotalLayers, 0, kTotalLayers, false);
+}
+
+// Test sampling from a cube map texture view that covers a sub part of a 2D array texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapViewOnPartOfTexture) {
+    TextureCubeMapTest(10, 2, 6, false);
+}
+
+// Test sampling from a cube map texture view that covers the last layer of a 2D array texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapViewCoveringLastLayer) {
+    constexpr uint32_t kTotalLayers = 10;
+    constexpr uint32_t kBaseLayer = 4;
+    TextureCubeMapTest(kTotalLayers, kBaseLayer, kTotalLayers - kBaseLayer, false);
+}
+
+// Test sampling from a cube map texture array view that covers a whole 2D array texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapArrayOnWholeTexture) {
+    constexpr uint32_t kTotalLayers = 12;
+    TextureCubeMapTest(kTotalLayers, 0, kTotalLayers, true);
+}
+
+// Test sampling from a cube map texture array view that covers a sub part of a 2D array texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapArrayViewOnPartOfTexture) {
+    // Test failing on the GPU FYI Mac Pro (AMD), see
+    // https://bugs.chromium.org/p/dawn/issues/detail?id=58
+    DAWN_SUPPRESS_TEST_IF(IsMacOS() && IsMetal() && IsAMD());
+
+    TextureCubeMapTest(20, 3, 12, true);
+}
+
+// Test sampling from a cube map texture array view that covers the last layer of a 2D array
+// texture.
+TEST_P(TextureViewSamplingTest, TextureCubeMapArrayViewCoveringLastLayer) {
+    // Test failing on the GPU FYI Mac Pro (AMD), see
+    // https://bugs.chromium.org/p/dawn/issues/detail?id=58
+    DAWN_SUPPRESS_TEST_IF(IsMacOS() && IsMetal() && IsAMD());
+
+    constexpr uint32_t kTotalLayers = 20;
+    constexpr uint32_t kBaseLayer = 8;
+    TextureCubeMapTest(kTotalLayers, kBaseLayer, kTotalLayers - kBaseLayer, true);
+}
+
+// Test sampling from a cube map array texture view that only has a single cube map.
+TEST_P(TextureViewSamplingTest, TextureCubeMapArrayViewSingleCubeMap) {
+    // Test failing on the GPU FYI Mac Pro (AMD), see
+    // https://bugs.chromium.org/p/dawn/issues/detail?id=58
+    DAWN_SUPPRESS_TEST_IF(IsMacOS() && IsMetal() && IsAMD());
+
+    TextureCubeMapTest(20, 7, 6, true);
+}
+
+class TextureViewRenderingTest : public DawnTest {
+  protected:
+    void TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension dimension,
+                                           uint32_t layerCount,
+                                           uint32_t levelCount,
+                                           uint32_t textureViewBaseLayer,
+                                           uint32_t textureViewBaseLevel,
+                                           uint32_t textureWidthLevel0,
+                                           uint32_t textureHeightLevel0) {
+        ASSERT(dimension == wgpu::TextureViewDimension::e2D ||
+               dimension == wgpu::TextureViewDimension::e2DArray);
+        ASSERT_LT(textureViewBaseLayer, layerCount);
+        ASSERT_LT(textureViewBaseLevel, levelCount);
+
+        constexpr wgpu::TextureUsage kUsage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = Create2DTexture(device, textureWidthLevel0, textureHeightLevel0,
+                                                layerCount, levelCount, kUsage);
+
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = kDefaultFormat;
+        descriptor.dimension = dimension;
+        descriptor.baseArrayLayer = textureViewBaseLayer;
+        descriptor.arrayLayerCount = 1;
+        descriptor.baseMipLevel = textureViewBaseLevel;
+        descriptor.mipLevelCount = 1;
+        wgpu::TextureView textureView = texture.CreateView(&descriptor);
+
+        wgpu::ShaderModule vsModule = CreateDefaultVertexShaderModule(device);
+
+        // Clear textureView with Red(255, 0, 0, 255) and render Green(0, 255, 0, 255) into it
+        utils::ComboRenderPassDescriptor renderPassInfo({textureView});
+        renderPassInfo.cColorAttachments[0].clearValue = {1.0f, 0.0f, 0.0f, 1.0f};
+
+        const char* oneColorFragmentShader = R"(
+            @stage(fragment) fn main(@location(0) texCoord : vec2<f32>) ->
+                @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            }
+        )";
+        wgpu::ShaderModule oneColorFsModule =
+            utils::CreateShaderModule(device, oneColorFragmentShader);
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = oneColorFsModule;
+        pipelineDescriptor.cTargets[0].format = kDefaultFormat;
+
+        wgpu::RenderPipeline oneColorPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassInfo);
+            pass.SetPipeline(oneColorPipeline);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check if the right pixels (Green) have been written into the right part of the texture.
+        uint32_t textureViewWidth = std::max(1u, textureWidthLevel0 >> textureViewBaseLevel);
+        uint32_t textureViewHeight = std::max(1u, textureHeightLevel0 >> textureViewBaseLevel);
+        uint32_t bytesPerRow =
+            Align(kBytesPerTexel * textureWidthLevel0, kTextureBytesPerRowAlignment);
+        uint32_t expectedDataSize =
+            bytesPerRow / kBytesPerTexel * (textureWidthLevel0 - 1) + textureHeightLevel0;
+        constexpr RGBA8 kExpectedPixel(0, 255, 0, 255);
+        std::vector<RGBA8> expected(expectedDataSize, kExpectedPixel);
+        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, textureViewBaseLayer},
+                          {textureViewWidth, textureViewHeight}, textureViewBaseLevel);
+    }
+};
+
+// Test rendering into a 2D texture view created on a mipmap level of a 2D texture.
+TEST_P(TextureViewRenderingTest, Texture2DViewOnALevelOf2DTextureAsColorAttachment) {
+    constexpr uint32_t kLayers = 1;
+    constexpr uint32_t kMipLevels = 4;
+    constexpr uint32_t kBaseLayer = 0;
+
+    // Rendering into the first level
+    {
+        constexpr uint32_t kBaseLevel = 0;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+
+    // Rendering into the last level
+    {
+        constexpr uint32_t kBaseLevel = kMipLevels - 1;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+}
+
+// Test rendering into a 2D texture view created on a mipmap level of a rectangular 2D texture.
+TEST_P(TextureViewRenderingTest, Texture2DViewOnALevelOfRectangular2DTextureAsColorAttachment) {
+    constexpr uint32_t kLayers = 1;
+    constexpr uint32_t kMipLevels = 4;
+    constexpr uint32_t kBaseLayer = 0;
+
+    // Rendering into the first level
+    {
+        constexpr uint32_t kBaseLevel = 0;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels,
+                                          1 << (kMipLevels - 2));
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << (kMipLevels - 2),
+                                          1 << kMipLevels);
+    }
+
+    // Rendering into the last level
+    {
+        constexpr uint32_t kBaseLevel = kMipLevels - 1;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels,
+                                          1 << (kMipLevels - 2));
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << (kMipLevels - 2),
+                                          1 << kMipLevels);
+    }
+}
+
+// Test rendering into a 2D texture view created on a layer of a 2D array texture.
+TEST_P(TextureViewRenderingTest, Texture2DViewOnALayerOf2DArrayTextureAsColorAttachment) {
+    constexpr uint32_t kMipLevels = 1;
+    constexpr uint32_t kBaseLevel = 0;
+    constexpr uint32_t kLayers = 10;
+
+    // Rendering into the first layer
+    {
+        constexpr uint32_t kBaseLayer = 0;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+
+    // Rendering into the last layer
+    {
+        constexpr uint32_t kBaseLayer = kLayers - 1;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2D, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+}
+
+// Test rendering into a 1-layer 2D array texture view created on a mipmap level of a 2D texture.
+TEST_P(TextureViewRenderingTest, Texture2DArrayViewOnALevelOf2DTextureAsColorAttachment) {
+    constexpr uint32_t kLayers = 1;
+    constexpr uint32_t kMipLevels = 4;
+    constexpr uint32_t kBaseLayer = 0;
+
+    // Rendering into the first level
+    {
+        constexpr uint32_t kBaseLevel = 0;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2DArray, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+
+    // Rendering into the last level
+    {
+        constexpr uint32_t kBaseLevel = kMipLevels - 1;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2DArray, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+}
+
+// Test rendering into a 1-layer 2D array texture view created on a layer of a 2D array texture.
+TEST_P(TextureViewRenderingTest, Texture2DArrayViewOnALayerOf2DArrayTextureAsColorAttachment) {
+    constexpr uint32_t kMipLevels = 1;
+    constexpr uint32_t kBaseLevel = 0;
+    constexpr uint32_t kLayers = 10;
+
+    // Rendering into the first layer
+    {
+        constexpr uint32_t kBaseLayer = 0;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2DArray, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+
+    // Rendering into the last layer
+    {
+        constexpr uint32_t kBaseLayer = kLayers - 1;
+        TextureLayerAsColorAttachmentTest(wgpu::TextureViewDimension::e2DArray, kLayers, kMipLevels,
+                                          kBaseLayer, kBaseLevel, 1 << kMipLevels, 1 << kMipLevels);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(TextureViewSamplingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+DAWN_INSTANTIATE_TEST(TextureViewRenderingTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class TextureViewTest : public DawnTest {};
+
+// This is a regression test for crbug.com/dawn/399 where creating a texture view with only copy
+// usage would cause the Vulkan validation layers to warn
+TEST_P(TextureViewTest, OnlyCopySrcDst) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {4, 4, 1};
+    descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView view = texture.CreateView();
+}
+
+// Test that a texture view can be created from a destroyed texture without
+// backend errors.
+TEST_P(TextureViewTest, DestroyedTexture) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {4, 4, 2};
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    texture.Destroy();
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.baseArrayLayer = 1;
+    viewDesc.arrayLayerCount = 1;
+    wgpu::TextureView view = texture.CreateView(&viewDesc);
+}
+
+DAWN_INSTANTIATE_TEST(TextureViewTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class TextureView3DTest : public DawnTest {};
+
+// Test that 3D textures and 3D texture views can be created successfully
+TEST_P(TextureView3DTest, BasicTest) {
+    wgpu::Texture texture =
+        Create3DTexture(device, {4, 4, 4}, 3, wgpu::TextureUsage::TextureBinding);
+    wgpu::TextureView view = texture.CreateView();
+}
+
+DAWN_INSTANTIATE_TEST(TextureView3DTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+class TextureView1DTest : public DawnTest {};
+
+// Test that it is possible to create a 1D texture view and sample from it.
+TEST_P(TextureView1DTest, Sampling) {
+    // Create a 1D texture and fill it with some data.
+    wgpu::TextureDescriptor texDesc;
+    texDesc.dimension = wgpu::TextureDimension::e1D;
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    texDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst;
+    texDesc.size = {4, 1, 1};
+    wgpu::Texture tex = device.CreateTexture(&texDesc);
+
+    std::array<RGBA8, 4> data = {RGBA8::kGreen, RGBA8::kRed, RGBA8::kBlue, RGBA8::kWhite};
+    wgpu::ImageCopyTexture target = utils::CreateImageCopyTexture(tex, 0, {});
+    wgpu::TextureDataLayout layout = utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+    queue.WriteTexture(&target, &data, sizeof(data), &layout, &texDesc.size);
+
+    // Create a pipeline that will sample from the 1D texture and output to an attachment.
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex)
+        fn vs(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            var pos = array<vec4<f32>, 3>(
+                vec4<f32>( 0.,  2., 0., 1.),
+                vec4<f32>(-3., -1., 0., 1.),
+                vec4<f32>( 3., -1., 0., 1.));
+            return pos[VertexIndex];
+        }
+
+        @group(0) @binding(0) var tex : texture_1d<f32>;
+        @group(0) @binding(1) var samp : sampler;
+        @stage(fragment)
+        fn fs(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
+            return textureSample(tex, samp, pos.x / 4.0);
+        }
+    )");
+    utils::ComboRenderPipelineDescriptor pDesc;
+    pDesc.vertex.module = module;
+    pDesc.vertex.entryPoint = "vs";
+    pDesc.cFragment.module = module;
+    pDesc.cFragment.entryPoint = "fs";
+    pDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pDesc);
+
+    // Do the sample + rendering.
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                              {{0, tex.CreateView()}, {1, device.CreateSampler()}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    utils::BasicRenderPass rp = utils::CreateBasicRenderPass(device, 4, 1);
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rp.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bg);
+    pass.Draw(3);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Check texels got sampled correctly.
+    EXPECT_PIXEL_RGBA8_EQ(data[0], rp.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(data[1], rp.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_EQ(data[2], rp.color, 2, 0);
+    EXPECT_PIXEL_RGBA8_EQ(data[3], rp.color, 3, 0);
+}
+
+DAWN_INSTANTIATE_TEST(TextureView1DTest, D3D12Backend(), MetalBackend(), VulkanBackend());
diff --git a/src/dawn/tests/end2end/TextureZeroInitTests.cpp b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
new file mode 100644
index 0000000..ed473cd
--- /dev/null
+++ b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
@@ -0,0 +1,2147 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#define EXPECT_LAZY_CLEAR(N, statement)                                                        \
+    do {                                                                                       \
+        if (UsesWire()) {                                                                      \
+            statement;                                                                         \
+        } else {                                                                               \
+            size_t lazyClearsBefore = dawn::native::GetLazyClearCountForTesting(device.Get()); \
+            statement;                                                                         \
+            size_t lazyClearsAfter = dawn::native::GetLazyClearCountForTesting(device.Get());  \
+            EXPECT_EQ(N, lazyClearsAfter - lazyClearsBefore);                                  \
+        }                                                                                      \
+    } while (0)
+
+class TextureZeroInitTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    }
+    wgpu::TextureDescriptor CreateTextureDescriptor(uint32_t mipLevelCount,
+                                                    uint32_t arrayLayerCount,
+                                                    wgpu::TextureUsage usage,
+                                                    wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = kSize;
+        descriptor.size.height = kSize;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        return descriptor;
+    }
+    wgpu::TextureViewDescriptor CreateTextureViewDescriptor(
+        uint32_t baseMipLevel,
+        uint32_t baseArrayLayer,
+        wgpu::TextureFormat format = kColorFormat) {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = format;
+        descriptor.baseArrayLayer = baseArrayLayer;
+        descriptor.arrayLayerCount = 1;
+        descriptor.baseMipLevel = baseMipLevel;
+        descriptor.mipLevelCount = 1;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        return descriptor;
+    }
+    wgpu::RenderPipeline CreatePipelineForTest(float depth = 0.f) {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest(depth);
+        const char* fs = R"(
+            ;
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+               return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+        )";
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, fs);
+        wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil();
+        depthStencil->depthCompare = wgpu::CompareFunction::Equal;
+        depthStencil->stencilFront.compare = wgpu::CompareFunction::Equal;
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+    wgpu::ShaderModule CreateBasicVertexShaderForTest(float depth = 0.f) {
+        std::string source = R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0, -1.0)
+                );
+                return vec4<f32>(pos[VertexIndex], )" +
+                             std::to_string(depth) + R"(, 1.0);
+            })";
+        return utils::CreateShaderModule(device, source.c_str());
+    }
+    wgpu::ShaderModule CreateSampledTextureFragmentShaderForTest() {
+        return utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var texture0 : texture_2d<f32>;
+            struct FragmentOut {
+                @location(0) color : vec4<f32>
+            }
+            @stage(fragment)
+            fn main(@builtin(position) FragCoord : vec4<f32>) -> FragmentOut {
+                var output : FragmentOut;
+                output.color = textureLoad(texture0, vec2<i32>(FragCoord.xy), 0);
+                return output;
+            }
+        )");
+    }
+
+    constexpr static uint32_t kSize = 128;
+    constexpr static uint32_t kUnalignedSize = 127;
+    // All texture formats used (RGBA8Unorm, Depth24PlusStencil8, and RGBA8Snorm, BC formats)
+    // have the same block byte size of 4.
+    constexpr static uint32_t kFormatBlockByteSize = 4;
+    constexpr static wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr static wgpu::TextureFormat kDepthStencilFormat =
+        wgpu::TextureFormat::Depth24PlusStencil8;
+    constexpr static wgpu::TextureFormat kNonrenderableColorFormat =
+        wgpu::TextureFormat::RGBA8Snorm;
+};
+
+// This tests that the code path of CopyTextureToBuffer clears correctly to Zero after first usage
+TEST_P(TextureZeroInitTest, CopyTextureToBufferSource) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    // Texture's first usage is in EXPECT_PIXEL_RGBA8_EQ's call to CopyTextureToBuffer
+    RGBA8 filledWithZeros(0, 0, 0, 0);
+    EXPECT_LAZY_CLEAR(1u, EXPECT_PIXEL_RGBA8_EQ(filledWithZeros, texture, 0, 0));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// This tests that the code path of CopyTextureToBuffer with multiple texture array layers clears
+// correctly to Zero after first usage
+TEST_P(TextureZeroInitTest, CopyMultipleTextureArrayLayersToBufferSource) {
+    constexpr uint32_t kArrayLayers = 6u;
+
+    const wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, kArrayLayers, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    const uint32_t bytesPerRow = utils::GetMinimumBytesPerRow(kColorFormat, kSize);
+    const uint32_t rowsPerImage = kSize;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    bufferDescriptor.size = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
+                                                       {kSize, kSize, kArrayLayers}, kColorFormat);
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
+
+    const wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(buffer, 0, bytesPerRow, kSize);
+    const wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    const wgpu::Extent3D copySize = {kSize, kSize, kArrayLayers};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    // Expect texture to be lazy initialized.
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_TRUE(
+        dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, kArrayLayers));
+
+    const std::vector<RGBA8> kExpectedAllZero(kSize * kSize, {0, 0, 0, 0});
+    for (uint32_t layer = 0; layer < kArrayLayers; ++layer) {
+        EXPECT_TEXTURE_EQ(kExpectedAllZero.data(), texture, {0, 0, layer}, {kSize, kSize});
+    }
+}
+
+// Test that non-zero mip level clears subresource to Zero after first use
+// This goes through the BeginRenderPass's code path
+TEST_P(TextureZeroInitTest, RenderingMipMapClearsToZero) {
+    uint32_t baseMipLevel = 2;
+    uint32_t levelCount = 4;
+    uint32_t baseArrayLayer = 0;
+    uint32_t layerCount = 1;
+
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        levelCount, layerCount, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureViewDescriptor viewDescriptor =
+        CreateTextureViewDescriptor(baseMipLevel, baseArrayLayer);
+    wgpu::TextureView view = texture.CreateView(&viewDescriptor);
+
+    utils::BasicRenderPass renderPass = utils::BasicRenderPass(kSize, kSize, texture, kColorFormat);
+
+    // Specify loadOp Load. Clear should be used to zero-initialize.
+    renderPass.renderPassInfo.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+    // Specify non-zero clear color. It should still be cleared to zero.
+    renderPass.renderPassInfo.cColorAttachments[0].clearValue = {0.5f, 0.5f, 0.5f, 0.5f};
+    renderPass.renderPassInfo.cColorAttachments[0].view = view;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        // Texture's first usage is in BeginRenderPass's call to RecordRenderPass
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    uint32_t mipSize = kSize >> 2;
+    std::vector<RGBA8> expected(mipSize * mipSize, {0, 0, 0, 0});
+
+    EXPECT_TEXTURE_EQ(expected.data(), renderPass.color, {0, 0, baseArrayLayer}, {mipSize, mipSize},
+                      baseMipLevel);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        renderPass.color.Get(), baseMipLevel, 1, baseArrayLayer, 1));
+}
+
+// Test that non-zero array layers clears subresource to Zero after first use.
+// This goes through the BeginRenderPass's code path
+TEST_P(TextureZeroInitTest, RenderingArrayLayerClearsToZero) {
+    uint32_t baseMipLevel = 0;
+    uint32_t levelCount = 1;
+    uint32_t baseArrayLayer = 2;
+    uint32_t layerCount = 4;
+
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        levelCount, layerCount, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureViewDescriptor viewDescriptor =
+        CreateTextureViewDescriptor(baseMipLevel, baseArrayLayer);
+    wgpu::TextureView view = texture.CreateView(&viewDescriptor);
+
+    utils::BasicRenderPass renderPass = utils::BasicRenderPass(kSize, kSize, texture, kColorFormat);
+
+    // Specify loadOp Load. Clear should be used to zero-initialize.
+    renderPass.renderPassInfo.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+    // Specify non-zero clear color. It should still be cleared to zero.
+    renderPass.renderPassInfo.cColorAttachments[0].clearValue = {0.5f, 0.5f, 0.5f, 0.5f};
+    renderPass.renderPassInfo.cColorAttachments[0].view = view;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expected(kSize * kSize, {0, 0, 0, 0});
+
+    EXPECT_TEXTURE_EQ(expected.data(), renderPass.color, {0, 0, baseArrayLayer}, {kSize, kSize},
+                      baseMipLevel);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        renderPass.color.Get(), baseMipLevel, 1, baseArrayLayer, 1));
+}
+
+// This tests CopyBufferToTexture fully overwrites copy so lazy init is not needed.
+TEST_P(TextureZeroInitTest, CopyBufferToTexture) {
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(4, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize, 100);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * sizeof(uint32_t));
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expected(kSize * kSize, {100, 100, 100, 100});
+
+    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// Test for a copy only to a subset of the subresource, lazy init is necessary to clear the other
+// half.
+TEST_P(TextureZeroInitTest, CopyBufferToTextureHalf) {
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(4, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize, 100);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * sizeof(uint16_t));
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize / 2, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expected100((kSize / 2) * kSize, {100, 100, 100, 100});
+    std::vector<RGBA8> expectedZeros((kSize / 2) * kSize, {0, 0, 0, 0});
+    // first half filled with 100, by the buffer data
+    EXPECT_TEXTURE_EQ(expected100.data(), texture, {0, 0}, {kSize / 2, kSize});
+    // second half should be cleared
+    EXPECT_TEXTURE_EQ(expectedZeros.data(), texture, {kSize / 2, 0}, {kSize / 2, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// This tests CopyBufferToTexture fully overwrites a range of subresources, so lazy initialization
+// is needed for neither the subresources involved in the copy nor the other subresources.
+TEST_P(TextureZeroInitTest, CopyBufferToTextureMultipleArrayLayers) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 6, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    constexpr uint32_t kBaseArrayLayer = 2u;
+    constexpr uint32_t kCopyLayerCount = 3u;
+    std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize * kCopyLayerCount, 100);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+
+    const wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * kFormatBlockByteSize, kSize);
+    const wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, kBaseArrayLayer});
+    const wgpu::Extent3D copySize = {kSize, kSize, kCopyLayerCount};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    // The copy overwrites the whole subresources so we don't need to do lazy initialization on
+    // them.
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_TRUE(dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, kBaseArrayLayer,
+                                                              kCopyLayerCount));
+
+    const std::vector<RGBA8> expected100(kSize * kSize, {100, 100, 100, 100});
+    for (uint32_t layer = kBaseArrayLayer; layer < kBaseArrayLayer + kCopyLayerCount; ++layer) {
+        EXPECT_TEXTURE_EQ(expected100.data(), texture, {0, 0, layer}, {kSize, kSize});
+    }
+}
+
+// This tests CopyTextureToTexture fully overwrites copy so lazy init is not needed.
+TEST_P(TextureZeroInitTest, CopyTextureToTexture) {
+    wgpu::TextureDescriptor srcDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    wgpu::ImageCopyTexture srcImageCopyTexture =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+    wgpu::TextureDescriptor dstDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture dstTexture = device.CreateTexture(&dstDescriptor);
+
+    wgpu::ImageCopyTexture dstImageCopyTexture =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expected(kSize * kSize, {0, 0, 0, 0});
+
+    EXPECT_TEXTURE_EQ(expected.data(), srcTexture, {0, 0}, {kSize, kSize});
+    EXPECT_TEXTURE_EQ(expected.data(), dstTexture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(srcTexture.Get(), 0, 1, 0, 1));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(dstTexture.Get(), 0, 1, 0, 1));
+}
+
+// This Tests the CopyTextureToTexture's copy only to a subset of the subresource, lazy init is
+// necessary to clear the other half.
+TEST_P(TextureZeroInitTest, CopyTextureToTextureHalf) {
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::CopyDst,
+                                kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    // fill srcTexture with 100
+    {
+        std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize, 100);
+        wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * kFormatBlockByteSize);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+        wgpu::Extent3D copySize = {kSize, kSize, 1};
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    wgpu::ImageCopyTexture srcImageCopyTexture =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+    wgpu::TextureDescriptor dstDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture dstTexture = device.CreateTexture(&dstDescriptor);
+
+    wgpu::ImageCopyTexture dstImageCopyTexture =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize / 2, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expectedWithZeros((kSize / 2) * kSize, {0, 0, 0, 0});
+    std::vector<RGBA8> expectedWith100(kSize * kSize, {100, 100, 100, 100});
+
+    EXPECT_TEXTURE_EQ(expectedWith100.data(), srcTexture, {0, 0}, {kSize, kSize});
+    EXPECT_TEXTURE_EQ(expectedWith100.data(), dstTexture, {0, 0}, {kSize / 2, kSize});
+    EXPECT_TEXTURE_EQ(expectedWithZeros.data(), dstTexture, {kSize / 2, 0}, {kSize / 2, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(srcTexture.Get(), 0, 1, 0, 1));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(dstTexture.Get(), 0, 1, 0, 1));
+}
+
+// This tests the texture with depth attachment and load op load will init depth stencil texture to
+// 0s.
+TEST_P(TextureZeroInitTest, RenderingLoadingDepth) {
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::RenderAttachment,
+                                kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    wgpu::TextureDescriptor depthStencilDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    utils::ComboRenderPassDescriptor renderPassDescriptor({srcTexture.CreateView()},
+                                                          depthStencilTexture.CreateView());
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+    // Set clearDepth to non-zero. It should still be cleared to 0 by the loadOp.
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.5f;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 0;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+    pass.SetPipeline(CreatePipelineForTest());
+    pass.Draw(6);
+    pass.End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    // Expect 0 lazy clears, depth stencil texture will clear using loadop
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+    // Expect the texture to be red because depth test passed.
+    std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+    EXPECT_TEXTURE_EQ(expected.data(), srcTexture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(srcTexture.Get(), 0, 1, 0, 1));
+}
+
+// This tests the texture with stencil attachment and load op load will init depth stencil texture
+// to 0s.
+TEST_P(TextureZeroInitTest, RenderingLoadingStencil) {
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::RenderAttachment,
+                                kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    wgpu::TextureDescriptor depthStencilDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    utils::ComboRenderPassDescriptor renderPassDescriptor({srcTexture.CreateView()},
+                                                          depthStencilTexture.CreateView());
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.0f;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+    // Set clearStencil to non-zero. It should still be cleared to 0 by the loadOp.
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 2;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+    pass.SetPipeline(CreatePipelineForTest());
+    pass.Draw(6);
+    pass.End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    // Expect 0 lazy clears, depth stencil texture will clear using loadop
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+    // Expect the texture to be red because stencil test passed.
+    std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+    EXPECT_TEXTURE_EQ(expected.data(), srcTexture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(srcTexture.Get(), 0, 1, 0, 1));
+}
+
+// This tests the texture with depth stencil attachment and load op load will init depth stencil
+// texture to 0s.
+TEST_P(TextureZeroInitTest, RenderingLoadingDepthStencil) {
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::RenderAttachment,
+                                kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    wgpu::TextureDescriptor depthStencilDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    utils::ComboRenderPassDescriptor renderPassDescriptor({srcTexture.CreateView()},
+                                                          depthStencilTexture.CreateView());
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+    pass.SetPipeline(CreatePipelineForTest());
+    pass.Draw(6);
+    pass.End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    // Expect 0 lazy clears, depth stencil texture will clear using loadop
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+    // Expect the texture to be red because both depth and stencil tests passed.
+    std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+    EXPECT_TEXTURE_EQ(expected.data(), srcTexture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(srcTexture.Get(), 0, 1, 0, 1));
+}
+
+// Test that clear state is tracked independently for depth/stencil textures.
+TEST_P(TextureZeroInitTest, IndependentDepthStencilLoadAfterDiscard) {
+    // TODO(crbug.com/dawn/704): Readback after clear via stencil copy does not work
+    // on some Intel drivers.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    wgpu::TextureDescriptor depthStencilDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    // Uninitialize only depth
+    {
+        // Clear the stencil to 2 and discard the depth
+        {
+            utils::ComboRenderPassDescriptor renderPassDescriptor({},
+                                                                  depthStencilTexture.CreateView());
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 2;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+            pass.End();
+            wgpu::CommandBuffer commandBuffer = encoder.Finish();
+            EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+        }
+
+        // "all" subresources are not initialized; Depth is not initialized
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(
+                             depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_All));
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(
+                             depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+        // Now load both depth and stencil. Depth should be cleared and stencil should stay the same
+        // at 2.
+        {
+            wgpu::TextureDescriptor colorDescriptor =
+                CreateTextureDescriptor(1, 1,
+                                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                            wgpu::TextureUsage::RenderAttachment,
+                                        kColorFormat);
+            wgpu::Texture colorTexture = device.CreateTexture(&colorDescriptor);
+
+            utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()},
+                                                                  depthStencilTexture.CreateView());
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+            pass.SetPipeline(CreatePipelineForTest());
+            pass.SetStencilReference(2);
+            pass.Draw(6);
+            pass.End();
+            wgpu::CommandBuffer commandBuffer = encoder.Finish();
+            // No lazy clear because depth will be cleared with a loadOp
+            EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+            // Expect the texture to be red because the depth and stencil tests passed. Depth was 0
+            // and stencil was 2.
+            std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+            EXPECT_TEXTURE_EQ(expected.data(), colorTexture, {0, 0}, {kSize, kSize});
+        }
+
+        // Everything is initialized now
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_All));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+        // TODO(crbug.com/dawn/439): Implement stencil copies on other platforms
+        if (IsMetal() || IsVulkan() || IsD3D12()) {
+            // Check by copy that the stencil data is 2.
+            std::vector<uint8_t> expected(kSize * kSize, 2);
+            EXPECT_LAZY_CLEAR(
+                0u, EXPECT_TEXTURE_EQ(expected.data(), depthStencilTexture, {0, 0}, {kSize, kSize},
+                                      0, wgpu::TextureAspect::StencilOnly));
+        }
+    }
+
+    // Uninitialize only stencil
+    {
+        // Clear the depth to 0.7 and discard the stencil.
+        {
+            utils::ComboRenderPassDescriptor renderPassDescriptor({},
+                                                                  depthStencilTexture.CreateView());
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.7;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp =
+                wgpu::StoreOp::Discard;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+            pass.End();
+            wgpu::CommandBuffer commandBuffer = encoder.Finish();
+            EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+        }
+
+        // "all" subresources are not initialized; Stencil is not initialized
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(
+                             depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_All));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(
+                             depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+        // Now load both depth and stencil. Stencil should be cleared and depth should stay the same
+        // at 0.7.
+        {
+            wgpu::TextureDescriptor colorDescriptor =
+                CreateTextureDescriptor(1, 1,
+                                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                            wgpu::TextureUsage::RenderAttachment,
+                                        kColorFormat);
+            wgpu::Texture colorTexture = device.CreateTexture(&colorDescriptor);
+
+            utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()},
+                                                                  depthStencilTexture.CreateView());
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+            pass.SetPipeline(CreatePipelineForTest(0.7));
+            pass.Draw(6);
+            pass.End();
+            wgpu::CommandBuffer commandBuffer = encoder.Finish();
+            // No lazy clear because stencil will clear using a loadOp.
+            EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+            // Expect the texture to be red because both the depth a stencil tests passed.
+            // Depth was 0.7 and stencil was 0
+            std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+            EXPECT_TEXTURE_EQ(expected.data(), colorTexture, {0, 0}, {kSize, kSize});
+        }
+
+        // Everything is initialized now
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_All));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+        EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                            depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+        // TODO(crbug.com/dawn/439): Implement stencil copies on other platforms
+        if (IsMetal() || IsVulkan() || IsD3D12()) {
+            // Check by copy that the stencil data is 0.
+            std::vector<uint8_t> expected(kSize * kSize, 0);
+            EXPECT_LAZY_CLEAR(
+                0u, EXPECT_TEXTURE_EQ(expected.data(), depthStencilTexture, {0, 0}, {kSize, kSize},
+                                      0, wgpu::TextureAspect::StencilOnly));
+        }
+    }
+}
+
+// Test that clear state is tracked independently for depth/stencil textures.
+// Lazy clear of the stencil aspect via copy should not touch depth.
+TEST_P(TextureZeroInitTest, IndependentDepthStencilCopyAfterDiscard) {
+    // TODO(crbug.com/dawn/439): Implement stencil copies on other platforms
+    DAWN_SUPPRESS_TEST_IF(!(IsMetal() || IsVulkan() || IsD3D12()));
+
+    // TODO(enga): Figure out why this fails on Metal Intel.
+    DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+    wgpu::TextureDescriptor depthStencilDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc,
+        kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    // Clear the depth to 0.3 and discard the stencil.
+    {
+        utils::ComboRenderPassDescriptor renderPassDescriptor({}, depthStencilTexture.CreateView());
+        renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 0.3;
+        renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+    }
+
+    // "all" subresources are not initialized; Stencil is not initialized
+    EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(depthStencilTexture.Get(), 0, 1,
+                                                                   0, 1, WGPUTextureAspect_All));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+    EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(
+                         depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+    // Check by copy that the stencil data is lazily cleared to 0.
+    std::vector<uint8_t> expected(kSize * kSize, 0);
+    EXPECT_LAZY_CLEAR(1u, EXPECT_TEXTURE_EQ(expected.data(), depthStencilTexture, {0, 0},
+                                            {kSize, kSize}, 0, wgpu::TextureAspect::StencilOnly));
+
+    // Everything is initialized now
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(depthStencilTexture.Get(), 0, 1,
+                                                                  0, 1, WGPUTextureAspect_All));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_DepthOnly));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        depthStencilTexture.Get(), 0, 1, 0, 1, WGPUTextureAspect_StencilOnly));
+
+    // Now load both depth and stencil. Stencil should be cleared and depth should stay the same
+    // at 0.3.
+    {
+        wgpu::TextureDescriptor colorDescriptor =
+            CreateTextureDescriptor(1, 1,
+                                    wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                        wgpu::TextureUsage::RenderAttachment,
+                                    kColorFormat);
+        wgpu::Texture colorTexture = device.CreateTexture(&colorDescriptor);
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()},
+                                                              depthStencilTexture.CreateView());
+        renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.SetPipeline(CreatePipelineForTest(0.3));
+        pass.Draw(6);
+        pass.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        // No lazy clear because stencil will clear using a loadOp.
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        // Expect the texture to be red because both the depth a stencil tests passed.
+        // Depth was 0.3 and stencil was 0
+        std::vector<RGBA8> expected(kSize * kSize, {255, 0, 0, 255});
+        EXPECT_TEXTURE_EQ(expected.data(), colorTexture, {0, 0}, {kSize, kSize});
+    }
+}
+
+// This tests the color attachments clear to 0s
+TEST_P(TextureZeroInitTest, ColorAttachmentsClear) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    utils::BasicRenderPass renderPass = utils::BasicRenderPass(kSize, kSize, texture, kColorFormat);
+    renderPass.renderPassInfo.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    std::vector<RGBA8> expected(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_TEXTURE_EQ(expected.data(), renderPass.color, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true,
+              dawn::native::IsTextureSubresourceInitialized(renderPass.color.Get(), 0, 1, 0, 1));
+}
+
+// This tests the clearing of sampled textures in render pass
+TEST_P(TextureZeroInitTest, RenderPassSampledTextureClear) {
+    // Create needed resources
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(1, 1, wgpu::TextureUsage::TextureBinding, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment, kColorFormat);
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+
+    // Create render pipeline
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.cTargets[0].format = kColorFormat;
+    renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+    renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    // Create bindgroup
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                     {{0, texture.CreateView()}});
+
+    // Encode pass and submit
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPassDesc({renderTexture.CreateView()});
+    renderPassDesc.cColorAttachments[0].clearValue = {1.0, 1.0, 1.0, 1.0};
+    renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // Expect 1 lazy clear for sampled texture
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    // Expect the rendered texture to be cleared
+    std::vector<RGBA8> expectedWithZeros(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_TEXTURE_EQ(expectedWithZeros.data(), renderTexture, {0, 0}, {kSize, kSize});
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(renderTexture.Get(), 0, 1, 0, 1));
+}
+
+// This is a regression test for a bug where a texture wouldn't get clear for a pass if at least
+// one of its subresources was used as an attachment. It tests that if a texture is used as both
+// sampled and attachment (with LoadOp::Clear so the lazy clear can be skipped) then the sampled
+// subresource is correctly cleared.
+TEST_P(TextureZeroInitTest, TextureBothSampledAndAttachmentClear) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // Create a 2D array texture, layer 0 will be used as attachment, layer 1 as sampled.
+    wgpu::TextureDescriptor texDesc;
+    texDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                    wgpu::TextureUsage::CopySrc;
+    texDesc.size = {1, 1, 2};
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture texture = device.CreateTexture(&texDesc);
+
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+    viewDesc.arrayLayerCount = 1;
+
+    viewDesc.baseArrayLayer = 0;
+    wgpu::TextureView attachmentView = texture.CreateView(&viewDesc);
+
+    viewDesc.baseArrayLayer = 1;
+    wgpu::TextureView sampleView = texture.CreateView(&viewDesc);
+
+    // Create render pipeline
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+    renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), {{0, sampleView}});
+
+    // Encode pass and submit
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPassDesc({attachmentView});
+    renderPassDesc.cColorAttachments[0].clearValue = {1.0, 1.0, 1.0, 1.0};
+    renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    // Expect the lazy clear for the sampled subresource.
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    // Expect both subresources to be zero: the sampled one with lazy-clearing and the attachment
+    // because it sampled the lazy-cleared sampled subresource.
+    EXPECT_TEXTURE_EQ(&RGBA8::kZero, texture, {0, 0, 0}, {1, 1});
+    EXPECT_TEXTURE_EQ(&RGBA8::kZero, texture, {0, 0, 1}, {1, 1});
+
+    // The whole texture is now initialized.
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 2));
+}
+
+// This tests the clearing of sampled textures during compute pass
+TEST_P(TextureZeroInitTest, ComputePassSampledTextureClear) {
+    // Create needed resources
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(1, 1, wgpu::TextureUsage::TextureBinding, kColorFormat);
+    descriptor.size.width = 1;
+    descriptor.size.height = 1;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    uint32_t bufferSize = kFormatBlockByteSize * sizeof(uint32_t);
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = bufferSize;
+    bufferDescriptor.usage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer bufferTex = device.CreateBuffer(&bufferDescriptor);
+    // Add data to buffer to ensure it is initialized
+    uint32_t data = 100;
+    queue.WriteBuffer(bufferTex, 0, &data, sizeof(data));
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    // Create compute pipeline
+    wgpu::ComputePipelineDescriptor computePipelineDescriptor;
+    wgpu::ProgrammableStageDescriptor compute;
+    const char* cs = R"(
+        @group(0) @binding(0) var tex : texture_2d<f32>;
+        struct Result {
+            value : vec4<f32>
+        }
+        @group(0) @binding(1) var<storage, read_write> result : Result;
+        @stage(compute) @workgroup_size(1) fn main() {
+           result.value = textureLoad(tex, vec2<i32>(0,0), 0);
+        }
+    )";
+    computePipelineDescriptor.compute.module = utils::CreateShaderModule(device, cs);
+    computePipelineDescriptor.compute.entryPoint = "main";
+    wgpu::ComputePipeline computePipeline =
+        device.CreateComputePipeline(&computePipelineDescriptor);
+
+    // Create bindgroup
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, computePipeline.GetBindGroupLayout(0),
+                             {{0, texture.CreateView()}, {1, bufferTex, 0, bufferSize}});
+
+    // Encode the pass and submit
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(computePipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Dispatch(1);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    // Expect the buffer to be zeroed out by the compute pass
+    std::vector<uint32_t> expectedWithZeros(bufferSize, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedWithZeros.data(), bufferTex, 0, kFormatBlockByteSize);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// This tests that the code path of CopyTextureToBuffer clears correctly for non-renderable textures
+TEST_P(TextureZeroInitTest, NonRenderableTextureClear) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
+    // from Snorm textures.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_snorm_read"));
+
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(1, 1, wgpu::TextureUsage::CopySrc, kNonrenderableColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
+    uint32_t bytesPerRow = Align(kSize * kFormatBlockByteSize, kTextureBytesPerRowAlignment);
+    uint32_t bufferSize = bytesPerRow * kSize;
+    std::vector<uint8_t> data(bufferSize, 100);
+    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<uint32_t> expectedWithZeros(bufferSize, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedWithZeros.data(), bufferDst, 0, kSize);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// This tests that the code path of CopyTextureToBuffer clears correctly for non-renderable textures
+TEST_P(TextureZeroInitTest, NonRenderableTextureClearUnalignedSize) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
+    // from Snorm textures.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_snorm_read"));
+
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(1, 1, wgpu::TextureUsage::CopySrc, kNonrenderableColorFormat);
+    descriptor.size.width = kUnalignedSize;
+    descriptor.size.height = kUnalignedSize;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
+    uint32_t bytesPerRow =
+        Align(kUnalignedSize * kFormatBlockByteSize, kTextureBytesPerRowAlignment);
+    uint32_t bufferSize = bytesPerRow * kUnalignedSize;
+    std::vector<uint8_t> data(bufferSize, 100);
+    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kUnalignedSize, kUnalignedSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<uint32_t> expectedWithZeros(bufferSize, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedWithZeros.data(), bufferDst, 0, kUnalignedSize);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// This tests that the code path of CopyTextureToBuffer clears correctly for non-renderable textures
+// with more than 1 array layers
+TEST_P(TextureZeroInitTest, NonRenderableTextureClearWithMultiArrayLayers) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
+    // from Snorm textures.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_snorm_read"));
+
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(1, 2, wgpu::TextureUsage::CopySrc, kNonrenderableColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
+    uint32_t bufferSize = kFormatBlockByteSize * kSize * kSize;
+    std::vector<uint8_t> data(bufferSize, 100);
+    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(bufferDst, 0, kSize * kFormatBlockByteSize);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 1});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    std::vector<uint32_t> expectedWithZeros(bufferSize, 0);
+    EXPECT_BUFFER_U32_RANGE_EQ(expectedWithZeros.data(), bufferDst, 0, 8);
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 1, 1));
+}
+
+// This tests that storeOp clear resets resource state to uninitialized.
+// Start with a sample texture that is initialized with data.
+// Then expect the render texture to not store the data from sample texture
+// because it will be lazy cleared by the EXPECT_TEXTURE_EQ call.
+TEST_P(TextureZeroInitTest, RenderPassStoreOpClear) {
+    // Create needed resources
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment, kColorFormat);
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+
+    // Fill the sample texture with data
+    std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize, 1);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * kFormatBlockByteSize);
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // Expect 0 lazy clears because the texture will be completely copied to
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    // Create render pipeline
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+    renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+    renderPipelineDescriptor.cTargets[0].format = kColorFormat;
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    // Create bindgroup
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                     {{0, texture.CreateView()}});
+
+    // Encode pass and submit
+    encoder = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPassDesc({renderTexture.CreateView()});
+    renderPassDesc.cColorAttachments[0].clearValue = {0.0, 0.0, 0.0, 0.0};
+    renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPassDesc.cColorAttachments[0].storeOp = wgpu::StoreOp::Discard;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    commands = encoder.Finish();
+    // Expect 0 lazy clears, sample texture is initialized by copyBufferToTexture and render texture
+    // is cleared by loadop
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    // Expect the rendered texture to be cleared
+    std::vector<RGBA8> expectedWithZeros(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_LAZY_CLEAR(
+        1u, EXPECT_TEXTURE_EQ(expectedWithZeros.data(), renderTexture, {0, 0}, {kSize, kSize}));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(renderTexture.Get(), 0, 1, 0, 1));
+}
+
+// This tests storeOp Clear on depth and stencil textures.
+// We put the depth stencil texture through 2 passes:
+// 1) LoadOp::Clear and StoreOp::Discard, fail the depth and stencil test set in the render
+//      pipeline. This means nothing is drawn and subresource is set as uninitialized.
+// 2) LoadOp::Load and StoreOp::Discard, pass the depth and stencil test set in the render pipeline.
+//      Because LoadOp is Load and the subresource is uninitialized, the texture will be cleared to
+//      0's This means the depth and stencil test will pass and the red square is drawn.
+TEST_P(TextureZeroInitTest, RenderingLoadingDepthStencilStoreOpClear) {
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::RenderAttachment,
+                                kColorFormat);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    wgpu::TextureDescriptor depthStencilDescriptor =
+        CreateTextureDescriptor(1, 1,
+                                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::CopyDst,
+                                kDepthStencilFormat);
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilDescriptor);
+
+    // Setup the renderPass for the first pass.
+    // We want to fail the depth and stencil test here so that nothing gets drawn and we can
+    // see that the subresource correctly gets set as unintialized in the second pass
+    utils::ComboRenderPassDescriptor renderPassDescriptor({srcTexture.CreateView()},
+                                                          depthStencilTexture.CreateView());
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue = 1u;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+    renderPassDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.SetPipeline(CreatePipelineForTest());
+        pass.Draw(6);
+        pass.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        // Expect 0 lazy clears, depth stencil texture will clear using loadop
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        // The depth stencil test should fail and not draw because the depth stencil texture is
+        // cleared to 1's by using loadOp clear and set values from descriptor.
+        std::vector<RGBA8> expectedBlack(kSize * kSize, {0, 0, 0, 0});
+        EXPECT_TEXTURE_EQ(expectedBlack.data(), srcTexture, {0, 0}, {kSize, kSize});
+
+        // Expect texture subresource initialized to be false since storeop is clear, sets
+        // subresource as uninitialized
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(depthStencilTexture.Get(), 0,
+                                                                       1, 0, 1));
+    }
+
+    // Now we put the depth stencil texture back into renderpass, it should be cleared by loadop
+    // because storeOp clear sets the subresource as uninitialized
+    {
+        renderPassDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPassDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.SetPipeline(CreatePipelineForTest());
+        pass.Draw(6);
+        pass.End();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        // Expect 0 lazy clears, depth stencil texture will clear using loadop
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        // Now the depth stencil test should pass since depth stencil texture is cleared to 0's by
+        // loadop load and uninitialized subresource, so we should have a red square
+        std::vector<RGBA8> expectedRed(kSize * kSize, {255, 0, 0, 255});
+        EXPECT_TEXTURE_EQ(expectedRed.data(), srcTexture, {0, 0}, {kSize, kSize});
+
+        // Expect texture subresource initialized to be false since storeop is clear, sets
+        // subresource as uninitialized
+        EXPECT_EQ(false, dawn::native::IsTextureSubresourceInitialized(depthStencilTexture.Get(), 0,
+                                                                       1, 0, 1));
+    }
+}
+
+// Test that if one mip of a texture is initialized and another is uninitialized, lazy clearing the
+// uninitialized mip does not clear the initialized mip.
+TEST_P(TextureZeroInitTest, PreservesInitializedMip) {
+    wgpu::TextureDescriptor sampleTextureDescriptor =
+        CreateTextureDescriptor(2, 1,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::TextureBinding,
+                                kColorFormat);
+    wgpu::Texture sampleTexture = device.CreateTexture(&sampleTextureDescriptor);
+
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment, kColorFormat);
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+
+    // Fill the sample texture's second mip with data
+    uint32_t mipSize = kSize >> 1;
+    std::vector<uint8_t> data(kFormatBlockByteSize * mipSize * mipSize, 2);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, mipSize * kFormatBlockByteSize);
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(sampleTexture, 1, {0, 0, 0});
+    wgpu::Extent3D copySize = {mipSize, mipSize, 1};
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // Expect 0 lazy clears because the texture subresource will be completely copied to
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    // Create render pipeline
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+    renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+    renderPipelineDescriptor.cTargets[0].format = kColorFormat;
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    // Create bindgroup
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                     {{0, sampleTexture.CreateView()}});
+
+    // Encode pass and submit
+    encoder = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPassDesc({renderTexture.CreateView()});
+    renderPassDesc.cColorAttachments[0].clearValue = {0.0, 0.0, 0.0, 0.0};
+    renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPassDesc.cColorAttachments[0].storeOp = wgpu::StoreOp::Discard;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    commands = encoder.Finish();
+    // Expect 1 lazy clears, because not all mips of the sample texture are initialized by
+    // copyBufferToTexture.
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    // Expect the rendered texture to be cleared since we copied from the uninitialized first
+    // mip.
+    std::vector<RGBA8> expectedWithZeros(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_LAZY_CLEAR(
+        1u, EXPECT_TEXTURE_EQ(expectedWithZeros.data(), renderTexture, {0, 0}, {kSize, kSize}, 0));
+
+    // Expect the first mip to have been lazy cleared to 0.
+    EXPECT_LAZY_CLEAR(
+        0u, EXPECT_TEXTURE_EQ(expectedWithZeros.data(), sampleTexture, {0, 0}, {kSize, kSize}, 0));
+
+    // Expect the second mip to still be filled with 2.
+    std::vector<RGBA8> expectedWithTwos(mipSize * mipSize, {2, 2, 2, 2});
+    EXPECT_LAZY_CLEAR(0u, EXPECT_TEXTURE_EQ(expectedWithTwos.data(), sampleTexture, {0, 0},
+                                            {mipSize, mipSize}, 1));
+
+    // Expect the whole texture to be initialized
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(sampleTexture.Get(), 0, 2, 0, 1));
+}
+
+// Test that if one layer of a texture is initialized and another is uninitialized, lazy clearing
+// the uninitialized layer does not clear the initialized layer.
+TEST_P(TextureZeroInitTest, PreservesInitializedArrayLayer) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureDescriptor sampleTextureDescriptor =
+        CreateTextureDescriptor(1, 2,
+                                wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                                    wgpu::TextureUsage::TextureBinding,
+                                kColorFormat);
+    wgpu::Texture sampleTexture = device.CreateTexture(&sampleTextureDescriptor);
+
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment, kColorFormat);
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+
+    // Fill the sample texture's second array layer with data
+    std::vector<uint8_t> data(kFormatBlockByteSize * kSize * kSize, 2);
+    wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
+        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBuffer =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, kSize * kFormatBlockByteSize);
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(sampleTexture, 0, {0, 0, 1});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // Expect 0 lazy clears because the texture subresource will be completely copied to
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    // Create render pipeline
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+    renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+    renderPipelineDescriptor.cTargets[0].format = kColorFormat;
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    // Only sample from the uninitialized first layer.
+    wgpu::TextureViewDescriptor textureViewDescriptor;
+    textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    textureViewDescriptor.arrayLayerCount = 1;
+
+    // Create bindgroup
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                             {{0, sampleTexture.CreateView(&textureViewDescriptor)}});
+
+    // Encode pass and submit
+    encoder = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPassDesc({renderTexture.CreateView()});
+    renderPassDesc.cColorAttachments[0].clearValue = {0.0, 0.0, 0.0, 0.0};
+    renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+    renderPassDesc.cColorAttachments[0].storeOp = wgpu::StoreOp::Discard;
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+    pass.SetPipeline(renderPipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    commands = encoder.Finish();
+    // Expect 1 lazy clears, because not all array layers of the sample texture are initialized by
+    // copyBufferToTexture.
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    // Expect the rendered texture to be cleared since we copied from the uninitialized first
+    // array layer.
+    std::vector<RGBA8> expectedWithZeros(kSize * kSize, {0, 0, 0, 0});
+    EXPECT_LAZY_CLEAR(
+        1u, EXPECT_TEXTURE_EQ(expectedWithZeros.data(), renderTexture, {0, 0, 0}, {kSize, kSize}));
+
+    // Expect the first array layer to have been lazy cleared to 0.
+    EXPECT_LAZY_CLEAR(
+        0u, EXPECT_TEXTURE_EQ(expectedWithZeros.data(), sampleTexture, {0, 0, 0}, {kSize, kSize}));
+
+    // Expect the second array layer to still be filled with 2.
+    std::vector<RGBA8> expectedWithTwos(kSize * kSize, {2, 2, 2, 2});
+    EXPECT_LAZY_CLEAR(
+        0u, EXPECT_TEXTURE_EQ(expectedWithTwos.data(), sampleTexture, {0, 0, 1}, {kSize, kSize}));
+
+    // Expect the whole texture to be initialized
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(sampleTexture.Get(), 0, 1, 0, 2));
+}
+
+// This is a regression test for crbug.com/dawn/451 where the lazy texture
+// init path on D3D12 had a divide-by-zero exception in the copy split logic.
+TEST_P(TextureZeroInitTest, CopyTextureToBufferNonRenderableUnaligned) {
+    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
+    // from Snorm textures.
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_snorm_read"));
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size.width = kUnalignedSize;
+    descriptor.size.height = kUnalignedSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.format = wgpu::TextureFormat::R8Snorm;
+    descriptor.usage = wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    {
+        uint32_t bytesPerRow = Align(kUnalignedSize, kTextureBytesPerRowAlignment);
+
+        // Create and initialize the destination buffer to ensure we only count the times of
+        // texture lazy initialization in this test.
+        const uint64_t bufferSize = kUnalignedSize * bytesPerRow;
+        const std::vector<uint8_t> initialBufferData(bufferSize, 0u);
+        wgpu::Buffer buffer = utils::CreateBufferFromData(device, initialBufferData.data(),
+                                                          bufferSize, wgpu::BufferUsage::CopyDst);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(buffer, 0, bytesPerRow);
+        wgpu::Extent3D copySize = {kUnalignedSize, kUnalignedSize, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+    }
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+}
+
+// In this test WriteTexture fully overwrites a texture
+TEST_P(TextureZeroInitTest, WriteWholeTexture) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize, kSize, 1};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kSize * kFormatBlockByteSize;
+    textureDataLayout.rowsPerImage = kSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    // The write overwrites the whole texture so we don't need to do lazy initialization.
+    EXPECT_LAZY_CLEAR(
+        0u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture initialized to be true
+    EXPECT_TRUE(dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+
+    EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0}, {kSize, kSize});
+}
+
+// Test WriteTexture to a subset of the texture, lazy init is necessary to clear the other
+// half.
+TEST_P(TextureZeroInitTest, WriteTextureHalf) {
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(4, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {kSize / 2, kSize, 1};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kSize * kFormatBlockByteSize / 2;
+    textureDataLayout.rowsPerImage = kSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    EXPECT_LAZY_CLEAR(
+        1u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, 0, 1));
+
+    std::vector<RGBA8> expectedZeros((kSize / 2) * kSize, {0, 0, 0, 0});
+    // first half filled with 100, by the data
+    EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0}, {kSize / 2, kSize});
+    // second half should be cleared
+    EXPECT_TEXTURE_EQ(expectedZeros.data(), texture, {kSize / 2, 0}, {kSize / 2, kSize});
+}
+
+// In this test WriteTexture fully overwrites a range of subresources, so lazy initialization
+// is needed for neither the subresources involved in the write nor the other subresources.
+TEST_P(TextureZeroInitTest, WriteWholeTextureArray) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        1, 6, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    constexpr uint32_t kBaseArrayLayer = 2u;
+    constexpr uint32_t kCopyLayerCount = 3u;
+
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, kBaseArrayLayer});
+    wgpu::Extent3D copySize = {kSize, kSize, kCopyLayerCount};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kSize * kFormatBlockByteSize;
+    textureDataLayout.rowsPerImage = kSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    // The write overwrites the whole subresources so we don't need to do lazy initialization on
+    // them.
+    EXPECT_LAZY_CLEAR(
+        0u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_TRUE(dawn::native::IsTextureSubresourceInitialized(texture.Get(), 0, 1, kBaseArrayLayer,
+                                                              kCopyLayerCount));
+
+    for (uint32_t layer = kBaseArrayLayer; layer < kBaseArrayLayer + kCopyLayerCount; ++layer) {
+        EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0, layer}, {kSize, kSize});
+    }
+}
+
+// Test WriteTexture to a subset of the subresource, lazy init is necessary to clear the other
+// half.
+TEST_P(TextureZeroInitTest, WriteTextureArrayHalf) {
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(4, 6,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    constexpr uint32_t kBaseArrayLayer = 2u;
+    constexpr uint32_t kCopyLayerCount = 3u;
+
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, kBaseArrayLayer});
+    wgpu::Extent3D copySize = {kSize / 2, kSize, kCopyLayerCount};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kSize * kFormatBlockByteSize / 2;
+    textureDataLayout.rowsPerImage = kSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    EXPECT_LAZY_CLEAR(
+        1u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture subresource initialized to be true
+    EXPECT_EQ(true, dawn::native::IsTextureSubresourceInitialized(
+                        texture.Get(), 0, 1, kBaseArrayLayer, kCopyLayerCount));
+
+    std::vector<RGBA8> expectedZeros((kSize / 2) * kSize, {0, 0, 0, 0});
+    for (uint32_t layer = kBaseArrayLayer; layer < kBaseArrayLayer + kCopyLayerCount; ++layer) {
+        // first half filled with 100, by the data
+        EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0, layer}, {kSize / 2, kSize});
+        // second half should be cleared
+        EXPECT_TEXTURE_EQ(expectedZeros.data(), texture, {kSize / 2, 0, layer}, {kSize / 2, kSize});
+    }
+}
+
+// In this test WriteTexture fully overwrites a texture at mip level.
+TEST_P(TextureZeroInitTest, WriteWholeTextureAtMipLevel) {
+    wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
+        4, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    constexpr uint32_t kMipLevel = 2;
+    constexpr uint32_t kMipSize = kSize >> kMipLevel;
+
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, kMipLevel, {0, 0, 0});
+    wgpu::Extent3D copySize = {kMipSize, kMipSize, 1};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kMipSize * kFormatBlockByteSize;
+    textureDataLayout.rowsPerImage = kMipSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    // The write overwrites the whole texture so we don't need to do lazy initialization.
+    EXPECT_LAZY_CLEAR(
+        0u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture initialized to be true
+    EXPECT_TRUE(dawn::native::IsTextureSubresourceInitialized(texture.Get(), kMipLevel, 1, 0, 1));
+
+    EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0}, {kMipSize, kMipSize}, kMipLevel);
+}
+
+// Test WriteTexture to a subset of the texture at mip level, lazy init is necessary to clear the
+// other half.
+TEST_P(TextureZeroInitTest, WriteTextureHalfAtMipLevel) {
+    wgpu::TextureDescriptor descriptor =
+        CreateTextureDescriptor(4, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                                    wgpu::TextureUsage::CopySrc,
+                                kColorFormat);
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    constexpr uint32_t kMipLevel = 2;
+    constexpr uint32_t kMipSize = kSize >> kMipLevel;
+
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, kMipLevel, {0, 0, 0});
+    wgpu::Extent3D copySize = {kMipSize / 2, kMipSize, 1};
+
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = 0;
+    textureDataLayout.bytesPerRow = kMipSize * kFormatBlockByteSize / 2;
+    textureDataLayout.rowsPerImage = kMipSize;
+
+    std::vector<RGBA8> data(
+        utils::RequiredBytesInCopy(textureDataLayout.bytesPerRow, textureDataLayout.rowsPerImage,
+                                   copySize, kColorFormat) /
+            sizeof(RGBA8),
+        {100, 100, 100, 100});
+
+    EXPECT_LAZY_CLEAR(
+        1u, queue.WriteTexture(&imageCopyTexture, data.data(), data.size() * sizeof(RGBA8),
+                               &textureDataLayout, &copySize));
+
+    // Expect texture initialized to be true
+    EXPECT_EQ(true,
+              dawn::native::IsTextureSubresourceInitialized(texture.Get(), kMipLevel, 1, 0, 1));
+
+    std::vector<RGBA8> expectedZeros((kMipSize / 2) * kMipSize, {0, 0, 0, 0});
+    // first half filled with 100, by the data
+    EXPECT_TEXTURE_EQ(data.data(), texture, {0, 0}, {kMipSize / 2, kMipSize}, kMipLevel);
+    // second half should be cleared
+    EXPECT_TEXTURE_EQ(expectedZeros.data(), texture, {kMipSize / 2, 0}, {kMipSize / 2, kMipSize},
+                      kMipLevel);
+}
+
+DAWN_INSTANTIATE_TEST(TextureZeroInitTest,
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"},
+                                   {"use_d3d12_render_pass"}),
+                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"}));
+
+class CompressedTextureZeroInitTest : public TextureZeroInitTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
+        if (!mIsBCFormatSupported) {
+            return {};
+        }
+
+        return {wgpu::FeatureName::TextureCompressionBC};
+    }
+
+    bool IsBCFormatSupported() const {
+        return mIsBCFormatSupported;
+    }
+
+    // Copy the compressed texture data into the destination texture.
+    void InitializeDataInCompressedTextureAndExpectLazyClear(
+        wgpu::Texture bcCompressedTexture,
+        wgpu::TextureDescriptor textureDescriptor,
+        wgpu::Extent3D copyExtent3D,
+        uint32_t viewMipmapLevel,
+        uint32_t baseArrayLayer,
+        size_t lazyClearCount) {
+        uint32_t copyWidthInBlock = copyExtent3D.width / kFormatBlockByteSize;
+        uint32_t copyHeightInBlock = copyExtent3D.height / kFormatBlockByteSize;
+        uint32_t copyBytesPerRow =
+            Align(copyWidthInBlock * utils::GetTexelBlockSizeInBytes(textureDescriptor.format),
+                  kTextureBytesPerRowAlignment);
+
+        std::vector<uint8_t> data(
+            utils::RequiredBytesInCopy(copyBytesPerRow, copyHeightInBlock, copyExtent3D,
+                                       textureDescriptor.format),
+            1);
+
+        // Copy texture data from a staging buffer to the destination texture.
+        wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(device, data.data(), data.size(),
+                                                                 wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(stagingBuffer, 0, copyBytesPerRow, copyHeightInBlock);
+
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(
+            bcCompressedTexture, viewMipmapLevel, {0, 0, baseArrayLayer});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copyExtent3D);
+        wgpu::CommandBuffer copy = encoder.Finish();
+        EXPECT_LAZY_CLEAR(lazyClearCount, queue.Submit(1, &copy));
+    }
+
+    // Run the tests that copies pre-prepared BC format data into a BC texture and verifies if we
+    // can render correctly with the pixel values sampled from the BC texture.
+    // Expect that the texture subresource is initialized
+    void TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        wgpu::TextureDescriptor textureDescriptor,
+        wgpu::Extent3D copyExtent3D,
+        wgpu::Extent3D nonPaddedCopyExtent,
+        uint32_t viewMipmapLevel,
+        uint32_t baseArrayLayer,
+        size_t lazyClearCount,
+        bool halfCopyTest = false) {
+        wgpu::Texture bcTexture = device.CreateTexture(&textureDescriptor);
+        InitializeDataInCompressedTextureAndExpectLazyClear(bcTexture, textureDescriptor,
+                                                            copyExtent3D, viewMipmapLevel,
+                                                            baseArrayLayer, lazyClearCount);
+
+        SampleCompressedTextureAndVerifyColor(bcTexture, textureDescriptor, copyExtent3D,
+                                              nonPaddedCopyExtent, viewMipmapLevel, baseArrayLayer,
+                                              halfCopyTest);
+    }
+
+    void SampleCompressedTextureAndVerifyColor(wgpu::Texture bcTexture,
+                                               wgpu::TextureDescriptor textureDescriptor,
+                                               wgpu::Extent3D copyExtent3D,
+                                               wgpu::Extent3D nonPaddedCopyExtent,
+                                               uint32_t viewMipmapLevel,
+                                               uint32_t baseArrayLayer,
+                                               bool halfCopyTest = false) {
+        // Sample the compressed texture and verify the texture colors in the render target
+        utils::BasicRenderPass renderPass =
+            utils::CreateBasicRenderPass(device, textureDescriptor.size.width >> viewMipmapLevel,
+                                         textureDescriptor.size.height >> viewMipmapLevel);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+            renderPipelineDescriptor.cTargets[0].format = kColorFormat;
+            renderPipelineDescriptor.vertex.module = CreateBasicVertexShaderForTest();
+            renderPipelineDescriptor.cFragment.module = CreateSampledTextureFragmentShaderForTest();
+            wgpu::RenderPipeline renderPipeline =
+                device.CreateRenderPipeline(&renderPipelineDescriptor);
+            pass.SetPipeline(renderPipeline);
+
+            wgpu::TextureViewDescriptor textureViewDescriptor = CreateTextureViewDescriptor(
+                viewMipmapLevel, baseArrayLayer, textureDescriptor.format);
+            wgpu::BindGroup bindGroup =
+                utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                     {{0, bcTexture.CreateView(&textureViewDescriptor)}});
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        std::vector<RGBA8> expected(nonPaddedCopyExtent.width * nonPaddedCopyExtent.height,
+                                    {0x00, 0x20, 0x08, 0xFF});
+        EXPECT_TEXTURE_EQ(expected.data(), renderPass.color, {0, 0},
+                          {nonPaddedCopyExtent.width, nonPaddedCopyExtent.height});
+        EXPECT_TRUE(dawn::native::IsTextureSubresourceInitialized(bcTexture.Get(), viewMipmapLevel,
+                                                                  1, baseArrayLayer, 1));
+
+        // If we only copied to half the texture, check the other half is initialized to black
+        if (halfCopyTest) {
+            std::vector<RGBA8> expectBlack(nonPaddedCopyExtent.width * nonPaddedCopyExtent.height,
+                                           {0x00, 0x00, 0x00, 0xFF});
+            EXPECT_TEXTURE_EQ(expectBlack.data(), renderPass.color, {copyExtent3D.width, 0},
+                              {nonPaddedCopyExtent.width, nonPaddedCopyExtent.height});
+        }
+    }
+
+    bool mIsBCFormatSupported = false;
+};
+
+//  Test that the clearing is skipped when we use a full mip copy (with the physical size different
+//  than the virtual mip size)
+TEST_P(CompressedTextureZeroInitTest, FullMipCopy) {
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    textureDescriptor.size = {60, 60, 1};
+    textureDescriptor.mipLevelCount = 1;
+    textureDescriptor.format = utils::kBCFormats[0];
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, textureDescriptor.size, textureDescriptor.size, 0, 0, 0u);
+}
+
+// Test that 1 lazy clear count happens when we copy to half the texture
+TEST_P(CompressedTextureZeroInitTest, HalfCopyBufferToTexture) {
+    // TODO(crbug.com/dawn/643): diagnose and fix this failure on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    constexpr static uint32_t kSize = 16;
+    textureDescriptor.size = {kSize, kSize, 1};
+    textureDescriptor.mipLevelCount = 1;
+    textureDescriptor.format = utils::kBCFormats[0];
+
+    wgpu::Extent3D copyExtent3D = {kSize / 2, kSize, 1};
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, copyExtent3D, copyExtent3D, 0, 0, 1u, true);
+}
+
+// Test that 0 lazy clear count happens when we copy buffer to texture to a nonzero mip level
+// (with physical size different from the virtual mip size)
+TEST_P(CompressedTextureZeroInitTest, FullCopyToNonZeroMipLevel) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    constexpr static uint32_t kSize = 60;
+    textureDescriptor.size = {kSize, kSize, 1};
+    textureDescriptor.mipLevelCount = 3;
+    textureDescriptor.format = utils::kBCFormats[0];
+    const uint32_t kViewMipLevel = 2;
+    const uint32_t kActualSizeAtLevel = kSize >> kViewMipLevel;
+
+    const uint32_t kCopySizeAtLevel = Align(kActualSizeAtLevel, kFormatBlockByteSize);
+
+    wgpu::Extent3D copyExtent3D = {kCopySizeAtLevel, kCopySizeAtLevel, 1};
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, copyExtent3D, {kActualSizeAtLevel, kActualSizeAtLevel, 1}, kViewMipLevel,
+        0, 0u);
+}
+
+// Test that 1 lazy clear count happens when we copy buffer to half texture to a nonzero mip level
+// (with physical size different from the virtual mip size)
+TEST_P(CompressedTextureZeroInitTest, HalfCopyToNonZeroMipLevel) {
+    // TODO(crbug.com/dawn/643): diagnose and fix this failure on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    constexpr static uint32_t kSize = 60;
+    textureDescriptor.size = {kSize, kSize, 1};
+    textureDescriptor.mipLevelCount = 3;
+    textureDescriptor.format = utils::kBCFormats[0];
+    const uint32_t kViewMipLevel = 2;
+    const uint32_t kActualSizeAtLevel = kSize >> kViewMipLevel;
+
+    const uint32_t kCopySizeAtLevel = Align(kActualSizeAtLevel, kFormatBlockByteSize);
+
+    wgpu::Extent3D copyExtent3D = {kCopySizeAtLevel / 2, kCopySizeAtLevel, 1};
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, copyExtent3D, {kActualSizeAtLevel / 2, kActualSizeAtLevel, 1},
+        kViewMipLevel, 0, 1u, true);
+}
+
+// Test that 0 lazy clear count happens when we copy buffer to nonzero array layer
+TEST_P(CompressedTextureZeroInitTest, FullCopyToNonZeroArrayLayer) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    constexpr static uint32_t kSize = 16;
+    constexpr static uint32_t kArrayLayers = 4;
+    textureDescriptor.size = {kSize, kSize, kArrayLayers};
+    textureDescriptor.mipLevelCount = 1;
+    textureDescriptor.format = utils::kBCFormats[0];
+
+    wgpu::Extent3D copyExtent3D = {kSize, kSize, 1};
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, copyExtent3D, copyExtent3D, 0, kArrayLayers - 2, 0u);
+}
+
+// Test that 1 lazy clear count happens when we copy buffer to half texture to a nonzero array layer
+TEST_P(CompressedTextureZeroInitTest, HalfCopyToNonZeroArrayLayer) {
+    // TODO(crbug.com/dawn/643): diagnose and fix this failure on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                              wgpu::TextureUsage::TextureBinding;
+    constexpr static uint32_t kSize = 16;
+    constexpr static uint32_t kArrayLayers = 4;
+    textureDescriptor.size = {kSize, kSize, kArrayLayers};
+    textureDescriptor.mipLevelCount = 3;
+    textureDescriptor.format = utils::kBCFormats[0];
+
+    wgpu::Extent3D copyExtent3D = {kSize / 2, kSize, 1};
+
+    TestCopyRegionIntoBCFormatTexturesAndCheckSubresourceIsInitialized(
+        textureDescriptor, copyExtent3D, copyExtent3D, 0, kArrayLayers - 2, 1u, true);
+}
+
+// full copy texture to texture, 0 lazy clears are needed
+TEST_P(CompressedTextureZeroInitTest, FullCopyTextureToTextureMipLevel) {
+    // TODO(crbug.com/dawn/593): This test uses glTextureView() which is not supported on OpenGL ES.
+    DAWN_TEST_UNSUPPORTED_IF(IsOpenGLES());
+
+    // create srcTexture and fill it with data
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(3, 1,
+                                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::CopyDst,
+                                utils::kBCFormats[0]);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    const uint32_t kViewMipLevel = 2;
+    const uint32_t kActualSizeAtLevel = kSize >> kViewMipLevel;
+
+    const uint32_t kCopySizeAtLevel = Align(kActualSizeAtLevel, kFormatBlockByteSize);
+
+    wgpu::Extent3D copyExtent3D = {kCopySizeAtLevel, kCopySizeAtLevel, 1};
+
+    // fill srcTexture with data
+    InitializeDataInCompressedTextureAndExpectLazyClear(srcTexture, srcDescriptor, copyExtent3D,
+                                                        kViewMipLevel, 0, 0u);
+
+    wgpu::ImageCopyTexture srcImageCopyTexture =
+        utils::CreateImageCopyTexture(srcTexture, kViewMipLevel, {0, 0, 0});
+
+    // create dstTexture that we will copy to
+    wgpu::TextureDescriptor dstDescriptor =
+        CreateTextureDescriptor(3, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::TextureBinding,
+                                utils::kBCFormats[0]);
+    wgpu::Texture dstTexture = device.CreateTexture(&dstDescriptor);
+
+    wgpu::ImageCopyTexture dstImageCopyTexture =
+        utils::CreateImageCopyTexture(dstTexture, kViewMipLevel, {0, 0, 0});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &copyExtent3D);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // the dstTexture does not need to be lazy cleared since it's fully copied to
+    EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commands));
+
+    SampleCompressedTextureAndVerifyColor(dstTexture, dstDescriptor, copyExtent3D,
+                                          {kActualSizeAtLevel, kActualSizeAtLevel, 1},
+                                          kViewMipLevel, 0);
+}
+
+// half copy texture to texture, lazy clears are needed for noncopied half
+TEST_P(CompressedTextureZeroInitTest, HalfCopyTextureToTextureMipLevel) {
+    // TODO(crbug.com/dawn/643): diagnose and fix this failure on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    // create srcTexture with data
+    wgpu::TextureDescriptor srcDescriptor =
+        CreateTextureDescriptor(3, 1,
+                                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::CopyDst,
+                                utils::kBCFormats[0]);
+    wgpu::Texture srcTexture = device.CreateTexture(&srcDescriptor);
+
+    const uint32_t kViewMipLevel = 2;
+    const uint32_t kActualSizeAtLevel = kSize >> kViewMipLevel;
+
+    const uint32_t kCopySizeAtLevel = Align(kActualSizeAtLevel, kFormatBlockByteSize);
+
+    wgpu::Extent3D copyExtent3D = {kCopySizeAtLevel / 2, kCopySizeAtLevel, 1};
+
+    // fill srcTexture with data
+    InitializeDataInCompressedTextureAndExpectLazyClear(srcTexture, srcDescriptor, copyExtent3D,
+                                                        kViewMipLevel, 0, 1u);
+
+    wgpu::ImageCopyTexture srcImageCopyTexture =
+        utils::CreateImageCopyTexture(srcTexture, kViewMipLevel, {0, 0, 0});
+
+    // create dstTexture that we will copy to
+    wgpu::TextureDescriptor dstDescriptor =
+        CreateTextureDescriptor(3, 1,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc |
+                                    wgpu::TextureUsage::TextureBinding,
+                                utils::kBCFormats[0]);
+    wgpu::Texture dstTexture = device.CreateTexture(&dstDescriptor);
+
+    wgpu::ImageCopyTexture dstImageCopyTexture =
+        utils::CreateImageCopyTexture(dstTexture, kViewMipLevel, {0, 0, 0});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &copyExtent3D);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    // expect 1 lazy clear count since the dstTexture needs to be lazy cleared when we only copy to
+    // half texture
+    EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commands));
+
+    SampleCompressedTextureAndVerifyColor(dstTexture, dstDescriptor, copyExtent3D,
+                                          {kActualSizeAtLevel / 2, kActualSizeAtLevel, 1},
+                                          kViewMipLevel, 0, true);
+}
+
+// Test uploading then reading back from a 2D array compressed texture.
+// This is a regression test for a bug where the final destination buffer
+// was considered fully initialized even though there was a 256-byte
+// stride between images.
+TEST_P(CompressedTextureZeroInitTest, Copy2DArrayCompressedB2T2B) {
+    // TODO(crbug.com/dawn/643): diagnose and fix this failure on OpenGL.
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+    // create srcTexture with data
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor(
+        4, 5, wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst, utils::kBCFormats[0]);
+    textureDescriptor.size = {8, 8, 5};
+    wgpu::Texture srcTexture = device.CreateTexture(&textureDescriptor);
+
+    uint32_t mipLevel = 2;
+    wgpu::Extent3D copyExtent3D = {4, 4, 5};
+
+    uint32_t copyWidthInBlock = copyExtent3D.width / kFormatBlockByteSize;
+    uint32_t copyHeightInBlock = copyExtent3D.height / kFormatBlockByteSize;
+    uint32_t copyRowsPerImage = copyHeightInBlock;
+    uint32_t copyBytesPerRow =
+        Align(copyWidthInBlock * utils::GetTexelBlockSizeInBytes(textureDescriptor.format),
+              kTextureBytesPerRowAlignment);
+
+    // Generate data to upload
+    std::vector<uint8_t> data(utils::RequiredBytesInCopy(copyBytesPerRow, copyRowsPerImage,
+                                                         copyExtent3D, textureDescriptor.format));
+    for (size_t i = 0; i < data.size(); ++i) {
+        data[i] = i % 255;
+    }
+
+    // Copy texture data from a staging buffer to the destination texture.
+    wgpu::Buffer stagingBuffer =
+        utils::CreateBufferFromData(device, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
+    wgpu::ImageCopyBuffer imageCopyBufferSrc =
+        utils::CreateImageCopyBuffer(stagingBuffer, 0, copyBytesPerRow, copyRowsPerImage);
+
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(srcTexture, mipLevel, {0, 0, 0});
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBufferSrc, &imageCopyTexture, &copyExtent3D);
+        wgpu::CommandBuffer copy = encoder.Finish();
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &copy));
+    }
+
+    // Create a buffer to read back the data. It is the same size as the upload buffer.
+    wgpu::BufferDescriptor readbackDesc = {};
+    readbackDesc.size = data.size();
+    readbackDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer readbackBuffer = device.CreateBuffer(&readbackDesc);
+
+    // Copy the texture to the readback buffer.
+    wgpu::ImageCopyBuffer imageCopyBufferDst =
+        utils::CreateImageCopyBuffer(readbackBuffer, 0, copyBytesPerRow, copyRowsPerImage);
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBufferDst, &copyExtent3D);
+        wgpu::CommandBuffer copy = encoder.Finish();
+
+        // Expect a lazy clear because the padding in the copy is not touched.
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &copy));
+    }
+
+    // Generate expected data. It is the same as the upload data, but padding is zero.
+    std::vector<uint8_t> expected(data.size(), 0);
+    for (uint32_t z = 0; z < copyExtent3D.depthOrArrayLayers; ++z) {
+        for (uint32_t y = 0; y < copyHeightInBlock; ++y) {
+            memcpy(&expected[copyBytesPerRow * y + copyBytesPerRow * copyRowsPerImage * z],
+                   &data[copyBytesPerRow * y + copyBytesPerRow * copyRowsPerImage * z],
+                   copyWidthInBlock * utils::GetTexelBlockSizeInBytes(textureDescriptor.format));
+        }
+    }
+    // Check final contents
+    EXPECT_BUFFER_U8_RANGE_EQ(expected.data(), readbackBuffer, 0, expected.size());
+}
+
+DAWN_INSTANTIATE_TEST(CompressedTextureZeroInitTest,
+                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"}),
+                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"}));
diff --git a/src/dawn/tests/end2end/VertexFormatTests.cpp b/src/dawn/tests/end2end/VertexFormatTests.cpp
new file mode 100644
index 0000000..07c3763
--- /dev/null
+++ b/src/dawn/tests/end2end/VertexFormatTests.cpp
@@ -0,0 +1,831 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// Vertex format tests all work the same way: the test will render a triangle.
+// Each test will set up a vertex buffer, and the vertex shader will check that
+// the vertex content is the same as what we expected. On success it outputs green,
+// otherwise red.
+
+constexpr uint32_t kRTSize = 1;
+constexpr uint32_t kVertexNum = 3;
+
+std::vector<uint16_t> Float32ToFloat16(std::vector<float> data) {
+    std::vector<uint16_t> expectedData;
+    for (auto& element : data) {
+        expectedData.push_back(Float32ToFloat16(element));
+    }
+    return expectedData;
+}
+
+template <typename destType, typename srcType>
+std::vector<destType> BitCast(std::vector<srcType> data) {
+    std::vector<destType> expectedData;
+    for (auto& element : data) {
+        expectedData.push_back(BitCast(element));
+    }
+    return expectedData;
+}
+
+class VertexFormatTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        // TODO(crbug.com/dawn/259): Failing because of a SPIRV-Cross issue.
+        DAWN_SUPPRESS_TEST_IF(IsMetal() && IsIntel());
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    }
+
+    utils::BasicRenderPass renderPass;
+
+    bool IsNormalizedFormat(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Unorm8x2:
+            case wgpu::VertexFormat::Unorm8x4:
+            case wgpu::VertexFormat::Snorm8x2:
+            case wgpu::VertexFormat::Snorm8x4:
+            case wgpu::VertexFormat::Unorm16x2:
+            case wgpu::VertexFormat::Unorm16x4:
+            case wgpu::VertexFormat::Snorm16x2:
+            case wgpu::VertexFormat::Snorm16x4:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    bool IsUnsignedFormat(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Uint32:
+            case wgpu::VertexFormat::Uint8x2:
+            case wgpu::VertexFormat::Uint8x4:
+            case wgpu::VertexFormat::Uint16x2:
+            case wgpu::VertexFormat::Uint16x4:
+            case wgpu::VertexFormat::Uint32x2:
+            case wgpu::VertexFormat::Uint32x3:
+            case wgpu::VertexFormat::Uint32x4:
+            case wgpu::VertexFormat::Unorm8x2:
+            case wgpu::VertexFormat::Unorm8x4:
+            case wgpu::VertexFormat::Unorm16x2:
+            case wgpu::VertexFormat::Unorm16x4:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    bool IsFloatFormat(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Float16x2:
+            case wgpu::VertexFormat::Float16x4:
+            case wgpu::VertexFormat::Float32:
+            case wgpu::VertexFormat::Float32x2:
+            case wgpu::VertexFormat::Float32x3:
+            case wgpu::VertexFormat::Float32x4:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    bool IsHalfFormat(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Float16x2:
+            case wgpu::VertexFormat::Float16x4:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    uint32_t BytesPerComponents(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Uint8x2:
+            case wgpu::VertexFormat::Uint8x4:
+            case wgpu::VertexFormat::Sint8x2:
+            case wgpu::VertexFormat::Sint8x4:
+            case wgpu::VertexFormat::Unorm8x2:
+            case wgpu::VertexFormat::Unorm8x4:
+            case wgpu::VertexFormat::Snorm8x2:
+            case wgpu::VertexFormat::Snorm8x4:
+                return 1;
+            case wgpu::VertexFormat::Uint16x2:
+            case wgpu::VertexFormat::Uint16x4:
+            case wgpu::VertexFormat::Unorm16x2:
+            case wgpu::VertexFormat::Unorm16x4:
+            case wgpu::VertexFormat::Sint16x2:
+            case wgpu::VertexFormat::Sint16x4:
+            case wgpu::VertexFormat::Snorm16x2:
+            case wgpu::VertexFormat::Snorm16x4:
+            case wgpu::VertexFormat::Float16x2:
+            case wgpu::VertexFormat::Float16x4:
+                return 2;
+            case wgpu::VertexFormat::Float32:
+            case wgpu::VertexFormat::Float32x2:
+            case wgpu::VertexFormat::Float32x3:
+            case wgpu::VertexFormat::Float32x4:
+            case wgpu::VertexFormat::Uint32:
+            case wgpu::VertexFormat::Uint32x2:
+            case wgpu::VertexFormat::Uint32x3:
+            case wgpu::VertexFormat::Uint32x4:
+            case wgpu::VertexFormat::Sint32:
+            case wgpu::VertexFormat::Sint32x2:
+            case wgpu::VertexFormat::Sint32x3:
+            case wgpu::VertexFormat::Sint32x4:
+                return 4;
+            default:
+                DAWN_UNREACHABLE();
+        }
+    }
+
+    uint32_t ComponentCount(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Float32:
+            case wgpu::VertexFormat::Uint32:
+            case wgpu::VertexFormat::Sint32:
+                return 1;
+            case wgpu::VertexFormat::Uint8x2:
+            case wgpu::VertexFormat::Sint8x2:
+            case wgpu::VertexFormat::Unorm8x2:
+            case wgpu::VertexFormat::Snorm8x2:
+            case wgpu::VertexFormat::Uint16x2:
+            case wgpu::VertexFormat::Sint16x2:
+            case wgpu::VertexFormat::Unorm16x2:
+            case wgpu::VertexFormat::Snorm16x2:
+            case wgpu::VertexFormat::Float16x2:
+            case wgpu::VertexFormat::Float32x2:
+            case wgpu::VertexFormat::Uint32x2:
+            case wgpu::VertexFormat::Sint32x2:
+                return 2;
+            case wgpu::VertexFormat::Float32x3:
+            case wgpu::VertexFormat::Uint32x3:
+            case wgpu::VertexFormat::Sint32x3:
+                return 3;
+            case wgpu::VertexFormat::Uint8x4:
+            case wgpu::VertexFormat::Sint8x4:
+            case wgpu::VertexFormat::Unorm8x4:
+            case wgpu::VertexFormat::Snorm8x4:
+            case wgpu::VertexFormat::Uint16x4:
+            case wgpu::VertexFormat::Sint16x4:
+            case wgpu::VertexFormat::Unorm16x4:
+            case wgpu::VertexFormat::Snorm16x4:
+            case wgpu::VertexFormat::Float16x4:
+            case wgpu::VertexFormat::Float32x4:
+            case wgpu::VertexFormat::Uint32x4:
+            case wgpu::VertexFormat::Sint32x4:
+                return 4;
+            default:
+                DAWN_UNREACHABLE();
+        }
+    }
+
+    std::string ShaderTypeGenerator(bool isFloat,
+                                    bool isNormalized,
+                                    bool isUnsigned,
+                                    uint32_t componentCount) {
+        std::string base;
+        if (isFloat || isNormalized) {
+            base = "f32";
+        } else if (isUnsigned) {
+            base = "u32";
+        } else {
+            base = "i32";
+        }
+
+        if (componentCount == 1) {
+            return base;
+        }
+
+        return "vec" + std::to_string(componentCount) + "<" + base + ">";
+    }
+
+    // The length of vertexData is fixed to 3, it aligns to triangle vertex number
+    template <typename T>
+    wgpu::RenderPipeline MakeTestPipeline(wgpu::VertexFormat format, std::vector<T>& expectedData) {
+        bool isFloat = IsFloatFormat(format);
+        bool isNormalized = IsNormalizedFormat(format);
+        bool isUnsigned = IsUnsignedFormat(format);
+        bool isInputTypeFloat = isFloat || isNormalized;
+        bool isHalf = IsHalfFormat(format);
+        const uint16_t kNegativeZeroInHalf = 0x8000;
+
+        uint32_t componentCount = ComponentCount(format);
+
+        std::string variableType =
+            ShaderTypeGenerator(isFloat, isNormalized, isUnsigned, componentCount);
+        std::string expectedDataType = ShaderTypeGenerator(isFloat, isNormalized, isUnsigned, 1);
+
+        std::ostringstream vs;
+        vs << "struct VertexIn {\n";
+        vs << "    @location(0) test : " << variableType << ",\n";
+        vs << "    @builtin(vertex_index) VertexIndex : u32,\n";
+        vs << "}\n";
+
+        // Because x86 CPU using "extended
+        // precision"(https://en.wikipedia.org/wiki/Extended_precision) during float
+        // math(https://developer.nvidia.com/sites/default/files/akamai/cuda/files/NVIDIA-CUDA-Floating-Point.pdf),
+        // move normalization and Float16ToFloat32 into shader to generate
+        // expected value.
+        vs << R"(
+            fn Float16ToFloat32(fp16 : u32) -> f32 {
+                let magic : u32 = (254u - 15u) << 23u;
+                let was_inf_nan : u32 = (127u + 16u) << 23u;
+                var fp32u : u32 = (fp16 & 0x7FFFu) << 13u;
+                let fp32 : f32 = bitcast<f32>(fp32u) * bitcast<f32>(magic);
+                fp32u = bitcast<u32>(fp32);
+                if (fp32 >= bitcast<f32>(was_inf_nan)) {
+                    fp32u = fp32u | (255u << 23u);
+                }
+                fp32u = fp32u | ((fp16 & 0x8000u) << 16u);
+                return bitcast<f32>(fp32u);
+            }
+
+            // NaN defination in IEEE 754-1985 is :
+            //   - sign = either 0 or 1.
+            //   - biased exponent = all 1 bits.
+            //   - fraction = anything except all 0 bits (since all 0 bits represents infinity).
+            // https://en.wikipedia.org/wiki/IEEE_754-1985#Representation_of_non-numbers
+            fn isNaNCustom(val: f32) -> bool {
+               let floatToUint: u32 = bitcast<u32>(val);
+               return (floatToUint & 0x7fffffffu) > 0x7f800000u;
+            }
+
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex)
+            fn main(input : VertexIn) -> VertexOut {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 2.0,  0.0),
+                    vec2<f32>( 0.0,  2.0));
+                var output : VertexOut;
+                output.position = vec4<f32>(pos[input.VertexIndex], 0.0, 1.0);
+        )";
+
+        // Declare expected values.
+        vs << "var expected : array<array<" << expectedDataType << ", "
+           << std::to_string(componentCount) << ">, " << std::to_string(kVertexNum) << ">;";
+        // Assign each elements in expected values
+        // e.g. expected[0][0] = u32(1u);
+        //      expected[0][1] = u32(2u);
+        for (uint32_t i = 0; i < kVertexNum; ++i) {
+            for (uint32_t j = 0; j < componentCount; ++j) {
+                vs << "    expected[" + std::to_string(i) + "][" + std::to_string(j) + "] = "
+                   << expectedDataType << "(";
+                if (isInputTypeFloat &&
+                    std::isnan(static_cast<float>(expectedData[i * componentCount + j]))) {
+                    // Set NaN.
+                    vs << "0.0 / 0.0);\n";
+                } else if (isNormalized) {
+                    // Move normalize operation into shader because of CPU and GPU precision
+                    // different on float math.
+                    vs << "max(f32(" << std::to_string(expectedData[i * componentCount + j])
+                       << ") / " << std::to_string(std::numeric_limits<T>::max())
+                       << ".0 , -1.0));\n";
+                } else if (isHalf) {
+                    // Becasue Vulkan and D3D12 handle -0.0f through bitcast have different
+                    // result (Vulkan take -0.0f as -0.0 but D3D12 take -0.0f as 0), add workaround
+                    // for -0.0f.
+                    if (static_cast<uint16_t>(expectedData[i * componentCount + j]) ==
+                        kNegativeZeroInHalf) {
+                        vs << "-0.0);\n";
+                    } else {
+                        vs << "Float16ToFloat32(u32("
+                           << std::to_string(expectedData[i * componentCount + j]) << ")));\n";
+                    }
+                } else if (isUnsigned) {
+                    vs << std::to_string(expectedData[i * componentCount + j]) << "u);\n";
+                } else {
+                    vs << std::to_string(expectedData[i * componentCount + j]) << ");\n";
+                }
+            }
+        }
+
+        vs << "    var success : bool = true;\n";
+        // Perform the checks by successively ANDing a boolean
+        for (uint32_t component = 0; component < componentCount; ++component) {
+            std::string suffix = componentCount == 1 ? "" : "[" + std::to_string(component) + "]";
+            std::string testVal = "testVal" + std::to_string(component);
+            std::string expectedVal = "expectedVal" + std::to_string(component);
+            vs << "    var " << testVal << " : " << expectedDataType << ";\n";
+            vs << "    var " << expectedVal << " : " << expectedDataType << ";\n";
+            vs << "    " << testVal << " = input.test" << suffix << ";\n";
+            vs << "    " << expectedVal << " = expected[input.VertexIndex]"
+               << "[" << component << "];\n";
+            if (!isInputTypeFloat) {  // Integer / unsigned integer need to match exactly.
+                vs << "    success = success && (" << testVal << " == " << expectedVal << ");\n";
+            } else {
+                // TODO(shaobo.yan@intel.com) : a difference of 8 ULPs is allowed in this test
+                // because it is required on MacbookPro 11.5,AMD Radeon HD 8870M(on macOS 10.13.6),
+                // but that it might be possible to tighten.
+                vs << "    if (isNaNCustom(" << expectedVal << ")) {\n";
+                vs << "       success = success && isNaNCustom(" << testVal << ");\n";
+                vs << "    } else {\n";
+                vs << "        let testValFloatToUint : u32 = bitcast<u32>(" << testVal << ");\n";
+                vs << "        let expectedValFloatToUint : u32 = bitcast<u32>(" << expectedVal
+                   << ");\n";
+                vs << "        success = success && max(testValFloatToUint, "
+                      "expectedValFloatToUint)";
+                vs << "        - min(testValFloatToUint, expectedValFloatToUint) < 8u;\n";
+                vs << "    }\n";
+            }
+        }
+        vs << R"(
+            if (success) {
+                output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            } else {
+                output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+            return output;
+        })";
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vs.str().c_str());
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                @stage(fragment)
+                fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+                    return color;
+                })");
+
+        uint32_t bytesPerComponents = BytesPerComponents(format);
+        uint32_t strideBytes = bytesPerComponents * componentCount;
+        // Stride size must be multiple of 4 bytes.
+        if (strideBytes % 4 != 0) {
+            strideBytes += (4 - strideBytes % 4);
+        }
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = strideBytes;
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = format;
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    template <typename VertexType, typename ExpectedType>
+    void DoVertexFormatTest(wgpu::VertexFormat format,
+                            std::vector<VertexType> vertex,
+                            std::vector<ExpectedType> expectedData) {
+        wgpu::RenderPipeline pipeline = MakeTestPipeline(format, expectedData);
+        wgpu::Buffer vertexBuffer = utils::CreateBufferFromData(
+            device, vertex.data(), vertex.size() * sizeof(VertexType), wgpu::BufferUsage::Vertex);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+    }
+};
+
+TEST_P(VertexFormatTest, Uint8x2) {
+    std::vector<uint8_t> vertexData = {
+        std::numeric_limits<uint8_t>::max(),
+        0,
+        0,  // padding two bytes for stride
+        0,
+        std::numeric_limits<uint8_t>::min(),
+        2,
+        0,
+        0,  // padding two bytes for stride
+        200,
+        201,
+        0,
+        0  // padding two bytes for buffer copy
+    };
+
+    std::vector<uint8_t> expectedData = {
+        std::numeric_limits<uint8_t>::max(), 0, std::numeric_limits<uint8_t>::min(), 2, 200, 201,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint8x2, vertexData, expectedData);
+}
+
+TEST_P(VertexFormatTest, Uint8x4) {
+    std::vector<uint8_t> vertexData = {
+        std::numeric_limits<uint8_t>::max(),
+        0,
+        1,
+        2,
+        std::numeric_limits<uint8_t>::min(),
+        2,
+        3,
+        4,
+        200,
+        201,
+        202,
+        203,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint8x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint8x2) {
+    std::vector<int8_t> vertexData = {
+        std::numeric_limits<int8_t>::max(),
+        0,
+        0,  // padding two bytes for stride
+        0,
+        std::numeric_limits<int8_t>::min(),
+        -2,
+        0,  // padding two bytes for stride
+        0,
+        120,
+        -121,
+        0,
+        0  // padding two bytes for buffer copy
+    };
+
+    std::vector<int8_t> expectedData = {
+        std::numeric_limits<int8_t>::max(), 0, std::numeric_limits<int8_t>::min(), -2, 120, -121,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint8x2, vertexData, expectedData);
+}
+
+TEST_P(VertexFormatTest, Sint8x4) {
+    std::vector<int8_t> vertexData = {
+        std::numeric_limits<int8_t>::max(),
+        0,
+        -1,
+        2,
+        std::numeric_limits<int8_t>::min(),
+        -2,
+        3,
+        4,
+        120,
+        -121,
+        122,
+        -123,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint8x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Unorm8x2) {
+    std::vector<uint8_t> vertexData = {
+        std::numeric_limits<uint8_t>::max(),
+        std::numeric_limits<uint8_t>::min(),
+        0,  // padding two bytes for stride
+        0,
+        std::numeric_limits<uint8_t>::max() / 2u,
+        std::numeric_limits<uint8_t>::min() / 2u,
+        0,  // padding two bytes for stride
+        0,
+        200,
+        201,
+        0,
+        0  // padding two bytes for buffer copy
+    };
+
+    std::vector<uint8_t> expectedData = {std::numeric_limits<uint8_t>::max(),
+                                         std::numeric_limits<uint8_t>::min(),
+                                         std::numeric_limits<uint8_t>::max() / 2u,
+                                         std::numeric_limits<uint8_t>::min() / 2u,
+                                         200,
+                                         201};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Unorm8x2, vertexData, expectedData);
+}
+
+TEST_P(VertexFormatTest, Unorm8x4) {
+    std::vector<uint8_t> vertexData = {std::numeric_limits<uint8_t>::max(),
+                                       std::numeric_limits<uint8_t>::min(),
+                                       0,
+                                       0,
+                                       std::numeric_limits<uint8_t>::max() / 2u,
+                                       std::numeric_limits<uint8_t>::min() / 2u,
+                                       0,
+                                       0,
+                                       200,
+                                       201,
+                                       202,
+                                       203};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Unorm8x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Snorm8x2) {
+    std::vector<int8_t> vertexData = {
+        std::numeric_limits<int8_t>::max(),
+        std::numeric_limits<int8_t>::min(),
+        0,  // padding two bytes for stride
+        0,
+        std::numeric_limits<int8_t>::max() / 2,
+        std::numeric_limits<int8_t>::min() / 2,
+        0,  // padding two bytes for stride
+        0,
+        120,
+        -121,
+        0,
+        0  // padding two bytes for buffer copy
+    };
+
+    std::vector<int8_t> expectedData = {
+        std::numeric_limits<int8_t>::max(),
+        std::numeric_limits<int8_t>::min(),
+        std::numeric_limits<int8_t>::max() / 2,
+        std::numeric_limits<int8_t>::min() / 2,
+        120,
+        -121,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Snorm8x2, vertexData, expectedData);
+}
+
+TEST_P(VertexFormatTest, Snorm8x4) {
+    std::vector<int8_t> vertexData = {std::numeric_limits<int8_t>::max(),
+                                      std::numeric_limits<int8_t>::min(),
+                                      0,
+                                      0,
+                                      std::numeric_limits<int8_t>::max() / 2,
+                                      std::numeric_limits<int8_t>::min() / 2,
+                                      -2,
+                                      2,
+                                      120,
+                                      -120,
+                                      102,
+                                      -123};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Snorm8x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint16x2) {
+    std::vector<uint16_t> vertexData = {std::numeric_limits<uint16_t>::max(),
+                                        0,
+                                        std::numeric_limits<uint16_t>::min(),
+                                        2,
+                                        65432,
+                                        4890};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint16x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint16x4) {
+    std::vector<uint16_t> vertexData = {
+        std::numeric_limits<uint16_t>::max(),
+        std::numeric_limits<uint8_t>::max(),
+        1,
+        2,
+        std::numeric_limits<uint16_t>::min(),
+        2,
+        3,
+        4,
+        65520,
+        65521,
+        3435,
+        3467,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint16x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint16x2) {
+    std::vector<int16_t> vertexData = {std::numeric_limits<int16_t>::max(),
+                                       0,
+                                       std::numeric_limits<int16_t>::min(),
+                                       -2,
+                                       3876,
+                                       -3948};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint16x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint16x4) {
+    std::vector<int16_t> vertexData = {
+        std::numeric_limits<int16_t>::max(),
+        0,
+        -1,
+        2,
+        std::numeric_limits<int16_t>::min(),
+        -2,
+        3,
+        4,
+        24567,
+        -23545,
+        4350,
+        -2987,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint16x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Unorm16x2) {
+    std::vector<uint16_t> vertexData = {std::numeric_limits<uint16_t>::max(),
+                                        std::numeric_limits<uint16_t>::min(),
+                                        std::numeric_limits<uint16_t>::max() / 2u,
+                                        std::numeric_limits<uint16_t>::min() / 2u,
+                                        3456,
+                                        6543};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Unorm16x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Unorm16x4) {
+    std::vector<uint16_t> vertexData = {std::numeric_limits<uint16_t>::max(),
+                                        std::numeric_limits<uint16_t>::min(),
+                                        0,
+                                        0,
+                                        std::numeric_limits<uint16_t>::max() / 2u,
+                                        std::numeric_limits<uint16_t>::min() / 2u,
+                                        0,
+                                        0,
+                                        2987,
+                                        3055,
+                                        2987,
+                                        2987};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Unorm16x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Snorm16x2) {
+    std::vector<int16_t> vertexData = {std::numeric_limits<int16_t>::max(),
+                                       std::numeric_limits<int16_t>::min(),
+                                       std::numeric_limits<int16_t>::max() / 2,
+                                       std::numeric_limits<int16_t>::min() / 2,
+                                       4987,
+                                       -6789};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Snorm16x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Snorm16x4) {
+    std::vector<int16_t> vertexData = {std::numeric_limits<int16_t>::max(),
+                                       std::numeric_limits<int16_t>::min(),
+                                       0,
+                                       0,
+                                       std::numeric_limits<int16_t>::max() / 2,
+                                       std::numeric_limits<int16_t>::min() / 2,
+                                       -2,
+                                       2,
+                                       2890,
+                                       -29011,
+                                       20432,
+                                       -2083};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Snorm16x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float16x2) {
+    // Fails on NVIDIA's Vulkan drivers on CQ but passes locally.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    std::vector<uint16_t> vertexData =
+        Float32ToFloat16(std::vector<float>({14.8f, -0.0f, 22.5f, 1.3f, +0.0f, -24.8f}));
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float16x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float16x4) {
+    // Fails on NVIDIA's Vulkan drivers on CQ but passes locally.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    std::vector<uint16_t> vertexData = Float32ToFloat16(std::vector<float>(
+        {+0.0f, -16.8f, 18.2f, -0.0f, 12.5f, 1.3f, 14.8f, -12.4f, 22.5f, -48.8f, 47.4f, -24.8f}));
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float16x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float32) {
+    std::vector<float> vertexData = {1.3f, +0.0f, -0.0f};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float32, vertexData, vertexData);
+
+    vertexData = std::vector<float>{+1.0f, -1.0f, 18.23f};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float32, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float32x2) {
+    // Fails on NVIDIA's Vulkan drivers on CQ but passes locally.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    std::vector<float> vertexData = {18.23f, -0.0f, +0.0f, +1.0f, 1.3f, -1.0f};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float32x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float32x3) {
+    // Fails on NVIDIA's Vulkan drivers on CQ but passes locally.
+    DAWN_SUPPRESS_TEST_IF(IsVulkan() && IsNvidia());
+
+    std::vector<float> vertexData = {
+        +0.0f, -1.0f, -0.0f, 1.0f, 1.3f, 99.45f, 23.6f, -81.2f, 55.0f,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float32x3, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Float32x4) {
+    std::vector<float> vertexData = {
+        19.2f, -19.3f, +0.0f, 1.0f, -0.0f, 1.0f, 1.3f, -1.0f, 13.078f, 21.1965f, -1.1f, -1.2f,
+    };
+
+    DoVertexFormatTest(wgpu::VertexFormat::Float32x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint32) {
+    std::vector<uint32_t> vertexData = {std::numeric_limits<uint32_t>::max(),
+                                        std::numeric_limits<uint16_t>::max(),
+                                        std::numeric_limits<uint8_t>::max()};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint32, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint32x2) {
+    std::vector<uint32_t> vertexData = {std::numeric_limits<uint32_t>::max(), 32,
+                                        std::numeric_limits<uint16_t>::max(), 64,
+                                        std::numeric_limits<uint8_t>::max(),  128};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint32x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint32x3) {
+    std::vector<uint32_t> vertexData = {std::numeric_limits<uint32_t>::max(), 32,   64,
+                                        std::numeric_limits<uint16_t>::max(), 164,  128,
+                                        std::numeric_limits<uint8_t>::max(),  1283, 256};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint32x3, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Uint32x4) {
+    std::vector<uint32_t> vertexData = {std::numeric_limits<uint32_t>::max(), 32,   64,  5460,
+                                        std::numeric_limits<uint16_t>::max(), 164,  128, 0,
+                                        std::numeric_limits<uint8_t>::max(),  1283, 256, 4567};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Uint32x4, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint32) {
+    std::vector<int32_t> vertexData = {std::numeric_limits<int32_t>::max(),
+                                       std::numeric_limits<int32_t>::min(),
+                                       std::numeric_limits<int8_t>::max()};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint32, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint32x2) {
+    std::vector<int32_t> vertexData = {
+        std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::min(),
+        std::numeric_limits<int16_t>::max(), std::numeric_limits<int16_t>::min(),
+        std::numeric_limits<int8_t>::max(),  std::numeric_limits<int8_t>::min()};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint32x2, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint32x3) {
+    std::vector<int32_t> vertexData = {
+        std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::min(), 64,
+        std::numeric_limits<int16_t>::max(), std::numeric_limits<int16_t>::min(), 128,
+        std::numeric_limits<int8_t>::max(),  std::numeric_limits<int8_t>::min(),  256};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint32x3, vertexData, vertexData);
+}
+
+TEST_P(VertexFormatTest, Sint32x4) {
+    std::vector<int32_t> vertexData = {
+        std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::min(), 64,   -5460,
+        std::numeric_limits<int16_t>::max(), std::numeric_limits<int16_t>::min(), -128, 0,
+        std::numeric_limits<int8_t>::max(),  std::numeric_limits<int8_t>::min(),  256,  -4567};
+
+    DoVertexFormatTest(wgpu::VertexFormat::Sint32x4, vertexData, vertexData);
+}
+
+DAWN_INSTANTIATE_TEST(VertexFormatTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/VertexOnlyRenderPipelineTests.cpp b/src/dawn/tests/end2end/VertexOnlyRenderPipelineTests.cpp
new file mode 100644
index 0000000..a56ebe2
--- /dev/null
+++ b/src/dawn/tests/end2end/VertexOnlyRenderPipelineTests.cpp
@@ -0,0 +1,319 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+constexpr uint32_t kRTWidth = 4;
+constexpr uint32_t kRTHeight = 1;
+
+class VertexOnlyRenderPipelineTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        vertexBuffer =
+            utils::CreateBufferFromData<float>(device, wgpu::BufferUsage::Vertex,
+                                               {// The middle back line
+                                                -0.5f, 0.0f, 0.0f, 1.0f, 0.5f, 0.0f, 0.0f, 1.0f,
+
+                                                // The right front line
+                                                0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f,
+
+                                                // The whole in-between line
+                                                -1.0f, 0.0f, 0.5f, 1.0f, 1.0f, 0.0f, 0.5f, 1.0f});
+
+        // Create a color texture as render target
+        {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.size = {kRTWidth, kRTHeight};
+            descriptor.format = kColorFormat;
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+            renderTargetColor = device.CreateTexture(&descriptor);
+        }
+
+        // Create a DepthStencilView for vertex-only pipeline to write and full pipeline to read
+        {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.size = {kRTWidth, kRTHeight};
+            descriptor.format = kDepthStencilFormat;
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+            depthStencilTexture = device.CreateTexture(&descriptor);
+            depthStencilView = depthStencilTexture.CreateView();
+        }
+
+        // The vertex-only render pass to modify the depth and stencil
+        renderPassDescNoColor = utils::ComboRenderPassDescriptor({}, depthStencilView);
+        renderPassDescNoColor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPassDescNoColor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+        // The complete render pass to read the depth and stencil and draw to color attachment
+        renderPassDescWithColor =
+            utils::ComboRenderPassDescriptor({renderTargetColor.CreateView()}, depthStencilView);
+        renderPassDescWithColor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPassDescWithColor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+
+        // Create a vertex-only render pipeline that only modify the depth in DepthStencilView, and
+        // ignore the stencil component
+        depthPipelineNoFragment =
+            CreateRenderPipeline(wgpu::CompareFunction::Always, wgpu::StencilOperation::Keep,
+                                 wgpu::CompareFunction::Always, true, false);
+        depthPipelineWithFragment =
+            CreateRenderPipeline(wgpu::CompareFunction::Always, wgpu::StencilOperation::Keep,
+                                 wgpu::CompareFunction::Always, true, true);
+
+        // Create a vertex-only render pipeline that only modify the stencil in DepthStencilView,
+        // and ignore the depth component
+        stencilPipelineNoFragment =
+            CreateRenderPipeline(wgpu::CompareFunction::Always, wgpu::StencilOperation::Replace,
+                                 wgpu::CompareFunction::Always, false, false);
+        stencilPipelineWithFragment =
+            CreateRenderPipeline(wgpu::CompareFunction::Always, wgpu::StencilOperation::Replace,
+                                 wgpu::CompareFunction::Always, false, true);
+
+        // Create a complete render pipeline that do both depth and stencil tests, and draw to color
+        // attachment
+        fullPipeline =
+            CreateRenderPipeline(wgpu::CompareFunction::Equal, wgpu::StencilOperation::Keep,
+                                 wgpu::CompareFunction::GreaterEqual, false, true);
+    }
+
+    wgpu::RenderPipeline CreateRenderPipeline(
+        wgpu::CompareFunction stencilCompare = wgpu::CompareFunction::Always,
+        wgpu::StencilOperation stencilPassOp = wgpu::StencilOperation::Keep,
+        wgpu::CompareFunction depthCompare = wgpu::CompareFunction::Always,
+        bool writeDepth = false,
+        bool useFragment = true) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@location(0) pos : vec4<f32>) -> @builtin(position) vec4<f32> {
+                return pos;
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::LineList;
+
+        descriptor.vertex.module = vsModule;
+        descriptor.vertex.bufferCount = 1;
+        descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+        descriptor.cBuffers[0].attributeCount = 1;
+        descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = kColorFormat;
+
+        wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil(kDepthStencilFormat);
+
+        depthStencil->stencilFront.compare = stencilCompare;
+        depthStencil->stencilBack.compare = stencilCompare;
+        depthStencil->stencilFront.passOp = stencilPassOp;
+        depthStencil->stencilBack.passOp = stencilPassOp;
+        depthStencil->depthWriteEnabled = writeDepth;
+        depthStencil->depthCompare = depthCompare;
+
+        if (!useFragment) {
+            descriptor.fragment = nullptr;
+        }
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    void ClearAttachment(const wgpu::CommandEncoder& encoder) {
+        utils::ComboRenderPassDescriptor clearPass =
+            utils::ComboRenderPassDescriptor({renderTargetColor.CreateView()}, depthStencilView);
+        clearPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+        clearPass.cDepthStencilAttachmentInfo.depthClearValue = 0.0f;
+        clearPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        clearPass.cDepthStencilAttachmentInfo.stencilClearValue = 0x0;
+        for (auto& t : clearPass.cColorAttachments) {
+            t.loadOp = wgpu::LoadOp::Clear;
+            t.clearValue = {0.0, 0.0, 0.0, 0.0};
+        }
+
+        auto pass = encoder.BeginRenderPass(&clearPass);
+        pass.End();
+    }
+
+    // Render resource
+    wgpu::Buffer vertexBuffer;
+    // Render target
+    wgpu::Texture depthStencilTexture;
+    wgpu::TextureView depthStencilView;
+    wgpu::Texture renderTargetColor;
+    // Render result
+    const RGBA8 filled = RGBA8(0, 255, 0, 255);
+    const RGBA8 notFilled = RGBA8(0, 0, 0, 0);
+    // Render pass
+    utils::ComboRenderPassDescriptor renderPassDescNoColor{};
+    utils::ComboRenderPassDescriptor renderPassDescWithColor{};
+    // Render pipeline
+    wgpu::RenderPipeline stencilPipelineNoFragment;
+    wgpu::RenderPipeline stencilPipelineWithFragment;
+    wgpu::RenderPipeline depthPipelineNoFragment;
+    wgpu::RenderPipeline depthPipelineWithFragment;
+    wgpu::RenderPipeline fullPipeline;
+};
+
+// Test that a vertex-only render pipeline modify the stencil attachment as same as a complete
+// render pipeline do.
+TEST_P(VertexOnlyRenderPipelineTest, Stencil) {
+    auto doStencilTest = [&](const wgpu::RenderPassDescriptor* renderPass,
+                             const wgpu::RenderPipeline& pipeline,
+                             const RGBA8& colorExpect) -> void {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        ClearAttachment(encoder);
+
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(renderPass);
+            pass.SetPipeline(pipeline);
+            // Set the stencil reference to a arbitrary value
+            pass.SetStencilReference(0x42);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            // Draw the whole line
+            pass.Draw(2, 1, 4, 0);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 1, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 2, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 3, 0);
+
+        // Test that the stencil is set to the chosen value
+        ExpectAttachmentStencilTestData(depthStencilTexture, kDepthStencilFormat, 4, 1, 0, 0, 0x42);
+    };
+
+    doStencilTest(&renderPassDescWithColor, stencilPipelineWithFragment, filled);
+    doStencilTest(&renderPassDescNoColor, stencilPipelineNoFragment, notFilled);
+}
+
+// Test that a vertex-only render pipeline modify the depth attachment as same as a complete render
+// pipeline do.
+TEST_P(VertexOnlyRenderPipelineTest, Depth) {
+    auto doStencilTest = [&](const wgpu::RenderPassDescriptor* renderPass,
+                             const wgpu::RenderPipeline& pipeline,
+                             const RGBA8& colorExpect) -> void {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        ClearAttachment(encoder);
+
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(renderPass);
+            pass.SetPipeline(pipeline);
+            pass.SetStencilReference(0x0);
+            pass.SetVertexBuffer(0, vertexBuffer);
+            // Draw the whole line
+            pass.Draw(2, 1, 4, 0);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 1, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 2, 0);
+        EXPECT_PIXEL_RGBA8_EQ(colorExpect, renderTargetColor, 3, 0);
+
+        // Test that the stencil is set to the chosen value
+        uint8_t expectedStencil = 0;
+        ExpectAttachmentDepthStencilTestData(depthStencilTexture, kDepthStencilFormat, 4, 1, 0, 0,
+                                             {0.5, 0.5, 0.5, 0.5}, &expectedStencil);
+    };
+
+    doStencilTest(&renderPassDescWithColor, depthPipelineWithFragment, filled);
+    doStencilTest(&renderPassDescNoColor, depthPipelineNoFragment, notFilled);
+}
+
+// Test that vertex-only render pipelines and complete render pipelines cooperate correctly in a
+// single encoder, each in a render pass
+// In this test we first draw with a vertex-only pipeline to set up stencil in a region, than draw
+// with another vertex-only pipeline to modify depth in another region, and finally draw with a
+// complete pipeline with depth and stencil tests enabled. We check the color result of the final
+// draw, and make sure that it correctly use the stencil and depth result set in previous
+// vertex-only pipelines.
+TEST_P(VertexOnlyRenderPipelineTest, MultiplePass) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    ClearAttachment(encoder);
+
+    // Use the stencil pipeline to set the stencil on the middle
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescNoColor);
+        pass.SetStencilReference(0x1);
+        pass.SetPipeline(stencilPipelineNoFragment);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        // Draw the middle line
+        pass.Draw(2, 1, 0, 0);
+        pass.End();
+    }
+
+    // Use the depth pipeline to set the depth on the right
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescNoColor);
+        pass.SetStencilReference(0x0);
+        pass.SetPipeline(depthPipelineNoFragment);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        // Draw the right line
+        pass.Draw(2, 1, 2, 0);
+        pass.End();
+    }
+
+    // Use the complete pipeline to draw with depth and stencil tests
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescWithColor);
+        pass.SetStencilReference(0x1);
+        pass.SetPipeline(fullPipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        // Draw the full line with depth and stencil tests
+        pass.Draw(2, 1, 4, 0);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Only the middle left pixel should pass both stencil and depth tests
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderTargetColor, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(filled, renderTargetColor, 1, 0);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderTargetColor, 2, 0);
+    EXPECT_PIXEL_RGBA8_EQ(notFilled, renderTargetColor, 3, 0);
+}
+
+DAWN_INSTANTIATE_TEST(VertexOnlyRenderPipelineTest,
+                      D3D12Backend(),
+                      D3D12Backend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      MetalBackend(),
+                      MetalBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      OpenGLBackend(),
+                      OpenGLBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      OpenGLESBackend(),
+                      OpenGLESBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      VulkanBackend(),
+                      VulkanBackend({"use_dummy_fragment_in_vertex_only_pipeline"}));
diff --git a/src/dawn/tests/end2end/VertexStateTests.cpp b/src/dawn/tests/end2end/VertexStateTests.cpp
new file mode 100644
index 0000000..874c714
--- /dev/null
+++ b/src/dawn/tests/end2end/VertexStateTests.cpp
@@ -0,0 +1,702 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+using wgpu::VertexFormat;
+using wgpu::VertexStepMode;
+
+// Input state tests all work the same way: the test will render triangles in a grid up to 4x4. Each
+// triangle is position in the grid such that X will correspond to the "triangle number" and the Y
+// to the instance number. Each test will set up an input state and buffers, and the vertex shader
+// will check that the vertex attributes corresponds to predetermined values. On success it outputs
+// green, otherwise red.
+//
+// The predetermined values are "K * gl_VertexID + componentIndex" for vertex-indexed buffers, and
+// "K * gl_InstanceID + componentIndex" for instance-indexed buffers.
+
+constexpr static unsigned int kRTSize = 400;
+constexpr static unsigned int kRTCellOffset = 50;
+constexpr static unsigned int kRTCellSize = 100;
+
+class VertexStateTest : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    }
+
+    bool ShouldComponentBeDefault(VertexFormat format, int component) {
+        EXPECT_TRUE(component >= 0 && component < 4);
+        switch (format) {
+            case VertexFormat::Float32x4:
+            case VertexFormat::Unorm8x4:
+                return component >= 4;
+            case VertexFormat::Float32x3:
+                return component >= 3;
+            case VertexFormat::Float32x2:
+            case VertexFormat::Unorm8x2:
+                return component >= 2;
+            case VertexFormat::Float32:
+                return component >= 1;
+            default:
+                DAWN_UNREACHABLE();
+        }
+    }
+
+    struct ShaderTestSpec {
+        uint32_t location;
+        VertexFormat format;
+        VertexStepMode step;
+    };
+    wgpu::RenderPipeline MakeTestPipeline(const utils::ComboVertexState& vertexState,
+                                          int multiplier,
+                                          const std::vector<ShaderTestSpec>& testSpec) {
+        std::ostringstream vs;
+        vs << "struct VertexIn {\n";
+
+        // TODO(cwallez@chromium.org): this only handles float attributes, we should extend it to
+        // other types Adds line of the form
+        //    @location(1) input1 : vec4<f32>;
+        for (const auto& input : testSpec) {
+            vs << "@location(" << input.location << ") input" << input.location
+               << " : vec4<f32>,\n";
+        }
+
+        vs << R"(
+                @builtin(vertex_index) VertexIndex : u32,
+                @builtin(instance_index) InstanceIndex : u32,
+            }
+
+            struct VertexOut {
+                @location(0) color : vec4<f32>,
+                @builtin(position) position : vec4<f32>,
+            }
+
+            @stage(vertex) fn main(input : VertexIn) -> VertexOut {
+                var output : VertexOut;
+        )";
+
+        // Hard code the triangle in the shader so that we don't have to add a vertex input for it.
+        // Also this places the triangle in the grid based on its VertexID and InstanceID
+        vs << "    var pos = array<vec2<f32>, 3>(\n"
+              "         vec2<f32>(0.5, 1.0), vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 0.0));\n";
+        vs << "    var offset : vec2<f32> = vec2<f32>(f32(input.VertexIndex / 3u), "
+              "f32(input.InstanceIndex));\n";
+        vs << "    var worldPos = pos[input.VertexIndex % 3u] + offset;\n";
+        vs << "    var position = vec4<f32>(0.5 * worldPos - vec2<f32>(1.0, 1.0), 0.0, "
+              "1.0);\n";
+        vs << "    output.position = vec4<f32>(position.x, -position.y, position.z, position.w);\n";
+
+        // Perform the checks by successively ANDing a boolean
+        vs << "    var success = true;\n";
+        for (const auto& input : testSpec) {
+            for (int component = 0; component < 4; ++component) {
+                vs << "    success = success && (input.input" << input.location << "[" << component
+                   << "] == ";
+                if (ShouldComponentBeDefault(input.format, component)) {
+                    vs << (component == 3 ? "1.0" : "0.0");
+                } else {
+                    if (input.step == VertexStepMode::Vertex) {
+                        vs << "f32(" << multiplier << "u * input.VertexIndex) + " << component
+                           << ".0";
+                    } else {
+                        vs << "f32(" << multiplier << "u * input.InstanceIndex) + " << component
+                           << ".0";
+                    }
+                }
+                vs << ");\n";
+            }
+        }
+
+        // Choose the color
+        vs << R"(
+            if (success) {
+                output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            } else {
+                output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+            return output;
+        })";
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vs.str().c_str());
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment)
+            fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+                return color;
+            }
+        )");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.vertex.bufferCount = vertexState.vertexBufferCount;
+        descriptor.vertex.buffers = &vertexState.cVertexBuffers[0];
+        descriptor.cTargets[0].format = renderPass.colorFormat;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    struct VertexAttributeSpec {
+        uint32_t location;
+        uint64_t offset;
+        VertexFormat format;
+    };
+    struct VertexBufferSpec {
+        uint64_t arrayStride;
+        VertexStepMode step;
+        std::vector<VertexAttributeSpec> attributes;
+    };
+
+    void MakeVertexState(const std::vector<VertexBufferSpec>& buffers,
+                         utils::ComboVertexState* vertexState) {
+        uint32_t vertexBufferCount = 0;
+        uint32_t totalNumAttributes = 0;
+        for (const VertexBufferSpec& buffer : buffers) {
+            vertexState->cVertexBuffers[vertexBufferCount].arrayStride = buffer.arrayStride;
+            vertexState->cVertexBuffers[vertexBufferCount].stepMode = buffer.step;
+
+            vertexState->cVertexBuffers[vertexBufferCount].attributes =
+                &vertexState->cAttributes[totalNumAttributes];
+
+            for (const VertexAttributeSpec& attribute : buffer.attributes) {
+                vertexState->cAttributes[totalNumAttributes].shaderLocation = attribute.location;
+                vertexState->cAttributes[totalNumAttributes].offset = attribute.offset;
+                vertexState->cAttributes[totalNumAttributes].format = attribute.format;
+                totalNumAttributes++;
+            }
+            vertexState->cVertexBuffers[vertexBufferCount].attributeCount =
+                static_cast<uint32_t>(buffer.attributes.size());
+
+            vertexBufferCount++;
+        }
+
+        vertexState->vertexBufferCount = vertexBufferCount;
+    }
+
+    template <typename T>
+    wgpu::Buffer MakeVertexBuffer(std::vector<T> data) {
+        return utils::CreateBufferFromData(device, data.data(),
+                                           static_cast<uint32_t>(data.size() * sizeof(T)),
+                                           wgpu::BufferUsage::Vertex);
+    }
+
+    struct DrawVertexBuffer {
+        uint32_t location;
+        wgpu::Buffer* buffer;
+    };
+    void DoTestDraw(const wgpu::RenderPipeline& pipeline,
+                    unsigned int triangles,
+                    unsigned int instances,
+                    std::vector<DrawVertexBuffer> vertexBuffers) {
+        EXPECT_LE(triangles, 4u);
+        EXPECT_LE(instances, 4u);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+
+        for (const DrawVertexBuffer& buffer : vertexBuffers) {
+            pass.SetVertexBuffer(buffer.location, *buffer.buffer);
+        }
+
+        pass.Draw(triangles * 3, instances);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        CheckResult(triangles, instances);
+    }
+
+    void CheckResult(unsigned int triangles, unsigned int instances) {
+        // Check that the center of each triangle is pure green, so that if a single vertex shader
+        // instance fails, linear interpolation makes the pixel check fail.
+        for (unsigned int triangle = 0; triangle < 4; triangle++) {
+            for (unsigned int instance = 0; instance < 4; instance++) {
+                unsigned int x = kRTCellOffset + kRTCellSize * triangle;
+                unsigned int y = kRTCellOffset + kRTCellSize * instance;
+                if (triangle < triangles && instance < instances) {
+                    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, x, y);
+                } else {
+                    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, x, y);
+                }
+            }
+        }
+    }
+
+    utils::BasicRenderPass renderPass;
+};
+
+// Test compilation and usage of the fixture :)
+TEST_P(VertexStateTest, Basic) {
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{4 * sizeof(float), VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3,
+        1, 2, 3, 4,
+        2, 3, 4, 5
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test a stride of 0 works
+TEST_P(VertexStateTest, ZeroStride) {
+    // This test was failing only on AMD but the OpenGL backend doesn't gather PCI info yet.
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsOpenGL());
+
+    utils::ComboVertexState vertexState;
+    MakeVertexState({{0, VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x4}}}}, &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 0, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0,
+        1,
+        2,
+        3,
+    });
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test attributes defaults to (0, 0, 0, 1) if the input state doesn't have all components
+TEST_P(VertexStateTest, AttributeExpanding) {
+    // This test was failing only on AMD but the OpenGL backend doesn't gather PCI info yet.
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsOpenGL());
+
+    // R32F case
+    {
+        utils::ComboVertexState vertexState;
+        MakeVertexState({{0, VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32}}}},
+                        &vertexState);
+        wgpu::RenderPipeline pipeline =
+            MakeTestPipeline(vertexState, 0, {{0, VertexFormat::Float32, VertexStepMode::Vertex}});
+
+        wgpu::Buffer buffer0 = MakeVertexBuffer<float>({0, 1, 2, 3});
+        DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+    }
+    // RG32F case
+    {
+        utils::ComboVertexState vertexState;
+        MakeVertexState({{0, VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x2}}}},
+                        &vertexState);
+        wgpu::RenderPipeline pipeline = MakeTestPipeline(
+            vertexState, 0, {{0, VertexFormat::Float32x2, VertexStepMode::Vertex}});
+
+        wgpu::Buffer buffer0 = MakeVertexBuffer<float>({0, 1, 2, 3});
+        DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+    }
+    // RGB32F case
+    {
+        utils::ComboVertexState vertexState;
+        MakeVertexState({{0, VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x3}}}},
+                        &vertexState);
+        wgpu::RenderPipeline pipeline = MakeTestPipeline(
+            vertexState, 0, {{0, VertexFormat::Float32x3, VertexStepMode::Vertex}});
+
+        wgpu::Buffer buffer0 = MakeVertexBuffer<float>({0, 1, 2, 3});
+        DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+    }
+}
+
+// Test a stride larger than the attributes
+TEST_P(VertexStateTest, StrideLargerThanAttributes) {
+    // This test was failing only on AMD but the OpenGL backend doesn't gather PCI info yet.
+    DAWN_SUPPRESS_TEST_IF(IsLinux() && IsOpenGL());
+
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{8 * sizeof(float), VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3, 0, 0, 0, 0,
+        1, 2, 3, 4, 0, 0, 0, 0,
+        2, 3, 4, 5, 0, 0, 0, 0,
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test two attributes at an offset, vertex version
+TEST_P(VertexStateTest, TwoAttributesAtAnOffsetVertex) {
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{8 * sizeof(float),
+          VertexStepMode::Vertex,
+          {{0, 0, VertexFormat::Float32x4}, {1, 4 * sizeof(float), VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3, 0, 1, 2, 3,
+        1, 2, 3, 4, 1, 2, 3, 4,
+        2, 3, 4, 5, 2, 3, 4, 5,
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test two attributes at an offset, instance version
+TEST_P(VertexStateTest, TwoAttributesAtAnOffsetInstance) {
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{8 * sizeof(float),
+          VertexStepMode::Instance,
+          {{0, 0, VertexFormat::Float32x4}, {1, 4 * sizeof(float), VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Instance}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3, 0, 1, 2, 3,
+        1, 2, 3, 4, 1, 2, 3, 4,
+        2, 3, 4, 5, 2, 3, 4, 5,
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test a pure-instance input state
+TEST_P(VertexStateTest, PureInstance) {
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{4 * sizeof(float), VertexStepMode::Instance, {{0, 0, VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Instance}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3,
+        1, 2, 3, 4,
+        2, 3, 4, 5,
+        3, 4, 5, 6,
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 4, {DrawVertexBuffer{0, &buffer0}});
+}
+
+// Test with mixed everything, vertex vs. instance, different stride and offsets
+// different attribute types
+TEST_P(VertexStateTest, MixedEverything) {
+    utils::ComboVertexState vertexState;
+    MakeVertexState(
+        {{12 * sizeof(float),
+          VertexStepMode::Vertex,
+          {{0, 0, VertexFormat::Float32}, {1, 6 * sizeof(float), VertexFormat::Float32x2}}},
+         {10 * sizeof(float),
+          VertexStepMode::Instance,
+          {{2, 0, VertexFormat::Float32x3}, {3, 5 * sizeof(float), VertexFormat::Float32x4}}}},
+        &vertexState);
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1,
+                         {{0, VertexFormat::Float32, VertexStepMode::Vertex},
+                          {1, VertexFormat::Float32x2, VertexStepMode::Vertex},
+                          {2, VertexFormat::Float32x3, VertexStepMode::Instance},
+                          {3, VertexFormat::Float32x4, VertexStepMode::Instance}});
+
+    // clang-format off
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({
+        0, 1, 2, 3, 0, 0, 0, 1, 2, 3, 0, 0,
+        1, 2, 3, 4, 0, 0, 1, 2, 3, 4, 0, 0,
+        2, 3, 4, 5, 0, 0, 2, 3, 4, 5, 0, 0,
+        3, 4, 5, 6, 0, 0, 3, 4, 5, 6, 0, 0,
+    });
+    wgpu::Buffer buffer1 = MakeVertexBuffer<float>({
+        0, 1, 2, 3, 0, 0, 1, 2, 3, 0,
+        1, 2, 3, 4, 0, 1, 2, 3, 4, 0,
+        2, 3, 4, 5, 0, 2, 3, 4, 5, 0,
+        3, 4, 5, 6, 0, 3, 4, 5, 6, 0,
+    });
+    // clang-format on
+    DoTestDraw(pipeline, 1, 1, {{0, &buffer0}, {1, &buffer1}});
+}
+
+// Test input state is unaffected by unused vertex slot
+TEST_P(VertexStateTest, UnusedVertexSlot) {
+    // Instance input state, using slot 1
+    utils::ComboVertexState instanceVertexState;
+    MakeVertexState(
+        {{0, VertexStepMode::Vertex, {}},
+         {4 * sizeof(float), VertexStepMode::Instance, {{0, 0, VertexFormat::Float32x4}}}},
+        &instanceVertexState);
+    wgpu::RenderPipeline instancePipeline = MakeTestPipeline(
+        instanceVertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Instance}});
+
+    // clang-format off
+    wgpu::Buffer buffer = MakeVertexBuffer<float>({
+        0, 1, 2, 3,
+        1, 2, 3, 4,
+        2, 3, 4, 5,
+        3, 4, 5, 6,
+    });
+    // clang-format on
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    pass.SetVertexBuffer(0, buffer);
+    pass.SetVertexBuffer(1, buffer);
+
+    pass.SetPipeline(instancePipeline);
+    pass.Draw(3, 4);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    CheckResult(1, 4);
+}
+
+// Test setting a different pipeline with a different input state.
+// This was a problem with the D3D12 backend where SetVertexBuffer
+// was getting the input from the last set pipeline, not the current.
+// SetVertexBuffer should be reapplied when the input state changes.
+TEST_P(VertexStateTest, MultiplePipelinesMixedVertexState) {
+    // Basic input state, using slot 0
+    utils::ComboVertexState vertexVertexState;
+    MakeVertexState(
+        {{4 * sizeof(float), VertexStepMode::Vertex, {{0, 0, VertexFormat::Float32x4}}}},
+        &vertexVertexState);
+    wgpu::RenderPipeline vertexPipeline = MakeTestPipeline(
+        vertexVertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    // Instance input state, using slot 1
+    utils::ComboVertexState instanceVertexState;
+    MakeVertexState(
+        {{0, VertexStepMode::Instance, {}},
+         {4 * sizeof(float), VertexStepMode::Instance, {{0, 0, VertexFormat::Float32x4}}}},
+        &instanceVertexState);
+    wgpu::RenderPipeline instancePipeline = MakeTestPipeline(
+        instanceVertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Instance}});
+
+    // clang-format off
+    wgpu::Buffer buffer = MakeVertexBuffer<float>({
+        0, 1, 2, 3,
+        1, 2, 3, 4,
+        2, 3, 4, 5,
+        3, 4, 5, 6,
+    });
+    // clang-format on
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+    pass.SetVertexBuffer(0, buffer);
+    pass.SetVertexBuffer(1, buffer);
+
+    pass.SetPipeline(vertexPipeline);
+    pass.Draw(3);
+
+    pass.SetPipeline(instancePipeline);
+    pass.Draw(3, 4);
+
+    pass.End();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    CheckResult(1, 4);
+}
+
+// Checks that using the last vertex buffer doesn't overflow the vertex buffer table in Metal.
+TEST_P(VertexStateTest, LastAllowedVertexBuffer) {
+    constexpr uint32_t kBufferIndex = kMaxVertexBuffers - 1;
+
+    utils::ComboVertexState vertexState;
+    // All the other vertex buffers default to no attributes
+    vertexState.vertexBufferCount = kMaxVertexBuffers;
+    vertexState.cVertexBuffers[kBufferIndex].arrayStride = 4 * sizeof(float);
+    vertexState.cVertexBuffers[kBufferIndex].stepMode = VertexStepMode::Vertex;
+    vertexState.cVertexBuffers[kBufferIndex].attributeCount = 1;
+    vertexState.cVertexBuffers[kBufferIndex].attributes = &vertexState.cAttributes[0];
+    vertexState.cAttributes[0].shaderLocation = 0;
+    vertexState.cAttributes[0].offset = 0;
+    vertexState.cAttributes[0].format = VertexFormat::Float32x4;
+
+    wgpu::RenderPipeline pipeline =
+        MakeTestPipeline(vertexState, 1, {{0, VertexFormat::Float32x4, VertexStepMode::Vertex}});
+
+    wgpu::Buffer buffer0 = MakeVertexBuffer<float>({0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5});
+    DoTestDraw(pipeline, 1, 1, {DrawVertexBuffer{kMaxVertexBuffers - 1, &buffer0}});
+}
+
+// Test that overlapping vertex attributes are permitted and load data correctly
+TEST_P(VertexStateTest, OverlappingVertexAttributes) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 3, 3);
+
+    utils::ComboVertexState vertexState;
+    MakeVertexState({{16,
+                      VertexStepMode::Vertex,
+                      {
+                          // "****" represents the bytes we'll actually read in the shader.
+                          {0, 0 /* offset */, VertexFormat::Float32x4},  // |****|----|----|----|
+                          {1, 4 /* offset */, VertexFormat::Uint32x2},   //      |****|****|
+                          {2, 8 /* offset */, VertexFormat::Float16x4},  //           |-----****|
+                          {3, 0 /* offset */, VertexFormat::Float32},    // |****|
+                      }}},
+                    &vertexState);
+
+    struct Data {
+        float fvalue;
+        uint32_t uints[2];
+        uint16_t halfs[2];
+    };
+    static_assert(sizeof(Data) == 16);
+    Data data{1.f, {2u, 3u}, {Float32ToFloat16(4.f), Float32ToFloat16(5.f)}};
+
+    wgpu::Buffer vertexBuffer =
+        utils::CreateBufferFromData(device, &data, sizeof(data), wgpu::BufferUsage::Vertex);
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+        struct VertexIn {
+            @location(0) attr0 : vec4<f32>,
+            @location(1) attr1 : vec2<u32>,
+            @location(2) attr2 : vec4<f32>,
+            @location(3) attr3 : f32,
+        }
+
+        struct VertexOut {
+            @location(0) color : vec4<f32>,
+            @builtin(position) position : vec4<f32>,
+        }
+
+        @stage(vertex) fn main(input : VertexIn) -> VertexOut {
+            var output : VertexOut;
+            output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+
+            var success : bool = (
+                input.attr0.x == 1.0 &&
+                input.attr1.x == 2u &&
+                input.attr1.y == 3u &&
+                input.attr2.z == 4.0 &&
+                input.attr2.w == 5.0 &&
+                input.attr3 == 1.0
+            );
+            if (success) {
+                output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            } else {
+                output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+            return output;
+        })");
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+        @stage(fragment)
+        fn main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+            return color;
+        })");
+    pipelineDesc.vertex.bufferCount = vertexState.vertexBufferCount;
+    pipelineDesc.vertex.buffers = &vertexState.cVertexBuffers[0];
+    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+    pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass.SetPipeline(pipeline);
+    pass.SetVertexBuffer(0, vertexBuffer);
+    pass.Draw(1);
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 1, 1);
+}
+
+DAWN_INSTANTIATE_TEST(VertexStateTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
+
+// TODO for the input state:
+//  - Add more vertex formats
+//  - Add checks that the stride is enough to contain all attributes
+//  - Add checks stride less than some limit
+//  - Add checks for alignement of vertex buffers and attributes if needed
+//  - Check for attribute narrowing
+//  - Check that the input state and the pipeline vertex input types match
+
+class OptionalVertexStateTest : public DawnTest {};
+
+// Test that vertex input is not required in render pipeline descriptor.
+TEST_P(OptionalVertexStateTest, Basic) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 3, 3);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    descriptor.vertex.bufferCount = 0;
+    descriptor.vertex.buffers = nullptr;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 1, 1);
+}
+
+DAWN_INSTANTIATE_TEST(OptionalVertexStateTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/VideoViewsTests.cpp b/src/dawn/tests/end2end/VideoViewsTests.cpp
new file mode 100644
index 0000000..a037f0a
--- /dev/null
+++ b/src/dawn/tests/end2end/VideoViewsTests.cpp
@@ -0,0 +1,400 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "VideoViewsTests.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+VideoViewsTestBackend::PlatformTexture::PlatformTexture(wgpu::Texture&& texture)
+    : wgpuTexture(texture) {
+}
+VideoViewsTestBackend::PlatformTexture::~PlatformTexture() = default;
+
+VideoViewsTestBackend::~VideoViewsTestBackend() = default;
+
+constexpr std::array<RGBA8, 2> VideoViewsTests::kYellowYUVColor;
+constexpr std::array<RGBA8, 2> VideoViewsTests::kWhiteYUVColor;
+constexpr std::array<RGBA8, 2> VideoViewsTests::kBlueYUVColor;
+constexpr std::array<RGBA8, 2> VideoViewsTests::kRedYUVColor;
+
+void VideoViewsTests::SetUp() {
+    DawnTest::SetUp();
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    DAWN_TEST_UNSUPPORTED_IF(!IsMultiPlanarFormatsSupported());
+
+    mBackend = VideoViewsTestBackend::Create();
+    mBackend->OnSetUp(device.Get());
+}
+
+void VideoViewsTests::TearDown() {
+    if (!UsesWire() && IsMultiPlanarFormatsSupported()) {
+        mBackend->OnTearDown();
+    }
+    DawnTest::TearDown();
+}
+
+std::vector<wgpu::FeatureName> VideoViewsTests::GetRequiredFeatures() {
+    std::vector<wgpu::FeatureName> requiredFeatures = {};
+    mIsMultiPlanarFormatsSupported = SupportsFeatures({wgpu::FeatureName::DawnMultiPlanarFormats});
+    if (mIsMultiPlanarFormatsSupported) {
+        requiredFeatures.push_back(wgpu::FeatureName::DawnMultiPlanarFormats);
+    }
+    requiredFeatures.push_back(wgpu::FeatureName::DawnInternalUsages);
+    return requiredFeatures;
+}
+
+bool VideoViewsTests::IsMultiPlanarFormatsSupported() const {
+    return mIsMultiPlanarFormatsSupported;
+}
+
+// Returns a pre-prepared multi-planar formatted texture
+// The encoded texture data represents a 4x4 converted image. When |isCheckerboard| is true,
+// the top left is a 2x2 yellow block, bottom right is a 2x2 red block, top right is a 2x2
+// blue block, and bottom left is a 2x2 white block. When |isCheckerboard| is false, the
+// image is converted from a solid yellow 4x4 block.
+// static
+std::vector<uint8_t> VideoViewsTests::GetTestTextureData(wgpu::TextureFormat format,
+                                                         bool isCheckerboard) {
+    constexpr uint8_t Yy = kYellowYUVColor[kYUVLumaPlaneIndex].r;
+    constexpr uint8_t Yu = kYellowYUVColor[kYUVChromaPlaneIndex].r;
+    constexpr uint8_t Yv = kYellowYUVColor[kYUVChromaPlaneIndex].g;
+
+    switch (format) {
+        // The first 16 bytes is the luma plane (Y), followed by the chroma plane (UV) which
+        // is half the number of bytes (subsampled by 2) but same bytes per line as luma
+        // plane.
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            if (isCheckerboard) {
+                constexpr uint8_t Wy = kWhiteYUVColor[kYUVLumaPlaneIndex].r;
+                constexpr uint8_t Wu = kWhiteYUVColor[kYUVChromaPlaneIndex].r;
+                constexpr uint8_t Wv = kWhiteYUVColor[kYUVChromaPlaneIndex].g;
+
+                constexpr uint8_t Ry = kRedYUVColor[kYUVLumaPlaneIndex].r;
+                constexpr uint8_t Ru = kRedYUVColor[kYUVChromaPlaneIndex].r;
+                constexpr uint8_t Rv = kRedYUVColor[kYUVChromaPlaneIndex].g;
+
+                constexpr uint8_t By = kBlueYUVColor[kYUVLumaPlaneIndex].r;
+                constexpr uint8_t Bu = kBlueYUVColor[kYUVChromaPlaneIndex].r;
+                constexpr uint8_t Bv = kBlueYUVColor[kYUVChromaPlaneIndex].g;
+
+                // clang-format off
+                        return {
+                            Wy, Wy, Ry, Ry, // plane 0, start + 0
+                            Wy, Wy, Ry, Ry,
+                            Yy, Yy, By, By,
+                            Yy, Yy, By, By,
+                            Wu, Wv, Ru, Rv, // plane 1, start + 16
+                            Yu, Yv, Bu, Bv,
+                        };
+                // clang-format on
+            } else {
+                // clang-format off
+                        return {
+                            Yy, Yy, Yy, Yy,  // plane 0, start + 0
+                            Yy, Yy, Yy, Yy,
+                            Yy, Yy, Yy, Yy,
+                            Yy, Yy, Yy, Yy,
+                            Yu, Yv, Yu, Yv,  // plane 1, start + 16
+                            Yu, Yv, Yu, Yv,
+                        };
+                // clang-format on
+            }
+        default:
+            UNREACHABLE();
+            return {};
+    }
+}
+
+uint32_t VideoViewsTests::NumPlanes(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            return 2;
+        default:
+            UNREACHABLE();
+            return 0;
+    }
+}
+std::vector<uint8_t> VideoViewsTests::GetTestTextureDataWithPlaneIndex(size_t planeIndex,
+                                                                       size_t bytesPerRow,
+                                                                       size_t height,
+                                                                       bool isCheckerboard) {
+    std::vector<uint8_t> texelData = VideoViewsTests::GetTestTextureData(
+        wgpu::TextureFormat::R8BG8Biplanar420Unorm, isCheckerboard);
+    const uint32_t texelDataRowBytes = kYUVImageDataWidthInTexels;
+    const uint32_t texelDataHeight =
+        planeIndex == 0 ? kYUVImageDataHeightInTexels : kYUVImageDataHeightInTexels / 2;
+
+    std::vector<uint8_t> texels(bytesPerRow * height, 0);
+    uint32_t plane_first_texel_offset = 0;
+    // The size of the test video frame is 4 x 4
+    switch (planeIndex) {
+        case VideoViewsTests::kYUVLumaPlaneIndex:
+            for (uint32_t i = 0; i < texelDataHeight; ++i) {
+                if (i < texelDataHeight) {
+                    for (uint32_t j = 0; j < texelDataRowBytes; ++j) {
+                        texels[bytesPerRow * i + j] =
+                            texelData[texelDataRowBytes * i + j + plane_first_texel_offset];
+                    }
+                }
+            }
+            return texels;
+        case VideoViewsTests::kYUVChromaPlaneIndex:
+            // TexelData is 4 * 6 size, first 4 * 4 is Y plane, UV plane started
+            // at index 16.
+            plane_first_texel_offset = 16;
+            for (uint32_t i = 0; i < texelDataHeight; ++i) {
+                if (i < texelDataHeight) {
+                    for (uint32_t j = 0; j < texelDataRowBytes; ++j) {
+                        texels[bytesPerRow * i + j] =
+                            texelData[texelDataRowBytes * i + j + plane_first_texel_offset];
+                    }
+                }
+            }
+            return texels;
+        default:
+            UNREACHABLE();
+            return {};
+    }
+}
+
+// Vertex shader used to render a sampled texture into a quad.
+wgpu::ShaderModule VideoViewsTests::GetTestVertexShaderModule() const {
+    return utils::CreateShaderModule(device, R"(
+                struct VertexOut {
+                    @location(0) texCoord : vec2 <f32>,
+                    @builtin(position) position : vec4<f32>,
+                }
+
+                @stage(vertex)
+                fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+                    var pos = array<vec2<f32>, 6>(
+                        vec2<f32>(-1.0, 1.0),
+                        vec2<f32>(-1.0, -1.0),
+                        vec2<f32>(1.0, -1.0),
+                        vec2<f32>(-1.0, 1.0),
+                        vec2<f32>(1.0, -1.0),
+                        vec2<f32>(1.0, 1.0)
+                    );
+                    var output : VertexOut;
+                    output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+                    output.texCoord = vec2<f32>(output.position.xy * 0.5) + vec2<f32>(0.5, 0.5);
+                    return output;
+            })");
+}
+
+// Samples the luminance (Y) plane from an imported NV12 texture into a single channel of an RGBA
+// output attachment and checks for the expected pixel value in the rendered quad.
+TEST_P(VideoViewsTests, NV12SampleYtoR) {
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
+        mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                            wgpu::TextureUsage::TextureBinding,
+                                            /*isCheckerboard*/ false);
+    ASSERT_NE(platformTexture.get(), nullptr);
+    if (!platformTexture->CanWrapAsWGPUTexture()) {
+        mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+        GTEST_SKIP() << "Skipped because not supported.";
+    }
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    wgpu::TextureView textureView = platformTexture->wgpuTexture.CreateView(&viewDesc);
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
+
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+               let y : f32 = textureSample(texture, sampler0, texCoord).r;
+               return vec4<f32>(y, 0.0, 0.0, 1.0);
+            })");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
+    renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(renderPipeline);
+        pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, sampler}, {1, textureView}}));
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Test the luma plane in the top left corner of RGB image.
+    EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVLumaPlaneIndex], renderPass.color, 0, 0);
+    mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+}
+
+// Samples the chrominance (UV) plane from an imported texture into two channels of an RGBA output
+// attachment and checks for the expected pixel value in the rendered quad.
+TEST_P(VideoViewsTests, NV12SampleUVtoRG) {
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
+        mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                            wgpu::TextureUsage::TextureBinding,
+                                            /*isCheckerboard*/ false);
+    ASSERT_NE(platformTexture.get(), nullptr);
+    if (!platformTexture->CanWrapAsWGPUTexture()) {
+        mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+        GTEST_SKIP() << "Skipped because not supported.";
+    }
+
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    wgpu::TextureView textureView = platformTexture->wgpuTexture.CreateView(&viewDesc);
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
+
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var texture : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+               let u : f32 = textureSample(texture, sampler0, texCoord).r;
+               let v : f32 = textureSample(texture, sampler0, texCoord).g;
+               return vec4<f32>(u, v, 0.0, 1.0);
+            })");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
+    renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+    renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(renderPipeline);
+        pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, sampler}, {1, textureView}}));
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Test the chroma plane in the top left corner of RGB image.
+    EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVChromaPlaneIndex], renderPass.color, 0, 0);
+    mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+}
+
+// Renders a NV12 "checkerboard" texture into a RGB quad then checks the color at specific
+// points to ensure the image has not been flipped.
+TEST_P(VideoViewsTests, NV12SampleYUVtoRGB) {
+    // TODO(https://crbug.com/dawn/733): Figure out why Nvidia bot occasionally fails testing all
+    // four corners.
+    DAWN_SUPPRESS_TEST_IF(IsNvidia());
+
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
+        mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                            wgpu::TextureUsage::TextureBinding,
+                                            /*isCheckerboard*/ true);
+    ASSERT_NE(platformTexture.get(), nullptr);
+    if (!platformTexture->CanWrapAsWGPUTexture()) {
+        mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+        GTEST_SKIP() << "Skipped because not supported.";
+    }
+
+    wgpu::TextureViewDescriptor lumaViewDesc;
+    lumaViewDesc.format = wgpu::TextureFormat::R8Unorm;
+    lumaViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    wgpu::TextureView lumaTextureView = platformTexture->wgpuTexture.CreateView(&lumaViewDesc);
+
+    wgpu::TextureViewDescriptor chromaViewDesc;
+    chromaViewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    wgpu::TextureView chromaTextureView = platformTexture->wgpuTexture.CreateView(&chromaViewDesc);
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
+
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @group(0) @binding(1) var lumaTexture : texture_2d<f32>;
+            @group(0) @binding(2) var chromaTexture : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@location(0) texCoord : vec2<f32>) -> @location(0) vec4<f32> {
+               let y : f32 = textureSample(lumaTexture, sampler0, texCoord).r;
+               let u : f32 = textureSample(chromaTexture, sampler0, texCoord).r;
+               let v : f32 = textureSample(chromaTexture, sampler0, texCoord).g;
+               return vec4<f32>(y, u, v, 1.0);
+            })");
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
+    renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(renderPipeline);
+        pass.SetBindGroup(
+            0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                    {{0, sampler}, {1, lumaTextureView}, {2, chromaTextureView}}));
+        pass.Draw(6);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Test four corners of the checkerboard image (YUV color space).
+    RGBA8 yellowYUV(kYellowYUVColor[kYUVLumaPlaneIndex].r, kYellowYUVColor[kYUVChromaPlaneIndex].r,
+                    kYellowYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
+    EXPECT_PIXEL_RGBA8_EQ(yellowYUV, renderPass.color, 0, 0);  // top left
+
+    RGBA8 redYUV(kRedYUVColor[kYUVLumaPlaneIndex].r, kRedYUVColor[kYUVChromaPlaneIndex].r,
+                 kRedYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
+    EXPECT_PIXEL_RGBA8_EQ(redYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
+                          kYUVImageDataHeightInTexels - 1);  // bottom right
+
+    RGBA8 blueYUV(kBlueYUVColor[kYUVLumaPlaneIndex].r, kBlueYUVColor[kYUVChromaPlaneIndex].r,
+                  kBlueYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
+    EXPECT_PIXEL_RGBA8_EQ(blueYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
+                          0);  // top right
+
+    RGBA8 whiteYUV(kWhiteYUVColor[kYUVLumaPlaneIndex].r, kWhiteYUVColor[kYUVChromaPlaneIndex].r,
+                   kWhiteYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
+    EXPECT_PIXEL_RGBA8_EQ(whiteYUV, renderPass.color, 0,
+                          kYUVImageDataHeightInTexels - 1);  // bottom left
+    mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
+}
+
+DAWN_INSTANTIATE_TEST(VideoViewsTests, VideoViewsTestBackend::Backend());
diff --git a/src/dawn/tests/end2end/VideoViewsTests.h b/src/dawn/tests/end2end/VideoViewsTests.h
new file mode 100644
index 0000000..60d93a0
--- /dev/null
+++ b/src/dawn/tests/end2end/VideoViewsTests.h
@@ -0,0 +1,95 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_VIDEOVIEWSTESTS_H_
+#define TESTS_VIDEOVIEWSTESTS_H_
+
+#include "dawn/tests/DawnTest.h"
+
+#include <array>
+#include <memory>
+
+class VideoViewsTestBackend {
+  public:
+    static BackendTestConfig Backend();
+    static std::unique_ptr<VideoViewsTestBackend> Create();
+
+    virtual ~VideoViewsTestBackend();
+
+    virtual void OnSetUp(WGPUDevice device) = 0;
+    virtual void OnTearDown() {
+    }
+
+    class PlatformTexture {
+      public:
+        PlatformTexture() = delete;
+        virtual ~PlatformTexture();
+
+        virtual bool CanWrapAsWGPUTexture() = 0;
+
+      protected:
+        explicit PlatformTexture(wgpu::Texture&& texture);
+
+      public:
+        wgpu::Texture wgpuTexture;
+    };
+    virtual std::unique_ptr<PlatformTexture> CreateVideoTextureForTest(wgpu::TextureFormat format,
+                                                                       wgpu::TextureUsage usage,
+                                                                       bool isCheckerboard) = 0;
+    virtual void DestroyVideoTextureForTest(std::unique_ptr<PlatformTexture>&& platformTexture) = 0;
+};
+
+class VideoViewsTests : public DawnTest {
+  public:
+    // The width and height in texels are 4 for all YUV formats.
+    static constexpr uint32_t kYUVImageDataWidthInTexels = 4;
+    static constexpr uint32_t kYUVImageDataHeightInTexels = 4;
+
+    static constexpr size_t kYUVLumaPlaneIndex = 0;
+    static constexpr size_t kYUVChromaPlaneIndex = 1;
+
+    // RGB colors converted into YUV (per plane), for testing.
+    // RGB colors are mapped to the BT.601 definition of luma.
+    // https://docs.microsoft.com/en-us/windows/win32/medfound/about-yuv-video
+    static constexpr std::array<RGBA8, 2> kYellowYUVColor = {RGBA8{210, 0, 0, 0xFF},    // Y
+                                                             RGBA8{16, 146, 0, 0xFF}};  // UV
+
+    static constexpr std::array<RGBA8, 2> kWhiteYUVColor = {RGBA8{235, 0, 0, 0xFF},     // Y
+                                                            RGBA8{128, 128, 0, 0xFF}};  // UV
+
+    static constexpr std::array<RGBA8, 2> kBlueYUVColor = {RGBA8{41, 0, 0, 0xFF},      // Y
+                                                           RGBA8{240, 110, 0, 0xFF}};  // UV
+
+    static constexpr std::array<RGBA8, 2> kRedYUVColor = {RGBA8{81, 0, 0, 0xFF},     // Y
+                                                          RGBA8{90, 240, 0, 0xFF}};  // UV
+
+    static std::vector<uint8_t> GetTestTextureData(wgpu::TextureFormat format, bool isCheckerboard);
+    static uint32_t NumPlanes(wgpu::TextureFormat format);
+    static std::vector<uint8_t> GetTestTextureDataWithPlaneIndex(size_t planeIndex,
+                                                                 size_t bytesPerRow,
+                                                                 size_t height,
+                                                                 bool isCheckerboard);
+
+  protected:
+    void SetUp() override;
+    void TearDown() override;
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override;
+    bool IsMultiPlanarFormatsSupported() const;
+    wgpu::ShaderModule GetTestVertexShaderModule() const;
+
+    std::unique_ptr<VideoViewsTestBackend> mBackend;
+    bool mIsMultiPlanarFormatsSupported = false;
+};
+
+#endif  // TESTS_VIDEOVIEWSTESTS_H_
diff --git a/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
new file mode 100644
index 0000000..bd23f2e
--- /dev/null
+++ b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
@@ -0,0 +1,203 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "VideoViewsTests.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/VulkanBackend.h"
+
+#include <fcntl.h>
+#include <gbm.h>
+
+// "linux-chromeos-rel"'s gbm.h is too old to compile, missing this change at least:
+// https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1963001/10/gbm.h#244
+#ifndef MINIGBM
+#    define GBM_BO_USE_TEXTURING (1 << 5)
+#    define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
+#    define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
+#endif
+
+class PlatformTextureGbm : public VideoViewsTestBackend::PlatformTexture {
+  public:
+    PlatformTextureGbm(wgpu::Texture&& texture, gbm_bo* gbmBo)
+        : PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {
+    }
+    ~PlatformTextureGbm() override = default;
+
+    // TODO(chromium:1258986): Add DISJOINT vkImage support for multi-plannar formats.
+    bool CanWrapAsWGPUTexture() override {
+        ASSERT(mGbmBo != nullptr);
+        // Checks if all plane handles of a multi-planar gbm_bo are same.
+        gbm_bo_handle plane0Handle = gbm_bo_get_handle_for_plane(mGbmBo, 0);
+        for (int plane = 1; plane < gbm_bo_get_plane_count(mGbmBo); ++plane) {
+            if (gbm_bo_get_handle_for_plane(mGbmBo, plane).u32 != plane0Handle.u32) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    gbm_bo* GetGbmBo() {
+        return mGbmBo;
+    }
+
+  private:
+    gbm_bo* mGbmBo = nullptr;
+};
+
+class VideoViewsTestBackendGbm : public VideoViewsTestBackend {
+  public:
+    void OnSetUp(WGPUDevice device) override {
+        mWGPUDevice = device;
+        mGbmDevice = CreateGbmDevice();
+    }
+
+    void OnTearDown() override {
+        gbm_device_destroy(mGbmDevice);
+    }
+
+  private:
+    gbm_device* CreateGbmDevice() {
+        // Render nodes [1] are the primary interface for communicating with the GPU on
+        // devices that support DRM. The actual filename of the render node is
+        // implementation-specific, so we must scan through all possible filenames to find
+        // one that we can use [2].
+        //
+        // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
+        // [2]
+        // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
+        const uint32_t kRenderNodeStart = 128;
+        const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
+        const std::string kRenderNodeTemplate = "/dev/dri/renderD";
+
+        int renderNodeFd = -1;
+        for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
+            std::string renderNode = kRenderNodeTemplate + std::to_string(i);
+            renderNodeFd = open(renderNode.c_str(), O_RDWR);
+            if (renderNodeFd >= 0)
+                break;
+        }
+        ASSERT(renderNodeFd > 0);
+
+        gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
+        ASSERT(gbmDevice != nullptr);
+        return gbmDevice;
+    }
+
+    static uint32_t GetGbmBoFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return GBM_FORMAT_NV12;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    WGPUTextureFormat ToWGPUTextureFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return WGPUTextureFormat_R8BG8Biplanar420Unorm;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    WGPUTextureUsage ToWGPUTextureUsage(wgpu::TextureUsage usage) {
+        switch (usage) {
+            case wgpu::TextureUsage::TextureBinding:
+                return WGPUTextureUsage_TextureBinding;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> CreateVideoTextureForTest(
+        wgpu::TextureFormat format,
+        wgpu::TextureUsage usage,
+        bool isCheckerboard) override {
+        uint32_t flags = GBM_BO_USE_SCANOUT | GBM_BO_USE_TEXTURING | GBM_BO_USE_HW_VIDEO_DECODER |
+                         GBM_BO_USE_SW_WRITE_RARELY;
+        gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, VideoViewsTests::kYUVImageDataWidthInTexels,
+                                      VideoViewsTests::kYUVImageDataHeightInTexels,
+                                      GetGbmBoFormat(format), flags);
+        if (gbmBo == nullptr) {
+            return nullptr;
+        }
+
+        void* mapHandle = nullptr;
+        uint32_t strideBytes = 0;
+        void* addr = gbm_bo_map(gbmBo, 0, 0, VideoViewsTests::kYUVImageDataWidthInTexels,
+                                VideoViewsTests::kYUVImageDataHeightInTexels, GBM_BO_TRANSFER_WRITE,
+                                &strideBytes, &mapHandle);
+        EXPECT_NE(addr, nullptr);
+        std::vector<uint8_t> initialData =
+            VideoViewsTests::GetTestTextureData(format, isCheckerboard);
+        std::memcpy(addr, initialData.data(), initialData.size());
+
+        gbm_bo_unmap(gbmBo, mapHandle);
+
+        wgpu::TextureDescriptor textureDesc;
+        textureDesc.format = format;
+        textureDesc.dimension = wgpu::TextureDimension::e2D;
+        textureDesc.usage = usage;
+        textureDesc.size = {VideoViewsTests::kYUVImageDataWidthInTexels,
+                            VideoViewsTests::kYUVImageDataHeightInTexels, 1};
+
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc;
+        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+        textureDesc.nextInChain = &internalDesc;
+
+        dawn::native::vulkan::ExternalImageDescriptorDmaBuf descriptor = {};
+        descriptor.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
+        descriptor.isInitialized = true;
+
+        descriptor.memoryFD = gbm_bo_get_fd(gbmBo);
+        descriptor.stride = gbm_bo_get_stride(gbmBo);
+        descriptor.drmModifier = gbm_bo_get_modifier(gbmBo);
+        descriptor.waitFDs = {};
+
+        return std::make_unique<PlatformTextureGbm>(
+            wgpu::Texture::Acquire(dawn::native::vulkan::WrapVulkanImage(mWGPUDevice, &descriptor)),
+            gbmBo);
+    }
+
+    void DestroyVideoTextureForTest(
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
+        // Exports the signal and ignores it.
+        dawn::native::vulkan::ExternalImageExportInfoDmaBuf exportInfo;
+        dawn::native::vulkan::ExportVulkanImage(platformTexture->wgpuTexture.Get(),
+                                                VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
+        for (int fd : exportInfo.semaphoreHandles) {
+            ASSERT_NE(fd, -1);
+            close(fd);
+        }
+        gbm_bo* gbmBo = static_cast<PlatformTextureGbm*>(platformTexture.get())->GetGbmBo();
+        ASSERT_NE(gbmBo, nullptr);
+        gbm_bo_destroy(gbmBo);
+    }
+
+    WGPUDevice mWGPUDevice = nullptr;
+    gbm_device* mGbmDevice = nullptr;
+};
+
+// static
+BackendTestConfig VideoViewsTestBackend::Backend() {
+    return VulkanBackend();
+}
+
+// static
+std::unique_ptr<VideoViewsTestBackend> VideoViewsTestBackend::Create() {
+    return std::make_unique<VideoViewsTestBackendGbm>();
+}
diff --git a/src/dawn/tests/end2end/VideoViewsTests_mac.cpp b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
new file mode 100644
index 0000000..151c0b3
--- /dev/null
+++ b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
@@ -0,0 +1,187 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "VideoViewsTests.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/native/MetalBackend.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreVideo/CVPixelBuffer.h>
+#include <IOSurface/IOSurfaceRef.h>
+
+namespace {
+    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+        CFNumberRef number(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
+        CFDictionaryAddValue(dictionary, key, number);
+        CFRelease(number);
+    }
+
+}  // anonymous namespace
+
+class PlatformTextureIOSurface : public VideoViewsTestBackend::PlatformTexture {
+  public:
+    PlatformTextureIOSurface(wgpu::Texture&& texture, IOSurfaceRef iosurface)
+        : PlatformTexture(std::move(texture)) {
+        mIOSurface = AcquireCFRef<IOSurfaceRef>(iosurface);
+    }
+    ~PlatformTextureIOSurface() override {
+        mIOSurface = nullptr;
+    }
+
+    bool CanWrapAsWGPUTexture() override {
+        return true;
+    }
+
+  private:
+    CFRef<IOSurfaceRef> mIOSurface = nullptr;
+};
+
+class VideoViewsTestBackendIOSurface : public VideoViewsTestBackend {
+  public:
+    void OnSetUp(WGPUDevice device) override {
+        mWGPUDevice = device;
+    }
+
+  private:
+    OSType ToCVFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+            default:
+                UNREACHABLE();
+                return 0;
+        }
+    }
+
+    size_t GetSubSamplingFactorPerPlane(wgpu::TextureFormat format, size_t plane) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return plane == VideoViewsTests::kYUVLumaPlaneIndex ? 1 : 2;
+            default:
+                UNREACHABLE();
+                return 0;
+        }
+    }
+
+    size_t BytesPerElement(wgpu::TextureFormat format, size_t plane) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return plane == VideoViewsTests::kYUVLumaPlaneIndex ? 1 : 2;
+            default:
+                UNREACHABLE();
+                return 0;
+        }
+    }
+
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> CreateVideoTextureForTest(
+        wgpu::TextureFormat format,
+        wgpu::TextureUsage usage,
+        bool isCheckerboard) override {
+        CFMutableDictionaryRef dict(CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+                                                              &kCFTypeDictionaryKeyCallBacks,
+                                                              &kCFTypeDictionaryValueCallBacks));
+        AddIntegerValue(dict, kIOSurfaceWidth, VideoViewsTests::kYUVImageDataWidthInTexels);
+        AddIntegerValue(dict, kIOSurfaceHeight, VideoViewsTests::kYUVImageDataHeightInTexels);
+        AddIntegerValue(dict, kIOSurfacePixelFormat, ToCVFormat(format));
+
+        size_t num_planes = VideoViewsTests::NumPlanes(format);
+
+        CFMutableArrayRef planes(
+            CFArrayCreateMutable(kCFAllocatorDefault, num_planes, &kCFTypeArrayCallBacks));
+        size_t total_bytes_alloc = 0;
+        for (size_t plane = 0; plane < num_planes; ++plane) {
+            const size_t factor = GetSubSamplingFactorPerPlane(format, plane);
+            const size_t plane_width = VideoViewsTests::kYUVImageDataWidthInTexels / factor;
+            const size_t plane_height = VideoViewsTests::kYUVImageDataHeightInTexels / factor;
+            const size_t plane_bytes_per_element = BytesPerElement(format, plane);
+            const size_t plane_bytes_per_row = IOSurfaceAlignProperty(
+                kIOSurfacePlaneBytesPerRow, plane_width * plane_bytes_per_element);
+            const size_t plane_bytes_alloc =
+                IOSurfaceAlignProperty(kIOSurfacePlaneSize, plane_height * plane_bytes_per_row);
+            const size_t plane_offset =
+                IOSurfaceAlignProperty(kIOSurfacePlaneOffset, total_bytes_alloc);
+
+            CFMutableDictionaryRef plane_info(
+                CFDictionaryCreateMutable(kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks,
+                                          &kCFTypeDictionaryValueCallBacks));
+
+            AddIntegerValue(plane_info, kIOSurfacePlaneWidth, plane_width);
+            AddIntegerValue(plane_info, kIOSurfacePlaneHeight, plane_height);
+            AddIntegerValue(plane_info, kIOSurfacePlaneBytesPerElement, plane_bytes_per_element);
+            AddIntegerValue(plane_info, kIOSurfacePlaneBytesPerRow, plane_bytes_per_row);
+            AddIntegerValue(plane_info, kIOSurfacePlaneSize, plane_bytes_alloc);
+            AddIntegerValue(plane_info, kIOSurfacePlaneOffset, plane_offset);
+            CFArrayAppendValue(planes, plane_info);
+            CFRelease(plane_info);
+            total_bytes_alloc = plane_offset + plane_bytes_alloc;
+        }
+        CFDictionaryAddValue(dict, kIOSurfacePlaneInfo, planes);
+        CFRelease(planes);
+
+        total_bytes_alloc = IOSurfaceAlignProperty(kIOSurfaceAllocSize, total_bytes_alloc);
+        AddIntegerValue(dict, kIOSurfaceAllocSize, total_bytes_alloc);
+
+        IOSurfaceRef surface = IOSurfaceCreate(dict);
+        CFRelease(dict);
+
+        IOSurfaceLock(surface, 0, nullptr);
+        for (size_t plane = 0; plane < num_planes; ++plane) {
+            std::vector<uint8_t> data = VideoViewsTests::GetTestTextureDataWithPlaneIndex(
+                plane, IOSurfaceGetBytesPerRowOfPlane(surface, plane),
+                IOSurfaceGetHeightOfPlane(surface, plane), isCheckerboard);
+            void* pointer = IOSurfaceGetBaseAddressOfPlane(surface, plane);
+            memcpy(pointer, data.data(), data.size());
+        }
+        IOSurfaceUnlock(surface, 0, nullptr);
+
+        wgpu::TextureDescriptor textureDesc;
+        textureDesc.format = format;
+        textureDesc.dimension = wgpu::TextureDimension::e2D;
+        textureDesc.usage = usage;
+        textureDesc.size = {VideoViewsTests::kYUVImageDataWidthInTexels,
+                            VideoViewsTests::kYUVImageDataHeightInTexels, 1};
+
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc;
+        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+        textureDesc.nextInChain = &internalDesc;
+
+        dawn::native::metal::ExternalImageDescriptorIOSurface descriptor = {};
+        descriptor.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
+        descriptor.isInitialized = true;
+        descriptor.ioSurface = surface;
+
+        return std::make_unique<PlatformTextureIOSurface>(
+            wgpu::Texture::Acquire(dawn::native::metal::WrapIOSurface(mWGPUDevice, &descriptor)),
+            surface);
+    }
+
+    void DestroyVideoTextureForTest(
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
+    }
+
+    WGPUDevice mWGPUDevice = nullptr;
+};
+
+// static
+BackendTestConfig VideoViewsTestBackend::Backend() {
+    return MetalBackend();
+}
+
+// static
+std::unique_ptr<VideoViewsTestBackend> VideoViewsTestBackend::Create() {
+    return std::make_unique<VideoViewsTestBackendIOSurface>();
+}
diff --git a/src/dawn/tests/end2end/VideoViewsTests_win.cpp b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
new file mode 100644
index 0000000..21889d8
--- /dev/null
+++ b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
@@ -0,0 +1,187 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "VideoViewsTests.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/D3D12Backend.h"
+
+#include <d3d11.h>
+#include <d3d12.h>
+#include <dxgi1_4.h>
+#include <wrl/client.h>
+
+using Microsoft::WRL::ComPtr;
+
+class PlatformTextureWin : public VideoViewsTestBackend::PlatformTexture {
+  public:
+    explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {
+    }
+    ~PlatformTextureWin() override = default;
+
+    bool CanWrapAsWGPUTexture() override {
+        return true;
+    }
+};
+
+class VideoViewsTestBackendWin : public VideoViewsTestBackend {
+  public:
+    ~VideoViewsTestBackendWin() override = default;
+
+    void OnSetUp(WGPUDevice device) override {
+        mWGPUDevice = device;
+
+        // Create the D3D11 device/contexts that will be used in subsequent tests
+        ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device);
+
+        const LUID adapterLuid = d3d12Device->GetAdapterLuid();
+
+        ComPtr<IDXGIFactory4> dxgiFactory;
+        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<IDXGIAdapter> dxgiAdapter;
+        hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<ID3D11Device> d3d11Device;
+        D3D_FEATURE_LEVEL d3dFeatureLevel;
+        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+        hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0, nullptr, 0,
+                                 D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
+                                 &d3d11DeviceContext);
+        ASSERT_EQ(hr, S_OK);
+
+        // Runtime of the created texture (D3D11 device) and OpenSharedHandle runtime (Dawn's
+        // D3D12 device) must agree on resource sharing capability. For NV12 formats, D3D11
+        // requires at-least D3D11_SHARED_RESOURCE_TIER_2 support.
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_shared_resource_tier
+        D3D11_FEATURE_DATA_D3D11_OPTIONS5 featureOptions5{};
+        hr = d3d11Device->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS5, &featureOptions5,
+                                              sizeof(featureOptions5));
+        ASSERT_EQ(hr, S_OK);
+
+        ASSERT_GE(featureOptions5.SharedResourceTier, D3D11_SHARED_RESOURCE_TIER_2);
+
+        mD3d11Device = std::move(d3d11Device);
+    }
+
+  protected:
+    static DXGI_FORMAT GetDXGITextureFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+                return DXGI_FORMAT_NV12;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    std::unique_ptr<VideoViewsTestBackend::PlatformTexture> CreateVideoTextureForTest(
+        wgpu::TextureFormat format,
+        wgpu::TextureUsage usage,
+        bool isCheckerboard) override {
+        wgpu::TextureDescriptor textureDesc;
+        textureDesc.format = format;
+        textureDesc.dimension = wgpu::TextureDimension::e2D;
+        textureDesc.usage = usage;
+        textureDesc.size = {VideoViewsTests::kYUVImageDataWidthInTexels,
+                            VideoViewsTests::kYUVImageDataHeightInTexels, 1};
+
+        // Create a DX11 texture with data then wrap it in a shared handle.
+        D3D11_TEXTURE2D_DESC d3dDescriptor;
+        d3dDescriptor.Width = VideoViewsTests::kYUVImageDataWidthInTexels;
+        d3dDescriptor.Height = VideoViewsTests::kYUVImageDataHeightInTexels;
+        d3dDescriptor.MipLevels = 1;
+        d3dDescriptor.ArraySize = 1;
+        d3dDescriptor.Format = GetDXGITextureFormat(format);
+        d3dDescriptor.SampleDesc.Count = 1;
+        d3dDescriptor.SampleDesc.Quality = 0;
+        d3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
+        d3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE;
+        d3dDescriptor.CPUAccessFlags = 0;
+        d3dDescriptor.MiscFlags =
+            D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+
+        std::vector<uint8_t> initialData =
+            VideoViewsTests::GetTestTextureData(format, isCheckerboard);
+
+        D3D11_SUBRESOURCE_DATA subres;
+        subres.pSysMem = initialData.data();
+        subres.SysMemPitch = VideoViewsTests::kYUVImageDataWidthInTexels;
+
+        ComPtr<ID3D11Texture2D> d3d11Texture;
+        HRESULT hr = mD3d11Device->CreateTexture2D(&d3dDescriptor, &subres, &d3d11Texture);
+        ASSERT(hr == S_OK);
+
+        ComPtr<IDXGIResource1> dxgiResource;
+        hr = d3d11Texture.As(&dxgiResource);
+        ASSERT(hr == S_OK);
+
+        HANDLE sharedHandle;
+        hr = dxgiResource->CreateSharedHandle(
+            nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+            &sharedHandle);
+        ASSERT(hr == S_OK);
+
+        // DX11 texture should be initialized upon CreateTexture2D. However, if we do not
+        // acquire/release the keyed mutex before using the wrapped WebGPU texture, the WebGPU
+        // texture is left uninitialized. This is required for D3D11 and D3D12 interop.
+        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+        hr = d3d11Texture.As(&dxgiKeyedMutex);
+        ASSERT(hr == S_OK);
+
+        using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
+        hr = dxgiKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE);
+        ASSERT(hr == S_OK);
+
+        hr = dxgiKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+        ASSERT(hr == S_OK);
+
+        // Open the DX11 texture in Dawn from the shared handle and return it as a WebGPU
+        // texture.
+        dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
+        externalImageDesc.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
+        externalImageDesc.sharedHandle = sharedHandle;
+
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
+            dawn::native::d3d12::ExternalImageDXGI::Create(mWGPUDevice, &externalImageDesc);
+
+        // Handle is no longer needed once resources are created.
+        ::CloseHandle(sharedHandle);
+
+        dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+        externalAccessDesc.isInitialized = true;
+        externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(textureDesc.usage);
+
+        return std::make_unique<PlatformTextureWin>(wgpu::Texture::Acquire(
+            externalImage->ProduceTexture(mWGPUDevice, &externalAccessDesc)));
+    }
+
+    void DestroyVideoTextureForTest(
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& PlatformTexture) override {
+    }
+
+    WGPUDevice mWGPUDevice = nullptr;
+    ComPtr<ID3D11Device> mD3d11Device;
+};
+
+// static
+BackendTestConfig VideoViewsTestBackend::Backend() {
+    return D3D12Backend();
+}
+// static
+std::unique_ptr<VideoViewsTestBackend> VideoViewsTestBackend::Create() {
+    return std::make_unique<VideoViewsTestBackendWin>();
+}
diff --git a/src/dawn/tests/end2end/ViewportOrientationTests.cpp b/src/dawn/tests/end2end/ViewportOrientationTests.cpp
new file mode 100644
index 0000000..9a29049
--- /dev/null
+++ b/src/dawn/tests/end2end/ViewportOrientationTests.cpp
@@ -0,0 +1,66 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ViewportOrientationTests : public DawnTest {};
+
+// Test that the pixel in viewport coordinate (-1, -1) matches texel (0, 0)
+TEST_P(ViewportOrientationTests, OriginAt0x0) {
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 2, 2);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(-0.5, 0.5, 0.0, 1.0);
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
+    descriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.Draw(1);
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 0, 1);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 1, 1);
+}
+
+DAWN_INSTANTIATE_TEST(ViewportOrientationTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/ViewportTests.cpp b/src/dawn/tests/end2end/ViewportTests.cpp
new file mode 100644
index 0000000..df2d98a
--- /dev/null
+++ b/src/dawn/tests/end2end/ViewportTests.cpp
@@ -0,0 +1,218 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ViewportTest : public DawnTest {
+  private:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        mQuadVS = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0),
+                    vec2<f32>( 1.0, -1.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        mQuadFS = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+            })");
+    }
+
+  protected:
+    wgpu::ShaderModule mQuadVS;
+    wgpu::ShaderModule mQuadFS;
+
+    static constexpr uint32_t kWidth = 5;
+    static constexpr uint32_t kHeight = 6;
+
+    // Viewport parameters are float, but use uint32_t because implementations of Vulkan are allowed
+    // to just discard the fractional part.
+    void TestViewportQuad(uint32_t x,
+                          uint32_t y,
+                          uint32_t width,
+                          uint32_t height,
+                          bool doViewportCall = true) {
+        // Create a pipeline that will draw a white quad.
+        utils::ComboRenderPipelineDescriptor pipelineDesc;
+        pipelineDesc.vertex.module = mQuadVS;
+        pipelineDesc.cFragment.module = mQuadFS;
+        pipelineDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+        // Render the quad with the viewport call.
+        utils::BasicRenderPass rp = utils::CreateBasicRenderPass(device, kWidth, kHeight);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rp.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        if (doViewportCall) {
+            pass.SetViewport(x, y, width, height, 0.0, 1.0);
+        }
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check that only the texels that are in the veiwport were drawn.
+        for (uint32_t checkX = 0; checkX < kWidth; checkX++) {
+            for (uint32_t checkY = 0; checkY < kHeight; checkY++) {
+                if (checkX >= x && checkX < x + width && checkY >= y && checkY < y + height) {
+                    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kWhite, rp.color, checkX, checkY);
+                } else {
+                    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, rp.color, checkX, checkY);
+                }
+            }
+        }
+    }
+
+    void TestViewportDepth(float minDepth, float maxDepth, bool doViewportCall = true) {
+        // Create a pipeline drawing 3 points at depth 1.0, 0.5 and 0.0.
+        utils::ComboRenderPipelineDescriptor pipelineDesc;
+        pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var points : array<vec3<f32>, 3> = array<vec3<f32>, 3>(
+                    vec3<f32>(-0.9, 0.0, 1.0),
+                    vec3<f32>( 0.0, 0.0, 0.5),
+                    vec3<f32>( 0.9, 0.0, 0.0));
+                return vec4<f32>(points[VertexIndex], 1.0);
+            })");
+        pipelineDesc.cFragment.module = mQuadFS;
+        pipelineDesc.cFragment.targetCount = 0;
+        pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
+        wgpu::DepthStencilState* depthStencil =
+            pipelineDesc.EnableDepthStencil(wgpu::TextureFormat::Depth32Float);
+        depthStencil->depthWriteEnabled = true;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+        // Create the texture that will store the post-viewport-transform depth.
+        wgpu::TextureDescriptor depthDesc;
+        depthDesc.size = {3, 1, 1};
+        depthDesc.format = wgpu::TextureFormat::Depth32Float;
+        depthDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture depthTexture = device.CreateTexture(&depthDesc);
+
+        // Render the three points with the viewport call.
+        utils::ComboRenderPassDescriptor rpDesc({}, depthTexture.CreateView());
+        rpDesc.cDepthStencilAttachmentInfo.depthClearValue = 0.0f;
+        rpDesc.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+        rpDesc.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        rpDesc.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rpDesc);
+        pass.SetPipeline(pipeline);
+        if (doViewportCall) {
+            pass.SetViewport(0, 0, 3, 1, minDepth, maxDepth);
+        }
+        pass.Draw(3);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check that the viewport transform was computed correctly for the depth.
+        std::vector<float> expected = {
+            maxDepth,
+            (maxDepth + minDepth) / 2,
+            minDepth,
+        };
+        EXPECT_TEXTURE_EQ(expected.data(), depthTexture, {0, 0}, {3, 1});
+    }
+};
+
+// Test that by default the full viewport is used.
+TEST_P(ViewportTest, DefaultViewportRect) {
+    TestViewportQuad(0, 0, kWidth, kHeight, false);
+}
+
+// Test various viewport values in the X direction.
+TEST_P(ViewportTest, VaryingInX) {
+    TestViewportQuad(0, 0, kWidth - 1, kHeight);
+    TestViewportQuad(1, 0, kWidth - 1, kHeight);
+    TestViewportQuad(2, 0, 1, kHeight);
+}
+
+// Test various viewport values in the Y direction.
+TEST_P(ViewportTest, VaryingInY) {
+    TestViewportQuad(0, 0, kWidth, kHeight - 1);
+    TestViewportQuad(0, 1, kWidth, kHeight - 1);
+    TestViewportQuad(0, 2, kWidth, 1);
+}
+
+// Test various viewport values in both X and Y
+TEST_P(ViewportTest, SubBoxes) {
+    TestViewportQuad(1, 1, kWidth - 2, kHeight - 2);
+    TestViewportQuad(2, 2, 2, 2);
+    TestViewportQuad(2, 3, 2, 1);
+}
+
+// Test that by default the [0, 1] depth range is used.
+TEST_P(ViewportTest, DefaultViewportDepth) {
+    TestViewportDepth(0.0, 1.0, false);
+}
+
+// Test various viewport depth ranges
+TEST_P(ViewportTest, ViewportDepth) {
+    TestViewportDepth(0.0, 0.5);
+    TestViewportDepth(0.5, 1.0);
+}
+
+// Test that a draw with an empty viewport doesn't draw anything.
+TEST_P(ViewportTest, EmptyViewport) {
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    pipelineDescriptor.vertex.module = mQuadVS;
+    pipelineDescriptor.cFragment.module = mQuadFS;
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+
+    auto DoEmptyViewportTest = [&](uint32_t width, uint32_t height) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetViewport(0.0f, 0.0f, width, height, 0.0f, 1.0f);
+        pass.Draw(6);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kZero, renderPass.color, 0, 0);
+    };
+
+    // Test with a 0x0, 0xN and nx0 viewport.
+    DoEmptyViewportTest(0, 0);
+    DoEmptyViewportTest(0, 1);
+    DoEmptyViewportTest(1, 0);
+}
+
+DAWN_INSTANTIATE_TEST(ViewportTest,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/end2end/WindowSurfaceTests.cpp b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
new file mode 100644
index 0000000..1974f3c
--- /dev/null
+++ b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
@@ -0,0 +1,251 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Log.h"
+#include "dawn/common/Platform.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/GLFWUtils.h"
+
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+
+// Include windows.h before GLFW so GLFW's APIENTRY macro doesn't conflict with windows.h's.
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include "dawn/common/windows_with_undefs.h"
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#include "GLFW/glfw3.h"
+
+#if defined(DAWN_USE_X11)
+#    include "dawn/common/xlib_with_undefs.h"
+#endif  // defined(DAWN_USE_X11)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+#    include "dawn/utils/ObjCUtils.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#include "GLFW/glfw3native.h"
+
+// Test for wgpu::Surface creation that only need an instance (no devices) and don't need all the
+// complexity of DawnTest.
+class WindowSurfaceInstanceTests : public testing::Test {
+  public:
+    void SetUp() override {
+        glfwSetErrorCallback([](int code, const char* message) {
+            dawn::ErrorLog() << "GLFW error " << code << " " << message;
+        });
+        DAWN_TEST_UNSUPPORTED_IF(!glfwInit());
+
+        dawnProcSetProcs(&dawn::native::GetProcs());
+
+        mInstance = wgpu::CreateInstance();
+    }
+
+    void TearDown() override {
+        if (mWindow != nullptr) {
+            glfwDestroyWindow(mWindow);
+            mWindow = nullptr;
+        }
+    }
+
+    void AssertSurfaceCreation(const wgpu::SurfaceDescriptor* descriptor, bool succeeds) {
+        ASSERT_EQ(mInstance.CreateSurface(descriptor).Get() != nullptr, succeeds);
+    }
+
+    GLFWwindow* CreateWindow() {
+        // The WindowSurfaceInstance tests don't create devices so we don't need to call
+        // SetupGLFWWindowHintsForBackend. Set GLFW_NO_API anyway to avoid GLFW bringing up a GL
+        // context that we won't use.
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+        mWindow = glfwCreateWindow(400, 400, "WindowSurfaceInstanceTests window", nullptr, nullptr);
+        return mWindow;
+    }
+
+  private:
+    wgpu::Instance mInstance;
+    GLFWwindow* mWindow = nullptr;
+};
+
+// Test that a valid chained descriptor works (and that GLFWUtils creates a valid chained
+// descriptor).
+TEST_F(WindowSurfaceInstanceTests, ControlCase) {
+    GLFWwindow* window = CreateWindow();
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = chainedDescriptor.get();
+
+    AssertSurfaceCreation(&descriptor, true);
+}
+
+// Test that just wgpu::SurfaceDescriptor isn't enough and needs a chained descriptor.
+TEST_F(WindowSurfaceInstanceTests, NoChainedDescriptors) {
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = nullptr;  // That's the default value but we set it for clarity.
+
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+// Test that a chained descriptor with a garbage sType produces an error.
+TEST_F(WindowSurfaceInstanceTests, BadChainedDescriptors) {
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::Invalid;  // The default but we set it for clarity.
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+// Test that a chained descriptor with HTMLCanvas produces an error.
+TEST_F(WindowSurfaceInstanceTests, HTMLCanvasDescriptor) {
+    wgpu::SurfaceDescriptorFromCanvasHTMLSelector chainedDescriptor;
+    chainedDescriptor.selector = "#myCanvas";
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+// Test that it is invalid to give two valid chained descriptors
+TEST_F(WindowSurfaceInstanceTests, TwoChainedDescriptors) {
+    GLFWwindow* window = CreateWindow();
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor1 =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor2 =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = chainedDescriptor1.get();
+    chainedDescriptor1->nextInChain = chainedDescriptor2.get();
+
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+
+// Tests that GLFWUtils returns a descriptor of HWND type
+TEST_F(WindowSurfaceInstanceTests, CorrectSTypeHWND) {
+    GLFWwindow* window = CreateWindow();
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+    ASSERT_EQ(chainedDescriptor->sType, wgpu::SType::SurfaceDescriptorFromWindowsHWND);
+}
+
+// Test with setting an invalid hwnd
+TEST_F(WindowSurfaceInstanceTests, InvalidHWND) {
+    wgpu::SurfaceDescriptorFromWindowsHWND chainedDescriptor;
+    chainedDescriptor.hinstance = GetModuleHandle(nullptr);
+    chainedDescriptor.hwnd = 0;  // This always is an invalid HWND value.
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#else  // defined(DAWN_PLATFORM_WINDOWS)
+
+// Test using HWND when it is not supported
+TEST_F(WindowSurfaceInstanceTests, HWNDSurfacesAreInvalid) {
+    wgpu::SurfaceDescriptorFromWindowsHWND chainedDescriptor;
+    chainedDescriptor.hinstance = nullptr;
+    chainedDescriptor.hwnd = 0;
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#endif  // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+
+// Tests that GLFWUtils returns a descriptor of Xlib type
+TEST_F(WindowSurfaceInstanceTests, CorrectSTypeXlib) {
+    GLFWwindow* window = CreateWindow();
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+    ASSERT_EQ(chainedDescriptor->sType, wgpu::SType::SurfaceDescriptorFromXlibWindow);
+}
+
+// Test with setting an invalid window
+TEST_F(WindowSurfaceInstanceTests, InvalidXWindow) {
+    wgpu::SurfaceDescriptorFromXlibWindow chainedDescriptor;
+    chainedDescriptor.display = XOpenDisplay(nullptr);
+    // From the "X Window System Protocol" "X Version 11, Release 6.8" page 2 at
+    // https://www.x.org/releases/X11R7.5/doc/x11proto/proto.pdf
+    //    WINDOW 32-bit value (top three bits guaranteed to be zero.
+    // So UINT32_MAX should be an invalid window.
+    chainedDescriptor.window = 0xFFFFFFFF;
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#else  // defined(DAWN_USE_X11)
+
+// Test using Xlib when it is not supported
+TEST_F(WindowSurfaceInstanceTests, XlibSurfacesAreInvalid) {
+    wgpu::SurfaceDescriptorFromXlibWindow chainedDescriptor;
+    chainedDescriptor.display = nullptr;
+    chainedDescriptor.window = 0;
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#endif  // defined(DAWN_USE_X11)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+
+// Tests that GLFWUtils returns a descriptor of Metal type
+TEST_F(WindowSurfaceInstanceTests, CorrectSTypeMetal) {
+    GLFWwindow* window = CreateWindow();
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+        utils::SetupWindowAndGetSurfaceDescriptorForTesting(window);
+    ASSERT_EQ(chainedDescriptor->sType, wgpu::SType::SurfaceDescriptorFromMetalLayer);
+}
+
+// Test with setting an invalid layer
+TEST_F(WindowSurfaceInstanceTests, InvalidMetalLayer) {
+    wgpu::SurfaceDescriptorFromMetalLayer chainedDescriptor;
+    // The CALayer is autoreleased. Releasing it causes a test failure when the Chromium GTest
+    // autoreleasepool is emptied.
+    chainedDescriptor.layer = utils::CreateDummyCALayer();
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#else  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+// Test using Metal when it is not supported
+TEST_F(WindowSurfaceInstanceTests, MetalSurfacesAreInvalid) {
+    wgpu::SurfaceDescriptorFromMetalLayer chainedDescriptor;
+    chainedDescriptor.layer = nullptr;
+
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = &chainedDescriptor;
+    AssertSurfaceCreation(&descriptor, false);
+}
+
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
diff --git a/src/dawn/tests/perf_tests/BufferUploadPerf.cpp b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
new file mode 100644
index 0000000..6be9436
--- /dev/null
+++ b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
@@ -0,0 +1,155 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    constexpr unsigned int kNumIterations = 50;
+
+    enum class UploadMethod {
+        WriteBuffer,
+        MappedAtCreation,
+    };
+
+    // Perf delta exists between ranges [0, 1MB] vs [1MB, MAX_SIZE).
+    // These are sample buffer sizes within each range.
+    enum class UploadSize {
+        BufferSize_1KB = 1 * 1024,
+        BufferSize_64KB = 64 * 1024,
+        BufferSize_1MB = 1 * 1024 * 1024,
+
+        BufferSize_4MB = 4 * 1024 * 1024,
+        BufferSize_16MB = 16 * 1024 * 1024,
+    };
+
+    struct BufferUploadParams : AdapterTestParam {
+        BufferUploadParams(const AdapterTestParam& param,
+                           UploadMethod uploadMethod,
+                           UploadSize uploadSize)
+            : AdapterTestParam(param), uploadMethod(uploadMethod), uploadSize(uploadSize) {
+        }
+
+        UploadMethod uploadMethod;
+        UploadSize uploadSize;
+    };
+
+    std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
+        ostream << static_cast<const AdapterTestParam&>(param);
+
+        switch (param.uploadMethod) {
+            case UploadMethod::WriteBuffer:
+                ostream << "_WriteBuffer";
+                break;
+            case UploadMethod::MappedAtCreation:
+                ostream << "_MappedAtCreation";
+                break;
+        }
+
+        switch (param.uploadSize) {
+            case UploadSize::BufferSize_1KB:
+                ostream << "_BufferSize_1KB";
+                break;
+            case UploadSize::BufferSize_64KB:
+                ostream << "_BufferSize_64KB";
+                break;
+            case UploadSize::BufferSize_1MB:
+                ostream << "_BufferSize_1MB";
+                break;
+            case UploadSize::BufferSize_4MB:
+                ostream << "_BufferSize_4MB";
+                break;
+            case UploadSize::BufferSize_16MB:
+                ostream << "_BufferSize_16MB";
+                break;
+        }
+
+        return ostream;
+    }
+
+}  // namespace
+
+// Test uploading |kBufferSize| bytes of data |kNumIterations| times.
+class BufferUploadPerf : public DawnPerfTestWithParams<BufferUploadParams> {
+  public:
+    BufferUploadPerf()
+        : DawnPerfTestWithParams(kNumIterations, 1),
+          data(static_cast<size_t>(GetParam().uploadSize)) {
+    }
+    ~BufferUploadPerf() override = default;
+
+    void SetUp() override;
+
+  private:
+    void Step() override;
+
+    wgpu::Buffer dst;
+    std::vector<uint8_t> data;
+};
+
+void BufferUploadPerf::SetUp() {
+    DawnPerfTestWithParams<BufferUploadParams>::SetUp();
+
+    wgpu::BufferDescriptor desc = {};
+    desc.size = data.size();
+    desc.usage = wgpu::BufferUsage::CopyDst;
+
+    dst = device.CreateBuffer(&desc);
+}
+
+void BufferUploadPerf::Step() {
+    switch (GetParam().uploadMethod) {
+        case UploadMethod::WriteBuffer: {
+            for (unsigned int i = 0; i < kNumIterations; ++i) {
+                queue.WriteBuffer(dst, 0, data.data(), data.size());
+            }
+            // Make sure all WriteBuffer's are flushed.
+            queue.Submit(0, nullptr);
+            break;
+        }
+
+        case UploadMethod::MappedAtCreation: {
+            wgpu::BufferDescriptor desc = {};
+            desc.size = data.size();
+            desc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+            desc.mappedAtCreation = true;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            for (unsigned int i = 0; i < kNumIterations; ++i) {
+                wgpu::Buffer buffer = device.CreateBuffer(&desc);
+                memcpy(buffer.GetMappedRange(0, data.size()), data.data(), data.size());
+                buffer.Unmap();
+                encoder.CopyBufferToBuffer(buffer, 0, dst, 0, data.size());
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+            break;
+        }
+    }
+}
+
+TEST_P(BufferUploadPerf, Run) {
+    RunTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(BufferUploadPerf,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend()},
+                        {UploadMethod::WriteBuffer, UploadMethod::MappedAtCreation},
+                        {UploadSize::BufferSize_1KB, UploadSize::BufferSize_64KB,
+                         UploadSize::BufferSize_1MB, UploadSize::BufferSize_4MB,
+                         UploadSize::BufferSize_16MB});
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.cpp b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
new file mode 100644
index 0000000..75d2871
--- /dev/null
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
@@ -0,0 +1,410 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+#include <algorithm>
+#include <fstream>
+#include <limits>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+#include "dawn/tests/perf_tests/DawnPerfTestPlatform.h"
+#include "dawn/utils/Timer.h"
+
+namespace {
+
+    DawnPerfTestEnvironment* gTestEnv = nullptr;
+
+    void DumpTraceEventsToJSONFile(
+        const std::vector<DawnPerfTestPlatform::TraceEvent>& traceEventBuffer,
+        const char* traceFile) {
+        std::ofstream outFile;
+        outFile.open(traceFile, std::ios_base::app);
+
+        for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
+            const char* category = nullptr;
+            switch (traceEvent.category) {
+                case dawn::platform::TraceCategory::General:
+                    category = "general";
+                    break;
+                case dawn::platform::TraceCategory::Validation:
+                    category = "validation";
+                    break;
+                case dawn::platform::TraceCategory::Recording:
+                    category = "recording";
+                    break;
+                case dawn::platform::TraceCategory::GPUWork:
+                    category = "gpu";
+                    break;
+                default:
+                    UNREACHABLE();
+            }
+
+            uint64_t microseconds = static_cast<uint64_t>(traceEvent.timestamp * 1000.0 * 1000.0);
+
+            outFile << ", { "
+                    << "\"name\": \"" << traceEvent.name << "\", "
+                    << "\"cat\": \"" << category << "\", "
+                    << "\"ph\": \"" << traceEvent.phase << "\", "
+                    << "\"id\": " << traceEvent.id << ", "
+                    << "\"tid\": " << traceEvent.threadId << ", "
+                    << "\"ts\": " << microseconds << ", "
+                    << "\"pid\": \"Dawn\""
+                    << " }";
+        }
+        outFile.close();
+    }
+
+}  // namespace
+
+void InitDawnPerfTestEnvironment(int argc, char** argv) {
+    gTestEnv = new DawnPerfTestEnvironment(argc, argv);
+    DawnTestEnvironment::SetEnvironment(gTestEnv);
+    testing::AddGlobalTestEnvironment(gTestEnv);
+}
+
+DawnPerfTestEnvironment::DawnPerfTestEnvironment(int argc, char** argv)
+    : DawnTestEnvironment(argc, argv) {
+    size_t argLen = 0;  // Set when parsing --arg=X arguments
+    for (int i = 1; i < argc; ++i) {
+        if (strcmp("--calibration", argv[i]) == 0) {
+            mIsCalibrating = true;
+            continue;
+        }
+
+        constexpr const char kOverrideStepsArg[] = "--override-steps=";
+        argLen = sizeof(kOverrideStepsArg) - 1;
+        if (strncmp(argv[i], kOverrideStepsArg, argLen) == 0) {
+            const char* overrideSteps = argv[i] + argLen;
+            if (overrideSteps[0] != '\0') {
+                mOverrideStepsToRun = strtoul(overrideSteps, nullptr, 0);
+            }
+            continue;
+        }
+
+        constexpr const char kTraceFileArg[] = "--trace-file=";
+        argLen = sizeof(kTraceFileArg) - 1;
+        if (strncmp(argv[i], kTraceFileArg, argLen) == 0) {
+            const char* traceFile = argv[i] + argLen;
+            if (traceFile[0] != '\0') {
+                mTraceFile = traceFile;
+            }
+            continue;
+        }
+
+        if (strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+            dawn::InfoLog()
+                << "Additional flags:"
+                << " [--calibration] [--override-steps=x] [--trace-file=file]\n"
+                << "  --calibration: Only run calibration. Calibration allows the perf test"
+                   " runner script to save some time.\n"
+                << " --override-steps: Set a fixed number of steps to run for each test\n"
+                << " --trace-file: The file to dump trace results.\n";
+            continue;
+        }
+    }
+}
+
+DawnPerfTestEnvironment::~DawnPerfTestEnvironment() = default;
+
+void DawnPerfTestEnvironment::SetUp() {
+    DawnTestEnvironment::SetUp();
+
+    mPlatform = std::make_unique<DawnPerfTestPlatform>();
+    mInstance->SetPlatform(mPlatform.get());
+
+    // Begin writing the trace event array.
+    if (mTraceFile != nullptr) {
+        std::ofstream outFile;
+        outFile.open(mTraceFile);
+        outFile << "{ \"traceEvents\": [";
+        outFile << "{}";  // Dummy object so trace events can always prepend a comma
+        outFile.flush();
+        outFile.close();
+    }
+}
+
+void DawnPerfTestEnvironment::TearDown() {
+    // End writing the trace event array.
+    if (mTraceFile != nullptr) {
+        std::vector<DawnPerfTestPlatform::TraceEvent> traceEventBuffer =
+            mPlatform->AcquireTraceEventBuffer();
+
+        // Write remaining trace events.
+        DumpTraceEventsToJSONFile(traceEventBuffer, mTraceFile);
+
+        std::ofstream outFile;
+        outFile.open(mTraceFile, std::ios_base::app);
+        outFile << "]}";
+        outFile << std::endl;
+        outFile.close();
+    }
+
+    DawnTestEnvironment::TearDown();
+}
+
+bool DawnPerfTestEnvironment::IsCalibrating() const {
+    return mIsCalibrating;
+}
+
+unsigned int DawnPerfTestEnvironment::OverrideStepsToRun() const {
+    return mOverrideStepsToRun;
+}
+
+const char* DawnPerfTestEnvironment::GetTraceFile() const {
+    return mTraceFile;
+}
+
+DawnPerfTestPlatform* DawnPerfTestEnvironment::GetPlatform() const {
+    return mPlatform.get();
+}
+
+DawnPerfTestBase::DawnPerfTestBase(DawnTestBase* test,
+                                   unsigned int iterationsPerStep,
+                                   unsigned int maxStepsInFlight)
+    : mTest(test),
+      mIterationsPerStep(iterationsPerStep),
+      mMaxStepsInFlight(maxStepsInFlight),
+      mTimer(utils::CreateTimer()) {
+}
+
+DawnPerfTestBase::~DawnPerfTestBase() = default;
+
+void DawnPerfTestBase::AbortTest() {
+    mRunning = false;
+}
+
+void DawnPerfTestBase::RunTest() {
+    if (gTestEnv->OverrideStepsToRun() == 0) {
+        // Run to compute the approximate number of steps to perform.
+        mStepsToRun = std::numeric_limits<unsigned int>::max();
+
+        // Do a warmup run for calibration.
+        DoRunLoop(kCalibrationRunTimeSeconds);
+        DoRunLoop(kCalibrationRunTimeSeconds);
+
+        // Scale steps down according to the time that exceeded one second.
+        double scale = kCalibrationRunTimeSeconds / mTimer->GetElapsedTime();
+        mStepsToRun = static_cast<unsigned int>(static_cast<double>(mNumStepsPerformed) * scale);
+
+        // Calibration allows the perf test runner script to save some time.
+        if (gTestEnv->IsCalibrating()) {
+            PrintResult("steps", mStepsToRun, "count", false);
+            return;
+        }
+    } else {
+        mStepsToRun = gTestEnv->OverrideStepsToRun();
+    }
+
+    // Do another warmup run. Seems to consistently improve results.
+    DoRunLoop(kMaximumRunTimeSeconds);
+
+    DawnPerfTestPlatform* platform =
+        reinterpret_cast<DawnPerfTestPlatform*>(gTestEnv->GetPlatform());
+    const char* testName = ::testing::UnitTest::GetInstance()->current_test_info()->name();
+
+    // Only enable trace event recording in this section.
+    // We don't care about trace events during warmup and calibration.
+    platform->EnableTraceEventRecording(true);
+    {
+        TRACE_EVENT0(platform, General, testName);
+        for (unsigned int trial = 0; trial < kNumTrials; ++trial) {
+            TRACE_EVENT0(platform, General, "Trial");
+            DoRunLoop(kMaximumRunTimeSeconds);
+            OutputResults();
+        }
+    }
+    platform->EnableTraceEventRecording(false);
+}
+
+void DawnPerfTestBase::DoRunLoop(double maxRunTime) {
+    dawn::platform::Platform* platform = gTestEnv->GetPlatform();
+
+    mNumStepsPerformed = 0;
+    mCpuTime = 0;
+    mRunning = true;
+
+    uint64_t finishedIterations = 0;
+    uint64_t submittedIterations = 0;
+
+    mTimer->Start();
+
+    // This loop can be canceled by calling AbortTest().
+    while (mRunning) {
+        // Wait if there are too many steps in flight on the GPU.
+        while (submittedIterations - finishedIterations >= mMaxStepsInFlight) {
+            mTest->WaitABit();
+        }
+
+        TRACE_EVENT0(platform, General, "Step");
+        double stepStart = mTimer->GetElapsedTime();
+        Step();
+        mCpuTime += mTimer->GetElapsedTime() - stepStart;
+
+        submittedIterations++;
+        mTest->queue.OnSubmittedWorkDone(
+            0u,
+            [](WGPUQueueWorkDoneStatus, void* userdata) {
+                uint64_t* counter = static_cast<uint64_t*>(userdata);
+                (*counter)++;
+            },
+            &finishedIterations);
+
+        if (mRunning) {
+            ++mNumStepsPerformed;
+            if (mTimer->GetElapsedTime() > maxRunTime) {
+                mRunning = false;
+            } else if (mNumStepsPerformed >= mStepsToRun) {
+                mRunning = false;
+            }
+        }
+    }
+
+    // Wait for all GPU commands to complete.
+    // TODO(enga): When Dawn has multiple backgrounds threads, add a Device::WaitForIdleForTesting()
+    // which waits for all threads to stop doing work. When we output results, there should
+    // be no additional incoming trace events.
+    while (submittedIterations != finishedIterations) {
+        mTest->WaitABit();
+    }
+
+    mTimer->Stop();
+}
+
+void DawnPerfTestBase::OutputResults() {
+    // TODO(enga): When Dawn has multiple backgrounds threads, add a Device::WaitForIdleForTesting()
+    // which waits for all threads to stop doing work. When we output results, there should
+    // be no additional incoming trace events.
+    DawnPerfTestPlatform* platform =
+        reinterpret_cast<DawnPerfTestPlatform*>(gTestEnv->GetPlatform());
+
+    std::vector<DawnPerfTestPlatform::TraceEvent> traceEventBuffer =
+        platform->AcquireTraceEventBuffer();
+
+    struct EventTracker {
+        double start = std::numeric_limits<double>::max();
+        double end = 0;
+        uint32_t count = 0;
+    };
+
+    EventTracker validationTracker = {};
+    EventTracker recordingTracker = {};
+
+    double totalValidationTime = 0;
+    double totalRecordingTime = 0;
+
+    // Note: We assume END timestamps always come after their corresponding BEGIN timestamps.
+    // TODO(enga): When Dawn has multiple threads, stratify by thread id.
+    for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
+        EventTracker* tracker = nullptr;
+        double* totalTime = nullptr;
+
+        switch (traceEvent.category) {
+            case dawn::platform::TraceCategory::Validation:
+                tracker = &validationTracker;
+                totalTime = &totalValidationTime;
+                break;
+            case dawn::platform::TraceCategory::Recording:
+                tracker = &recordingTracker;
+                totalTime = &totalRecordingTime;
+                break;
+            default:
+                break;
+        }
+
+        if (tracker == nullptr) {
+            continue;
+        }
+
+        if (traceEvent.phase == TRACE_EVENT_PHASE_BEGIN) {
+            tracker->start = std::min(tracker->start, traceEvent.timestamp);
+            tracker->count++;
+        }
+
+        if (traceEvent.phase == TRACE_EVENT_PHASE_END) {
+            tracker->end = std::max(tracker->end, traceEvent.timestamp);
+            ASSERT(tracker->count > 0);
+            tracker->count--;
+
+            if (tracker->count == 0) {
+                *totalTime += (tracker->end - tracker->start);
+                *tracker = {};
+            }
+        }
+    }
+
+    PrintPerIterationResultFromSeconds("wall_time", mTimer->GetElapsedTime(), true);
+    PrintPerIterationResultFromSeconds("cpu_time", mCpuTime, true);
+    PrintPerIterationResultFromSeconds("validation_time", totalValidationTime, true);
+    PrintPerIterationResultFromSeconds("recording_time", totalRecordingTime, true);
+
+    const char* traceFile = gTestEnv->GetTraceFile();
+    if (traceFile != nullptr) {
+        DumpTraceEventsToJSONFile(traceEventBuffer, traceFile);
+    }
+}
+
+void DawnPerfTestBase::PrintPerIterationResultFromSeconds(const std::string& trace,
+                                                          double valueInSeconds,
+                                                          bool important) const {
+    if (valueInSeconds == 0) {
+        return;
+    }
+
+    double secondsPerIteration =
+        valueInSeconds / static_cast<double>(mNumStepsPerformed * mIterationsPerStep);
+
+    // Give the result a different name to ensure separate graphs if we transition.
+    if (secondsPerIteration > 1) {
+        PrintResult(trace, secondsPerIteration * 1e3, "ms", important);
+    } else if (secondsPerIteration > 1e-3) {
+        PrintResult(trace, secondsPerIteration * 1e6, "us", important);
+    } else {
+        PrintResult(trace, secondsPerIteration * 1e9, "ns", important);
+    }
+}
+
+void DawnPerfTestBase::PrintResult(const std::string& trace,
+                                   double value,
+                                   const std::string& units,
+                                   bool important) const {
+    PrintResultImpl(trace, std::to_string(value), units, important);
+}
+
+void DawnPerfTestBase::PrintResult(const std::string& trace,
+                                   unsigned int value,
+                                   const std::string& units,
+                                   bool important) const {
+    PrintResultImpl(trace, std::to_string(value), units, important);
+}
+
+void DawnPerfTestBase::PrintResultImpl(const std::string& trace,
+                                       const std::string& value,
+                                       const std::string& units,
+                                       bool important) const {
+    const ::testing::TestInfo* const testInfo =
+        ::testing::UnitTest::GetInstance()->current_test_info();
+
+    std::string metric = std::string(testInfo->test_suite_name()) + "." + trace;
+
+    std::string story = testInfo->name();
+    std::replace(story.begin(), story.end(), '/', '_');
+
+    // The results are printed according to the format specified at
+    // [chromium]//src/tools/perf/generate_legacy_perf_dashboard_json.py
+    dawn::InfoLog() << (important ? "*" : "") << "RESULT " << metric << ": " << story << "= "
+                    << value << " " << units;
+}
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.h b/src/dawn/tests/perf_tests/DawnPerfTest.h
new file mode 100644
index 0000000..548d212
--- /dev/null
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.h
@@ -0,0 +1,130 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_PERFTESTS_DAWNPERFTEST_H_
+#define TESTS_PERFTESTS_DAWNPERFTEST_H_
+
+#include "dawn/tests/DawnTest.h"
+
+namespace utils {
+    class Timer;
+}
+
+class DawnPerfTestPlatform;
+
+void InitDawnPerfTestEnvironment(int argc, char** argv);
+
+class DawnPerfTestEnvironment : public DawnTestEnvironment {
+  public:
+    DawnPerfTestEnvironment(int argc, char** argv);
+    ~DawnPerfTestEnvironment() override;
+
+    void SetUp() override;
+    void TearDown() override;
+
+    bool IsCalibrating() const;
+    unsigned int OverrideStepsToRun() const;
+
+    // Returns the path to the trace file, or nullptr if traces should
+    // not be written to a json file.
+    const char* GetTraceFile() const;
+
+    DawnPerfTestPlatform* GetPlatform() const;
+
+  private:
+    // Only run calibration which allows the perf test runner to save time.
+    bool mIsCalibrating = false;
+
+    // If non-zero, overrides the number of steps.
+    unsigned int mOverrideStepsToRun = 0;
+
+    const char* mTraceFile = nullptr;
+
+    std::unique_ptr<DawnPerfTestPlatform> mPlatform;
+};
+
+class DawnPerfTestBase {
+    static constexpr double kCalibrationRunTimeSeconds = 1.0;
+    static constexpr double kMaximumRunTimeSeconds = 10.0;
+    static constexpr unsigned int kNumTrials = 3;
+
+  public:
+    // Perf test results are reported as the amortized time of |mStepsToRun| * |mIterationsPerStep|.
+    // A test deriving from |DawnPerfTestBase| must call the base contructor with
+    // |iterationsPerStep| appropriately to reflect the amount of work performed.
+    // |maxStepsInFlight| may be used to mimic having multiple frames or workloads in flight which
+    // is common with double or triple buffered applications.
+    DawnPerfTestBase(DawnTestBase* test,
+                     unsigned int iterationsPerStep,
+                     unsigned int maxStepsInFlight);
+    virtual ~DawnPerfTestBase();
+
+  protected:
+    // Call if the test step was aborted and the test should stop running.
+    void AbortTest();
+
+    void RunTest();
+    void PrintPerIterationResultFromSeconds(const std::string& trace,
+                                            double valueInSeconds,
+                                            bool important) const;
+    void PrintResult(const std::string& trace,
+                     double value,
+                     const std::string& units,
+                     bool important) const;
+    void PrintResult(const std::string& trace,
+                     unsigned int value,
+                     const std::string& units,
+                     bool important) const;
+
+  private:
+    void DoRunLoop(double maxRunTime);
+    void OutputResults();
+
+    void PrintResultImpl(const std::string& trace,
+                         const std::string& value,
+                         const std::string& units,
+                         bool important) const;
+
+    virtual void Step() = 0;
+
+    DawnTestBase* mTest;
+    bool mRunning = false;
+    const unsigned int mIterationsPerStep;
+    const unsigned int mMaxStepsInFlight;
+    unsigned int mStepsToRun = 0;
+    unsigned int mNumStepsPerformed = 0;
+    double mCpuTime;
+    std::unique_ptr<utils::Timer> mTimer;
+};
+
+template <typename Params = AdapterTestParam>
+class DawnPerfTestWithParams : public DawnTestWithParams<Params>, public DawnPerfTestBase {
+  protected:
+    DawnPerfTestWithParams(unsigned int iterationsPerStep, unsigned int maxStepsInFlight)
+        : DawnTestWithParams<Params>(),
+          DawnPerfTestBase(this, iterationsPerStep, maxStepsInFlight) {
+    }
+    void SetUp() override {
+        DawnTestWithParams<Params>::SetUp();
+
+        wgpu::AdapterProperties properties;
+        this->GetAdapter().GetProperties(&properties);
+        DAWN_TEST_UNSUPPORTED_IF(properties.adapterType == wgpu::AdapterType::CPU);
+    }
+    ~DawnPerfTestWithParams() override = default;
+};
+
+using DawnPerfTest = DawnPerfTestWithParams<>;
+
+#endif  // TESTS_PERFTESTS_DAWNPERFTEST_H_
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
new file mode 100644
index 0000000..3da0dad
--- /dev/null
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
@@ -0,0 +1,148 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTestPlatform.h"
+
+#include <algorithm>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+#include "dawn/utils/Timer.h"
+namespace {
+
+    struct TraceCategoryInfo {
+        unsigned char enabled;
+        dawn::platform::TraceCategory category;
+    };
+
+    constexpr TraceCategoryInfo gTraceCategories[4] = {
+        {1, dawn::platform::TraceCategory::General},
+        {1, dawn::platform::TraceCategory::Validation},
+        {1, dawn::platform::TraceCategory::Recording},
+        {1, dawn::platform::TraceCategory::GPUWork},
+    };
+
+    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
+    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
+    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
+    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);
+
+}  // anonymous namespace
+
+DawnPerfTestPlatform::DawnPerfTestPlatform()
+    : dawn::platform::Platform(), mTimer(utils::CreateTimer()) {
+}
+
+DawnPerfTestPlatform::~DawnPerfTestPlatform() = default;
+
+const unsigned char* DawnPerfTestPlatform::GetTraceCategoryEnabledFlag(
+    dawn::platform::TraceCategory category) {
+    switch (category) {
+        case dawn::platform::TraceCategory::General:
+        case dawn::platform::TraceCategory::Validation:
+        case dawn::platform::TraceCategory::Recording:
+        case dawn::platform::TraceCategory::GPUWork:
+            break;
+        default:
+            UNREACHABLE();
+    }
+    return &gTraceCategories[static_cast<uint32_t>(category)].enabled;
+}
+
+double DawnPerfTestPlatform::MonotonicallyIncreasingTime() {
+    // Move the time origin to the first call to this function, to avoid generating
+    // unnecessarily large timestamps.
+    static double origin = mTimer->GetAbsoluteTime();
+    return mTimer->GetAbsoluteTime() - origin;
+}
+
+std::vector<DawnPerfTestPlatform::TraceEvent>* DawnPerfTestPlatform::GetLocalTraceEventBuffer() {
+    // Cache the pointer to the vector in thread_local storage
+    thread_local std::vector<TraceEvent>* traceEventBuffer = nullptr;
+
+    if (traceEventBuffer == nullptr) {
+        auto buffer = std::make_unique<std::vector<TraceEvent>>();
+        traceEventBuffer = buffer.get();
+
+        // Add a new buffer to the map
+        std::lock_guard<std::mutex> guard(mTraceEventBufferMapMutex);
+        mTraceEventBuffers[std::this_thread::get_id()] = std::move(buffer);
+    }
+
+    return traceEventBuffer;
+}
+
+// TODO(enga): Simplify this API.
+uint64_t DawnPerfTestPlatform::AddTraceEvent(char phase,
+                                             const unsigned char* categoryGroupEnabled,
+                                             const char* name,
+                                             uint64_t id,
+                                             double timestamp,
+                                             int numArgs,
+                                             const char** argNames,
+                                             const unsigned char* argTypes,
+                                             const uint64_t* argValues,
+                                             unsigned char flags) {
+    if (!mRecordTraceEvents) {
+        return 0;
+    }
+
+    // Discover the category name based on categoryGroupEnabled.  This flag comes from the first
+    // parameter of TraceCategory, and corresponds to one of the entries in gTraceCategories.
+    static_assert(offsetof(TraceCategoryInfo, enabled) == 0,
+                  "|enabled| must be the first field of the TraceCategoryInfo class.");
+
+    const TraceCategoryInfo* info =
+        reinterpret_cast<const TraceCategoryInfo*>(categoryGroupEnabled);
+
+    std::vector<TraceEvent>* buffer = GetLocalTraceEventBuffer();
+    buffer->emplace_back(phase, info->category, name, id, timestamp);
+
+    size_t hash = 0;
+    HashCombine(&hash, buffer->size());
+    HashCombine(&hash, std::this_thread::get_id());
+    return static_cast<uint64_t>(hash);
+}
+
+void DawnPerfTestPlatform::EnableTraceEventRecording(bool enable) {
+    mRecordTraceEvents = enable;
+}
+
+std::vector<DawnPerfTestPlatform::TraceEvent> DawnPerfTestPlatform::AcquireTraceEventBuffer() {
+    std::vector<TraceEvent> traceEventBuffer;
+    {
+        // AcquireTraceEventBuffer should only be called when Dawn is completely idle. There should
+        // be no threads inserting trace events.
+        // Right now, this is safe because AcquireTraceEventBuffer is called after waiting on a
+        // fence for all GPU commands to finish executing. When Dawn has multiple background threads
+        // for other work (creation, validation, submission, residency, etc), we will need to ensure
+        // all work on those threads is stopped as well.
+        std::lock_guard<std::mutex> guard(mTraceEventBufferMapMutex);
+        for (auto it = mTraceEventBuffers.begin(); it != mTraceEventBuffers.end(); ++it) {
+            std::ostringstream stream;
+            stream << it->first;
+            std::string threadId = stream.str();
+
+            std::transform(it->second->begin(), it->second->end(),
+                           std::back_inserter(traceEventBuffer), [&threadId](TraceEvent ev) {
+                               ev.threadId = threadId;
+                               return ev;
+                           });
+            it->second->clear();
+        }
+    }
+    return traceEventBuffer;
+}
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
new file mode 100644
index 0000000..f41d6d9
--- /dev/null
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
@@ -0,0 +1,91 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_PERFTESTS_DAWNPERFTESTPLATFORM_H_
+#define TESTS_PERFTESTS_DAWNPERFTESTPLATFORM_H_
+
+#include "dawn/platform/DawnPlatform.h"
+
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+namespace utils {
+    class Timer;
+}
+
+class DawnPerfTestPlatform : public dawn::platform::Platform {
+  public:
+    // These are trace events according to Google's "Trace Event Format".
+    // See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
+    // Only a subset of the properties are implemented.
+    struct TraceEvent final {
+        TraceEvent() {
+        }
+        TraceEvent(char phaseIn,
+                   dawn::platform::TraceCategory categoryIn,
+                   const char* nameIn,
+                   uint64_t idIn,
+                   double timestampIn)
+            : phase(phaseIn), category(categoryIn), name(nameIn), id(idIn), timestamp(timestampIn) {
+        }
+
+        char phase = 0;
+        dawn::platform::TraceCategory category;
+        const char* name = nullptr;
+        uint64_t id = 0;
+        std::string threadId;
+        double timestamp = 0;
+    };
+
+    DawnPerfTestPlatform();
+    ~DawnPerfTestPlatform() override;
+
+    void EnableTraceEventRecording(bool enable);
+    std::vector<TraceEvent> AcquireTraceEventBuffer();
+
+  private:
+    const unsigned char* GetTraceCategoryEnabledFlag(
+        dawn::platform::TraceCategory category) override;
+
+    double MonotonicallyIncreasingTime() override;
+
+    std::vector<TraceEvent>* GetLocalTraceEventBuffer();
+
+    uint64_t AddTraceEvent(char phase,
+                           const unsigned char* categoryGroupEnabled,
+                           const char* name,
+                           uint64_t id,
+                           double timestamp,
+                           int numArgs,
+                           const char** argNames,
+                           const unsigned char* argTypes,
+                           const uint64_t* argValues,
+                           unsigned char flags) override;
+
+    bool mRecordTraceEvents = false;
+    std::unique_ptr<utils::Timer> mTimer;
+
+    // Trace event record.
+    // Each uses their own trace event buffer, but the PerfTestPlatform owns all of them in
+    // this map. The map stores all of them so we can iterate through them and flush when
+    // AcquireTraceEventBuffer is called.
+    std::unordered_map<std::thread::id, std::unique_ptr<std::vector<TraceEvent>>>
+        mTraceEventBuffers;
+    std::mutex mTraceEventBufferMapMutex;
+};
+
+#endif  // TESTS_PERFTESTS_DAWNPERFTESTPLATFORM_H_
diff --git a/src/dawn/tests/perf_tests/DrawCallPerf.cpp b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
new file mode 100644
index 0000000..692c167
--- /dev/null
+++ b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
@@ -0,0 +1,664 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    constexpr unsigned int kNumDraws = 2000;
+
+    constexpr uint32_t kTextureSize = 64;
+    constexpr size_t kUniformSize = 3 * sizeof(float);
+
+    constexpr float kVertexData[12] = {
+        0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
+    };
+
+    constexpr char kVertexShader[] = R"(
+        @stage(vertex) fn main(
+            @location(0) pos : vec4<f32>
+        ) -> @builtin(position) vec4<f32> {
+            return pos;
+        })";
+
+    constexpr char kFragmentShaderA[] = R"(
+        struct Uniforms {
+            color : vec3<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : Uniforms;
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(uniforms.color * (1.0 / 5000.0), 1.0);
+        })";
+
+    constexpr char kFragmentShaderB[] = R"(
+        struct Constants {
+            color : vec3<f32>
+        }
+        struct Uniforms {
+            color : vec3<f32>
+        }
+        @group(0) @binding(0) var<uniform> constants : Constants;
+        @group(1) @binding(0) var<uniform> uniforms : Uniforms;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>((constants.color + uniforms.color) * (1.0 / 5000.0), 1.0);
+        })";
+
+    enum class Pipeline {
+        Static,     // Keep the same pipeline for all draws.
+        Redundant,  // Use the same pipeline, but redundantly set it.
+        Dynamic,    // Change the pipeline between draws.
+    };
+
+    enum class UniformData {
+        Static,   // Don't update per-draw uniform data.
+        Dynamic,  // Update the per-draw uniform data once per frame.
+    };
+
+    enum class BindGroup {
+        NoChange,   // Use one bind group for all draws.
+        Redundant,  // Use the same bind group, but redundantly set it.
+        NoReuse,    // Create a new bind group every time.
+        Multiple,   // Use multiple static bind groups.
+        Dynamic,    // Use bind groups with dynamic offsets.
+    };
+
+    enum class VertexBuffer {
+        NoChange,  // Use one vertex buffer for all draws.
+        Multiple,  // Use multiple static vertex buffers.
+        Dynamic,   // Switch vertex buffers between draws.
+    };
+
+    enum class RenderBundle {
+        No,   // Record commands in a render pass
+        Yes,  // Record commands in a render bundle
+    };
+
+    struct DrawCallParam {
+        Pipeline pipelineType;
+        VertexBuffer vertexBufferType;
+        BindGroup bindGroupType;
+        UniformData uniformDataType;
+        RenderBundle withRenderBundle;
+    };
+
+    using DrawCallParamTuple =
+        std::tuple<Pipeline, VertexBuffer, BindGroup, UniformData, RenderBundle>;
+
+    template <typename T>
+    unsigned int AssignParam(T& lhs, T rhs) {
+        lhs = rhs;
+        return 0u;
+    }
+
+    // This helper function allows creating a DrawCallParam from a list of arguments
+    // without specifying all of the members. Provided members can be passed once in an arbitrary
+    // order. Unspecified members default to:
+    //  - Pipeline::Static
+    //  - VertexBuffer::NoChange
+    //  - BindGroup::NoChange
+    //  - UniformData::Static
+    //  - RenderBundle::No
+    template <typename... Ts>
+    DrawCallParam MakeParam(Ts... args) {
+        // Baseline param
+        DrawCallParamTuple paramTuple{Pipeline::Static, VertexBuffer::NoChange, BindGroup::NoChange,
+                                      UniformData::Static, RenderBundle::No};
+
+        unsigned int unused[] = {
+            0,  // Avoid making a 0-sized array.
+            AssignParam(std::get<Ts>(paramTuple), args)...,
+        };
+        DAWN_UNUSED(unused);
+
+        return DrawCallParam{
+            std::get<Pipeline>(paramTuple),     std::get<VertexBuffer>(paramTuple),
+            std::get<BindGroup>(paramTuple),    std::get<UniformData>(paramTuple),
+            std::get<RenderBundle>(paramTuple),
+        };
+    }
+
+    struct DrawCallParamForTest : AdapterTestParam {
+        DrawCallParamForTest(const AdapterTestParam& backendParam, DrawCallParam param)
+            : AdapterTestParam(backendParam), param(param) {
+        }
+        DrawCallParam param;
+    };
+
+    std::ostream& operator<<(std::ostream& ostream, const DrawCallParamForTest& testParams) {
+        ostream << static_cast<const AdapterTestParam&>(testParams);
+
+        const DrawCallParam& param = testParams.param;
+
+        switch (param.pipelineType) {
+            case Pipeline::Static:
+                break;
+            case Pipeline::Redundant:
+                ostream << "_RedundantPipeline";
+                break;
+            case Pipeline::Dynamic:
+                ostream << "_DynamicPipeline";
+                break;
+        }
+
+        switch (param.vertexBufferType) {
+            case VertexBuffer::NoChange:
+                break;
+            case VertexBuffer::Multiple:
+                ostream << "_MultipleVertexBuffers";
+                break;
+            case VertexBuffer::Dynamic:
+                ostream << "_DynamicVertexBuffer";
+        }
+
+        switch (param.bindGroupType) {
+            case BindGroup::NoChange:
+                break;
+            case BindGroup::Redundant:
+                ostream << "_RedundantBindGroups";
+                break;
+            case BindGroup::NoReuse:
+                ostream << "_NoReuseBindGroups";
+                break;
+            case BindGroup::Multiple:
+                ostream << "_MultipleBindGroups";
+                break;
+            case BindGroup::Dynamic:
+                ostream << "_DynamicBindGroup";
+                break;
+        }
+
+        switch (param.uniformDataType) {
+            case UniformData::Static:
+                break;
+            case UniformData::Dynamic:
+                ostream << "_DynamicData";
+                break;
+        }
+
+        switch (param.withRenderBundle) {
+            case RenderBundle::No:
+                break;
+            case RenderBundle::Yes:
+                ostream << "_RenderBundle";
+                break;
+        }
+
+        return ostream;
+    }
+
+}  // anonymous namespace
+
+// DrawCallPerf is an uber-benchmark with supports many parameterizations.
+// The specific parameterizations we care about are explicitly instantiated at the bottom
+// of this test file.
+// DrawCallPerf tests drawing a simple triangle with many ways of encoding commands,
+// binding, and uploading data to the GPU. The rationale for this is the following:
+//   - Static/Multiple/Dynamic vertex buffers: Tests switching buffer bindings. This has
+//     a state tracking cost as well as a GPU driver cost.
+//   - Static/Multiple/Dynamic bind groups: Same rationale as vertex buffers
+//   - Static/Dynamic pipelines: In addition to a change to GPU state, changing the pipeline
+//     layout incurs additional state tracking costs in Dawn.
+//   - With/Without render bundles: All of the above can have lower validation costs if
+//     precomputed in a render bundle.
+//   - Static/Dynamic data: Updating data for each draw is a common use case. It also tests
+//     the efficiency of resource transitions.
+class DrawCallPerf : public DawnPerfTestWithParams<DrawCallParamForTest> {
+  public:
+    DrawCallPerf() : DawnPerfTestWithParams(kNumDraws, 3) {
+    }
+    ~DrawCallPerf() override = default;
+
+    void SetUp() override;
+
+  protected:
+    DrawCallParam GetParam() const {
+        return DawnPerfTestWithParams::GetParam().param;
+    }
+
+    template <typename Encoder>
+    void RecordRenderCommands(Encoder encoder);
+
+  private:
+    void Step() override;
+
+    // One large dynamic vertex buffer, or multiple separate vertex buffers.
+    wgpu::Buffer mVertexBuffers[kNumDraws];
+    size_t mAlignedVertexDataSize;
+
+    std::vector<float> mUniformBufferData;
+    // One large dynamic uniform buffer, or multiple separate uniform buffers.
+    wgpu::Buffer mUniformBuffers[kNumDraws];
+
+    wgpu::BindGroupLayout mUniformBindGroupLayout;
+    // One dynamic bind group or multiple bind groups.
+    wgpu::BindGroup mUniformBindGroups[kNumDraws];
+    size_t mAlignedUniformSize;
+    size_t mNumUniformFloats;
+
+    wgpu::BindGroupLayout mConstantBindGroupLayout;
+    wgpu::BindGroup mConstantBindGroup;
+
+    // If the pipeline is static, only the first is used.
+    // Otherwise, the test alternates between two pipelines for each draw.
+    wgpu::RenderPipeline mPipelines[2];
+
+    wgpu::TextureView mColorAttachment;
+    wgpu::TextureView mDepthStencilAttachment;
+
+    wgpu::RenderBundle mRenderBundle;
+};
+
+void DrawCallPerf::SetUp() {
+    DawnPerfTestWithParams::SetUp();
+
+    // Compute aligned uniform / vertex data sizes.
+    mAlignedUniformSize =
+        Align(kUniformSize, GetSupportedLimits().limits.minUniformBufferOffsetAlignment);
+    mAlignedVertexDataSize = Align(sizeof(kVertexData), 4);
+
+    // Initialize uniform buffer data.
+    mNumUniformFloats = mAlignedUniformSize / sizeof(float);
+    mUniformBufferData = std::vector<float>(kNumDraws * mNumUniformFloats, 0.0);
+
+    // Create the color / depth stencil attachments.
+    {
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = kTextureSize;
+        descriptor.size.height = kTextureSize;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        mColorAttachment = device.CreateTexture(&descriptor).CreateView();
+
+        descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        mDepthStencilAttachment = device.CreateTexture(&descriptor).CreateView();
+    }
+
+    // Create vertex buffer(s)
+    switch (GetParam().vertexBufferType) {
+        case VertexBuffer::NoChange:
+            mVertexBuffers[0] = utils::CreateBufferFromData(
+                device, kVertexData, sizeof(kVertexData), wgpu::BufferUsage::Vertex);
+            break;
+
+        case VertexBuffer::Multiple: {
+            for (uint32_t i = 0; i < kNumDraws; ++i) {
+                mVertexBuffers[i] = utils::CreateBufferFromData(
+                    device, kVertexData, sizeof(kVertexData), wgpu::BufferUsage::Vertex);
+            }
+            break;
+        }
+
+        case VertexBuffer::Dynamic: {
+            std::vector<char> data(mAlignedVertexDataSize * kNumDraws);
+            for (uint32_t i = 0; i < kNumDraws; ++i) {
+                memcpy(data.data() + mAlignedVertexDataSize * i, kVertexData, sizeof(kVertexData));
+            }
+
+            mVertexBuffers[0] = utils::CreateBufferFromData(device, data.data(), data.size(),
+                                                            wgpu::BufferUsage::Vertex);
+            break;
+        }
+    }
+
+    // Create the bind group layout.
+    switch (GetParam().bindGroupType) {
+        case BindGroup::NoChange:
+        case BindGroup::Redundant:
+        case BindGroup::NoReuse:
+        case BindGroup::Multiple:
+            mUniformBindGroupLayout = utils::MakeBindGroupLayout(
+                device,
+                {
+                    {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, false},
+                });
+            break;
+
+        case BindGroup::Dynamic:
+            mUniformBindGroupLayout = utils::MakeBindGroupLayout(
+                device,
+                {
+                    {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true},
+                });
+            break;
+
+        default:
+            UNREACHABLE();
+            break;
+    }
+
+    // Setup the base render pipeline descriptor.
+    utils::ComboRenderPipelineDescriptor renderPipelineDesc;
+    renderPipelineDesc.vertex.bufferCount = 1;
+    renderPipelineDesc.cBuffers[0].arrayStride = 4 * sizeof(float);
+    renderPipelineDesc.cBuffers[0].attributeCount = 1;
+    renderPipelineDesc.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    renderPipelineDesc.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+    renderPipelineDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Create the pipeline layout for the first pipeline.
+    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+    pipelineLayoutDesc.bindGroupLayouts = &mUniformBindGroupLayout;
+    pipelineLayoutDesc.bindGroupLayoutCount = 1;
+    wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+    // Create the shaders for the first pipeline.
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, kVertexShader);
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, kFragmentShaderA);
+
+    // Create the first pipeline.
+    renderPipelineDesc.layout = pipelineLayout;
+    renderPipelineDesc.vertex.module = vsModule;
+    renderPipelineDesc.cFragment.module = fsModule;
+    mPipelines[0] = device.CreateRenderPipeline(&renderPipelineDesc);
+
+    // If the test is using a dynamic pipeline, create the second pipeline.
+    if (GetParam().pipelineType == Pipeline::Dynamic) {
+        // Create another bind group layout. The data for this binding point will be the same for
+        // all draws.
+        mConstantBindGroupLayout = utils::MakeBindGroupLayout(
+            device, {
+                        {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, false},
+                    });
+
+        // Create the pipeline layout.
+        wgpu::BindGroupLayout bindGroupLayouts[2] = {
+            mConstantBindGroupLayout,
+            mUniformBindGroupLayout,
+        };
+        pipelineLayoutDesc.bindGroupLayouts = bindGroupLayouts,
+        pipelineLayoutDesc.bindGroupLayoutCount = 2;
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+        // Create the fragment shader module. This shader matches the pipeline layout described
+        // above.
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, kFragmentShaderB);
+
+        // Create the pipeline.
+        renderPipelineDesc.layout = pipelineLayout;
+        renderPipelineDesc.cFragment.module = fsModule;
+        mPipelines[1] = device.CreateRenderPipeline(&renderPipelineDesc);
+
+        // Create the buffer and bind group to bind to the constant bind group layout slot.
+        constexpr float kConstantData[] = {0.01, 0.02, 0.03};
+        wgpu::Buffer constantBuffer = utils::CreateBufferFromData(
+            device, kConstantData, sizeof(kConstantData), wgpu::BufferUsage::Uniform);
+        mConstantBindGroup = utils::MakeBindGroup(device, mConstantBindGroupLayout,
+                                                  {{0, constantBuffer, 0, sizeof(kConstantData)}});
+    }
+
+    // Create the buffers and bind groups for the per-draw uniform data.
+    switch (GetParam().bindGroupType) {
+        case BindGroup::NoChange:
+        case BindGroup::Redundant:
+            mUniformBuffers[0] = utils::CreateBufferFromData(
+                device, mUniformBufferData.data(), 3 * sizeof(float), wgpu::BufferUsage::Uniform);
+
+            mUniformBindGroups[0] = utils::MakeBindGroup(
+                device, mUniformBindGroupLayout, {{0, mUniformBuffers[0], 0, kUniformSize}});
+            break;
+
+        case BindGroup::NoReuse:
+            for (uint32_t i = 0; i < kNumDraws; ++i) {
+                mUniformBuffers[i] = utils::CreateBufferFromData(
+                    device, mUniformBufferData.data() + i * mNumUniformFloats, 3 * sizeof(float),
+                    wgpu::BufferUsage::Uniform);
+            }
+            // Bind groups are created on-the-fly.
+            break;
+
+        case BindGroup::Multiple:
+            for (uint32_t i = 0; i < kNumDraws; ++i) {
+                mUniformBuffers[i] = utils::CreateBufferFromData(
+                    device, mUniformBufferData.data() + i * mNumUniformFloats, 3 * sizeof(float),
+                    wgpu::BufferUsage::Uniform);
+
+                mUniformBindGroups[i] = utils::MakeBindGroup(
+                    device, mUniformBindGroupLayout, {{0, mUniformBuffers[i], 0, kUniformSize}});
+            }
+            break;
+
+        case BindGroup::Dynamic:
+            mUniformBuffers[0] = utils::CreateBufferFromData(
+                device, mUniformBufferData.data(), mUniformBufferData.size() * sizeof(float),
+                wgpu::BufferUsage::Uniform);
+
+            mUniformBindGroups[0] = utils::MakeBindGroup(
+                device, mUniformBindGroupLayout, {{0, mUniformBuffers[0], 0, kUniformSize}});
+            break;
+        default:
+            UNREACHABLE();
+            break;
+    }
+
+    // If using render bundles, record the render commands now.
+    if (GetParam().withRenderBundle == RenderBundle::Yes) {
+        wgpu::RenderBundleEncoderDescriptor descriptor = {};
+        descriptor.colorFormatsCount = 1;
+        descriptor.colorFormats = &renderPipelineDesc.cTargets[0].format;
+        descriptor.depthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&descriptor);
+        RecordRenderCommands(encoder);
+        mRenderBundle = encoder.Finish();
+    }
+}
+
+template <typename Encoder>
+void DrawCallPerf::RecordRenderCommands(Encoder pass) {
+    uint32_t uniformBindGroupIndex = 0;
+
+    if (GetParam().pipelineType == Pipeline::Static) {
+        // Static pipeline can be set now.
+        pass.SetPipeline(mPipelines[0]);
+    }
+
+    if (GetParam().vertexBufferType == VertexBuffer::NoChange) {
+        // Static vertex buffer can be set now.
+        pass.SetVertexBuffer(0, mVertexBuffers[0]);
+    }
+
+    if (GetParam().bindGroupType == BindGroup::NoChange) {
+        // Incompatible. Can't change pipeline without changing bind groups.
+        ASSERT(GetParam().pipelineType == Pipeline::Static);
+
+        // Static bind group can be set now.
+        pass.SetBindGroup(uniformBindGroupIndex, mUniformBindGroups[0]);
+    }
+
+    for (unsigned int i = 0; i < kNumDraws; ++i) {
+        switch (GetParam().pipelineType) {
+            case Pipeline::Static:
+                break;
+            case Pipeline::Redundant:
+                pass.SetPipeline(mPipelines[0]);
+                break;
+            case Pipeline::Dynamic: {
+                // If the pipeline is dynamic, ping pong between two pipelines.
+                pass.SetPipeline(mPipelines[i % 2]);
+
+                // The pipelines have different layouts so we change the binding index here.
+                uniformBindGroupIndex = i % 2;
+                if (uniformBindGroupIndex == 1) {
+                    // Because of the pipeline layout change, we need to rebind bind group index 0.
+                    pass.SetBindGroup(0, mConstantBindGroup);
+                }
+                break;
+            }
+        }
+
+        // Set the vertex buffer, if it changes.
+        switch (GetParam().vertexBufferType) {
+            case VertexBuffer::NoChange:
+                break;
+
+            case VertexBuffer::Multiple:
+                pass.SetVertexBuffer(0, mVertexBuffers[i]);
+                break;
+
+            case VertexBuffer::Dynamic:
+                pass.SetVertexBuffer(0, mVertexBuffers[0], i * mAlignedVertexDataSize);
+                break;
+        }
+
+        // Set the bind group, if it changes.
+        switch (GetParam().bindGroupType) {
+            case BindGroup::NoChange:
+                break;
+
+            case BindGroup::Redundant:
+                pass.SetBindGroup(uniformBindGroupIndex, mUniformBindGroups[0]);
+                break;
+
+            case BindGroup::NoReuse: {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+                    device, mUniformBindGroupLayout, {{0, mUniformBuffers[i], 0, kUniformSize}});
+                pass.SetBindGroup(uniformBindGroupIndex, bindGroup);
+                break;
+            }
+
+            case BindGroup::Multiple:
+                pass.SetBindGroup(uniformBindGroupIndex, mUniformBindGroups[i]);
+                break;
+
+            case BindGroup::Dynamic: {
+                uint32_t dynamicOffset = static_cast<uint32_t>(i * mAlignedUniformSize);
+                pass.SetBindGroup(uniformBindGroupIndex, mUniformBindGroups[0], 1, &dynamicOffset);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+        pass.Draw(3);
+    }
+}
+
+void DrawCallPerf::Step() {
+    if (GetParam().uniformDataType == UniformData::Dynamic) {
+        // Update uniform data if it's dynamic.
+        std::fill(mUniformBufferData.begin(), mUniformBufferData.end(),
+                  mUniformBufferData[0] + 1.0);
+
+        switch (GetParam().bindGroupType) {
+            case BindGroup::NoChange:
+            case BindGroup::Redundant:
+                queue.WriteBuffer(mUniformBuffers[0], 0, mUniformBufferData.data(),
+                                  3 * sizeof(float));
+                break;
+            case BindGroup::NoReuse:
+            case BindGroup::Multiple:
+                for (uint32_t i = 0; i < kNumDraws; ++i) {
+                    queue.WriteBuffer(mUniformBuffers[i], 0,
+                                      mUniformBufferData.data() + i * mNumUniformFloats,
+                                      3 * sizeof(float));
+                }
+                break;
+            case BindGroup::Dynamic:
+                queue.WriteBuffer(mUniformBuffers[0], 0, mUniformBufferData.data(),
+                                  mUniformBufferData.size() * sizeof(float));
+                break;
+        }
+    }
+
+    wgpu::CommandEncoder commands = device.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor renderPass({mColorAttachment}, mDepthStencilAttachment);
+    wgpu::RenderPassEncoder pass = commands.BeginRenderPass(&renderPass);
+
+    switch (GetParam().withRenderBundle) {
+        case RenderBundle::No:
+            RecordRenderCommands(pass);
+            break;
+        case RenderBundle::Yes:
+            pass.ExecuteBundles(1, &mRenderBundle);
+            break;
+        default:
+            UNREACHABLE();
+            break;
+    }
+
+    pass.End();
+    wgpu::CommandBuffer commandBuffer = commands.Finish();
+    queue.Submit(1, &commandBuffer);
+}
+
+TEST_P(DrawCallPerf, Run) {
+    RunTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(
+    DrawCallPerf,
+    {D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend(),
+     VulkanBackend({"skip_validation"})},
+    {
+        // Baseline
+        MakeParam(),
+
+        // Change vertex buffer binding
+        MakeParam(VertexBuffer::Multiple),  // Multiple vertex buffers
+        MakeParam(VertexBuffer::Dynamic),   // Dynamic vertex buffer
+
+        // Change bind group binding
+        MakeParam(BindGroup::Multiple),  // Multiple bind groups
+        MakeParam(BindGroup::Dynamic),   // Dynamic bind groups
+        MakeParam(BindGroup::NoReuse),   // New bind group per-draw
+
+        // Redundantly set pipeline / bind groups
+        MakeParam(Pipeline::Redundant, BindGroup::Redundant),
+
+        // Switch the pipeline every draw to test state tracking and updates to binding points
+        MakeParam(Pipeline::Dynamic,
+                  BindGroup::Multiple),  // Multiple bind groups w/ dynamic pipeline
+        MakeParam(Pipeline::Dynamic,
+                  BindGroup::Dynamic),  // Dynamic bind groups w/ dynamic pipeline
+
+        // ----------- Render Bundles -----------
+        // Command validation / state tracking can be futher optimized / precomputed.
+        // Use render bundles with varying vertex buffer binding
+        MakeParam(VertexBuffer::Multiple,
+                  RenderBundle::Yes),  // Multiple vertex buffers w/ render bundle
+        MakeParam(VertexBuffer::Dynamic,
+                  RenderBundle::Yes),  // Dynamic vertex buffer w/ render bundle
+
+        // Use render bundles with varying bind group binding
+        MakeParam(BindGroup::Multiple, RenderBundle::Yes),  // Multiple bind groups w/ render bundle
+        MakeParam(BindGroup::Dynamic, RenderBundle::Yes),   // Dynamic bind groups w/ render bundle
+
+        // Use render bundles with dynamic pipeline
+        MakeParam(Pipeline::Dynamic,
+                  BindGroup::Multiple,
+                  RenderBundle::Yes),  // Multiple bind groups w/ dynamic pipeline w/ render bundle
+        MakeParam(Pipeline::Dynamic,
+                  BindGroup::Dynamic,
+                  RenderBundle::Yes),  // Dynamic bind groups w/ dynamic pipeline w/ render bundle
+
+        // ----------- Render Bundles (end)-------
+
+        // Update per-draw data in the bind group(s). This will cause resource transitions between
+        // updating and drawing.
+        MakeParam(BindGroup::Multiple,
+                  UniformData::Dynamic),  // Update per-draw data: Multiple bind groups
+        MakeParam(BindGroup::Dynamic,
+                  UniformData::Dynamic),  // Update per-draw data: Dynamic bind groups
+    });
diff --git a/src/dawn/tests/perf_tests/README.md b/src/dawn/tests/perf_tests/README.md
new file mode 100644
index 0000000..5c419ed
--- /dev/null
+++ b/src/dawn/tests/perf_tests/README.md
@@ -0,0 +1,2 @@
+# Dawn Perf Tests
+Moved to [`//src/docs/dawn/testing.md`](https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/testing.md).
diff --git a/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
new file mode 100644
index 0000000..07896ad
--- /dev/null
+++ b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
@@ -0,0 +1,512 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    constexpr uint32_t kTileSize = 32u;
+
+    const std::string& kMatMulFloatHeader = R"(
+        struct Uniforms {
+            dimAOuter : u32,
+            dimInner : u32,
+            dimBOuter : u32,
+        }
+        struct Matrix {
+            numbers: array<f32>
+        }
+
+        @group(0) @binding(0) var<storage, read> firstMatrix : Matrix;
+        @group(0) @binding(1) var<storage, read> secondMatrix : Matrix;
+        @group(0) @binding(2) var<storage, write> resultMatrix : Matrix;
+        @group(0) @binding(3) var<uniform> uniforms : Uniforms;
+
+        fn mm_readA(row : u32, col : u32) -> f32  {
+            if (row < uniforms.dimAOuter && col < uniforms.dimInner)
+            {
+                let result : f32 = firstMatrix.numbers[row * uniforms.dimInner + col];
+                return result;
+            }
+            return 0.;
+        }
+
+        fn mm_readB(row : u32, col : u32) -> f32 {
+            if (row < uniforms.dimInner && col < uniforms.dimBOuter)
+            {
+                let result : f32 = secondMatrix.numbers[row * uniforms.dimBOuter + col];
+                return result;
+            }
+            return 0.;
+        }
+
+        fn mm_write(row : u32, col : u32, value : f32) {
+            if (row < uniforms.dimAOuter && col < uniforms.dimBOuter)
+            {
+                let index : u32 = col + row * uniforms.dimBOuter;
+                resultMatrix.numbers[index] = value;
+            }
+        }
+
+        let RowPerThread : u32 = 4u;
+        let ColPerThread : u32 = 4u;
+        let TileAOuter : u32 = 32u;
+        let TileBOuter : u32 = 32u;
+        let TileInner : u32 = 32u;)";
+
+    const std::string& kMatMulFloatSharedArray1D = R"(
+        var<workgroup> mm_Asub : array<f32, 1024>;
+        var<workgroup> mm_Bsub : array<f32, 1024>;)";
+    const std::string& kMatMulFloatSharedArray2D = R"(
+        var<workgroup> mm_Asub : array<array<f32, 32>, 32>;
+        var<workgroup> mm_Bsub : array<array<f32, 32>, 32>;)";
+    const std::string& kMatMulFloatBodyPart1 = R"(
+        @stage(compute) @workgroup_size(8, 8, 1)
+        fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
+                @builtin(global_invocation_id) global_id  : vec3<u32>) {
+            let tileRow : u32 = local_id.y * RowPerThread;
+            let tileCol : u32 = local_id.x * ColPerThread;
+
+            let globalRow : u32 = global_id.y * RowPerThread;
+            let globalCol : u32 = global_id.x * ColPerThread;
+
+            let numTiles : u32 = (uniforms.dimInner - 1u) / TileInner + 1u;
+
+            var acc: array<f32, 16>;
+            var ACached : f32;
+            var BCached : array<f32, 4>;
+
+            // Without this initialization strange values show up in acc.
+            // TODO: Remove it once the following bug is fixed.
+            // https://bugs.chromium.org/p/tint/issues/detail?id=759
+            for (var index : u32 = 0u; index < RowPerThread * ColPerThread; index = index + 1u) {
+                acc[index] = 0.;
+            }
+
+            let ColPerThreadA : u32 = TileInner / 8u;
+            let tileColA : u32 = local_id.x * ColPerThreadA;
+            let RowPerThreadB : u32 = TileInner / 8u;
+            let tileRowB : u32 = local_id.y * RowPerThreadB;
+
+            // Loop over shared dimension.
+            for (var t : u32 = 0u; t < numTiles; t = t + 1u) {
+                // Load one tile of A into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+                for (var innerCol : u32 = 0u; innerCol < ColPerThreadA; innerCol = innerCol + 1u) {
+                    let inputRow : u32 = tileRow + innerRow;
+                    let inputCol : u32 = tileColA + innerCol;)";
+    const std::string& kMatMulFloatBodyPart2Array1D = R"(
+                    let index : u32 = inputRow * TileInner + inputCol;
+                    mm_Asub[index] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
+                }
+                }
+                // Load one tile of B into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThreadB; innerRow = innerRow + 1u) {
+                for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
+                    let inputRow : u32 = tileRowB + innerRow;
+                    let inputCol : u32 = tileCol + innerCol;
+                    let index : u32 = inputRow * TileBOuter + inputCol;
+
+                    mm_Bsub[index] = mm_readB(t * TileInner + inputRow, globalCol + innerCol);;
+                }
+                }
+
+                workgroupBarrier();
+
+                // Compute acc values for a single thread.
+                for (var k : u32 = 0u; k < TileInner; k = k + 1u) {
+                    for (var inner : u32 = 0u; inner < ColPerThread; inner = inner + 1u) {
+                        BCached[inner] = mm_Bsub[k * TileBOuter + tileCol + inner];
+                    }
+
+                    for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+                        ACached = mm_Asub[(tileRow + innerRow) * TileInner + k];)";
+    const std::string& kMatMulFloatBodyPart2Array2D = R"(
+                    mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
+                }
+                }
+                // Load one tile of B into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThreadB; innerRow = innerRow + 1u) {
+                for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
+                    let inputRow : u32 = tileRowB + innerRow;
+                    let inputCol : u32 = tileCol + innerCol;
+
+                    mm_Bsub[innerCol][inputCol] = mm_readB(t * TileInner + inputRow, globalCol + innerCol);;
+                }
+                }
+
+                workgroupBarrier();
+
+                // Compute acc values for a single thread.
+                for (var k : u32 = 0u; k < TileInner; k = k + 1u) {
+                    for (var inner : u32 = 0u; inner < ColPerThread; inner = inner + 1u) {
+                        BCached[inner] = mm_Bsub[k][tileCol + inner];
+                    }
+
+                    for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+                        ACached = mm_Asub[tileRow + innerRow][k];)";
+    const std::string& kMatMulFloatBodyPart3 = R"(
+                        for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
+                            let index : u32 = innerRow * ColPerThread + innerCol;
+                            acc[index] = acc[index] + ACached * BCached[innerCol];
+                        }
+                    }
+                }
+
+                workgroupBarrier();
+            }
+
+            for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+            for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
+                let index : u32 = innerRow * ColPerThread + innerCol;
+                mm_write(globalRow + innerRow,
+                         globalCol + innerCol,
+                         acc[index]);
+            }
+            }
+        })";
+    const std::string& kMatMulFloatOneDimensionalSharedArray =
+        kMatMulFloatHeader + kMatMulFloatSharedArray1D + kMatMulFloatBodyPart1 +
+        kMatMulFloatBodyPart2Array1D + kMatMulFloatBodyPart3;
+
+    const std::string& kMatMulFloatTwoDimensionalSharedArray =
+        kMatMulFloatHeader + kMatMulFloatSharedArray2D + kMatMulFloatBodyPart1 +
+        kMatMulFloatBodyPart2Array2D + kMatMulFloatBodyPart3;
+
+    // The vec4 version requires that dimInner and dimBOuter are divisible by 4.
+    const std::string& kMatMulVec4Header = R"(
+        struct Uniforms {
+            dimAOuter : u32,
+            dimInner : u32,
+            dimBOuter : u32,
+        }
+        struct Matrix {
+            numbers: array<vec4<f32>>
+        }
+
+        @group(0) @binding(0) var<storage, read> firstMatrix : Matrix;
+        @group(0) @binding(1) var<storage, read> secondMatrix : Matrix;
+        @group(0) @binding(2) var<storage, write> resultMatrix : Matrix;
+        @group(0) @binding(3) var<uniform> uniforms : Uniforms;
+
+        fn mm_readA(row : u32, col : u32) -> vec4<f32>  {
+            if (row < uniforms.dimAOuter && col < uniforms.dimInner)
+            {
+                let result : vec4<f32> = firstMatrix.numbers[row * uniforms.dimInner / 4u + col];
+                return result;
+            }
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+
+        fn mm_readB(row : u32, col : u32) -> vec4<f32> {
+            if (row < uniforms.dimInner && col < uniforms.dimBOuter)
+            {
+                let result : vec4<f32> = secondMatrix.numbers[row * uniforms.dimBOuter / 4u + col];
+                return result;
+            }
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+
+        fn mm_write(row : u32, col : u32, value : vec4<f32>) {
+            if (row < uniforms.dimAOuter && col < uniforms.dimBOuter)
+            {
+                let index : u32 = col + row * uniforms.dimBOuter / 4u;
+                resultMatrix.numbers[index] = value;
+            }
+        }
+
+        let RowPerThread : u32 = 4u;
+        let ColPerThread : u32 = 4u;
+        let TileOuter : u32 = 32u;
+        let TileInner : u32 = 32u;)";
+    const std::string& kMatMulVec4SharedArray1D = R"(
+        var<workgroup> mm_Asub : array<vec4<f32>, 256>;
+        var<workgroup> mm_Bsub : array<vec4<f32>, 256>;)";
+    const std::string& kMatMulVec4SharedArray2D = R"(
+        var<workgroup> mm_Asub : array<array<vec4<f32>, 8>, 32>;
+        var<workgroup> mm_Bsub : array<array<vec4<f32>, 8>, 32>;)";
+    const std::string& kMatMulVec4BodyPart1 = R"(
+        @stage(compute) @workgroup_size(8, 8, 1)
+        fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
+                @builtin(global_invocation_id) global_id  : vec3<u32>) {
+            let tileRow : u32 = local_id.y * RowPerThread;
+            let tileCol : u32 = local_id.x;
+
+            let globalRow : u32 = global_id.y * RowPerThread;
+            let globalCol : u32 = global_id.x;
+
+            let numTiles : u32 = (uniforms.dimInner - 1u) / TileInner + 1u;
+
+            var acc: array<vec4<f32>, 4>;
+            var ACached : vec4<f32>;
+            var BCached : array<vec4<f32>, 4>;
+
+            // Without this initialization strange values show up in acc.
+            // TODO: Remove it once the following bug is fixed.
+            // https://bugs.chromium.org/p/tint/issues/detail?id=759
+            for (var index : u32 = 0u; index < RowPerThread; index = index + 1u) {
+                acc[index] = vec4<f32>(0.0, 0.0, 0.0, 0.0);
+            }
+
+            var globalColA : u32 = tileCol;
+            let RowPerThreadB : u32 = TileInner / 8u;
+            let tileRowB : u32 = local_id.y * RowPerThreadB;
+
+            // Loop over shared dimension.
+            for (var t : u32 = 0u; t < numTiles; t = t + 1u) {
+                // Load one tile of A into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+                    let inputRow : u32 = tileRow + innerRow;
+                    let inputCol : u32 = tileCol;)";
+    const std::string& kMatMulVec4BodyPart2Array1D = R"(
+                    let index : u32 = inputRow * TileInner / ColPerThread + inputCol;
+                    mm_Asub[index] = mm_readA(globalRow + innerRow, globalColA);
+                }
+                globalColA = globalColA + TileInner / ColPerThread;
+
+                // Load one tile of B into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThreadB; innerRow = innerRow + 1u) {
+                    let inputRow : u32 = tileRowB + innerRow;
+                    let inputCol : u32 = tileCol;
+                    let index : u32 = inputRow * TileOuter / ColPerThread + inputCol;
+                    mm_Bsub[index] = mm_readB(t * TileInner + inputRow, globalCol);;
+                }
+
+                workgroupBarrier();
+
+                // Compute acc values for a single thread.
+                for (var k : u32 = 0u; k < TileInner / ColPerThread; k = k + 1u) {
+                    BCached[0] = mm_Bsub[(k * ColPerThread) * (TileOuter / ColPerThread) + tileCol];
+                    BCached[1] = mm_Bsub[(k * ColPerThread + 1u) * (TileOuter / ColPerThread) + tileCol];
+                    BCached[2] = mm_Bsub[(k * ColPerThread + 2u) * (TileOuter / ColPerThread) + tileCol];
+                    BCached[3] = mm_Bsub[(k * ColPerThread + 3u) * (TileOuter / ColPerThread) + tileCol];
+
+                    for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
+                        ACached = mm_Asub[(tileRow + i) * (TileInner / ColPerThread) + k];)";
+    const std::string& kMatMulVec4BodyPart2Array2D = R"(
+                    mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, globalColA);
+                }
+                globalColA = globalColA + TileInner / ColPerThread;
+
+                // Load one tile of B into local memory.
+                for (var innerRow : u32 = 0u; innerRow < RowPerThreadB; innerRow = innerRow + 1u) {
+                    let inputRow : u32 = tileRowB + innerRow;
+                    let inputCol : u32 = tileCol;
+                    mm_Bsub[inputRow][inputCol] = mm_readB(t * TileInner + inputRow, globalCol);;
+                }
+
+                workgroupBarrier();
+
+                // Compute acc values for a single thread.
+                for (var k : u32 = 0u; k < TileInner / ColPerThread; k = k + 1u) {
+                    BCached[0] = mm_Bsub[k * ColPerThread][tileCol];
+                    BCached[1] = mm_Bsub[k * ColPerThread + 1u][tileCol];
+                    BCached[2] = mm_Bsub[k * ColPerThread + 2u][tileCol];
+                    BCached[3] = mm_Bsub[k * ColPerThread + 3u][tileCol];
+
+                    for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
+                        ACached = mm_Asub[tileRow + i][k];)";
+    const std::string& kMatMulVec4BodyPart3 = R"(
+                        acc[i] = BCached[0] * ACached.x + acc[i];
+                        acc[i] = BCached[1] * ACached.y + acc[i];
+                        acc[i] = BCached[2] * ACached.z + acc[i];
+                        acc[i] = BCached[3] * ACached.w + acc[i];
+                    }
+                }
+
+                workgroupBarrier();
+            }
+
+            for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
+                mm_write(globalRow + innerRow,
+                         globalCol,
+                         acc[innerRow]);
+            }
+        })";
+
+    const std::string& kMatMulVec4OneDimensionalSharedArray =
+        kMatMulVec4Header + kMatMulVec4SharedArray1D + kMatMulVec4BodyPart1 +
+        kMatMulVec4BodyPart2Array1D + kMatMulVec4BodyPart3;
+
+    const std::string& kMatMulVec4TwoDimensionalSharedArray =
+        kMatMulVec4Header + kMatMulVec4SharedArray2D + kMatMulVec4BodyPart1 +
+        kMatMulVec4BodyPart2Array2D + kMatMulVec4BodyPart3;
+
+    constexpr unsigned int kNumIterations = 50;
+
+    enum class MatMulMethod {
+        MatMulFloatOneDimSharedArray,
+        MatMulFloatTwoDimSharedArray,
+        MatMulVec4OneDimSharedArray,
+        MatMulVec4TwoDimSharedArray
+    };
+
+    std::ostream& operator<<(std::ostream& ostream, const MatMulMethod& matMulMethod) {
+        switch (matMulMethod) {
+            case MatMulMethod::MatMulFloatOneDimSharedArray:
+                ostream << "MatMulFloatOneDimSharedArray";
+                break;
+            case MatMulMethod::MatMulFloatTwoDimSharedArray:
+                ostream << "MatMulFloatTwoDimSharedArray";
+                break;
+            case MatMulMethod::MatMulVec4OneDimSharedArray:
+                ostream << "MatMulVec4OneDimSharedArray";
+                break;
+            case MatMulMethod::MatMulVec4TwoDimSharedArray:
+                ostream << "MatMulVec4TwoDimSharedArray";
+                break;
+        }
+        return ostream;
+    }
+
+    using DimAOuter = uint32_t;
+    using DimInner = uint32_t;
+    using DimBOuter = uint32_t;
+    DAWN_TEST_PARAM_STRUCT(ShaderRobustnessParams, MatMulMethod, DimAOuter, DimInner, DimBOuter);
+
+}  // namespace
+
+// Test the execution time of matrix multiplication (A [dimAOuter, dimInner] * B [dimInner,
+// dimBOuter]) on the GPU and see the difference between robustness on and off.
+class ShaderRobustnessPerf : public DawnPerfTestWithParams<ShaderRobustnessParams> {
+  public:
+    ShaderRobustnessPerf()
+        : DawnPerfTestWithParams(kNumIterations, 1),
+          mDimAOuter(GetParam().mDimAOuter),
+          mDimInner(GetParam().mDimInner),
+          mDimBOuter(GetParam().mDimBOuter) {
+    }
+    ~ShaderRobustnessPerf() override = default;
+
+    void SetUp() override;
+
+  private:
+    void Step() override;
+
+    wgpu::BindGroup mBindGroup;
+    wgpu::ComputePipeline mPipeline;
+    uint32_t mDimAOuter;
+    uint32_t mDimInner;
+    uint32_t mDimBOuter;
+};
+
+void ShaderRobustnessPerf::SetUp() {
+    DawnPerfTestWithParams<ShaderRobustnessParams>::SetUp();
+
+    // TODO(crbug.com/dawn/786): D3D12_Microsoft_Basic_Render_Driver_CPU
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    // TODO(crbug.com/dawn/945): Generation via SPIRV-Cross fails
+    DAWN_SUPPRESS_TEST_IF(IsOpenGL());
+
+    const size_t dataASize = mDimAOuter * mDimInner;
+    std::vector<float> dataA(dataASize);
+    uint64_t byteASize = sizeof(float) * dataA.size();
+    // It's ok to use all zeros to do the matrix multiplication for performance test.
+    wgpu::Buffer bufA =
+        utils::CreateBufferFromData(device, dataA.data(), byteASize, wgpu::BufferUsage::Storage);
+
+    const size_t dataBSize = mDimInner * mDimBOuter;
+    std::vector<float> dataB(dataBSize);
+    uint64_t byteBSize = sizeof(float) * dataB.size();
+    wgpu::Buffer bufB =
+        utils::CreateBufferFromData(device, dataB.data(), byteBSize, wgpu::BufferUsage::Storage);
+
+    uint64_t byteDstSize = sizeof(float) * mDimAOuter * mDimBOuter;
+    wgpu::BufferDescriptor desc = {};
+    desc.usage = wgpu::BufferUsage::Storage;
+    desc.size = byteDstSize;
+    wgpu::Buffer dst = device.CreateBuffer(&desc);
+
+    uint32_t uniformData[] = {mDimAOuter, mDimInner, mDimBOuter};
+    wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+        device, uniformData, sizeof(uniformData), wgpu::BufferUsage::Uniform);
+
+    wgpu::ShaderModule module;
+    switch (GetParam().mMatMulMethod) {
+        case MatMulMethod::MatMulFloatOneDimSharedArray: {
+            module =
+                utils::CreateShaderModule(device, kMatMulFloatOneDimensionalSharedArray.c_str());
+            break;
+        }
+
+        case MatMulMethod::MatMulFloatTwoDimSharedArray: {
+            module =
+                utils::CreateShaderModule(device, kMatMulFloatTwoDimensionalSharedArray.c_str());
+            break;
+        }
+
+        case MatMulMethod::MatMulVec4OneDimSharedArray: {
+            module =
+                utils::CreateShaderModule(device, kMatMulVec4OneDimensionalSharedArray.c_str());
+            break;
+        }
+
+        case MatMulMethod::MatMulVec4TwoDimSharedArray: {
+            module =
+                utils::CreateShaderModule(device, kMatMulVec4TwoDimensionalSharedArray.c_str());
+            break;
+        }
+    }
+
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = module;
+    csDesc.compute.entryPoint = "main";
+    mPipeline = device.CreateComputePipeline(&csDesc);
+
+    mBindGroup = utils::MakeBindGroup(device, mPipeline.GetBindGroupLayout(0),
+                                      {
+                                          {0, bufA, 0, byteASize},
+                                          {1, bufB, 0, byteBSize},
+                                          {2, dst, 0, byteDstSize},
+                                          {3, uniformBuffer, 0, sizeof(uniformData)},
+                                      });
+}
+
+void ShaderRobustnessPerf::Step() {
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(mPipeline);
+        pass.SetBindGroup(0, mBindGroup);
+        for (unsigned int i = 0; i < kNumIterations; ++i) {
+            pass.Dispatch(ceil(float(mDimBOuter) / float(kTileSize)),
+                          ceil(float(mDimAOuter) / float(kTileSize)), 1);
+        }
+        pass.End();
+
+        commands = encoder.Finish();
+    }
+
+    queue.Submit(1, &commands);
+}
+
+TEST_P(ShaderRobustnessPerf, Run) {
+    RunTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(ShaderRobustnessPerf,
+                        {D3D12Backend(), D3D12Backend({"disable_robustness"}, {}), MetalBackend(),
+                         MetalBackend({"disable_robustness"}, {}), OpenGLBackend(),
+                         OpenGLBackend({"disable_robustness"}, {}), VulkanBackend(),
+                         VulkanBackend({"disable_robustness"}, {})},
+                        {MatMulMethod::MatMulFloatOneDimSharedArray,
+                         MatMulMethod::MatMulFloatTwoDimSharedArray,
+                         MatMulMethod::MatMulVec4OneDimSharedArray,
+                         MatMulMethod::MatMulVec4TwoDimSharedArray},
+                        {512u},
+                        {512u},
+                        {512u});
diff --git a/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
new file mode 100644
index 0000000..b70c68b
--- /dev/null
+++ b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
@@ -0,0 +1,151 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/perf_tests/DawnPerfTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+struct SubresourceTrackingParams : AdapterTestParam {
+    SubresourceTrackingParams(const AdapterTestParam& param,
+                              uint32_t arrayLayerCountIn,
+                              uint32_t mipLevelCountIn)
+        : AdapterTestParam(param),
+          arrayLayerCount(arrayLayerCountIn),
+          mipLevelCount(mipLevelCountIn) {
+    }
+    uint32_t arrayLayerCount;
+    uint32_t mipLevelCount;
+};
+
+std::ostream& operator<<(std::ostream& ostream, const SubresourceTrackingParams& param) {
+    ostream << static_cast<const AdapterTestParam&>(param);
+    ostream << "_arrayLayer_" << param.arrayLayerCount;
+    ostream << "_mipLevel_" << param.mipLevelCount;
+    return ostream;
+}
+
+// Test the performance of Subresource usage and barrier tracking on a case that would generally be
+// difficult. It uses a 2D array texture with mipmaps and updates one of the layers with data from
+// another texture, then generates mipmaps for that layer. It is difficult because it requires
+// tracking the state of individual subresources in the middle of the subresources of that texture.
+class SubresourceTrackingPerf : public DawnPerfTestWithParams<SubresourceTrackingParams> {
+  public:
+    static constexpr unsigned int kNumIterations = 50;
+
+    SubresourceTrackingPerf() : DawnPerfTestWithParams(kNumIterations, 1) {
+    }
+    ~SubresourceTrackingPerf() override = default;
+
+    void SetUp() override {
+        DawnPerfTestWithParams<SubresourceTrackingParams>::SetUp();
+        const SubresourceTrackingParams& params = GetParam();
+
+        wgpu::TextureDescriptor materialDesc;
+        materialDesc.dimension = wgpu::TextureDimension::e2D;
+        materialDesc.size = {1u << (params.mipLevelCount - 1), 1u << (params.mipLevelCount - 1),
+                             params.arrayLayerCount};
+        materialDesc.mipLevelCount = params.mipLevelCount;
+        materialDesc.usage = wgpu::TextureUsage::TextureBinding |
+                             wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
+        materialDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        mMaterials = device.CreateTexture(&materialDesc);
+
+        wgpu::TextureDescriptor uploadTexDesc = materialDesc;
+        uploadTexDesc.size.depthOrArrayLayers = 1;
+        uploadTexDesc.mipLevelCount = 1;
+        uploadTexDesc.usage = wgpu::TextureUsage::CopySrc;
+        mUploadTexture = device.CreateTexture(&uploadTexDesc);
+
+        utils::ComboRenderPipelineDescriptor pipelineDesc;
+        pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+        )");
+        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var materials : texture_2d<f32>;
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                let foo : vec2<i32> = textureDimensions(materials);
+                return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+        )");
+        mPipeline = device.CreateRenderPipeline(&pipelineDesc);
+    }
+
+  private:
+    void Step() override {
+        const SubresourceTrackingParams& params = GetParam();
+
+        uint32_t layerUploaded = params.arrayLayerCount / 2;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        // Copy into the layer of the material array.
+        {
+            wgpu::ImageCopyTexture sourceView;
+            sourceView.texture = mUploadTexture;
+
+            wgpu::ImageCopyTexture destView;
+            destView.texture = mMaterials;
+            destView.origin.z = layerUploaded;
+
+            wgpu::Extent3D copySize = {1u << (params.mipLevelCount - 1),
+                                       1u << (params.mipLevelCount - 1), 1};
+
+            encoder.CopyTextureToTexture(&sourceView, &destView, &copySize);
+        }
+
+        // Fake commands that would be used to create the mip levels.
+        for (uint32_t level = 1; level < params.mipLevelCount; level++) {
+            wgpu::TextureViewDescriptor rtViewDesc;
+            rtViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+            rtViewDesc.baseMipLevel = level;
+            rtViewDesc.mipLevelCount = 1;
+            rtViewDesc.baseArrayLayer = layerUploaded;
+            rtViewDesc.arrayLayerCount = 1;
+            wgpu::TextureView rtView = mMaterials.CreateView(&rtViewDesc);
+
+            wgpu::TextureViewDescriptor sampleViewDesc = rtViewDesc;
+            sampleViewDesc.baseMipLevel = level - 1;
+            wgpu::TextureView sampleView = mMaterials.CreateView(&sampleViewDesc);
+
+            wgpu::BindGroup bindgroup =
+                utils::MakeBindGroup(device, mPipeline.GetBindGroupLayout(0), {{0, sampleView}});
+
+            utils::ComboRenderPassDescriptor renderPass({rtView});
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetPipeline(mPipeline);
+            pass.SetBindGroup(0, bindgroup);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    wgpu::Texture mUploadTexture;
+    wgpu::Texture mMaterials;
+    wgpu::RenderPipeline mPipeline;
+};
+
+TEST_P(SubresourceTrackingPerf, Run) {
+    RunTest();
+}
+
+DAWN_INSTANTIATE_TEST_P(SubresourceTrackingPerf,
+                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend()},
+                        {1, 4, 16, 256},
+                        {2, 3, 8});
diff --git a/src/dawn/tests/unittests/AsyncTaskTests.cpp b/src/dawn/tests/unittests/AsyncTaskTests.cpp
new file mode 100644
index 0000000..9170885
--- /dev/null
+++ b/src/dawn/tests/unittests/AsyncTaskTests.cpp
@@ -0,0 +1,89 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// AsyncTaskTests:
+//     Simple tests for dawn::native::AsyncTask and dawn::native::AsnycTaskManager.
+
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <mutex>
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/AsyncTask.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace {
+
+    struct SimpleTaskResult {
+        uint32_t id;
+    };
+
+    // A thread-safe queue that stores the task results.
+    class ConcurrentTaskResultQueue : public NonCopyable {
+      public:
+        void AddResult(std::unique_ptr<SimpleTaskResult> result) {
+            std::lock_guard<std::mutex> lock(mMutex);
+            mTaskResults.push_back(std::move(result));
+        }
+
+        std::vector<std::unique_ptr<SimpleTaskResult>> GetAllResults() {
+            std::vector<std::unique_ptr<SimpleTaskResult>> outputResults;
+            {
+                std::lock_guard<std::mutex> lock(mMutex);
+                outputResults.swap(mTaskResults);
+            }
+            return outputResults;
+        }
+
+      private:
+        std::mutex mMutex;
+        std::vector<std::unique_ptr<SimpleTaskResult>> mTaskResults;
+    };
+
+    void DoTask(ConcurrentTaskResultQueue* resultQueue, uint32_t id) {
+        std::unique_ptr<SimpleTaskResult> result = std::make_unique<SimpleTaskResult>();
+        result->id = id;
+        resultQueue->AddResult(std::move(result));
+    }
+
+}  // anonymous namespace
+
+class AsyncTaskTest : public testing::Test {};
+
+// Emulate the basic usage of worker thread pool in Create*PipelineAsync().
+TEST_F(AsyncTaskTest, Basic) {
+    dawn::platform::Platform platform;
+    std::unique_ptr<dawn::platform::WorkerTaskPool> pool = platform.CreateWorkerTaskPool();
+
+    dawn::native::AsyncTaskManager taskManager(pool.get());
+    ConcurrentTaskResultQueue taskResultQueue;
+
+    constexpr size_t kTaskCount = 4u;
+    std::set<uint32_t> idset;
+    for (uint32_t i = 0; i < kTaskCount; ++i) {
+        dawn::native::AsyncTask asyncTask([&taskResultQueue, i] { DoTask(&taskResultQueue, i); });
+        taskManager.PostTask(std::move(asyncTask));
+        idset.insert(i);
+    }
+
+    taskManager.WaitAllPendingTasks();
+
+    std::vector<std::unique_ptr<SimpleTaskResult>> results = taskResultQueue.GetAllResults();
+    ASSERT_EQ(kTaskCount, results.size());
+    for (std::unique_ptr<SimpleTaskResult>& result : results) {
+        idset.erase(result->id);
+    }
+    ASSERT_TRUE(idset.empty());
+}
diff --git a/src/dawn/tests/unittests/BitSetIteratorTests.cpp b/src/dawn/tests/unittests/BitSetIteratorTests.cpp
new file mode 100644
index 0000000..ecdb92c
--- /dev/null
+++ b/src/dawn/tests/unittests/BitSetIteratorTests.cpp
@@ -0,0 +1,219 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_bitset.h"
+
+// This is ANGLE's BitSetIterator_unittests.cpp file.
+
+class BitSetIteratorTest : public testing::Test {
+  protected:
+    std::bitset<40> mStateBits;
+};
+
+// Simple iterator test.
+TEST_F(BitSetIteratorTest, Iterator) {
+    std::set<unsigned long> originalValues;
+    originalValues.insert(2);
+    originalValues.insert(6);
+    originalValues.insert(8);
+    originalValues.insert(35);
+
+    for (unsigned long value : originalValues) {
+        mStateBits.set(value);
+    }
+
+    std::set<unsigned long> readValues;
+    for (unsigned long bit : IterateBitSet(mStateBits)) {
+        EXPECT_EQ(1u, originalValues.count(bit));
+        EXPECT_EQ(0u, readValues.count(bit));
+        readValues.insert(bit);
+    }
+
+    EXPECT_EQ(originalValues.size(), readValues.size());
+}
+
+// Test an empty iterator.
+TEST_F(BitSetIteratorTest, EmptySet) {
+    // We don't use the FAIL gtest macro here since it returns immediately,
+    // causing an unreachable code warning in MSVC
+    bool sawBit = false;
+    for (unsigned long bit : IterateBitSet(mStateBits)) {
+        DAWN_UNUSED(bit);
+        sawBit = true;
+    }
+    EXPECT_FALSE(sawBit);
+}
+
+// Test iterating a result of combining two bitsets.
+TEST_F(BitSetIteratorTest, NonLValueBitset) {
+    std::bitset<40> otherBits;
+
+    mStateBits.set(1);
+    mStateBits.set(2);
+    mStateBits.set(3);
+    mStateBits.set(4);
+
+    otherBits.set(0);
+    otherBits.set(1);
+    otherBits.set(3);
+    otherBits.set(5);
+
+    std::set<unsigned long> seenBits;
+
+    for (unsigned long bit : IterateBitSet(mStateBits & otherBits)) {
+        EXPECT_EQ(0u, seenBits.count(bit));
+        seenBits.insert(bit);
+        EXPECT_TRUE(mStateBits[bit]);
+        EXPECT_TRUE(otherBits[bit]);
+    }
+
+    EXPECT_EQ((mStateBits & otherBits).count(), seenBits.size());
+}
+
+class EnumBitSetIteratorTest : public testing::Test {
+  protected:
+    enum class TestEnum { A, B, C, D, E, F, G, H, I, J, EnumCount };
+
+    static constexpr size_t kEnumCount = static_cast<size_t>(TestEnum::EnumCount);
+    ityp::bitset<TestEnum, kEnumCount> mStateBits;
+};
+
+// Simple iterator test.
+TEST_F(EnumBitSetIteratorTest, Iterator) {
+    std::set<TestEnum> originalValues;
+    originalValues.insert(TestEnum::B);
+    originalValues.insert(TestEnum::F);
+    originalValues.insert(TestEnum::C);
+    originalValues.insert(TestEnum::I);
+
+    for (TestEnum value : originalValues) {
+        mStateBits.set(value);
+    }
+
+    std::set<TestEnum> readValues;
+    for (TestEnum bit : IterateBitSet(mStateBits)) {
+        EXPECT_EQ(1u, originalValues.count(bit));
+        EXPECT_EQ(0u, readValues.count(bit));
+        readValues.insert(bit);
+    }
+
+    EXPECT_EQ(originalValues.size(), readValues.size());
+}
+
+// Test an empty iterator.
+TEST_F(EnumBitSetIteratorTest, EmptySet) {
+    // We don't use the FAIL gtest macro here since it returns immediately,
+    // causing an unreachable code warning in MSVC
+    bool sawBit = false;
+    for (TestEnum bit : IterateBitSet(mStateBits)) {
+        DAWN_UNUSED(bit);
+        sawBit = true;
+    }
+    EXPECT_FALSE(sawBit);
+}
+
+// Test iterating a result of combining two bitsets.
+TEST_F(EnumBitSetIteratorTest, NonLValueBitset) {
+    ityp::bitset<TestEnum, kEnumCount> otherBits;
+
+    mStateBits.set(TestEnum::B);
+    mStateBits.set(TestEnum::C);
+    mStateBits.set(TestEnum::D);
+    mStateBits.set(TestEnum::E);
+
+    otherBits.set(TestEnum::A);
+    otherBits.set(TestEnum::B);
+    otherBits.set(TestEnum::D);
+    otherBits.set(TestEnum::F);
+
+    std::set<TestEnum> seenBits;
+
+    for (TestEnum bit : IterateBitSet(mStateBits & otherBits)) {
+        EXPECT_EQ(0u, seenBits.count(bit));
+        seenBits.insert(bit);
+        EXPECT_TRUE(mStateBits[bit]);
+        EXPECT_TRUE(otherBits[bit]);
+    }
+
+    EXPECT_EQ((mStateBits & otherBits).count(), seenBits.size());
+}
+
+class ITypBitsetIteratorTest : public testing::Test {
+  protected:
+    using IntegerT = TypedInteger<struct Foo, uint32_t>;
+    ityp::bitset<IntegerT, 40> mStateBits;
+};
+
+// Simple iterator test.
+TEST_F(ITypBitsetIteratorTest, Iterator) {
+    std::set<IntegerT> originalValues;
+    originalValues.insert(IntegerT(2));
+    originalValues.insert(IntegerT(6));
+    originalValues.insert(IntegerT(8));
+    originalValues.insert(IntegerT(35));
+
+    for (IntegerT value : originalValues) {
+        mStateBits.set(value);
+    }
+
+    std::set<IntegerT> readValues;
+    for (IntegerT bit : IterateBitSet(mStateBits)) {
+        EXPECT_EQ(1u, originalValues.count(bit));
+        EXPECT_EQ(0u, readValues.count(bit));
+        readValues.insert(bit);
+    }
+
+    EXPECT_EQ(originalValues.size(), readValues.size());
+}
+
+// Test an empty iterator.
+TEST_F(ITypBitsetIteratorTest, EmptySet) {
+    // We don't use the FAIL gtest macro here since it returns immediately,
+    // causing an unreachable code warning in MSVC
+    bool sawBit = false;
+    for (IntegerT bit : IterateBitSet(mStateBits)) {
+        DAWN_UNUSED(bit);
+        sawBit = true;
+    }
+    EXPECT_FALSE(sawBit);
+}
+
+// Test iterating a result of combining two bitsets.
+TEST_F(ITypBitsetIteratorTest, NonLValueBitset) {
+    ityp::bitset<IntegerT, 40> otherBits;
+
+    mStateBits.set(IntegerT(1));
+    mStateBits.set(IntegerT(2));
+    mStateBits.set(IntegerT(3));
+    mStateBits.set(IntegerT(4));
+
+    otherBits.set(IntegerT(0));
+    otherBits.set(IntegerT(1));
+    otherBits.set(IntegerT(3));
+    otherBits.set(IntegerT(5));
+
+    std::set<IntegerT> seenBits;
+
+    for (IntegerT bit : IterateBitSet(mStateBits & otherBits)) {
+        EXPECT_EQ(0u, seenBits.count(bit));
+        seenBits.insert(bit);
+        EXPECT_TRUE(mStateBits[bit]);
+        EXPECT_TRUE(otherBits[bit]);
+    }
+
+    EXPECT_EQ((mStateBits & otherBits).count(), seenBits.size());
+}
diff --git a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
new file mode 100644
index 0000000..cc824cf
--- /dev/null
+++ b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
@@ -0,0 +1,327 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+#include "dawn/native/BuddyAllocator.h"
+
+using namespace dawn::native;
+
+constexpr uint64_t BuddyAllocator::kInvalidOffset;
+
+// Verify the buddy allocator with a basic test.
+TEST(BuddyAllocatorTests, SingleBlock) {
+    // After one 32 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               A              |
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Check that we cannot allocate a oversized block.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+
+    // Check that we cannot allocate a zero sized block.
+    ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);
+
+    // Allocate the block.
+    uint64_t blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
+
+    // Check that we are full.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate the block.
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify multiple allocations succeeds using a buddy allocator.
+TEST(BuddyAllocatorTests, MultipleBlocks) {
+    // Fill every level in the allocator (order-n = 2^n)
+    const uint64_t maxBlockSize = (1ull << 16);
+    for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
+        BuddyAllocator allocator(maxBlockSize);
+
+        uint64_t blockSize = (1ull << order);
+        for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
+            ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
+        }
+    }
+}
+
+// Verify that a single allocation succeeds using a buddy allocator.
+TEST(BuddyAllocatorTests, SingleSplitBlock) {
+    //  After one 8 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       F      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   A   |   F   |       |      |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Allocate block (splits two blocks).
+    uint64_t blockOffset = allocator.Allocate(8);
+    ASSERT_EQ(blockOffset, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate block (merges two blocks).
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Check that we cannot allocate a block that is oversized.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+
+    // Re-allocate the largest block allowed after merging.
+    blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
+
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify that a multiple allocated blocks can be removed in the free-list.
+TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
+    //  After four 16 byte allocations:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   Aa  |   Ab  |  Ac  |   Ad  |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Populates the free-list with four blocks at Level2.
+
+    // Allocate "a" block (two splits).
+    constexpr uint64_t blockSizeInBytes = 8;
+    uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetA, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Allocate "b" block.
+    uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetB, blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "c" block (three splits).
+    uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "d" block.
+    uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate "d" block.
+    // FreeList[Level2] = [BlockD] -> x
+    allocator.Deallocate(blockOffsetD);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Deallocate "b" block.
+    // FreeList[Level2] = [BlockB] -> [BlockD] -> x
+    allocator.Deallocate(blockOffsetB);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "c" block (one merges).
+    // FreeList[Level1] = [BlockCD] -> x
+    // FreeList[Level2] = [BlockB] -> x
+    allocator.Deallocate(blockOffsetC);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "a" block (two merges).
+    // FreeList[Level0] = [BlockABCD] -> x
+    allocator.Deallocate(blockOffsetA);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can handle allocations of various sizes.
+TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
+    //  After four Level4-to-Level1 byte then one L4 block allocations:
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               A               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       A       |               |               |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   A   |       |       |       |       |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(32), 0ull);
+    ASSERT_EQ(allocator.Allocate(64), 64ull);
+    ASSERT_EQ(allocator.Allocate(128), 128ull);
+    ASSERT_EQ(allocator.Allocate(256), 256ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Fill in the last free block.
+    ASSERT_EQ(allocator.Allocate(32), 32ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Check if we're full.
+    ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
+}
+
+// Verify very small allocations using a larger allocator works correctly.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
+    //  After allocating four pairs of one 64 byte block and one 32 byte block.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |       S       |       F       |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(64), 0ull);
+    ASSERT_EQ(allocator.Allocate(32), 64ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 128ull);
+    ASSERT_EQ(allocator.Allocate(32), 96ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 192ull);
+    ASSERT_EQ(allocator.Allocate(32), 256ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 320ull);
+    ASSERT_EQ(allocator.Allocate(32), 288ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can deal with bad fragmentation.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
+    //  Allocate every leaf then de-allocate every other of those allocations.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |        S       |        S     |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Allocate leaf blocks
+    constexpr uint64_t minBlockSizeInBytes = 32;
+    std::vector<uint64_t> blockOffsets;
+    for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
+        blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
+    }
+
+    // Free every other leaf block.
+    for (size_t count = 1; count < blockOffsets.size(); count += 2) {
+        allocator.Deallocate(blockOffsets[count]);
+    }
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
+}
+
+// Verify the buddy allocator can deal with multiple allocations with mixed alignments.
+TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
+    //  After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8 byte
+    //  alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   F   |  Ab   |  Ac  |       A - allocated
+    //                 --------------------------------
+    //
+    BuddyAllocator allocator(32);
+
+    // Allocate Aa (two splits).
+    ASSERT_EQ(allocator.Allocate(8, 16), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Allocate Ab (skip Aa buddy due to alignment and perform another split).
+    ASSERT_EQ(allocator.Allocate(8, 16), 16u);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Check that we cannot fit another.
+    ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);
+
+    // Allocate Ac (zero splits and Ab's buddy is now the first free block).
+    ASSERT_EQ(allocator.Allocate(8, 8), 24u);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can deal with multiple allocations with equal alignments.
+TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
+    //  After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4 byte
+    //  alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       Ac     |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   Ab  |              |       A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    constexpr uint64_t alignment = 4;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Allocate block Aa (two splits)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Allocate block Ab (Aa's buddy)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Check that we can still allocate Ac.
+    ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+}
diff --git a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
new file mode 100644
index 0000000..fbe5239
--- /dev/null
+++ b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
@@ -0,0 +1,460 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/BuddyMemoryAllocator.h"
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+
+#include <set>
+#include <vector>
+
+using namespace dawn::native;
+
+class DummyResourceHeapAllocator : public ResourceHeapAllocator {
+  public:
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
+        return std::make_unique<ResourceHeapBase>();
+    }
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+    }
+};
+
+class DummyBuddyResourceAllocator {
+  public:
+    DummyBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
+        : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
+    }
+
+    DummyBuddyResourceAllocator(uint64_t maxBlockSize,
+                                uint64_t memorySize,
+                                ResourceHeapAllocator* heapAllocator)
+        : mAllocator(maxBlockSize, memorySize, heapAllocator) {
+    }
+
+    ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
+        ResultOrError<ResourceMemoryAllocation> result =
+            mAllocator.Allocate(allocationSize, alignment);
+        return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
+    }
+
+    void Deallocate(ResourceMemoryAllocation& allocation) {
+        mAllocator.Deallocate(allocation);
+    }
+
+    uint64_t ComputeTotalNumOfHeapsForTesting() const {
+        return mAllocator.ComputeTotalNumOfHeapsForTesting();
+    }
+
+  private:
+    DummyResourceHeapAllocator mHeapAllocator;
+    BuddyMemoryAllocator mAllocator;
+};
+
+// Verify a single resource allocation in a single heap.
+TEST(BuddyMemoryAllocatorTests, SingleHeap) {
+    // After one 128 byte resource allocation:
+    //
+    // max block size -> ---------------------------
+    //                   |          A1/H0          |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = heapSize;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    // Allocate one 128 byte allocation (same size as heap).
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    // Cannot allocate when allocator is full.
+    invalidAllocation = allocator.Allocate(128);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
+}
+
+// Verify that multiple allocation are created in separate heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
+    // After two 128 byte resource allocations:
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |   A1/H0    |    A2/H1   |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    // Cannot allocate greater than max block size.
+    invalidAllocation = allocator.Allocate(maxBlockSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    // Allocate two 128 byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    // Deallocate both allocations
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify multiple sub-allocations can re-use heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
+    // After two 64 byte allocations with 128 byte heaps.
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |     H0     |     H1     |
+    //                   ---------------------------
+    //                   |  A1 |  A2  |  A3 |      |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64 byte sub-allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First sub-allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation re-uses first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Third allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    // Deallocate all allocations in reverse order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
+              2u);  // A2 pins H0.
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of various sizes over multiple heaps.
+TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
+    // After three 64 byte allocations and two 128 byte allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |      H2     |    A5/H3    |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |   A4  |     |             |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64-byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A1 and A2 share H0
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A3 creates and fully occupies a new heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+
+    // R5 size forms 64 byte hole after R4.
+    ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
+    ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
+    ASSERT_EQ(allocation5.GetOffset(), 0u);
+    ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
+    ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
+
+    // Deallocate allocations in staggered order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);  // A2 pins H0
+
+    allocator.Deallocate(allocation5);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);  // Released H3
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);  // Released H0
+
+    allocator.Deallocate(allocation4);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H2
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of same sizes with various alignments.
+TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
+    // After three 64 byte and one 128 byte resource allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |     H1     |     H2     |              |
+    //                   -------------------------------------------------------
+    //                   |  A1  |     |  A2  |     |  A3  |  A4 |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation2.GetOffset(), 0u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
+    ASSERT_EQ(allocation4.GetOffset(), 64u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify resource sub-allocation of various sizes with same alignments.
+TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
+    // After two 64 byte and two 128 byte resource allocations:
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |    A4/H2   |              |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |            |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t alignment = 64;
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Reuses H0
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify allocating a very large resource does not overflow.
+TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t largeBlock = (1ull << 63) + 1;
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+}
+
+// Verify resource heaps will be reused from a pool.
+TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
+
+    DummyResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
+
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
+
+    constexpr uint32_t kNumOfAllocations = 100;
+
+    // Allocate |kNumOfAllocations|.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
+    }
+
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+    }
+
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
+
+    // Allocate again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
+    }
+
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
+
+// Verify resource heaps that were reused from a pool can be destroyed.
+TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
+
+    DummyResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
+
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
+
+    // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth of
+    // buffers. Otherwise, the heap may be reused if not full.
+    constexpr uint32_t kNumOfHeaps = 10;
+
+    // Allocate |kNumOfHeaps| worth.
+    while (heaps.size() < kNumOfHeaps) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
+    }
+
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+    }
+
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
+
+    // Make sure we can destroy the remaining heaps.
+    poolAllocator.DestroyPool();
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
diff --git a/src/dawn/tests/unittests/ChainUtilsTests.cpp b/src/dawn/tests/unittests/ChainUtilsTests.cpp
new file mode 100644
index 0000000..87d7b46
--- /dev/null
+++ b/src/dawn/tests/unittests/ChainUtilsTests.cpp
@@ -0,0 +1,186 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/dawn_platform.h"
+
+// Checks that we cannot find any structs in an empty chain
+TEST(ChainUtilsTests, FindEmptyChain) {
+    const dawn::native::PrimitiveDepthClampingState* info = nullptr;
+    dawn::native::FindInChain(nullptr, &info);
+
+    ASSERT_EQ(nullptr, info);
+}
+
+// Checks that searching a chain for a present struct returns that struct
+TEST(ChainUtilsTests, FindPresentInChain) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    chain1.nextInChain = &chain2;
+    const dawn::native::PrimitiveDepthClampingState* info1 = nullptr;
+    const dawn::native::ShaderModuleSPIRVDescriptor* info2 = nullptr;
+    dawn::native::FindInChain(&chain1, &info1);
+    dawn::native::FindInChain(&chain1, &info2);
+
+    ASSERT_NE(nullptr, info1);
+    ASSERT_NE(nullptr, info2);
+}
+
+// Checks that searching a chain for a struct that doesn't exist returns a nullptr
+TEST(ChainUtilsTests, FindMissingInChain) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    chain1.nextInChain = &chain2;
+    const dawn::native::SurfaceDescriptorFromMetalLayer* info = nullptr;
+    dawn::native::FindInChain(&chain1, &info);
+
+    ASSERT_EQ(nullptr, info);
+}
+
+// Checks that validation rejects chains with duplicate STypes
+TEST(ChainUtilsTests, ValidateDuplicateSTypes) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    dawn::native::PrimitiveDepthClampingState chain3;
+    chain1.nextInChain = &chain2;
+    chain2.nextInChain = &chain3;
+
+    dawn::native::MaybeError result = dawn::native::ValidateSTypes(&chain1, {});
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+}
+
+// Checks that validation rejects chains that contain unspecified STypes
+TEST(ChainUtilsTests, ValidateUnspecifiedSTypes) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    dawn::native::ShaderModuleWGSLDescriptor chain3;
+    chain1.nextInChain = &chain2;
+    chain2.nextInChain = &chain3;
+
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSTypes(&chain1, {
+                                                  {wgpu::SType::PrimitiveDepthClampingState},
+                                                  {wgpu::SType::ShaderModuleSPIRVDescriptor},
+                                              });
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+}
+
+// Checks that validation rejects chains that contain multiple STypes from the same oneof
+// constraint.
+TEST(ChainUtilsTests, ValidateOneOfFailure) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    dawn::native::ShaderModuleWGSLDescriptor chain3;
+    chain1.nextInChain = &chain2;
+    chain2.nextInChain = &chain3;
+
+    dawn::native::MaybeError result = dawn::native::ValidateSTypes(
+        &chain1,
+        {{wgpu::SType::ShaderModuleSPIRVDescriptor, wgpu::SType::ShaderModuleWGSLDescriptor}});
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+}
+
+// Checks that validation accepts chains that match the constraints.
+TEST(ChainUtilsTests, ValidateSuccess) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    chain1.nextInChain = &chain2;
+
+    dawn::native::MaybeError result = dawn::native::ValidateSTypes(
+        &chain1,
+        {
+            {wgpu::SType::ShaderModuleSPIRVDescriptor, wgpu::SType::ShaderModuleWGSLDescriptor},
+            {wgpu::SType::PrimitiveDepthClampingState},
+            {wgpu::SType::SurfaceDescriptorFromMetalLayer},
+        });
+    ASSERT_TRUE(result.IsSuccess());
+}
+
+// Checks that validation always passes on empty chains.
+TEST(ChainUtilsTests, ValidateEmptyChain) {
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSTypes(nullptr, {
+                                                  {wgpu::SType::ShaderModuleSPIRVDescriptor},
+                                                  {wgpu::SType::PrimitiveDepthClampingState},
+                                              });
+    ASSERT_TRUE(result.IsSuccess());
+
+    result = dawn::native::ValidateSTypes(nullptr, {});
+    ASSERT_TRUE(result.IsSuccess());
+}
+
+// Checks that singleton validation always passes on empty chains.
+TEST(ChainUtilsTests, ValidateSingleEmptyChain) {
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSingleSType(nullptr, wgpu::SType::ShaderModuleSPIRVDescriptor);
+    ASSERT_TRUE(result.IsSuccess());
+
+    result = dawn::native::ValidateSingleSType(nullptr, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                               wgpu::SType::PrimitiveDepthClampingState);
+    ASSERT_TRUE(result.IsSuccess());
+}
+
+// Checks that singleton validation always fails on chains with multiple children.
+TEST(ChainUtilsTests, ValidateSingleMultiChain) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+    dawn::native::ShaderModuleSPIRVDescriptor chain2;
+    chain1.nextInChain = &chain2;
+
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSingleSType(&chain1, wgpu::SType::PrimitiveDepthClampingState);
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+
+    result = dawn::native::ValidateSingleSType(&chain1, wgpu::SType::PrimitiveDepthClampingState,
+                                               wgpu::SType::ShaderModuleSPIRVDescriptor);
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+}
+
+// Checks that singleton validation passes when the oneof constraint is met.
+TEST(ChainUtilsTests, ValidateSingleSatisfied) {
+    dawn::native::ShaderModuleWGSLDescriptor chain1;
+
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSingleSType(&chain1, wgpu::SType::ShaderModuleWGSLDescriptor);
+    ASSERT_TRUE(result.IsSuccess());
+
+    result = dawn::native::ValidateSingleSType(&chain1, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                               wgpu::SType::ShaderModuleWGSLDescriptor);
+    ASSERT_TRUE(result.IsSuccess());
+
+    result = dawn::native::ValidateSingleSType(&chain1, wgpu::SType::ShaderModuleWGSLDescriptor,
+                                               wgpu::SType::ShaderModuleSPIRVDescriptor);
+    ASSERT_TRUE(result.IsSuccess());
+}
+
+// Checks that singleton validation passes when the oneof constraint is not met.
+TEST(ChainUtilsTests, ValidateSingleUnsatisfied) {
+    dawn::native::PrimitiveDepthClampingState chain1;
+
+    dawn::native::MaybeError result =
+        dawn::native::ValidateSingleSType(&chain1, wgpu::SType::ShaderModuleWGSLDescriptor);
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+
+    result = dawn::native::ValidateSingleSType(&chain1, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                               wgpu::SType::ShaderModuleWGSLDescriptor);
+    ASSERT_TRUE(result.IsError());
+    result.AcquireError();
+}
diff --git a/src/dawn/tests/unittests/CommandAllocatorTests.cpp b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
new file mode 100644
index 0000000..7fb047a
--- /dev/null
+++ b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
@@ -0,0 +1,503 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/CommandAllocator.h"
+
+#include <limits>
+
+using namespace dawn::native;
+
+// Definition of the command types used in the tests
+enum class CommandType {
+    Draw,
+    Pipeline,
+    PushConstants,
+    Big,
+    Small,
+};
+
+struct CommandDraw {
+    uint32_t first;
+    uint32_t count;
+};
+
+struct CommandPipeline {
+    uint64_t pipeline;
+    uint32_t attachmentPoint;
+};
+
+struct CommandPushConstants {
+    uint8_t size;
+    uint8_t offset;
+};
+
+constexpr int kBigBufferSize = 65536;
+
+struct CommandBig {
+    uint32_t buffer[kBigBufferSize];
+};
+
+struct CommandSmall {
+    uint16_t data;
+};
+
+// Test allocating nothing works
+TEST(CommandAllocator, DoNothingAllocator) {
+    CommandAllocator allocator;
+}
+
+// Test iterating over nothing works
+TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
+    CommandAllocator allocator;
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test basic usage of allocator + iterator
+TEST(CommandAllocator, Basic) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+// Test basic usage of allocator + iterator with data
+TEST(CommandAllocator, BasicWithData) {
+    CommandAllocator allocator;
+
+    uint8_t mySize = 8;
+    uint8_t myOffset = 3;
+    uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
+
+    {
+        CommandPushConstants* pushConstants =
+            allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
+        pushConstants->size = mySize;
+        pushConstants->offset = myOffset;
+
+        uint32_t* values = allocator.AllocateData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            values[i] = myValues[i];
+        }
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::PushConstants);
+
+        CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
+        ASSERT_EQ(pushConstants->size, mySize);
+        ASSERT_EQ(pushConstants->offset, myOffset);
+
+        uint32_t* values = iterator.NextData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            ASSERT_EQ(values[i], myValues[i]);
+        }
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+// Test basic iterating several times
+TEST(CommandAllocator, MultipleIterations) {
+    CommandAllocator allocator;
+
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        // First iteration
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        // Second iteration
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+// Test large commands work
+TEST(CommandAllocator, LargeCommands) {
+    CommandAllocator allocator;
+
+    const int kCommandCount = 5;
+
+    uint32_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
+        for (int j = 0; j < kBigBufferSize; j++) {
+            big->buffer[j] = count++;
+        }
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Big);
+
+        CommandBig* big = iterator.NextCommand<CommandBig>();
+        for (int i = 0; i < kBigBufferSize; i++) {
+            ASSERT_EQ(big->buffer[i], count);
+            count++;
+        }
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test many small commands work
+TEST(CommandAllocator, ManySmallCommands) {
+    CommandAllocator allocator;
+
+    // Stay under max representable uint16_t
+    const int kCommandCount = 50000;
+
+    uint16_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
+        small->data = count++;
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Small);
+
+        CommandSmall* small = iterator.NextCommand<CommandSmall>();
+        ASSERT_EQ(small->data, count);
+        count++;
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+/*        ________
+ *       /        \
+ *       | POUIC! |
+ *       \_ ______/
+ *         v
+ *    ()_()
+ *    (O.o)
+ *    (> <)o
+ */
+
+// Test usage of iterator.Reset
+TEST(CommandAllocator, IteratorReset) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        iterator.Reset();
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+// Test iterating empty iterators
+TEST(CommandAllocator, EmptyIterator) {
+    {
+        CommandAllocator allocator;
+        CommandIterator iterator(std::move(allocator));
+
+        CommandType type;
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandAllocator allocator;
+        CommandIterator iterator1(std::move(allocator));
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandIterator iterator1;
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+template <size_t A>
+struct alignas(A) AlignedStruct {
+    char dummy;
+};
+
+// Test for overflows in Allocate's computations, size 1 variant
+TEST(CommandAllocator, AllocationOverflow_1) {
+    CommandAllocator allocator;
+    AlignedStruct<1>* data =
+        allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 2 variant
+TEST(CommandAllocator, AllocationOverflow_2) {
+    CommandAllocator allocator;
+    AlignedStruct<2>* data =
+        allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 4 variant
+TEST(CommandAllocator, AllocationOverflow_4) {
+    CommandAllocator allocator;
+    AlignedStruct<4>* data =
+        allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 8 variant
+TEST(CommandAllocator, AllocationOverflow_8) {
+    CommandAllocator allocator;
+    AlignedStruct<8>* data =
+        allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
+    ASSERT_EQ(data, nullptr);
+}
+
+template <int DefaultValue>
+struct IntWithDefault {
+    IntWithDefault() : value(DefaultValue) {
+    }
+
+    int value;
+};
+
+// Test that the allcator correctly defaults initalizes data for Allocate
+TEST(CommandAllocator, AllocateDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
+    ASSERT_EQ(int42->value, 42);
+
+    IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
+    ASSERT_EQ(int43->value, 43);
+
+    IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
+    ASSERT_EQ(int44->value, 44);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test that the allocator correctly default-initalizes data for AllocateData
+TEST(CommandAllocator, AllocateDataDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
+    ASSERT_EQ(int33[0].value, 33);
+
+    IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
+    ASSERT_EQ(int34[0].value, 34);
+    ASSERT_EQ(int34[0].value, 34);
+
+    IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
+    ASSERT_EQ(int35[0].value, 35);
+    ASSERT_EQ(int35[1].value, 35);
+    ASSERT_EQ(int35[2].value, 35);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Tests flattening of multiple CommandAllocators into a single CommandIterator using
+// AcquireCommandBlocks.
+TEST(CommandAllocator, AcquireCommandBlocks) {
+    constexpr size_t kNumAllocators = 2;
+    constexpr size_t kNumCommandsPerAllocator = 2;
+    const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
+        {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
+        {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
+    };
+    const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}};
+    const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
+    const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
+
+    std::vector<CommandAllocator> allocators(kNumAllocators);
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        CommandAllocator& allocator = allocators[j];
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
+            CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+            pipeline->pipeline = pipelines[j][i];
+            pipeline->attachmentPoint = attachmentPoints[j][i];
+
+            CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+            draw->first = firsts[j][i];
+            draw->count = counts[j][i];
+        }
+    }
+
+    CommandIterator iterator;
+    iterator.AcquireCommandBlocks(std::move(allocators));
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
+            CommandType type;
+            bool hasNext = iterator.NextCommandId(&type);
+            ASSERT_TRUE(hasNext);
+            ASSERT_EQ(type, CommandType::Pipeline);
+
+            CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+            ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
+            ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
+
+            hasNext = iterator.NextCommandId(&type);
+            ASSERT_TRUE(hasNext);
+            ASSERT_EQ(type, CommandType::Draw);
+
+            CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+            ASSERT_EQ(draw->first, firsts[j][i]);
+            ASSERT_EQ(draw->count, counts[j][i]);
+        }
+    }
+    CommandType type;
+    ASSERT_FALSE(iterator.NextCommandId(&type));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
diff --git a/src/dawn/tests/unittests/ConcurrentCacheTests.cpp b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
new file mode 100644
index 0000000..6369a39
--- /dev/null
+++ b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
@@ -0,0 +1,114 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/ConcurrentCache.h"
+#include "dawn/native/AsyncTask.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/utils/SystemUtils.h"
+
+namespace {
+    class SimpleCachedObject {
+      public:
+        explicit SimpleCachedObject(size_t value) : mValue(value) {
+        }
+
+        size_t GetValue() const {
+            return mValue;
+        }
+
+        struct EqualityFunc {
+            bool operator()(const SimpleCachedObject* a, const SimpleCachedObject* b) const {
+                return a->mValue == b->mValue;
+            }
+        };
+
+        struct HashFunc {
+            size_t operator()(const SimpleCachedObject* obj) const {
+                return obj->mValue;
+            }
+        };
+
+      private:
+        size_t mValue;
+    };
+
+}  // anonymous namespace
+
+class ConcurrentCacheTest : public testing::Test {
+  public:
+    ConcurrentCacheTest() : mPool(mPlatform.CreateWorkerTaskPool()), mTaskManager(mPool.get()) {
+    }
+
+  protected:
+    dawn::platform::Platform mPlatform;
+    std::unique_ptr<dawn::platform::WorkerTaskPool> mPool;
+    dawn::native::AsyncTaskManager mTaskManager;
+    ConcurrentCache<SimpleCachedObject> mCache;
+};
+
+// Test inserting two objects that are equal to each other into the concurrent cache works as
+// expected.
+TEST_F(ConcurrentCacheTest, InsertAtSameTime) {
+    SimpleCachedObject cachedObject(1);
+    SimpleCachedObject anotherCachedObject(1);
+
+    std::pair<SimpleCachedObject*, bool> insertOutput = {};
+    std::pair<SimpleCachedObject*, bool> anotherInsertOutput = {};
+
+    ConcurrentCache<SimpleCachedObject>* cachePtr = &mCache;
+    dawn::native::AsyncTask asyncTask1([&insertOutput, cachePtr, &cachedObject] {
+        insertOutput = cachePtr->Insert(&cachedObject);
+    });
+    dawn::native::AsyncTask asyncTask2([&anotherInsertOutput, cachePtr, &anotherCachedObject] {
+        anotherInsertOutput = cachePtr->Insert(&anotherCachedObject);
+    });
+    mTaskManager.PostTask(std::move(asyncTask1));
+    mTaskManager.PostTask(std::move(asyncTask2));
+
+    mTaskManager.WaitAllPendingTasks();
+
+    ASSERT_TRUE(insertOutput.first == &cachedObject || insertOutput.first == &anotherCachedObject);
+    ASSERT_EQ(insertOutput.first, anotherInsertOutput.first);
+    ASSERT_EQ(insertOutput.second, !anotherInsertOutput.second);
+}
+
+// Testing erasing an object after inserting into the cache works as expected.
+TEST_F(ConcurrentCacheTest, EraseAfterInsertion) {
+    SimpleCachedObject cachedObject(1);
+
+    std::pair<SimpleCachedObject*, bool> insertOutput = {};
+    ConcurrentCache<SimpleCachedObject>* cachePtr = &mCache;
+    dawn::native::AsyncTask insertTask([&insertOutput, cachePtr, &cachedObject] {
+        insertOutput = cachePtr->Insert(&cachedObject);
+    });
+
+    size_t erasedObjectCount = 0;
+    dawn::native::AsyncTask eraseTask([&erasedObjectCount, cachePtr, &cachedObject] {
+        while (cachePtr->Find(&cachedObject) == nullptr) {
+            utils::USleep(100);
+        }
+        erasedObjectCount = cachePtr->Erase(&cachedObject);
+    });
+
+    mTaskManager.PostTask(std::move(insertTask));
+    mTaskManager.PostTask(std::move(eraseTask));
+
+    mTaskManager.WaitAllPendingTasks();
+
+    ASSERT_EQ(&cachedObject, insertOutput.first);
+    ASSERT_TRUE(insertOutput.second);
+    ASSERT_EQ(1u, erasedObjectCount);
+}
diff --git a/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
new file mode 100644
index 0000000..26849bd
--- /dev/null
+++ b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
@@ -0,0 +1,93 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+
+#include "dawn/EnumClassBitmasks.h"
+
+namespace dawn {
+
+    enum class Color : uint32_t {
+        R = 1,
+        G = 2,
+        B = 4,
+        A = 8,
+    };
+
+    template <>
+    struct IsDawnBitmask<Color> {
+        static constexpr bool enable = true;
+    };
+
+    TEST(BitmaskTests, BasicOperations) {
+        Color test1 = Color::R | Color::G;
+        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+
+        Color test2 = test1 ^ (Color::R | Color::A);
+        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+
+        Color test3 = test2 & Color::A;
+        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+
+        Color test4 = ~test3;
+        ASSERT_EQ(~uint32_t(8), static_cast<uint32_t>(test4));
+    }
+
+    TEST(BitmaskTests, AssignOperations) {
+        Color test1 = Color::R;
+        test1 |= Color::G;
+        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+
+        Color test2 = test1;
+        test2 ^= (Color::R | Color::A);
+        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+
+        Color test3 = test2;
+        test3 &= Color::A;
+        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+    }
+
+    TEST(BitmaskTests, BoolConversion) {
+        bool test1 = Color::R | Color::G;
+        ASSERT_TRUE(test1);
+
+        bool test2 = Color::R & Color::G;
+        ASSERT_FALSE(test2);
+
+        bool test3 = Color::R ^ Color::G;
+        ASSERT_TRUE(test3);
+
+        if (Color::R & ~Color::R) {
+            ASSERT_TRUE(false);
+        }
+    }
+
+    TEST(BitmaskTests, ThreeOrs) {
+        Color c = Color::R | Color::G | Color::B;
+        ASSERT_EQ(7u, static_cast<uint32_t>(c));
+    }
+
+    TEST(BitmaskTests, ZeroOrOneBits) {
+        Color zero = static_cast<Color>(0);
+        ASSERT_TRUE(HasZeroOrOneBits(zero));
+        ASSERT_TRUE(HasZeroOrOneBits(Color::R));
+        ASSERT_TRUE(HasZeroOrOneBits(Color::G));
+        ASSERT_TRUE(HasZeroOrOneBits(Color::B));
+        ASSERT_TRUE(HasZeroOrOneBits(Color::A));
+        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::R | Color::G)));
+        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::G | Color::B)));
+        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::B | Color::A)));
+    }
+
+}  // namespace dawn
diff --git a/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
new file mode 100644
index 0000000..b6c6727
--- /dev/null
+++ b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
@@ -0,0 +1,72 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/EnumMaskIterator.h"
+
+#include "gtest/gtest.h"
+
+namespace dawn::native {
+
+    enum class TestAspect : uint8_t {
+        Color = 1,
+        Depth = 2,
+        Stencil = 4,
+    };
+
+    template <>
+    struct EnumBitmaskSize<TestAspect> {
+        static constexpr unsigned value = 3;
+    };
+
+}  // namespace dawn::native
+
+namespace dawn {
+
+    template <>
+    struct IsDawnBitmask<dawn::native::TestAspect> {
+        static constexpr bool enable = true;
+    };
+
+}  // namespace dawn
+
+namespace dawn::native {
+
+    static_assert(EnumBitmaskSize<TestAspect>::value == 3);
+
+    TEST(EnumMaskIteratorTests, None) {
+        for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
+            FAIL();
+            DAWN_UNUSED(aspect);
+        }
+    }
+
+    TEST(EnumMaskIteratorTests, All) {
+        TestAspect expected[] = {TestAspect::Color, TestAspect::Depth, TestAspect::Stencil};
+        uint32_t i = 0;
+        TestAspect aspects = TestAspect::Color | TestAspect::Depth | TestAspect::Stencil;
+        for (TestAspect aspect : IterateEnumMask(aspects)) {
+            EXPECT_EQ(aspect, expected[i++]);
+        }
+    }
+
+    TEST(EnumMaskIteratorTests, Partial) {
+        TestAspect expected[] = {TestAspect::Color, TestAspect::Stencil};
+        uint32_t i = 0;
+        TestAspect aspects = TestAspect::Stencil | TestAspect::Color;
+        for (TestAspect aspect : IterateEnumMask(aspects)) {
+            EXPECT_EQ(aspect, expected[i++]);
+        }
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ErrorTests.cpp b/src/dawn/tests/unittests/ErrorTests.cpp
new file mode 100644
index 0000000..5af82a4
--- /dev/null
+++ b/src/dawn/tests/unittests/ErrorTests.cpp
@@ -0,0 +1,352 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorData.h"
+
+using namespace dawn::native;
+
+namespace {
+
+    int dummySuccess = 0xbeef;
+    const char* dummyErrorMessage = "I am an error message :3";
+
+    // Check returning a success MaybeError with {};
+    TEST(ErrorTests, Error_Success) {
+        auto ReturnSuccess = []() -> MaybeError { return {}; };
+
+        MaybeError result = ReturnSuccess();
+        ASSERT_TRUE(result.IsSuccess());
+    }
+
+    // Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
+    TEST(ErrorTests, Error_Error) {
+        auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
+
+        MaybeError result = ReturnError();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check returning a success ResultOrError with an implicit conversion
+    TEST(ErrorTests, ResultOrError_Success) {
+        auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
+
+        ResultOrError<int*> result = ReturnSuccess();
+        ASSERT_TRUE(result.IsSuccess());
+        ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
+    }
+
+    // Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
+    TEST(ErrorTests, ResultOrError_Error) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        ResultOrError<int*> result = ReturnError();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check DAWN_TRY handles successes correctly.
+    TEST(ErrorTests, TRY_Success) {
+        auto ReturnSuccess = []() -> MaybeError { return {}; };
+
+        // We need to check that DAWN_TRY doesn't return on successes
+        bool tryReturned = true;
+
+        auto Try = [ReturnSuccess, &tryReturned]() -> MaybeError {
+            DAWN_TRY(ReturnSuccess());
+            tryReturned = false;
+            return {};
+        };
+
+        MaybeError result = Try();
+        ASSERT_TRUE(result.IsSuccess());
+        ASSERT_FALSE(tryReturned);
+    }
+
+    // Check DAWN_TRY handles errors correctly.
+    TEST(ErrorTests, TRY_Error) {
+        auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
+
+        auto Try = [ReturnError]() -> MaybeError {
+            DAWN_TRY(ReturnError());
+            // DAWN_TRY should return before this point
+            EXPECT_FALSE(true);
+            return {};
+        };
+
+        MaybeError result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check DAWN_TRY adds to the backtrace.
+    TEST(ErrorTests, TRY_AddsToBacktrace) {
+        auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
+
+        auto SingleTry = [ReturnError]() -> MaybeError {
+            DAWN_TRY(ReturnError());
+            return {};
+        };
+
+        auto DoubleTry = [SingleTry]() -> MaybeError {
+            DAWN_TRY(SingleTry());
+            return {};
+        };
+
+        MaybeError singleResult = SingleTry();
+        ASSERT_TRUE(singleResult.IsError());
+
+        MaybeError doubleResult = DoubleTry();
+        ASSERT_TRUE(doubleResult.IsError());
+
+        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+
+        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+    }
+
+    // Check DAWN_TRY_ASSIGN handles successes correctly.
+    TEST(ErrorTests, TRY_RESULT_Success) {
+        auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
+
+        // We need to check that DAWN_TRY doesn't return on successes
+        bool tryReturned = true;
+
+        auto Try = [ReturnSuccess, &tryReturned]() -> ResultOrError<int*> {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN(result, ReturnSuccess());
+            tryReturned = false;
+
+            EXPECT_EQ(result, &dummySuccess);
+            return result;
+        };
+
+        ResultOrError<int*> result = Try();
+        ASSERT_TRUE(result.IsSuccess());
+        ASSERT_FALSE(tryReturned);
+        ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
+    }
+
+    // Check DAWN_TRY_ASSIGN handles errors correctly.
+    TEST(ErrorTests, TRY_RESULT_Error) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        auto Try = [ReturnError]() -> ResultOrError<int*> {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN(result, ReturnError());
+            DAWN_UNUSED(result);
+
+            // DAWN_TRY should return before this point
+            EXPECT_FALSE(true);
+            return &dummySuccess;
+        };
+
+        ResultOrError<int*> result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check DAWN_TRY_ASSIGN adds to the backtrace.
+    TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
+            DAWN_TRY(ReturnError());
+            return &dummySuccess;
+        };
+
+        auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
+            DAWN_TRY(SingleTry());
+            return &dummySuccess;
+        };
+
+        ResultOrError<int*> singleResult = SingleTry();
+        ASSERT_TRUE(singleResult.IsError());
+
+        ResultOrError<int*> doubleResult = DoubleTry();
+        ASSERT_TRUE(doubleResult.IsError());
+
+        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+
+        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+    }
+
+    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+    TEST(ErrorTests, TRY_RESULT_ConversionToError) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        auto Try = [ReturnError]() -> MaybeError {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN(result, ReturnError());
+            DAWN_UNUSED(result);
+
+            return {};
+        };
+
+        MaybeError result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+    // Version without Result<E*, T*>
+    TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
+        auto ReturnError = []() -> ResultOrError<int> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        auto Try = [ReturnError]() -> MaybeError {
+            int result = 0;
+            DAWN_TRY_ASSIGN(result, ReturnError());
+            DAWN_UNUSED(result);
+
+            return {};
+        };
+
+        MaybeError result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check DAWN_TRY_ASSIGN handles successes correctly.
+    TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
+        auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
+
+        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
+        // cleanup is not called.
+        bool tryReturned = true;
+        bool tryCleanup = false;
+
+        auto Try = [ReturnSuccess, &tryReturned, &tryCleanup]() -> ResultOrError<int*> {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
+            tryReturned = false;
+
+            EXPECT_EQ(result, &dummySuccess);
+            return result;
+        };
+
+        ResultOrError<int*> result = Try();
+        ASSERT_TRUE(result.IsSuccess());
+        ASSERT_FALSE(tryReturned);
+        ASSERT_FALSE(tryCleanup);
+        ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
+    }
+
+    // Check DAWN_TRY_ASSIGN handles cleanups.
+    TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
+        bool tryCleanup = false;
+
+        auto Try = [ReturnError, &tryCleanup]() -> ResultOrError<int*> {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), { tryCleanup = true; });
+            DAWN_UNUSED(result);
+
+            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+            EXPECT_FALSE(true);
+            return &dummySuccess;
+        };
+
+        ResultOrError<int*> result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+        ASSERT_TRUE(tryCleanup);
+    }
+
+    // Check DAWN_TRY_ASSIGN can override return value when needed.
+    TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
+        auto ReturnError = []() -> ResultOrError<int*> {
+            return DAWN_VALIDATION_ERROR(dummyErrorMessage);
+        };
+
+        auto Try = [ReturnError]() -> bool {
+            int* result = nullptr;
+            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), {}, true);
+            DAWN_UNUSED(result);
+
+            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+            EXPECT_FALSE(true);
+            return false;
+        };
+
+        bool result = Try();
+        ASSERT_TRUE(result);
+    }
+
+    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+    // Check DAWN_TRY handles errors correctly.
+    TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
+        auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
+
+        auto Try = [ReturnError]() -> ResultOrError<int*> {
+            DAWN_TRY(ReturnError());
+            return &dummySuccess;
+        };
+
+        ResultOrError<int*> result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+    // Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
+    TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
+        auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
+
+        auto Try = [ReturnError]() -> ResultOrError<int> {
+            DAWN_TRY(ReturnError());
+            return 42;
+        };
+
+        ResultOrError<int> result = Try();
+        ASSERT_TRUE(result.IsError());
+
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
+        ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/FeatureTests.cpp b/src/dawn/tests/unittests/FeatureTests.cpp
new file mode 100644
index 0000000..29c1115
--- /dev/null
+++ b/src/dawn/tests/unittests/FeatureTests.cpp
@@ -0,0 +1,88 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/Features.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/null/DeviceNull.h"
+
+class FeatureTests : public testing::Test {
+  public:
+    FeatureTests()
+        : testing::Test(),
+          mInstanceBase(dawn::native::InstanceBase::Create()),
+          mAdapterBase(mInstanceBase.Get()) {
+    }
+
+    std::vector<wgpu::FeatureName> GetAllFeatureNames() {
+        std::vector<wgpu::FeatureName> allFeatureNames(kTotalFeaturesCount);
+        for (size_t i = 0; i < kTotalFeaturesCount; ++i) {
+            allFeatureNames[i] = FeatureEnumToAPIFeature(static_cast<dawn::native::Feature>(i));
+        }
+        return allFeatureNames;
+    }
+
+    static constexpr size_t kTotalFeaturesCount =
+        static_cast<size_t>(dawn::native::Feature::EnumCount);
+
+  protected:
+    Ref<dawn::native::InstanceBase> mInstanceBase;
+    dawn::native::null::Adapter mAdapterBase;
+};
+
+// Test the creation of a device will fail if the requested feature is not supported on the
+// Adapter.
+TEST_F(FeatureTests, AdapterWithRequiredFeatureDisabled) {
+    const std::vector<wgpu::FeatureName> kAllFeatureNames = GetAllFeatureNames();
+    for (size_t i = 0; i < kTotalFeaturesCount; ++i) {
+        dawn::native::Feature notSupportedFeature = static_cast<dawn::native::Feature>(i);
+
+        std::vector<wgpu::FeatureName> featureNamesWithoutOne = kAllFeatureNames;
+        featureNamesWithoutOne.erase(featureNamesWithoutOne.begin() + i);
+
+        mAdapterBase.SetSupportedFeatures(featureNamesWithoutOne);
+        dawn::native::Adapter adapterWithoutFeature(&mAdapterBase);
+
+        wgpu::DeviceDescriptor deviceDescriptor;
+        wgpu::FeatureName featureName = FeatureEnumToAPIFeature(notSupportedFeature);
+        deviceDescriptor.requiredFeatures = &featureName;
+        deviceDescriptor.requiredFeaturesCount = 1;
+
+        WGPUDevice deviceWithFeature = adapterWithoutFeature.CreateDevice(
+            reinterpret_cast<const WGPUDeviceDescriptor*>(&deviceDescriptor));
+        ASSERT_EQ(nullptr, deviceWithFeature);
+    }
+}
+
+// Test Device.GetEnabledFeatures() can return the names of the enabled features correctly.
+TEST_F(FeatureTests, GetEnabledFeatures) {
+    dawn::native::Adapter adapter(&mAdapterBase);
+    for (size_t i = 0; i < kTotalFeaturesCount; ++i) {
+        dawn::native::Feature feature = static_cast<dawn::native::Feature>(i);
+        wgpu::FeatureName featureName = FeatureEnumToAPIFeature(feature);
+
+        wgpu::DeviceDescriptor deviceDescriptor;
+        deviceDescriptor.requiredFeatures = &featureName;
+        deviceDescriptor.requiredFeaturesCount = 1;
+
+        dawn::native::DeviceBase* deviceBase = dawn::native::FromAPI(
+            adapter.CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(&deviceDescriptor)));
+
+        ASSERT_EQ(1u, deviceBase->APIEnumerateFeatures(nullptr));
+        wgpu::FeatureName enabledFeature;
+        deviceBase->APIEnumerateFeatures(&enabledFeature);
+        EXPECT_EQ(enabledFeature, featureName);
+    }
+}
diff --git a/src/dawn/tests/unittests/GPUInfoTests.cpp b/src/dawn/tests/unittests/GPUInfoTests.cpp
new file mode 100644
index 0000000..60e2190
--- /dev/null
+++ b/src/dawn/tests/unittests/GPUInfoTests.cpp
@@ -0,0 +1,31 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/GPUInfo.h"
+
+namespace {
+    const PCIVendorID vendorID = 0x8086;
+    const gpu_info::D3DDriverVersion version1 = {20, 19, 15, 5107};
+    const gpu_info::D3DDriverVersion version2 = {21, 20, 16, 5077};
+    const gpu_info::D3DDriverVersion version3 = {27, 20, 100, 9946};
+    const gpu_info::D3DDriverVersion version4 = {27, 20, 101, 2003};
+}  // anonymous namespace
+
+TEST(GPUInfo, CompareD3DDriverVersion) {
+    EXPECT_EQ(gpu_info::CompareD3DDriverVersion(vendorID, version1, version2), -1);
+    EXPECT_EQ(gpu_info::CompareD3DDriverVersion(vendorID, version2, version3), -1);
+    EXPECT_EQ(gpu_info::CompareD3DDriverVersion(vendorID, version3, version4), -1);
+}
diff --git a/src/dawn/tests/unittests/GetProcAddressTests.cpp b/src/dawn/tests/unittests/GetProcAddressTests.cpp
new file mode 100644
index 0000000..56e79a1
--- /dev/null
+++ b/src/dawn/tests/unittests/GetProcAddressTests.cpp
@@ -0,0 +1,170 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/dawn_proc.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/null/DeviceNull.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/wire/WireClient.h"
+
+namespace {
+
+    // dawn_wire and dawn_native contain duplicated code for the handling of GetProcAddress
+    // so we run the tests against both implementations. This enum is used as a test parameters to
+    // know which implementation to test.
+    enum class DawnFlavor {
+        Native,
+        Wire,
+    };
+
+    std::ostream& operator<<(std::ostream& stream, DawnFlavor flavor) {
+        switch (flavor) {
+            case DawnFlavor::Native:
+                stream << "dawn_native";
+                break;
+
+            case DawnFlavor::Wire:
+                stream << "dawn_wire";
+                break;
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+        return stream;
+    }
+
+    class GetProcAddressTests : public testing::TestWithParam<DawnFlavor> {
+      public:
+        GetProcAddressTests()
+            : testing::TestWithParam<DawnFlavor>(),
+              mNativeInstance(dawn::native::InstanceBase::Create()),
+              mNativeAdapter(mNativeInstance.Get()) {
+        }
+
+        void SetUp() override {
+            switch (GetParam()) {
+                case DawnFlavor::Native: {
+                    mDevice = wgpu::Device::Acquire(
+                        reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
+                    mProcs = dawn::native::GetProcs();
+                    break;
+                }
+
+                case DawnFlavor::Wire: {
+                    mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+                    dawn::wire::WireClientDescriptor clientDesc = {};
+                    clientDesc.serializer = mC2sBuf.get();
+                    mWireClient = std::make_unique<dawn::wire::WireClient>(clientDesc);
+
+                    mDevice = wgpu::Device::Acquire(mWireClient->ReserveDevice().device);
+                    mProcs = dawn::wire::client::GetProcs();
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+
+            dawnProcSetProcs(&mProcs);
+        }
+
+        void TearDown() override {
+            // Destroy the device before freeing the instance or the wire client in the destructor
+            mDevice = wgpu::Device();
+        }
+
+      protected:
+        Ref<dawn::native::InstanceBase> mNativeInstance;
+        dawn::native::null::Adapter mNativeAdapter;
+
+        std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+        std::unique_ptr<dawn::wire::WireClient> mWireClient;
+
+        wgpu::Device mDevice;
+        DawnProcTable mProcs;
+    };
+
+    // Test GetProcAddress with and without devices on some valid examples
+    TEST_P(GetProcAddressTests, ValidExamples) {
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceCreateBuffer"),
+                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceCreateBuffer"),
+                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuQueueSubmit"),
+                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuQueueSubmit"),
+                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+    }
+
+    // Test GetProcAddress with and without devices on nullptr procName
+    TEST_P(GetProcAddressTests, Nullptr) {
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, nullptr), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), nullptr), nullptr);
+    }
+
+    // Test GetProcAddress with and without devices on some invalid
+    TEST_P(GetProcAddressTests, InvalidExamples) {
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceDoSomething"), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceDoSomething"), nullptr);
+
+        // Trigger the condition where lower_bound will return the end of the procMap.
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "zzzzzzz"), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "zzzzzzz"), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "ZZ"), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "ZZ"), nullptr);
+
+        // Some more potential corner cases.
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, ""), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), ""), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "0"), nullptr);
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "0"), nullptr);
+    }
+
+    // Test that GetProcAddress supports freestanding function that are handled specially
+    TEST_P(GetProcAddressTests, FreeStandingFunctions) {
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuGetProcAddress"),
+                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuGetProcAddress"),
+                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+
+        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuCreateInstance"),
+                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
+        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuCreateInstance"),
+                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
+    }
+
+    INSTANTIATE_TEST_SUITE_P(,
+                             GetProcAddressTests,
+                             testing::Values(DawnFlavor::Native, DawnFlavor::Wire),
+                             testing::PrintToStringParamName());
+
+    TEST(GetProcAddressInternalTests, CheckDawnNativeProcMapOrder) {
+        std::vector<const char*> names = dawn::native::GetProcMapNamesForTesting();
+        for (size_t i = 1; i < names.size(); i++) {
+            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
+        }
+    }
+
+    TEST(GetProcAddressInternalTests, CheckDawnWireClientProcMapOrder) {
+        std::vector<const char*> names = dawn::wire::client::GetProcMapNamesForTesting();
+        for (size_t i = 1; i < names.size(); i++) {
+            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
+        }
+    }
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/ITypArrayTests.cpp b/src/dawn/tests/unittests/ITypArrayTests.cpp
new file mode 100644
index 0000000..d8fa1fc
--- /dev/null
+++ b/src/dawn/tests/unittests/ITypArrayTests.cpp
@@ -0,0 +1,91 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_array.h"
+
+class ITypArrayTest : public testing::Test {
+  protected:
+    using Key = TypedInteger<struct KeyT, uint32_t>;
+    using Val = TypedInteger<struct ValT, uint32_t>;
+    using Array = ityp::array<Key, Val, 10>;
+
+    // Test that the expected array methods can be constexpr
+    struct ConstexprTest {
+        static constexpr Array kArr = {Val(0), Val(1), Val(2), Val(3), Val(4),
+                                       Val(5), Val(6), Val(7), Val(8), Val(9)};
+
+        static_assert(kArr[Key(3)] == Val(3));
+        static_assert(kArr.at(Key(7)) == Val(7));
+        static_assert(kArr.size() == Key(10));
+    };
+};
+
+// Test that values can be set at an index and retrieved from the same index.
+TEST_F(ITypArrayTest, Indexing) {
+    Array arr;
+    {
+        arr[Key(2)] = Val(5);
+        arr[Key(1)] = Val(9);
+        arr[Key(9)] = Val(2);
+
+        ASSERT_EQ(arr[Key(2)], Val(5));
+        ASSERT_EQ(arr[Key(1)], Val(9));
+        ASSERT_EQ(arr[Key(9)], Val(2));
+    }
+    {
+        arr.at(Key(4)) = Val(5);
+        arr.at(Key(3)) = Val(8);
+        arr.at(Key(1)) = Val(7);
+
+        ASSERT_EQ(arr.at(Key(4)), Val(5));
+        ASSERT_EQ(arr.at(Key(3)), Val(8));
+        ASSERT_EQ(arr.at(Key(1)), Val(7));
+    }
+}
+
+// Test that the array can be iterated in order with a range-based for loop
+TEST_F(ITypArrayTest, RangeBasedIteration) {
+    Array arr;
+
+    // Assign in a non-const range-based for loop
+    uint32_t i = 0;
+    for (Val& val : arr) {
+        val = Val(i);
+    }
+
+    // Check values in a const range-based for loop
+    i = 0;
+    for (Val val : static_cast<const Array&>(arr)) {
+        ASSERT_EQ(val, arr[Key(i++)]);
+    }
+}
+
+// Test that begin/end/front/back/data return pointers/references to the correct elements.
+TEST_F(ITypArrayTest, BeginEndFrontBackData) {
+    Array arr;
+
+    // non-const versions
+    ASSERT_EQ(&arr.front(), &arr[Key(0)]);
+    ASSERT_EQ(&arr.back(), &arr[Key(9)]);
+    ASSERT_EQ(arr.data(), &arr[Key(0)]);
+
+    // const versions
+    const Array& constArr = arr;
+    ASSERT_EQ(&constArr.front(), &constArr[Key(0)]);
+    ASSERT_EQ(&constArr.back(), &constArr[Key(9)]);
+    ASSERT_EQ(constArr.data(), &constArr[Key(0)]);
+}
diff --git a/src/dawn/tests/unittests/ITypBitsetTests.cpp b/src/dawn/tests/unittests/ITypBitsetTests.cpp
new file mode 100644
index 0000000..6aa7fd2
--- /dev/null
+++ b/src/dawn/tests/unittests/ITypBitsetTests.cpp
@@ -0,0 +1,209 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_bitset.h"
+
+#include <set>
+
+class ITypBitsetTest : public testing::Test {
+  protected:
+    using Key = TypedInteger<struct KeyT, size_t>;
+    using Bitset = ityp::bitset<Key, 9>;
+    using Bitset40 = ityp::bitset<Key, 40>;
+
+    // Test that the expected bitset methods can be constexpr
+    struct ConstexprTest {
+        static constexpr Bitset kBitset = {1 << 0 | 1 << 3 | 1 << 7 | 1 << 8};
+
+        static_assert(kBitset[Key(0)] == true);
+        static_assert(kBitset[Key(1)] == false);
+        static_assert(kBitset[Key(2)] == false);
+        static_assert(kBitset[Key(3)] == true);
+        static_assert(kBitset[Key(4)] == false);
+        static_assert(kBitset[Key(5)] == false);
+        static_assert(kBitset[Key(6)] == false);
+        static_assert(kBitset[Key(7)] == true);
+        static_assert(kBitset[Key(8)] == true);
+
+        static_assert(kBitset.size() == 9);
+    };
+
+    void ExpectBits(const Bitset& bits, std::set<size_t> indices) {
+        size_t mask = 0;
+
+        for (size_t i = 0; i < bits.size(); ++i) {
+            if (indices.count(i) == 0) {
+                ASSERT_FALSE(bits[Key(i)]) << i;
+                ASSERT_FALSE(bits.test(Key(i))) << i;
+            } else {
+                mask |= (size_t(1) << i);
+                ASSERT_TRUE(bits[Key(i)]) << i;
+                ASSERT_TRUE(bits.test(Key(i))) << i;
+            }
+        }
+
+        ASSERT_EQ(bits.to_ullong(), mask);
+        ASSERT_EQ(bits.to_ulong(), mask);
+        ASSERT_EQ(bits.count(), indices.size());
+        ASSERT_EQ(bits.all(), indices.size() == bits.size());
+        ASSERT_EQ(bits.any(), indices.size() != 0);
+        ASSERT_EQ(bits.none(), indices.size() == 0);
+    }
+};
+
+// Test that by default no bits are set
+TEST_F(ITypBitsetTest, DefaultZero) {
+    Bitset bits;
+    ExpectBits(bits, {});
+}
+
+// Test the bitset can be initialized with a bitmask
+TEST_F(ITypBitsetTest, InitializeByBits) {
+    Bitset bits = {1 << 1 | 1 << 2 | 1 << 7};
+    ExpectBits(bits, {1, 2, 7});
+}
+
+// Test that bits can be set at an index and retrieved from the same index.
+TEST_F(ITypBitsetTest, Indexing) {
+    Bitset bits;
+    ExpectBits(bits, {});
+
+    bits[Key(2)] = true;
+    bits[Key(4)] = false;
+    bits.set(Key(1));
+    bits.set(Key(7), true);
+    bits.set(Key(8), false);
+
+    ExpectBits(bits, {1, 2, 7});
+
+    bits.reset(Key(2));
+    bits.reset(Key(7));
+    ExpectBits(bits, {1});
+}
+
+// Test that bits can be flipped
+TEST_F(ITypBitsetTest, Flip) {
+    Bitset bits = {1 << 1 | 1 << 2 | 1 << 7};
+    ExpectBits(bits, {1, 2, 7});
+
+    bits.flip(Key(4));
+    bits.flip(Key(1));  // false
+    bits.flip(Key(6));
+    bits.flip(Key(5));
+    ExpectBits(bits, {2, 4, 5, 6, 7});
+
+    bits.flip();
+    ExpectBits(bits, {0, 1, 3, 8});
+
+    ExpectBits(~bits, {2, 4, 5, 6, 7});
+}
+
+// Test that all the bits can be set/reset.
+TEST_F(ITypBitsetTest, SetResetAll) {
+    Bitset bits;
+
+    bits.set();
+
+    ASSERT_EQ(bits.count(), 9u);
+    ASSERT_TRUE(bits.all());
+    ASSERT_TRUE(bits.any());
+    ASSERT_FALSE(bits.none());
+
+    for (Key i(0); i < Key(9); ++i) {
+        ASSERT_TRUE(bits[i]);
+    }
+
+    bits.reset();
+
+    ASSERT_EQ(bits.count(), 0u);
+    ASSERT_FALSE(bits.all());
+    ASSERT_FALSE(bits.any());
+    ASSERT_TRUE(bits.none());
+
+    for (Key i(0); i < Key(9); ++i) {
+        ASSERT_FALSE(bits[i]);
+    }
+}
+
+// Test And operations
+TEST_F(ITypBitsetTest, And) {
+    Bitset bits = {1 << 1 | 1 << 2 | 1 << 7};
+    ExpectBits(bits, {1, 2, 7});
+
+    Bitset bits2 = bits & Bitset{1 << 0 | 1 << 3 | 1 << 7};
+    ExpectBits(bits2, {7});
+    ExpectBits(bits, {1, 2, 7});
+
+    bits &= Bitset{1 << 1 | 1 << 6};
+    ExpectBits(bits, {1});
+}
+
+// Test Or operations
+TEST_F(ITypBitsetTest, Or) {
+    Bitset bits = {1 << 1 | 1 << 2 | 1 << 7};
+    ExpectBits(bits, {1, 2, 7});
+
+    Bitset bits2 = bits | Bitset{1 << 0 | 1 << 3 | 1 << 7};
+    ExpectBits(bits2, {0, 1, 2, 3, 7});
+    ExpectBits(bits, {1, 2, 7});
+
+    bits |= Bitset{1 << 1 | 1 << 6};
+    ExpectBits(bits, {1, 2, 6, 7});
+}
+
+// Test xor operations
+TEST_F(ITypBitsetTest, Xor) {
+    Bitset bits = {1 << 1 | 1 << 2 | 1 << 7};
+    ExpectBits(bits, {1, 2, 7});
+
+    Bitset bits2 = bits ^ Bitset { 1 << 0 | 1 << 3 | 1 << 7 };
+    ExpectBits(bits2, {0, 1, 2, 3});
+    ExpectBits(bits, {1, 2, 7});
+
+    bits ^= Bitset{1 << 1 | 1 << 6};
+    ExpectBits(bits, {2, 6, 7});
+}
+
+// Testing the GetHighestBitIndexPlusOne function
+TEST_F(ITypBitsetTest, GetHighestBitIndexPlusOne) {
+    // <= 32 bit
+    EXPECT_EQ(0u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset(0b00))));
+    EXPECT_EQ(1u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset(0b01))));
+    EXPECT_EQ(2u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset(0b10))));
+    EXPECT_EQ(2u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset(0b11))));
+
+    EXPECT_EQ(3u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset{1 << 2})));
+    EXPECT_EQ(9u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset{1 << 8})));
+    EXPECT_EQ(9u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset{1 << 8 | 1 << 2})));
+
+    // > 32 bit
+    EXPECT_EQ(0u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0b00))));
+    EXPECT_EQ(1u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0b01))));
+    EXPECT_EQ(2u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0b10))));
+    EXPECT_EQ(2u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0b11))));
+
+    EXPECT_EQ(5u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0x10))));
+    EXPECT_EQ(5u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0x1F))));
+    EXPECT_EQ(16u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xF000))));
+    EXPECT_EQ(16u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xFFFF))));
+    EXPECT_EQ(32u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xF0000000))));
+    EXPECT_EQ(32u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xFFFFFFFF))));
+    EXPECT_EQ(36u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xF00000000))));
+    EXPECT_EQ(36u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xFFFFFFFFF))));
+    EXPECT_EQ(40u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xF000000000))));
+    EXPECT_EQ(40u, static_cast<size_t>(GetHighestBitIndexPlusOne(Bitset40(0xFFFFFFFFFF))));
+}
\ No newline at end of file
diff --git a/src/dawn/tests/unittests/ITypSpanTests.cpp b/src/dawn/tests/unittests/ITypSpanTests.cpp
new file mode 100644
index 0000000..e04ba5a
--- /dev/null
+++ b/src/dawn/tests/unittests/ITypSpanTests.cpp
@@ -0,0 +1,81 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_span.h"
+
+#include <array>
+
+class ITypSpanTest : public testing::Test {
+  protected:
+    using Key = TypedInteger<struct KeyT, size_t>;
+    using Val = TypedInteger<struct ValT, uint32_t>;
+    using Span = ityp::span<Key, Val>;
+};
+
+// Test that values can be set at an index and retrieved from the same index.
+TEST_F(ITypSpanTest, Indexing) {
+    std::array<Val, 10> arr;
+    Span span(arr.data(), Key(arr.size()));
+    {
+        span[Key(2)] = Val(5);
+        span[Key(1)] = Val(9);
+        span[Key(9)] = Val(2);
+
+        ASSERT_EQ(span[Key(2)], Val(5));
+        ASSERT_EQ(span[Key(1)], Val(9));
+        ASSERT_EQ(span[Key(9)], Val(2));
+    }
+}
+
+// Test that the span can be is iterated in order with a range-based for loop
+TEST_F(ITypSpanTest, RangeBasedIteration) {
+    std::array<Val, 10> arr;
+    Span span(arr.data(), Key(arr.size()));
+
+    // Assign in a non-const range-based for loop
+    uint32_t i = 0;
+    for (Val& val : span) {
+        val = Val(i);
+    }
+
+    // Check values in a const range-based for loop
+    i = 0;
+    for (Val val : static_cast<const Span&>(span)) {
+        ASSERT_EQ(val, span[Key(i++)]);
+    }
+}
+
+// Test that begin/end/front/back/data return pointers/references to the correct elements.
+TEST_F(ITypSpanTest, BeginEndFrontBackData) {
+    std::array<Val, 10> arr;
+    Span span(arr.data(), Key(arr.size()));
+
+    // non-const versions
+    ASSERT_EQ(span.begin(), &span[Key(0)]);
+    ASSERT_EQ(span.end(), &span[Key(0)] + static_cast<size_t>(span.size()));
+    ASSERT_EQ(&span.front(), &span[Key(0)]);
+    ASSERT_EQ(&span.back(), &span[Key(9)]);
+    ASSERT_EQ(span.data(), &span[Key(0)]);
+
+    // const versions
+    const Span& constSpan = span;
+    ASSERT_EQ(constSpan.begin(), &constSpan[Key(0)]);
+    ASSERT_EQ(constSpan.end(), &constSpan[Key(0)] + static_cast<size_t>(constSpan.size()));
+    ASSERT_EQ(&constSpan.front(), &constSpan[Key(0)]);
+    ASSERT_EQ(&constSpan.back(), &constSpan[Key(9)]);
+    ASSERT_EQ(constSpan.data(), &constSpan[Key(0)]);
+}
diff --git a/src/dawn/tests/unittests/ITypVectorTests.cpp b/src/dawn/tests/unittests/ITypVectorTests.cpp
new file mode 100644
index 0000000..1654a14
--- /dev/null
+++ b/src/dawn/tests/unittests/ITypVectorTests.cpp
@@ -0,0 +1,184 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WvecANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_vector.h"
+
+class ITypVectorTest : public testing::Test {
+  protected:
+    using Key = TypedInteger<struct KeyT, uint32_t>;
+    using Val = TypedInteger<struct ValT, uint32_t>;
+
+    using Vector = ityp::vector<Key, Val>;
+};
+
+// Test creation and initialization of the vector.
+TEST_F(ITypVectorTest, Creation) {
+    // Default constructor initializes to 0
+    {
+        Vector vec;
+        ASSERT_EQ(vec.size(), Key(0));
+    }
+
+    // Size constructor initializes contents to 0
+    {
+        Vector vec(Key(10));
+        ASSERT_EQ(vec.size(), Key(10));
+
+        for (Key i(0); i < Key(10); ++i) {
+            ASSERT_EQ(vec[i], Val(0));
+        }
+    }
+
+    // Size and initial value constructor initializes contents to the inital value
+    {
+        Vector vec(Key(10), Val(7));
+        ASSERT_EQ(vec.size(), Key(10));
+
+        for (Key i(0); i < Key(10); ++i) {
+            ASSERT_EQ(vec[i], Val(7));
+        }
+    }
+
+    // Initializer list constructor
+    {
+        Vector vec = {Val(2), Val(8), Val(1)};
+        ASSERT_EQ(vec.size(), Key(3));
+        ASSERT_EQ(vec[Key(0)], Val(2));
+        ASSERT_EQ(vec[Key(1)], Val(8));
+        ASSERT_EQ(vec[Key(2)], Val(1));
+    }
+}
+
+// Test copy construction/assignment
+TEST_F(ITypVectorTest, CopyConstructAssign) {
+    // Test the copy constructor
+    {
+        Vector rhs = {Val(2), Val(8), Val(1)};
+
+        Vector vec(rhs);
+        ASSERT_EQ(vec.size(), Key(3));
+        ASSERT_EQ(vec[Key(0)], Val(2));
+        ASSERT_EQ(vec[Key(1)], Val(8));
+        ASSERT_EQ(vec[Key(2)], Val(1));
+
+        ASSERT_EQ(rhs.size(), Key(3));
+        ASSERT_EQ(rhs[Key(0)], Val(2));
+        ASSERT_EQ(rhs[Key(1)], Val(8));
+        ASSERT_EQ(rhs[Key(2)], Val(1));
+    }
+
+    // Test the copy assignment
+    {
+        Vector rhs = {Val(2), Val(8), Val(1)};
+
+        Vector vec = rhs;
+        ASSERT_EQ(vec.size(), Key(3));
+        ASSERT_EQ(vec[Key(0)], Val(2));
+        ASSERT_EQ(vec[Key(1)], Val(8));
+        ASSERT_EQ(vec[Key(2)], Val(1));
+
+        ASSERT_EQ(rhs.size(), Key(3));
+        ASSERT_EQ(rhs[Key(0)], Val(2));
+        ASSERT_EQ(rhs[Key(1)], Val(8));
+        ASSERT_EQ(rhs[Key(2)], Val(1));
+    }
+}
+
+// Test move construction/assignment
+TEST_F(ITypVectorTest, MoveConstructAssign) {
+    // Test the move constructor
+    {
+        Vector rhs = {Val(2), Val(8), Val(1)};
+
+        Vector vec(std::move(rhs));
+        ASSERT_EQ(vec.size(), Key(3));
+        ASSERT_EQ(vec[Key(0)], Val(2));
+        ASSERT_EQ(vec[Key(1)], Val(8));
+        ASSERT_EQ(vec[Key(2)], Val(1));
+
+        ASSERT_EQ(rhs.size(), Key(0));
+    }
+
+    // Test the move assignment
+    {
+        Vector rhs = {Val(2), Val(8), Val(1)};
+
+        Vector vec = std::move(rhs);
+        ASSERT_EQ(vec.size(), Key(3));
+        ASSERT_EQ(vec[Key(0)], Val(2));
+        ASSERT_EQ(vec[Key(1)], Val(8));
+        ASSERT_EQ(vec[Key(2)], Val(1));
+
+        ASSERT_EQ(rhs.size(), Key(0));
+    }
+}
+
+// Test that values can be set at an index and retrieved from the same index.
+TEST_F(ITypVectorTest, Indexing) {
+    Vector vec(Key(10));
+    {
+        vec[Key(2)] = Val(5);
+        vec[Key(1)] = Val(9);
+        vec[Key(9)] = Val(2);
+
+        ASSERT_EQ(vec[Key(2)], Val(5));
+        ASSERT_EQ(vec[Key(1)], Val(9));
+        ASSERT_EQ(vec[Key(9)], Val(2));
+    }
+    {
+        vec.at(Key(4)) = Val(5);
+        vec.at(Key(3)) = Val(8);
+        vec.at(Key(1)) = Val(7);
+
+        ASSERT_EQ(vec.at(Key(4)), Val(5));
+        ASSERT_EQ(vec.at(Key(3)), Val(8));
+        ASSERT_EQ(vec.at(Key(1)), Val(7));
+    }
+}
+
+// Test that the vector can be iterated in order with a range-based for loop
+TEST_F(ITypVectorTest, RangeBasedIteration) {
+    Vector vec(Key(10));
+
+    // Assign in a non-const range-based for loop
+    uint32_t i = 0;
+    for (Val& val : vec) {
+        val = Val(i);
+    }
+
+    // Check values in a const range-based for loop
+    i = 0;
+    for (Val val : static_cast<const Vector&>(vec)) {
+        ASSERT_EQ(val, vec[Key(i++)]);
+    }
+}
+
+// Test that begin/end/front/back/data return pointers/references to the correct elements.
+TEST_F(ITypVectorTest, BeginEndFrontBackData) {
+    Vector vec(Key(10));
+
+    // non-const versions
+    ASSERT_EQ(&vec.front(), &vec[Key(0)]);
+    ASSERT_EQ(&vec.back(), &vec[Key(9)]);
+    ASSERT_EQ(vec.data(), &vec[Key(0)]);
+
+    // const versions
+    const Vector& constVec = vec;
+    ASSERT_EQ(&constVec.front(), &constVec[Key(0)]);
+    ASSERT_EQ(&constVec.back(), &constVec[Key(9)]);
+    ASSERT_EQ(constVec.data(), &constVec[Key(0)]);
+}
diff --git a/src/dawn/tests/unittests/LimitsTests.cpp b/src/dawn/tests/unittests/LimitsTests.cpp
new file mode 100644
index 0000000..544c0c5
--- /dev/null
+++ b/src/dawn/tests/unittests/LimitsTests.cpp
@@ -0,0 +1,200 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/Limits.h"
+
+// Test |GetDefaultLimits| returns the default.
+TEST(Limits, GetDefaultLimits) {
+    dawn::native::Limits limits = {};
+    EXPECT_NE(limits.maxBindGroups, 4u);
+
+    dawn::native::GetDefaultLimits(&limits);
+
+    EXPECT_EQ(limits.maxBindGroups, 4u);
+}
+
+// Test |ReifyDefaultLimits| populates the default if
+// values are undefined.
+TEST(Limits, ReifyDefaultLimits_PopulatesDefault) {
+    dawn::native::Limits limits;
+    limits.maxComputeWorkgroupStorageSize = wgpu::kLimitU32Undefined;
+    limits.maxStorageBufferBindingSize = wgpu::kLimitU64Undefined;
+
+    dawn::native::Limits reified = dawn::native::ReifyDefaultLimits(limits);
+    EXPECT_EQ(reified.maxComputeWorkgroupStorageSize, 16352u);
+    EXPECT_EQ(reified.maxStorageBufferBindingSize, 134217728ul);
+}
+
+// Test |ReifyDefaultLimits| clamps to the default if
+// values are worse than the default.
+TEST(Limits, ReifyDefaultLimits_Clamps) {
+    dawn::native::Limits limits;
+    limits.maxStorageBuffersPerShaderStage = 4;
+    limits.minUniformBufferOffsetAlignment = 512;
+
+    dawn::native::Limits reified = dawn::native::ReifyDefaultLimits(limits);
+    EXPECT_EQ(reified.maxStorageBuffersPerShaderStage, 8u);
+    EXPECT_EQ(reified.minUniformBufferOffsetAlignment, 256u);
+}
+
+// Test |ValidateLimits| works to validate limits are not better
+// than supported.
+TEST(Limits, ValidateLimits) {
+    // Start with the default for supported.
+    dawn::native::Limits defaults;
+    dawn::native::GetDefaultLimits(&defaults);
+
+    // Test supported == required is valid.
+    {
+        dawn::native::Limits required = defaults;
+        EXPECT_TRUE(ValidateLimits(defaults, required).IsSuccess());
+    }
+
+    // Test supported == required is valid, when they are not default.
+    {
+        dawn::native::Limits supported = defaults;
+        dawn::native::Limits required = defaults;
+        supported.maxBindGroups += 1;
+        required.maxBindGroups += 1;
+        EXPECT_TRUE(ValidateLimits(supported, required).IsSuccess());
+    }
+
+    // Test that default-initialized (all undefined) is valid.
+    {
+        dawn::native::Limits required = {};
+        EXPECT_TRUE(ValidateLimits(defaults, required).IsSuccess());
+    }
+
+    // Test that better than max is invalid.
+    {
+        dawn::native::Limits required = {};
+        required.maxTextureDimension3D = defaults.maxTextureDimension3D + 1;
+        dawn::native::MaybeError err = ValidateLimits(defaults, required);
+        EXPECT_TRUE(err.IsError());
+        err.AcquireError();
+    }
+
+    // Test that worse than max is valid.
+    {
+        dawn::native::Limits required = {};
+        required.maxComputeWorkgroupSizeX = defaults.maxComputeWorkgroupSizeX - 1;
+        EXPECT_TRUE(ValidateLimits(defaults, required).IsSuccess());
+    }
+
+    // Test that better than min is invalid.
+    {
+        dawn::native::Limits required = {};
+        required.minUniformBufferOffsetAlignment = defaults.minUniformBufferOffsetAlignment / 2;
+        dawn::native::MaybeError err = ValidateLimits(defaults, required);
+        EXPECT_TRUE(err.IsError());
+        err.AcquireError();
+    }
+
+    // Test that worse than min is valid.
+    {
+        dawn::native::Limits required = {};
+        required.minStorageBufferOffsetAlignment = defaults.minStorageBufferOffsetAlignment * 2;
+        EXPECT_TRUE(ValidateLimits(defaults, required).IsSuccess());
+    }
+}
+
+// Test that |ApplyLimitTiers| degrades limits to the next best tier.
+TEST(Limits, ApplyLimitTiers) {
+    auto SetLimitsStorageBufferBindingSizeTier2 = [](dawn::native::Limits* limits) {
+        limits->maxStorageBufferBindingSize = 1073741824;
+    };
+    dawn::native::Limits limitsStorageBufferBindingSizeTier2;
+    dawn::native::GetDefaultLimits(&limitsStorageBufferBindingSizeTier2);
+    SetLimitsStorageBufferBindingSizeTier2(&limitsStorageBufferBindingSizeTier2);
+
+    auto SetLimitsStorageBufferBindingSizeTier3 = [](dawn::native::Limits* limits) {
+        limits->maxStorageBufferBindingSize = 2147483647;
+    };
+    dawn::native::Limits limitsStorageBufferBindingSizeTier3;
+    dawn::native::GetDefaultLimits(&limitsStorageBufferBindingSizeTier3);
+    SetLimitsStorageBufferBindingSizeTier3(&limitsStorageBufferBindingSizeTier3);
+
+    auto SetLimitsComputeWorkgroupStorageSizeTier1 = [](dawn::native::Limits* limits) {
+        limits->maxComputeWorkgroupStorageSize = 16352;
+    };
+    dawn::native::Limits limitsComputeWorkgroupStorageSizeTier1;
+    dawn::native::GetDefaultLimits(&limitsComputeWorkgroupStorageSizeTier1);
+    SetLimitsComputeWorkgroupStorageSizeTier1(&limitsComputeWorkgroupStorageSizeTier1);
+
+    auto SetLimitsComputeWorkgroupStorageSizeTier3 = [](dawn::native::Limits* limits) {
+        limits->maxComputeWorkgroupStorageSize = 65536;
+    };
+    dawn::native::Limits limitsComputeWorkgroupStorageSizeTier3;
+    dawn::native::GetDefaultLimits(&limitsComputeWorkgroupStorageSizeTier3);
+    SetLimitsComputeWorkgroupStorageSizeTier3(&limitsComputeWorkgroupStorageSizeTier3);
+
+    // Test that applying tiers to limits that are exactly
+    // equal to a tier returns the same values.
+    {
+        dawn::native::Limits limits = limitsStorageBufferBindingSizeTier2;
+        EXPECT_EQ(ApplyLimitTiers(limits), limits);
+
+        limits = limitsStorageBufferBindingSizeTier3;
+        EXPECT_EQ(ApplyLimitTiers(limits), limits);
+    }
+
+    // Test all limits slightly worse than tier 3.
+    {
+        dawn::native::Limits limits = limitsStorageBufferBindingSizeTier3;
+        limits.maxStorageBufferBindingSize -= 1;
+        EXPECT_EQ(ApplyLimitTiers(limits), limitsStorageBufferBindingSizeTier2);
+    }
+
+    // Test that limits may match one tier exactly and be degraded in another tier.
+    // Degrading to one tier does not affect the other tier.
+    {
+        dawn::native::Limits limits = limitsComputeWorkgroupStorageSizeTier3;
+        // Set tier 3 and change one limit to be insufficent.
+        SetLimitsStorageBufferBindingSizeTier3(&limits);
+        limits.maxStorageBufferBindingSize -= 1;
+
+        dawn::native::Limits tiered = ApplyLimitTiers(limits);
+
+        // Check that |tiered| has the limits of memorySize tier 2
+        dawn::native::Limits tieredWithMemorySizeTier2 = tiered;
+        SetLimitsStorageBufferBindingSizeTier2(&tieredWithMemorySizeTier2);
+        EXPECT_EQ(tiered, tieredWithMemorySizeTier2);
+
+        // Check that |tiered| has the limits of bindingSpace tier 3
+        dawn::native::Limits tieredWithBindingSpaceTier3 = tiered;
+        SetLimitsComputeWorkgroupStorageSizeTier3(&tieredWithBindingSpaceTier3);
+        EXPECT_EQ(tiered, tieredWithBindingSpaceTier3);
+    }
+
+    // Test that limits may be simultaneously degraded in two tiers independently.
+    {
+        dawn::native::Limits limits;
+        dawn::native::GetDefaultLimits(&limits);
+        SetLimitsComputeWorkgroupStorageSizeTier3(&limits);
+        SetLimitsStorageBufferBindingSizeTier3(&limits);
+        limits.maxComputeWorkgroupStorageSize =
+            limitsComputeWorkgroupStorageSizeTier1.maxComputeWorkgroupStorageSize + 1;
+        limits.maxStorageBufferBindingSize =
+            limitsStorageBufferBindingSizeTier2.maxStorageBufferBindingSize + 1;
+
+        dawn::native::Limits tiered = ApplyLimitTiers(limits);
+
+        dawn::native::Limits expected = tiered;
+        SetLimitsComputeWorkgroupStorageSizeTier1(&expected);
+        SetLimitsStorageBufferBindingSizeTier2(&expected);
+        EXPECT_EQ(tiered, expected);
+    }
+}
diff --git a/src/dawn/tests/unittests/LinkedListTests.cpp b/src/dawn/tests/unittests/LinkedListTests.cpp
new file mode 100644
index 0000000..c4971d8
--- /dev/null
+++ b/src/dawn/tests/unittests/LinkedListTests.cpp
@@ -0,0 +1,435 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a copy of Chromium's /src/base/containers/linked_list_unittest.cc
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/LinkedList.h"
+
+#include <list>
+
+class Node : public LinkNode<Node> {
+  public:
+    explicit Node(int id) : id_(id) {
+    }
+
+    int id() const {
+        return id_;
+    }
+
+    void set_id(int id) {
+        id_ = id;
+    }
+
+  private:
+    int id_;
+};
+
+class MultipleInheritanceNodeBase {
+  public:
+    MultipleInheritanceNodeBase() : field_taking_up_space_(0) {
+    }
+    int field_taking_up_space_;
+};
+
+class MultipleInheritanceNode : public MultipleInheritanceNodeBase,
+                                public LinkNode<MultipleInheritanceNode> {
+  public:
+    MultipleInheritanceNode() = default;
+};
+
+class MovableNode : public LinkNode<MovableNode> {
+  public:
+    explicit MovableNode(int id) : id_(id) {
+    }
+
+    MovableNode(MovableNode&&) = default;
+
+    int id() const {
+        return id_;
+    }
+
+  private:
+    int id_;
+};
+
+// Checks that when iterating |list| (either from head to tail, or from
+// tail to head, as determined by |forward|), we get back |node_ids|,
+// which is an array of size |num_nodes|.
+void ExpectListContentsForDirection(const LinkedList<Node>& list,
+                                    int num_nodes,
+                                    const int* node_ids,
+                                    bool forward) {
+    int i = 0;
+    for (const LinkNode<Node>* node = (forward ? list.head() : list.tail()); node != list.end();
+         node = (forward ? node->next() : node->previous())) {
+        ASSERT_LT(i, num_nodes);
+        int index_of_id = forward ? i : num_nodes - i - 1;
+        EXPECT_EQ(node_ids[index_of_id], node->value()->id());
+        ++i;
+    }
+    EXPECT_EQ(num_nodes, i);
+}
+
+void ExpectListContents(const LinkedList<Node>& list, int num_nodes, const int* node_ids) {
+    {
+        SCOPED_TRACE("Iterating forward (from head to tail)");
+        ExpectListContentsForDirection(list, num_nodes, node_ids, true);
+    }
+    {
+        SCOPED_TRACE("Iterating backward (from tail to head)");
+        ExpectListContentsForDirection(list, num_nodes, node_ids, false);
+    }
+}
+
+TEST(LinkedList, Empty) {
+    LinkedList<Node> list;
+    EXPECT_EQ(list.end(), list.head());
+    EXPECT_EQ(list.end(), list.tail());
+    ExpectListContents(list, 0, nullptr);
+}
+
+TEST(LinkedList, Append) {
+    LinkedList<Node> list;
+    ExpectListContents(list, 0, nullptr);
+
+    Node n1(1);
+    list.Append(&n1);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n1, list.tail());
+    {
+        const int expected[] = {1};
+        ExpectListContents(list, 1, expected);
+    }
+
+    Node n2(2);
+    list.Append(&n2);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n2, list.tail());
+    {
+        const int expected[] = {1, 2};
+        ExpectListContents(list, 2, expected);
+    }
+
+    Node n3(3);
+    list.Append(&n3);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n3, list.tail());
+    {
+        const int expected[] = {1, 2, 3};
+        ExpectListContents(list, 3, expected);
+    }
+}
+
+TEST(LinkedList, RemoveFromList) {
+    LinkedList<Node> list;
+
+    Node n1(1);
+    Node n2(2);
+    Node n3(3);
+    Node n4(4);
+    Node n5(5);
+
+    list.Append(&n1);
+    list.Append(&n2);
+    list.Append(&n3);
+    list.Append(&n4);
+    list.Append(&n5);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n5, list.tail());
+    {
+        const int expected[] = {1, 2, 3, 4, 5};
+        ExpectListContents(list, 5, expected);
+    }
+
+    // Remove from the middle.
+    n3.RemoveFromList();
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n5, list.tail());
+    {
+        const int expected[] = {1, 2, 4, 5};
+        ExpectListContents(list, 4, expected);
+    }
+
+    // Remove from the tail.
+    n5.RemoveFromList();
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n4, list.tail());
+    {
+        const int expected[] = {1, 2, 4};
+        ExpectListContents(list, 3, expected);
+    }
+
+    // Remove from the head.
+    n1.RemoveFromList();
+
+    EXPECT_EQ(&n2, list.head());
+    EXPECT_EQ(&n4, list.tail());
+    {
+        const int expected[] = {2, 4};
+        ExpectListContents(list, 2, expected);
+    }
+
+    // Empty the list.
+    n2.RemoveFromList();
+    n4.RemoveFromList();
+
+    ExpectListContents(list, 0, nullptr);
+    EXPECT_EQ(list.end(), list.head());
+    EXPECT_EQ(list.end(), list.tail());
+
+    // Fill the list once again.
+    list.Append(&n1);
+    list.Append(&n2);
+    list.Append(&n3);
+    list.Append(&n4);
+    list.Append(&n5);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n5, list.tail());
+    {
+        const int expected[] = {1, 2, 3, 4, 5};
+        ExpectListContents(list, 5, expected);
+    }
+}
+
+TEST(LinkedList, InsertBefore) {
+    LinkedList<Node> list;
+
+    Node n1(1);
+    Node n2(2);
+    Node n3(3);
+    Node n4(4);
+
+    list.Append(&n1);
+    list.Append(&n2);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n2, list.tail());
+    {
+        const int expected[] = {1, 2};
+        ExpectListContents(list, 2, expected);
+    }
+
+    n3.InsertBefore(&n2);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n2, list.tail());
+    {
+        const int expected[] = {1, 3, 2};
+        ExpectListContents(list, 3, expected);
+    }
+
+    n4.InsertBefore(&n1);
+
+    EXPECT_EQ(&n4, list.head());
+    EXPECT_EQ(&n2, list.tail());
+    {
+        const int expected[] = {4, 1, 3, 2};
+        ExpectListContents(list, 4, expected);
+    }
+}
+
+TEST(LinkedList, InsertAfter) {
+    LinkedList<Node> list;
+
+    Node n1(1);
+    Node n2(2);
+    Node n3(3);
+    Node n4(4);
+
+    list.Append(&n1);
+    list.Append(&n2);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n2, list.tail());
+    {
+        const int expected[] = {1, 2};
+        ExpectListContents(list, 2, expected);
+    }
+
+    n3.InsertAfter(&n2);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n3, list.tail());
+    {
+        const int expected[] = {1, 2, 3};
+        ExpectListContents(list, 3, expected);
+    }
+
+    n4.InsertAfter(&n1);
+
+    EXPECT_EQ(&n1, list.head());
+    EXPECT_EQ(&n3, list.tail());
+    {
+        const int expected[] = {1, 4, 2, 3};
+        ExpectListContents(list, 4, expected);
+    }
+}
+
+TEST(LinkedList, MultipleInheritanceNode) {
+    MultipleInheritanceNode node;
+    EXPECT_EQ(&node, node.value());
+}
+
+TEST(LinkedList, EmptyListIsEmpty) {
+    LinkedList<Node> list;
+    EXPECT_TRUE(list.empty());
+}
+
+TEST(LinkedList, NonEmptyListIsNotEmpty) {
+    LinkedList<Node> list;
+
+    Node n(1);
+    list.Append(&n);
+
+    EXPECT_FALSE(list.empty());
+}
+
+TEST(LinkedList, EmptiedListIsEmptyAgain) {
+    LinkedList<Node> list;
+
+    Node n(1);
+    list.Append(&n);
+    n.RemoveFromList();
+
+    EXPECT_TRUE(list.empty());
+}
+
+TEST(LinkedList, NodesCanBeReused) {
+    LinkedList<Node> list1;
+    LinkedList<Node> list2;
+
+    Node n(1);
+    list1.Append(&n);
+    n.RemoveFromList();
+    list2.Append(&n);
+
+    EXPECT_EQ(list2.head()->value(), &n);
+}
+
+TEST(LinkedList, RemovedNodeHasNullNextPrevious) {
+    LinkedList<Node> list;
+
+    Node n(1);
+    list.Append(&n);
+    n.RemoveFromList();
+
+    EXPECT_EQ(nullptr, n.next());
+    EXPECT_EQ(nullptr, n.previous());
+}
+
+TEST(LinkedList, NodeMoveConstructor) {
+    LinkedList<MovableNode> list;
+
+    MovableNode n1(1);
+    MovableNode n2(2);
+    MovableNode n3(3);
+
+    list.Append(&n1);
+    list.Append(&n2);
+    list.Append(&n3);
+
+    EXPECT_EQ(&n1, n2.previous());
+    EXPECT_EQ(&n2, n1.next());
+    EXPECT_EQ(&n3, n2.next());
+    EXPECT_EQ(&n2, n3.previous());
+    EXPECT_EQ(2, n2.id());
+
+    MovableNode n2_new(std::move(n2));
+
+    EXPECT_EQ(nullptr, n2.next());
+    EXPECT_EQ(nullptr, n2.previous());
+
+    EXPECT_EQ(&n1, n2_new.previous());
+    EXPECT_EQ(&n2_new, n1.next());
+    EXPECT_EQ(&n3, n2_new.next());
+    EXPECT_EQ(&n2_new, n3.previous());
+    EXPECT_EQ(2, n2_new.id());
+}
+
+TEST(LinkedList, IsInList) {
+    LinkedList<Node> list;
+
+    Node n(1);
+
+    EXPECT_FALSE(n.IsInList());
+    list.Append(&n);
+    EXPECT_TRUE(n.IsInList());
+    EXPECT_TRUE(n.RemoveFromList());
+    EXPECT_FALSE(n.IsInList());
+    EXPECT_FALSE(n.RemoveFromList());
+}
+
+TEST(LinkedList, MoveInto) {
+    LinkedList<Node> l1;
+    LinkedList<Node> l2;
+
+    Node n1(1);
+    Node n2(2);
+    l1.Append(&n1);
+    l2.Append(&n2);
+
+    l2.MoveInto(&l1);
+    const int expected[] = {1, 2};
+    ExpectListContents(l1, 2, expected);
+    EXPECT_TRUE(l2.empty());
+}
+
+TEST(LinkedList, MoveEmptyListInto) {
+    LinkedList<Node> l1;
+    LinkedList<Node> l2;
+
+    Node n1(1);
+    Node n2(2);
+    l1.Append(&n1);
+    l1.Append(&n2);
+
+    l2.MoveInto(&l1);
+    const int expected[] = {1, 2};
+    ExpectListContents(l1, 2, expected);
+    EXPECT_TRUE(l2.empty());
+}
+
+TEST(LinkedList, MoveIntoEmpty) {
+    LinkedList<Node> l1;
+    LinkedList<Node> l2;
+
+    Node n1(1);
+    Node n2(2);
+    l2.Append(&n1);
+    l2.Append(&n2);
+
+    l2.MoveInto(&l1);
+    const int expected[] = {1, 2};
+    ExpectListContents(l1, 2, expected);
+    EXPECT_TRUE(l2.empty());
+}
+
+TEST(LinkedList, RangeBasedModify) {
+    LinkedList<Node> list;
+
+    Node n1(1);
+    Node n2(2);
+    list.Append(&n1);
+    list.Append(&n2);
+
+    for (LinkNode<Node>* node : list) {
+        node->value()->set_id(node->value()->id() + 1);
+    }
+    const int expected[] = {2, 3};
+    ExpectListContents(list, 2, expected);
+}
+
+TEST(LinkedList, RangeBasedEndIsEnd) {
+    LinkedList<Node> list;
+    EXPECT_EQ(list.end(), *end(list));
+}
diff --git a/src/dawn/tests/unittests/MathTests.cpp b/src/dawn/tests/unittests/MathTests.cpp
new file mode 100644
index 0000000..4635b64
--- /dev/null
+++ b/src/dawn/tests/unittests/MathTests.cpp
@@ -0,0 +1,321 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/EnumClassBitmasks.h"
+#include "dawn/common/Math.h"
+
+#include <cmath>
+#include "dawn/webgpu_cpp.h"
+
+namespace wgpu {
+    enum class TestEnum {
+        A = 0x1,
+        B = 0x2,
+        C = 0x4,
+    };
+}  // namespace wgpu
+
+namespace dawn {
+    template <>
+    struct IsDawnBitmask<wgpu::TestEnum> {
+        static constexpr bool enable = true;
+    };
+}  // namespace dawn
+
+// Tests for ScanForward
+TEST(Math, ScanForward) {
+    // Test extrema
+    ASSERT_EQ(ScanForward(1), 0u);
+    ASSERT_EQ(ScanForward(0x80000000), 31u);
+
+    // Test with more than one bit set.
+    ASSERT_EQ(ScanForward(256), 8u);
+    ASSERT_EQ(ScanForward(256 + 32), 5u);
+    ASSERT_EQ(ScanForward(1024 + 256 + 32), 5u);
+}
+
+// Tests for Log2
+TEST(Math, Log2) {
+    // Test extrema
+    ASSERT_EQ(Log2(1u), 0u);
+    ASSERT_EQ(Log2(0xFFFFFFFFu), 31u);
+    ASSERT_EQ(Log2(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)), 63u);
+
+    static_assert(ConstexprLog2(1u) == 0u);
+    static_assert(ConstexprLog2(0xFFFFFFFFu) == 31u);
+    static_assert(ConstexprLog2(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 63u);
+
+    // Test boundary between two logs
+    ASSERT_EQ(Log2(0x80000000u), 31u);
+    ASSERT_EQ(Log2(0x7FFFFFFFu), 30u);
+    ASSERT_EQ(Log2(static_cast<uint64_t>(0x8000000000000000)), 63u);
+    ASSERT_EQ(Log2(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)), 62u);
+
+    static_assert(ConstexprLog2(0x80000000u) == 31u);
+    static_assert(ConstexprLog2(0x7FFFFFFFu) == 30u);
+    static_assert(ConstexprLog2(static_cast<uint64_t>(0x8000000000000000)) == 63u);
+    static_assert(ConstexprLog2(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 62u);
+
+    ASSERT_EQ(Log2(16u), 4u);
+    ASSERT_EQ(Log2(15u), 3u);
+
+    static_assert(ConstexprLog2(16u) == 4u);
+    static_assert(ConstexprLog2(15u) == 3u);
+}
+
+// Tests for Log2Ceil
+TEST(Math, Log2Ceil) {
+    // Test extrema
+    ASSERT_EQ(Log2Ceil(1u), 0u);
+    ASSERT_EQ(Log2Ceil(0xFFFFFFFFu), 32u);
+    ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)), 64u);
+
+    static_assert(ConstexprLog2Ceil(1u) == 0u);
+    static_assert(ConstexprLog2Ceil(0xFFFFFFFFu) == 32u);
+    static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 64u);
+
+    // Test boundary between two logs
+    ASSERT_EQ(Log2Ceil(0x80000001u), 32u);
+    ASSERT_EQ(Log2Ceil(0x80000000u), 31u);
+    ASSERT_EQ(Log2Ceil(0x7FFFFFFFu), 31u);
+    ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0x8000000000000001)), 64u);
+    ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0x8000000000000000)), 63u);
+    ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)), 63u);
+
+    static_assert(ConstexprLog2Ceil(0x80000001u) == 32u);
+    static_assert(ConstexprLog2Ceil(0x80000000u) == 31u);
+    static_assert(ConstexprLog2Ceil(0x7FFFFFFFu) == 31u);
+    static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000001)) == 64u);
+    static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000000)) == 63u);
+    static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 63u);
+
+    ASSERT_EQ(Log2Ceil(17u), 5u);
+    ASSERT_EQ(Log2Ceil(16u), 4u);
+    ASSERT_EQ(Log2Ceil(15u), 4u);
+
+    static_assert(ConstexprLog2Ceil(17u) == 5u);
+    static_assert(ConstexprLog2Ceil(16u) == 4u);
+    static_assert(ConstexprLog2Ceil(15u) == 4u);
+}
+
+// Tests for IsPowerOfTwo
+TEST(Math, IsPowerOfTwo) {
+    ASSERT_TRUE(IsPowerOfTwo(1));
+    ASSERT_TRUE(IsPowerOfTwo(2));
+    ASSERT_FALSE(IsPowerOfTwo(3));
+
+    ASSERT_TRUE(IsPowerOfTwo(0x8000000));
+    ASSERT_FALSE(IsPowerOfTwo(0x8000400));
+}
+
+// Tests for NextPowerOfTwo
+TEST(Math, NextPowerOfTwo) {
+    // Test extrema
+    ASSERT_EQ(NextPowerOfTwo(0), 1ull);
+    ASSERT_EQ(NextPowerOfTwo(0x7FFFFFFFFFFFFFFF), 0x8000000000000000);
+
+    // Test boundary between powers-of-two.
+    ASSERT_EQ(NextPowerOfTwo(31), 32ull);
+    ASSERT_EQ(NextPowerOfTwo(33), 64ull);
+
+    ASSERT_EQ(NextPowerOfTwo(32), 32ull);
+}
+
+// Tests for AlignPtr
+TEST(Math, AlignPtr) {
+    constexpr size_t kTestAlignment = 8;
+
+    char buffer[kTestAlignment * 4];
+
+    for (size_t i = 0; i < 2 * kTestAlignment; ++i) {
+        char* unaligned = &buffer[i];
+        char* aligned = AlignPtr(unaligned, kTestAlignment);
+
+        ASSERT_GE(aligned - unaligned, 0);
+        ASSERT_LT(static_cast<size_t>(aligned - unaligned), kTestAlignment);
+        ASSERT_EQ(reinterpret_cast<uintptr_t>(aligned) & (kTestAlignment - 1), 0u);
+    }
+}
+
+// Tests for Align
+TEST(Math, Align) {
+    // 0 aligns to 0
+    ASSERT_EQ(Align(0u, 4), 0u);
+    ASSERT_EQ(Align(0u, 256), 0u);
+    ASSERT_EQ(Align(0u, 512), 0u);
+
+    // Multiples align to self
+    ASSERT_EQ(Align(8u, 8), 8u);
+    ASSERT_EQ(Align(16u, 8), 16u);
+    ASSERT_EQ(Align(24u, 8), 24u);
+    ASSERT_EQ(Align(256u, 256), 256u);
+    ASSERT_EQ(Align(512u, 256), 512u);
+    ASSERT_EQ(Align(768u, 256), 768u);
+
+    // Alignment with 1 is self
+    for (uint32_t i = 0; i < 128; ++i) {
+        ASSERT_EQ(Align(i, 1), i);
+    }
+
+    // Everything in the range (align, 2*align] aligns to 2*align
+    for (uint32_t i = 1; i <= 64; ++i) {
+        ASSERT_EQ(Align(64 + i, 64), 128u);
+    }
+
+    // Test extrema
+    ASSERT_EQ(Align(static_cast<uint64_t>(0xFFFFFFFF), 4), 0x100000000u);
+    ASSERT_EQ(Align(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF), 1), 0xFFFFFFFFFFFFFFFFull);
+}
+
+// Tests for IsPtrAligned
+TEST(Math, IsPtrAligned) {
+    constexpr size_t kTestAlignment = 8;
+
+    char buffer[kTestAlignment * 4];
+
+    for (size_t i = 0; i < 2 * kTestAlignment; ++i) {
+        char* unaligned = &buffer[i];
+        char* aligned = AlignPtr(unaligned, kTestAlignment);
+
+        ASSERT_EQ(IsPtrAligned(unaligned, kTestAlignment), unaligned == aligned);
+    }
+}
+
+// Tests for IsAligned
+TEST(Math, IsAligned) {
+    // 0 is aligned
+    ASSERT_TRUE(IsAligned(0, 4));
+    ASSERT_TRUE(IsAligned(0, 256));
+    ASSERT_TRUE(IsAligned(0, 512));
+
+    // Multiples are aligned
+    ASSERT_TRUE(IsAligned(8, 8));
+    ASSERT_TRUE(IsAligned(16, 8));
+    ASSERT_TRUE(IsAligned(24, 8));
+    ASSERT_TRUE(IsAligned(256, 256));
+    ASSERT_TRUE(IsAligned(512, 256));
+    ASSERT_TRUE(IsAligned(768, 256));
+
+    // Alignment with 1 is always aligned
+    for (uint32_t i = 0; i < 128; ++i) {
+        ASSERT_TRUE(IsAligned(i, 1));
+    }
+
+    // Everything in the range (align, 2*align) is not aligned
+    for (uint32_t i = 1; i < 64; ++i) {
+        ASSERT_FALSE(IsAligned(64 + i, 64));
+    }
+}
+
+// Tests for float32 to float16 conversion
+TEST(Math, Float32ToFloat16) {
+    ASSERT_EQ(Float32ToFloat16(0.0f), 0x0000);
+    ASSERT_EQ(Float32ToFloat16(-0.0f), 0x8000);
+
+    ASSERT_EQ(Float32ToFloat16(INFINITY), 0x7C00);
+    ASSERT_EQ(Float32ToFloat16(-INFINITY), 0xFC00);
+
+    // Check that NaN is converted to a value in one of the float16 NaN ranges
+    uint16_t nan16 = Float32ToFloat16(NAN);
+    ASSERT_TRUE(nan16 > 0xFC00 || (nan16 < 0x8000 && nan16 > 0x7C00));
+
+    ASSERT_EQ(Float32ToFloat16(1.0f), 0x3C00);
+}
+
+// Tests for IsFloat16NaN
+TEST(Math, IsFloat16NaN) {
+    ASSERT_FALSE(IsFloat16NaN(0u));
+    ASSERT_FALSE(IsFloat16NaN(0u));
+    ASSERT_FALSE(IsFloat16NaN(Float32ToFloat16(1.0f)));
+    ASSERT_FALSE(IsFloat16NaN(Float32ToFloat16(INFINITY)));
+    ASSERT_FALSE(IsFloat16NaN(Float32ToFloat16(-INFINITY)));
+
+    ASSERT_TRUE(IsFloat16NaN(Float32ToFloat16(INFINITY) + 1));
+    ASSERT_TRUE(IsFloat16NaN(Float32ToFloat16(-INFINITY) + 1));
+    ASSERT_TRUE(IsFloat16NaN(0x7FFF));
+    ASSERT_TRUE(IsFloat16NaN(0xFFFF));
+}
+
+// Tests for FloatToUnorm
+TEST(Math, FloatToUnorm) {
+    std::vector<float> kTestFloatValues = {0.0f, 0.4f, 0.5f, 1.0f};
+    std::vector<unsigned char> kExpectedCharValues = {0, 102, 127, 255};
+    std::vector<uint8_t> kExpectedUint8Values = {0, 102, 127, 255};
+    std::vector<uint16_t> kExpectedUint16Values = {0, 26214, 32767, 65535};
+    for (size_t i = 0; i < kTestFloatValues.size(); i++) {
+        ASSERT_EQ(FloatToUnorm<unsigned char>(kTestFloatValues[i]), kExpectedCharValues[i]);
+        ASSERT_EQ(FloatToUnorm<uint8_t>(kTestFloatValues[i]), kExpectedUint8Values[i]);
+        ASSERT_EQ(FloatToUnorm<uint16_t>(kTestFloatValues[i]), kExpectedUint16Values[i]);
+    }
+}
+
+// Tests for SRGBToLinear
+TEST(Math, SRGBToLinear) {
+    ASSERT_EQ(SRGBToLinear(0.0f), 0.0f);
+    ASSERT_EQ(SRGBToLinear(1.0f), 1.0f);
+
+    ASSERT_EQ(SRGBToLinear(-1.0f), 0.0f);
+    ASSERT_EQ(SRGBToLinear(2.0f), 1.0f);
+
+    ASSERT_FLOAT_EQ(SRGBToLinear(0.5f), 0.21404114f);
+}
+
+// Tests for RoundUp
+TEST(Math, RoundUp) {
+    ASSERT_EQ(RoundUp(2, 2), 2u);
+    ASSERT_EQ(RoundUp(2, 4), 4u);
+    ASSERT_EQ(RoundUp(6, 2), 6u);
+    ASSERT_EQ(RoundUp(8, 4), 8u);
+    ASSERT_EQ(RoundUp(12, 6), 12u);
+
+    ASSERT_EQ(RoundUp(3, 3), 3u);
+    ASSERT_EQ(RoundUp(3, 5), 5u);
+    ASSERT_EQ(RoundUp(5, 3), 6u);
+    ASSERT_EQ(RoundUp(9, 5), 10u);
+
+    // Test extrema
+    ASSERT_EQ(RoundUp(0x7FFFFFFFFFFFFFFFull, 0x8000000000000000ull), 0x8000000000000000ull);
+    ASSERT_EQ(RoundUp(1, 1), 1u);
+}
+
+// Tests for IsSubset
+TEST(Math, IsSubset) {
+    // single value is a subset
+    ASSERT_TRUE(IsSubset(0b100, 0b101));
+    ASSERT_FALSE(IsSubset(0b010, 0b101));
+    ASSERT_TRUE(IsSubset(0b001, 0b101));
+
+    // empty set is a subset
+    ASSERT_TRUE(IsSubset(0b000, 0b101));
+
+    // equal-to is a subset
+    ASSERT_TRUE(IsSubset(0b101, 0b101));
+
+    // superset is not a subset
+    ASSERT_FALSE(IsSubset(0b111, 0b101));
+
+    // only empty is a subset of empty
+    ASSERT_FALSE(IsSubset(0b100, 0b000));
+    ASSERT_FALSE(IsSubset(0b010, 0b000));
+    ASSERT_FALSE(IsSubset(0b001, 0b000));
+    ASSERT_TRUE(IsSubset(0b000, 0b000));
+
+    // Test with enums
+    ASSERT_TRUE(IsSubset(wgpu::TestEnum::A, wgpu::TestEnum::A));
+    ASSERT_TRUE(IsSubset(wgpu::TestEnum::A, wgpu::TestEnum::A | wgpu::TestEnum::B));
+    ASSERT_FALSE(IsSubset(wgpu::TestEnum::C, wgpu::TestEnum::A | wgpu::TestEnum::B));
+    ASSERT_FALSE(IsSubset(wgpu::TestEnum::A | wgpu::TestEnum::C, wgpu::TestEnum::A));
+}
diff --git a/src/dawn/tests/unittests/ObjectBaseTests.cpp b/src/dawn/tests/unittests/ObjectBaseTests.cpp
new file mode 100644
index 0000000..51244c7
--- /dev/null
+++ b/src/dawn/tests/unittests/ObjectBaseTests.cpp
@@ -0,0 +1,214 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/webgpu_cpp.h"
+
+class Object : public wgpu::ObjectBase<Object, int*> {
+  public:
+    using ObjectBase::ObjectBase;
+    using ObjectBase::operator=;
+
+    static void WGPUReference(int* handle) {
+        ASSERT_LE(0, *handle);
+        *handle += 1;
+    }
+    static void WGPURelease(int* handle) {
+        ASSERT_LT(0, *handle);
+        *handle -= 1;
+    }
+};
+
+// Test that creating an C++ object from a C object takes a ref.
+// Also test that the C++ object destructor removes a ref.
+TEST(ObjectBase, CTypeConstructor) {
+    int refcount = 1;
+    {
+        Object obj(&refcount);
+        ASSERT_EQ(2, refcount);
+    }
+    ASSERT_EQ(1, refcount);
+}
+
+// Test consuming a C object into a C++ object doesn't take a ref.
+TEST(ObjectBase, AcquireConstruction) {
+    int refcount = 1;
+    {
+        Object object = Object::Acquire(&refcount);
+        ASSERT_EQ(1, refcount);
+    }
+    ASSERT_EQ(0, refcount);
+}
+
+// Test .Get().
+TEST(ObjectBase, Get) {
+    int refcount = 1;
+    {
+        Object obj1(&refcount);
+
+        ASSERT_EQ(2, refcount);
+        ASSERT_EQ(&refcount, obj1.Get());
+    }
+    ASSERT_EQ(1, refcount);
+}
+
+// Test that Release consumes the C++ object into a C object and doesn't release
+TEST(ObjectBase, Release) {
+    int refcount = 1;
+    {
+        Object obj(&refcount);
+        ASSERT_EQ(2, refcount);
+
+        ASSERT_EQ(&refcount, obj.Release());
+        ASSERT_EQ(nullptr, obj.Get());
+        ASSERT_EQ(2, refcount);
+    }
+    ASSERT_EQ(2, refcount);
+}
+
+// Test using C++ objects in conditions
+TEST(ObjectBase, OperatorBool) {
+    int refcount = 1;
+    Object trueObj(&refcount);
+    Object falseObj;
+
+    if (falseObj || !trueObj) {
+        ASSERT_TRUE(false);
+    }
+}
+
+// Test the copy constructor of C++ objects
+TEST(ObjectBase, CopyConstructor) {
+    int refcount = 1;
+
+    Object source(&refcount);
+    Object destination(source);
+
+    ASSERT_EQ(source.Get(), &refcount);
+    ASSERT_EQ(destination.Get(), &refcount);
+    ASSERT_EQ(3, refcount);
+
+    destination = Object();
+    ASSERT_EQ(refcount, 2);
+}
+
+// Test the copy assignment of C++ objects
+TEST(ObjectBase, CopyAssignment) {
+    int refcount = 1;
+    Object source(&refcount);
+
+    Object destination;
+    destination = source;
+
+    ASSERT_EQ(source.Get(), &refcount);
+    ASSERT_EQ(destination.Get(), &refcount);
+    ASSERT_EQ(3, refcount);
+
+    destination = Object();
+    ASSERT_EQ(refcount, 2);
+}
+
+// Test the repeated copy assignment of C++ objects
+TEST(ObjectBase, RepeatedCopyAssignment) {
+    int refcount = 1;
+    Object source(&refcount);
+
+    Object destination;
+    for (int i = 0; i < 10; i++) {
+        destination = source;
+    }
+
+    ASSERT_EQ(source.Get(), &refcount);
+    ASSERT_EQ(destination.Get(), &refcount);
+    ASSERT_EQ(3, refcount);
+
+    destination = Object();
+    ASSERT_EQ(refcount, 2);
+}
+
+// Test the copy assignment of C++ objects onto themselves
+TEST(ObjectBase, CopyAssignmentSelf) {
+    int refcount = 1;
+
+    Object obj(&refcount);
+
+    // Fool the compiler to avoid a -Wself-assign-overload
+    Object* objPtr = &obj;
+    obj = *objPtr;
+
+    ASSERT_EQ(obj.Get(), &refcount);
+    ASSERT_EQ(refcount, 2);
+}
+
+// Test the move constructor of C++ objects
+TEST(ObjectBase, MoveConstructor) {
+    int refcount = 1;
+    Object source(&refcount);
+    Object destination(std::move(source));
+
+    ASSERT_EQ(source.Get(), nullptr);
+    ASSERT_EQ(destination.Get(), &refcount);
+    ASSERT_EQ(2, refcount);
+
+    destination = Object();
+    ASSERT_EQ(refcount, 1);
+}
+
+// Test the move assignment of C++ objects
+TEST(ObjectBase, MoveAssignment) {
+    int refcount = 1;
+    Object source(&refcount);
+
+    Object destination;
+    destination = std::move(source);
+
+    ASSERT_EQ(source.Get(), nullptr);
+    ASSERT_EQ(destination.Get(), &refcount);
+    ASSERT_EQ(2, refcount);
+
+    destination = Object();
+    ASSERT_EQ(refcount, 1);
+}
+
+// Test the move assignment of C++ objects onto themselves
+TEST(ObjectBase, MoveAssignmentSelf) {
+    int refcount = 1;
+
+    Object obj(&refcount);
+
+    // Fool the compiler to avoid a -Wself-move
+    Object* objPtr = &obj;
+    obj = std::move(*objPtr);
+
+    ASSERT_EQ(obj.Get(), &refcount);
+    ASSERT_EQ(refcount, 2);
+}
+
+// Test the constructor using nullptr
+TEST(ObjectBase, NullptrConstructor) {
+    Object obj(nullptr);
+    ASSERT_EQ(obj.Get(), nullptr);
+}
+
+// Test assigning nullptr to the object
+TEST(ObjectBase, AssignNullptr) {
+    int refcount = 1;
+
+    Object obj(&refcount);
+    ASSERT_EQ(refcount, 2);
+
+    obj = nullptr;
+    ASSERT_EQ(refcount, 1);
+}
diff --git a/src/dawn/tests/unittests/PerStageTests.cpp b/src/dawn/tests/unittests/PerStageTests.cpp
new file mode 100644
index 0000000..1ae2e17
--- /dev/null
+++ b/src/dawn/tests/unittests/PerStageTests.cpp
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/PerStage.h"
+
+using namespace dawn::native;
+
+// Tests for StageBit
+TEST(PerStage, StageBit) {
+    ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
+    ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
+    ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
+}
+
+// Basic test for the PerStage container
+TEST(PerStage, PerStage) {
+    PerStage<int> data;
+
+    // Store data using wgpu::ShaderStage
+    data[SingleShaderStage::Vertex] = 42;
+    data[SingleShaderStage::Fragment] = 3;
+    data[SingleShaderStage::Compute] = -1;
+
+    // Load it using wgpu::ShaderStage
+    ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
+    ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
+    ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
+}
+
+// Test IterateStages with kAllStages
+TEST(PerStage, IterateAllStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
+
+    for (auto stage : IterateStages(kAllStages)) {
+        counts[stage]++;
+    }
+
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
+}
+
+// Test IterateStages with one stage
+TEST(PerStage, IterateOneStage) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
+
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
+        counts[stage]++;
+    }
+
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
+
+// Test IterateStages with no stage
+TEST(PerStage, IterateNoStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
+
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
+        counts[stage]++;
+    }
+
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
diff --git a/src/dawn/tests/unittests/PerThreadProcTests.cpp b/src/dawn/tests/unittests/PerThreadProcTests.cpp
new file mode 100644
index 0000000..f059e3e
--- /dev/null
+++ b/src/dawn/tests/unittests/PerThreadProcTests.cpp
@@ -0,0 +1,118 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_thread_dispatch_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/null/DeviceNull.h"
+#include "dawn/webgpu_cpp.h"
+
+#include <gtest/gtest.h>
+#include <atomic>
+#include <thread>
+
+class PerThreadProcTests : public testing::Test {
+  public:
+    PerThreadProcTests()
+        : mNativeInstance(dawn::native::InstanceBase::Create()),
+          mNativeAdapter(mNativeInstance.Get()) {
+    }
+    ~PerThreadProcTests() override = default;
+
+  protected:
+    Ref<dawn::native::InstanceBase> mNativeInstance;
+    dawn::native::null::Adapter mNativeAdapter;
+};
+
+// Test that procs can be set per thread. This test overrides deviceCreateBuffer with a dummy proc
+// for each thread that increments a counter. Because each thread has their own proc and counter,
+// there should be no data races. The per-thread procs also check that the current thread id is
+// exactly equal to the expected thread id.
+TEST_F(PerThreadProcTests, DispatchesPerThread) {
+    dawnProcSetProcs(&dawnThreadDispatchProcTable);
+
+    // Threads will block on this atomic to be sure we set procs on both threads before
+    // either thread calls the procs.
+    std::atomic<bool> ready(false);
+
+    static int threadACounter = 0;
+    static int threadBCounter = 0;
+
+    static std::atomic<std::thread::id> threadIdA;
+    static std::atomic<std::thread::id> threadIdB;
+
+    constexpr int kThreadATargetCount = 28347;
+    constexpr int kThreadBTargetCount = 40420;
+
+    // Note: Acquire doesn't call reference or release.
+    wgpu::Device deviceA =
+        wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
+
+    wgpu::Device deviceB =
+        wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
+
+    std::thread threadA([&]() {
+        DawnProcTable procs = dawn::native::GetProcs();
+        procs.deviceCreateBuffer = [](WGPUDevice device,
+                                      WGPUBufferDescriptor const* descriptor) -> WGPUBuffer {
+            EXPECT_EQ(std::this_thread::get_id(), threadIdA);
+            threadACounter++;
+            return nullptr;
+        };
+        dawnProcSetPerThreadProcs(&procs);
+
+        while (!ready) {
+        }  // Should be fast, so just spin.
+
+        for (int i = 0; i < kThreadATargetCount; ++i) {
+            deviceA.CreateBuffer(nullptr);
+        }
+
+        deviceA = nullptr;
+        dawnProcSetPerThreadProcs(nullptr);
+    });
+
+    std::thread threadB([&]() {
+        DawnProcTable procs = dawn::native::GetProcs();
+        procs.deviceCreateBuffer = [](WGPUDevice device,
+                                      WGPUBufferDescriptor const* bufferDesc) -> WGPUBuffer {
+            EXPECT_EQ(std::this_thread::get_id(), threadIdB);
+            threadBCounter++;
+            return nullptr;
+        };
+        dawnProcSetPerThreadProcs(&procs);
+
+        while (!ready) {
+        }  // Should be fast, so just spin.
+
+        for (int i = 0; i < kThreadBTargetCount; ++i) {
+            deviceB.CreateBuffer(nullptr);
+        }
+
+        deviceB = nullptr;
+        dawnProcSetPerThreadProcs(nullptr);
+    });
+
+    threadIdA = threadA.get_id();
+    threadIdB = threadB.get_id();
+
+    ready = true;
+    threadA.join();
+    threadB.join();
+
+    EXPECT_EQ(threadACounter, kThreadATargetCount);
+    EXPECT_EQ(threadBCounter, kThreadBTargetCount);
+
+    dawnProcSetProcs(nullptr);
+}
diff --git a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
new file mode 100644
index 0000000..8ea1481
--- /dev/null
+++ b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
@@ -0,0 +1,115 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "dawn/common/PlacementAllocated.h"
+
+using namespace testing;
+
+namespace {
+
+    enum class DestructedClass {
+        Foo,
+        Bar,
+    };
+
+    class MockDestructor {
+      public:
+        MOCK_METHOD(void, Call, (void*, DestructedClass));
+    };
+
+    std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
+
+    class PlacementAllocatedTests : public Test {
+        void SetUp() override {
+            mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
+        }
+
+        void TearDown() override {
+            mockDestructor = nullptr;
+        }
+    };
+
+    struct Foo : PlacementAllocated {
+        virtual ~Foo() {
+            mockDestructor->Call(this, DestructedClass::Foo);
+        }
+    };
+
+    struct Bar : Foo {
+        ~Bar() override {
+            mockDestructor->Call(this, DestructedClass::Bar);
+        }
+    };
+}  // namespace
+
+// Test that deletion calls the destructor and does not free memory.
+TEST_F(PlacementAllocatedTests, DeletionDoesNotFreeMemory) {
+    void* ptr = malloc(sizeof(Foo));
+
+    Foo* foo = new (ptr) Foo();
+
+    EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Foo));
+    delete foo;
+
+    // Touch the memory, this shouldn't crash.
+    static_assert(sizeof(Foo) >= sizeof(uint32_t));
+    *reinterpret_cast<uint32_t*>(foo) = 42;
+
+    free(ptr);
+}
+
+// Test that destructing an instance of a derived class calls the derived, then base destructor, and
+// does not free memory.
+TEST_F(PlacementAllocatedTests, DeletingDerivedClassCallsBaseDestructor) {
+    void* ptr = malloc(sizeof(Bar));
+
+    Bar* bar = new (ptr) Bar();
+
+    {
+        InSequence s;
+        EXPECT_CALL(*mockDestructor, Call(bar, DestructedClass::Bar));
+        EXPECT_CALL(*mockDestructor, Call(bar, DestructedClass::Foo));
+        delete bar;
+    }
+
+    // Touch the memory, this shouldn't crash.
+    static_assert(sizeof(Bar) >= sizeof(uint32_t));
+    *reinterpret_cast<uint32_t*>(bar) = 42;
+
+    free(ptr);
+}
+
+// Test that destructing an instance of a base class calls the derived, then base destructor, and
+// does not free memory.
+TEST_F(PlacementAllocatedTests, DeletingBaseClassCallsDerivedDestructor) {
+    void* ptr = malloc(sizeof(Bar));
+
+    Foo* foo = new (ptr) Bar();
+
+    {
+        InSequence s;
+        EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Bar));
+        EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Foo));
+        delete foo;
+    }
+
+    // Touch the memory, this shouldn't crash.
+    static_assert(sizeof(Bar) >= sizeof(uint32_t));
+    *reinterpret_cast<uint32_t*>(foo) = 42;
+
+    free(ptr);
+}
diff --git a/src/dawn/tests/unittests/RefBaseTests.cpp b/src/dawn/tests/unittests/RefBaseTests.cpp
new file mode 100644
index 0000000..a328711
--- /dev/null
+++ b/src/dawn/tests/unittests/RefBaseTests.cpp
@@ -0,0 +1,319 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+
+#include "dawn/common/RefBase.h"
+
+namespace {
+    using Id = uint32_t;
+
+    enum class Action {
+        kReference,
+        kRelease,
+        kAssign,
+        kMarker,
+    };
+
+    struct Event {
+        Action action;
+        Id thisId = 0;
+        Id otherId = 0;
+    };
+
+    std::ostream& operator<<(std::ostream& os, const Event& event) {
+        switch (event.action) {
+            case Action::kReference:
+                os << "Reference " << event.thisId;
+                break;
+            case Action::kRelease:
+                os << "Release " << event.thisId;
+                break;
+            case Action::kAssign:
+                os << "Assign " << event.thisId << " <- " << event.otherId;
+                break;
+            case Action::kMarker:
+                os << "Marker " << event.thisId;
+                break;
+        }
+        return os;
+    }
+
+    bool operator==(const Event& a, const Event& b) {
+        return a.action == b.action && a.thisId == b.thisId && a.otherId == b.otherId;
+    }
+
+    using Events = std::vector<Event>;
+
+    struct RefTracker {
+        explicit constexpr RefTracker(nullptr_t) : mId(0), mEvents(nullptr) {
+        }
+
+        constexpr RefTracker(const RefTracker& other) = default;
+
+        RefTracker(Id id, Events* events) : mId(id), mEvents(events) {
+        }
+
+        void Reference() const {
+            mEvents->emplace_back(Event{Action::kReference, mId});
+        }
+
+        void Release() const {
+            mEvents->emplace_back(Event{Action::kRelease, mId});
+        }
+
+        RefTracker& operator=(const RefTracker& other) {
+            if (mEvents || other.mEvents) {
+                Events* events = mEvents ? mEvents : other.mEvents;
+                events->emplace_back(Event{Action::kAssign, mId, other.mId});
+            }
+            mId = other.mId;
+            mEvents = other.mEvents;
+            return *this;
+        }
+
+        bool operator==(const RefTracker& other) const {
+            return mId == other.mId;
+        }
+
+        bool operator!=(const RefTracker& other) const {
+            return mId != other.mId;
+        }
+
+        Id mId;
+        Events* mEvents;
+    };
+
+    struct RefTrackerTraits {
+        static constexpr RefTracker kNullValue{nullptr};
+
+        static void Reference(const RefTracker& handle) {
+            handle.Reference();
+        }
+
+        static void Release(const RefTracker& handle) {
+            handle.Release();
+        }
+    };
+
+    constexpr RefTracker RefTrackerTraits::kNullValue;
+
+    using Ref = RefBase<RefTracker, RefTrackerTraits>;
+}  // namespace
+
+TEST(RefBase, Acquire) {
+    Events events;
+    RefTracker tracker1(1, &events);
+    RefTracker tracker2(2, &events);
+    Ref ref(tracker1);
+
+    events.clear();
+    { ref.Acquire(tracker2); }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kRelease, 1},   // release ref
+                                             Event{Action::kAssign, 1, 2}  // acquire tracker2
+                                             ));
+}
+
+TEST(RefBase, Detach) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref ref(tracker);
+
+    events.clear();
+    { DAWN_UNUSED(ref.Detach()); }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kAssign, 1, 0}  // nullify ref
+                                             ));
+}
+
+TEST(RefBase, Constructor) {
+    Ref ref;
+    EXPECT_EQ(ref.Get(), RefTrackerTraits::kNullValue);
+}
+
+TEST(RefBase, ConstructDestruct) {
+    Events events;
+    RefTracker tracker(1, &events);
+
+    events.clear();
+    {
+        Ref ref(tracker);
+        events.emplace_back(Event{Action::kMarker, 10});
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kReference, 1},  // reference tracker
+                                             Event{Action::kMarker, 10},    //
+                                             Event{Action::kRelease, 1}     // destruct ref
+                                             ));
+}
+
+TEST(RefBase, CopyConstruct) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref refA(tracker);
+
+    events.clear();
+    {
+        Ref refB(refA);
+        events.emplace_back(Event{Action::kMarker, 10});
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kReference, 1},  // reference tracker
+                                             Event{Action::kMarker, 10},    //
+                                             Event{Action::kRelease, 1}     // destruct ref
+                                             ));
+}
+
+TEST(RefBase, RefCopyAssignment) {
+    Events events;
+    RefTracker tracker1(1, &events);
+    RefTracker tracker2(2, &events);
+    Ref refA(tracker1);
+    Ref refB(tracker2);
+
+    events.clear();
+    {
+        Ref ref;
+        events.emplace_back(Event{Action::kMarker, 10});
+        ref = refA;
+        events.emplace_back(Event{Action::kMarker, 20});
+        ref = refB;
+        events.emplace_back(Event{Action::kMarker, 30});
+        ref = refA;
+        events.emplace_back(Event{Action::kMarker, 40});
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kMarker, 10},    //
+                                             Event{Action::kReference, 1},  // reference tracker1
+                                             Event{Action::kAssign, 0, 1},  // copy tracker1
+                                             Event{Action::kMarker, 20},    //
+                                             Event{Action::kReference, 2},  // reference tracker2
+                                             Event{Action::kRelease, 1},    // release tracker1
+                                             Event{Action::kAssign, 1, 2},  // copy tracker2
+                                             Event{Action::kMarker, 30},    //
+                                             Event{Action::kReference, 1},  // reference tracker1
+                                             Event{Action::kRelease, 2},    // release tracker2
+                                             Event{Action::kAssign, 2, 1},  // copy tracker1
+                                             Event{Action::kMarker, 40},    //
+                                             Event{Action::kRelease, 1}     // destruct ref
+                                             ));
+}
+
+TEST(RefBase, RefMoveAssignment) {
+    Events events;
+    RefTracker tracker1(1, &events);
+    RefTracker tracker2(2, &events);
+    Ref refA(tracker1);
+    Ref refB(tracker2);
+
+    events.clear();
+    {
+        Ref ref;
+        events.emplace_back(Event{Action::kMarker, 10});
+        ref = std::move(refA);
+        events.emplace_back(Event{Action::kMarker, 20});
+        ref = std::move(refB);
+        events.emplace_back(Event{Action::kMarker, 30});
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kMarker, 10},    //
+                                             Event{Action::kAssign, 1, 0},  // nullify refA
+                                             Event{Action::kAssign, 0, 1},  // move into ref
+                                             Event{Action::kMarker, 20},    //
+                                             Event{Action::kRelease, 1},    // release tracker1
+                                             Event{Action::kAssign, 2, 0},  // nullify refB
+                                             Event{Action::kAssign, 1, 2},  // move into ref
+                                             Event{Action::kMarker, 30},    //
+                                             Event{Action::kRelease, 2}     // destruct ref
+                                             ));
+}
+
+TEST(RefBase, RefCopyAssignmentSelf) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref ref(tracker);
+    Ref& self = ref;
+
+    events.clear();
+    {
+        ref = self;
+        ref = self;
+        ref = self;
+    }
+    EXPECT_THAT(events, testing::ElementsAre());
+}
+
+TEST(RefBase, RefMoveAssignmentSelf) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref ref(tracker);
+    Ref& self = ref;
+
+    events.clear();
+    {
+        ref = std::move(self);
+        ref = std::move(self);
+        ref = std::move(self);
+    }
+    EXPECT_THAT(events, testing::ElementsAre());
+}
+
+TEST(RefBase, TCopyAssignment) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref ref;
+
+    events.clear();
+    {
+        ref = tracker;
+        ref = tracker;
+        ref = tracker;
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kReference, 1},  //
+                                             Event{Action::kAssign, 0, 1}));
+}
+
+TEST(RefBase, TMoveAssignment) {
+    Events events;
+    RefTracker tracker(1, &events);
+    Ref ref;
+
+    events.clear();
+    { ref = std::move(tracker); }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kReference, 1},  //
+                                             Event{Action::kAssign, 0, 1}));
+}
+
+TEST(RefBase, TCopyAssignmentAlternate) {
+    Events events;
+    RefTracker tracker1(1, &events);
+    RefTracker tracker2(2, &events);
+    Ref ref;
+
+    events.clear();
+    {
+        ref = tracker1;
+        events.emplace_back(Event{Action::kMarker, 10});
+        ref = tracker2;
+        events.emplace_back(Event{Action::kMarker, 20});
+        ref = tracker1;
+        events.emplace_back(Event{Action::kMarker, 30});
+    }
+    EXPECT_THAT(events, testing::ElementsAre(Event{Action::kReference, 1},  // reference tracker1
+                                             Event{Action::kAssign, 0, 1},  // copy tracker1
+                                             Event{Action::kMarker, 10},    //
+                                             Event{Action::kReference, 2},  // reference tracker2
+                                             Event{Action::kRelease, 1},    // release tracker1
+                                             Event{Action::kAssign, 1, 2},  // copy tracker2
+                                             Event{Action::kMarker, 20},    //
+                                             Event{Action::kReference, 1},  // reference tracker1
+                                             Event{Action::kRelease, 2},    // release tracker2
+                                             Event{Action::kAssign, 2, 1},  // copy tracker1
+                                             Event{Action::kMarker, 30}));
+}
diff --git a/src/dawn/tests/unittests/RefCountedTests.cpp b/src/dawn/tests/unittests/RefCountedTests.cpp
new file mode 100644
index 0000000..5cfa951
--- /dev/null
+++ b/src/dawn/tests/unittests/RefCountedTests.cpp
@@ -0,0 +1,407 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+#include <thread>
+
+#include "dawn/common/RefCounted.h"
+
+class RCTest : public RefCounted {
+  public:
+    RCTest() : RefCounted() {
+    }
+
+    RCTest(uint64_t payload) : RefCounted(payload) {
+    }
+
+    RCTest(bool* deleted) : mDeleted(deleted) {
+    }
+
+    ~RCTest() override {
+        if (mDeleted != nullptr) {
+            *mDeleted = true;
+        }
+    }
+
+    RCTest* GetThis() {
+        return this;
+    }
+
+  private:
+    bool* mDeleted = nullptr;
+};
+
+struct RCTestDerived : public RCTest {
+    using RCTest::RCTest;
+};
+
+// Test that RCs start with one ref, and removing it destroys the object.
+TEST(RefCounted, StartsWithOneRef) {
+    bool deleted = false;
+    auto test = new RCTest(&deleted);
+
+    test->Release();
+    EXPECT_TRUE(deleted);
+}
+
+// Test adding refs keep the RC alive.
+TEST(RefCounted, AddingRefKeepsAlive) {
+    bool deleted = false;
+    auto test = new RCTest(&deleted);
+
+    test->Reference();
+    test->Release();
+    EXPECT_FALSE(deleted);
+
+    test->Release();
+    EXPECT_TRUE(deleted);
+}
+
+// Test that Reference and Release atomically change the refcount.
+TEST(RefCounted, RaceOnReferenceRelease) {
+    bool deleted = false;
+    auto* test = new RCTest(&deleted);
+
+    auto referenceManyTimes = [test]() {
+        for (uint32_t i = 0; i < 100000; ++i) {
+            test->Reference();
+        }
+    };
+    std::thread t1(referenceManyTimes);
+    std::thread t2(referenceManyTimes);
+
+    t1.join();
+    t2.join();
+    EXPECT_EQ(test->GetRefCountForTesting(), 200001u);
+
+    auto releaseManyTimes = [test]() {
+        for (uint32_t i = 0; i < 100000; ++i) {
+            test->Release();
+        }
+    };
+
+    std::thread t3(releaseManyTimes);
+    std::thread t4(releaseManyTimes);
+    t3.join();
+    t4.join();
+    EXPECT_EQ(test->GetRefCountForTesting(), 1u);
+
+    test->Release();
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref remove reference when going out of scope
+TEST(Ref, EndOfScopeRemovesRef) {
+    bool deleted = false;
+    {
+        Ref<RCTest> test(new RCTest(&deleted));
+        test->Release();
+    }
+    EXPECT_TRUE(deleted);
+}
+
+// Test getting pointer out of the Ref
+TEST(Ref, Gets) {
+    RCTest* original = new RCTest;
+    Ref<RCTest> test(original);
+    test->Release();
+
+    EXPECT_EQ(test.Get(), original);
+    EXPECT_EQ(test->GetThis(), original);
+}
+
+// Test Refs default to null
+TEST(Ref, DefaultsToNull) {
+    Ref<RCTest> test;
+
+    EXPECT_EQ(test.Get(), nullptr);
+    // Can't check GetThis() returns nullptr, as it would be undefined behavior.
+}
+
+// Test Ref's copy constructor
+TEST(Ref, CopyConstructor) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    Ref<RCTest> destination(source);
+    EXPECT_EQ(original->GetRefCountForTesting(), 3u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    EXPECT_EQ(source.Get(), original);
+    EXPECT_EQ(destination.Get(), original);
+
+    source = nullptr;
+    EXPECT_FALSE(deleted);
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's copy assignment
+TEST(Ref, CopyAssignment) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> source(original);
+    original->Release();
+
+    Ref<RCTest> destination;
+    destination = source;
+
+    EXPECT_EQ(source.Get(), original);
+    EXPECT_EQ(destination.Get(), original);
+
+    source = nullptr;
+    // This fails when address sanitizer is turned on
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's move constructor
+TEST(Ref, MoveConstructor) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    Ref<RCTest> destination(std::move(source));
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    EXPECT_EQ(source.Get(), nullptr);
+    EXPECT_EQ(destination.Get(), original);
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's move assignment
+TEST(Ref, MoveAssignment) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    Ref<RCTest> destination;
+    destination = std::move(source);
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    EXPECT_EQ(source.Get(), nullptr);
+    EXPECT_EQ(destination.Get(), original);
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test move assigment where the destination and source
+// point to the same underlying object.
+TEST(Ref, MoveAssignmentSameObject) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    Ref<RCTest>& referenceToSource = source;
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    referenceToSource = std::move(source);
+
+    EXPECT_EQ(source.Get(), original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+    EXPECT_FALSE(deleted);
+
+    source = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test the payload initial value is set correctly
+TEST(Ref, InitialPayloadValue) {
+    RCTest* testDefaultConstructor = new RCTest();
+    EXPECT_EQ(testDefaultConstructor->GetRefCountPayload(), 0u);
+    testDefaultConstructor->Release();
+
+    RCTest* testZero = new RCTest(uint64_t(0ull));
+    EXPECT_EQ(testZero->GetRefCountPayload(), 0u);
+    testZero->Release();
+
+    RCTest* testOne = new RCTest(1ull);
+    EXPECT_EQ(testOne->GetRefCountPayload(), 1u);
+    testOne->Release();
+}
+
+// Test that the payload survives ref and release operations
+TEST(Ref, PayloadUnchangedByRefCounting) {
+    RCTest* test = new RCTest(1ull);
+    EXPECT_EQ(test->GetRefCountPayload(), 1u);
+
+    test->Reference();
+    EXPECT_EQ(test->GetRefCountPayload(), 1u);
+    test->Release();
+    EXPECT_EQ(test->GetRefCountPayload(), 1u);
+
+    test->Release();
+}
+
+// Test that Detach pulls out the pointer and stops tracking it.
+TEST(Ref, Detach) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    Ref<RCTest> test(original);
+    original->Release();
+
+    RCTest* detached = test.Detach();
+    EXPECT_EQ(detached, original);
+    EXPECT_EQ(detached->GetRefCountForTesting(), 1u);
+    EXPECT_EQ(test.Get(), nullptr);
+
+    detached->Release();
+    EXPECT_TRUE(deleted);
+}
+
+// Test constructor passed a derived pointer
+TEST(Ref, DerivedPointerConstructor) {
+    bool deleted = false;
+    {
+        Ref<RCTest> test(new RCTestDerived(&deleted));
+        test->Release();
+    }
+    EXPECT_TRUE(deleted);
+}
+
+// Test copy constructor of derived class
+TEST(Ref, DerivedCopyConstructor) {
+    bool deleted = false;
+    Ref<RCTestDerived> testDerived(new RCTestDerived(&deleted));
+    testDerived->Release();
+
+    {
+        Ref<RCTest> testBase(testDerived);
+        EXPECT_EQ(testBase->GetRefCountForTesting(), 2u);
+        EXPECT_EQ(testDerived->GetRefCountForTesting(), 2u);
+    }
+
+    EXPECT_EQ(testDerived->GetRefCountForTesting(), 1u);
+}
+
+// Test Ref constructed with nullptr
+TEST(Ref, ConstructedWithNullptr) {
+    Ref<RCTest> test(nullptr);
+    EXPECT_EQ(test.Get(), nullptr);
+}
+
+// Test Ref's copy assignment with derived class
+TEST(Ref, CopyAssignmentDerived) {
+    bool deleted = false;
+
+    RCTestDerived* original = new RCTestDerived(&deleted);
+    Ref<RCTestDerived> source(original);
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    Ref<RCTest> destination;
+    destination = source;
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    EXPECT_EQ(source.Get(), original);
+    EXPECT_EQ(destination.Get(), original);
+
+    source = nullptr;
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's move constructor with derived class
+TEST(Ref, MoveConstructorDerived) {
+    bool deleted = false;
+    RCTestDerived* original = new RCTestDerived(&deleted);
+
+    Ref<RCTestDerived> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    Ref<RCTest> destination(std::move(source));
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    EXPECT_EQ(source.Get(), nullptr);
+    EXPECT_EQ(destination.Get(), original);
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's move assignment with derived class
+TEST(Ref, MoveAssignmentDerived) {
+    bool deleted = false;
+    RCTestDerived* original = new RCTestDerived(&deleted);
+
+    Ref<RCTestDerived> source(original);
+    EXPECT_EQ(original->GetRefCountForTesting(), 2u);
+
+    original->Release();
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    Ref<RCTest> destination;
+    destination = std::move(source);
+
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    EXPECT_EQ(source.Get(), nullptr);
+    EXPECT_EQ(destination.Get(), original);
+    EXPECT_FALSE(deleted);
+
+    destination = nullptr;
+    EXPECT_TRUE(deleted);
+}
+
+// Test Ref's InitializeInto.
+TEST(Ref, InitializeInto) {
+    bool deleted = false;
+    RCTest* original = new RCTest(&deleted);
+
+    // InitializeInto acquires the ref.
+    Ref<RCTest> ref;
+    *ref.InitializeInto() = original;
+    EXPECT_EQ(original->GetRefCountForTesting(), 1u);
+
+    ref = nullptr;
+    EXPECT_TRUE(deleted);
+}
diff --git a/src/dawn/tests/unittests/ResultTests.cpp b/src/dawn/tests/unittests/ResultTests.cpp
new file mode 100644
index 0000000..1775c6d
--- /dev/null
+++ b/src/dawn/tests/unittests/ResultTests.cpp
@@ -0,0 +1,385 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/Result.h"
+
+namespace {
+
+    template <typename T, typename E>
+    void TestError(Result<T, E>* result, E expectedError) {
+        EXPECT_TRUE(result->IsError());
+        EXPECT_FALSE(result->IsSuccess());
+
+        std::unique_ptr<E> storedError = result->AcquireError();
+        EXPECT_EQ(*storedError, expectedError);
+    }
+
+    template <typename T, typename E>
+    void TestSuccess(Result<T, E>* result, T expectedSuccess) {
+        EXPECT_FALSE(result->IsError());
+        EXPECT_TRUE(result->IsSuccess());
+
+        const T storedSuccess = result->AcquireSuccess();
+        EXPECT_EQ(storedSuccess, expectedSuccess);
+
+        // Once the success is acquired, result has an empty
+        // payload and is neither in the success nor error state.
+        EXPECT_FALSE(result->IsError());
+        EXPECT_FALSE(result->IsSuccess());
+    }
+
+    static int dummyError = 0xbeef;
+    static float dummySuccess = 42.0f;
+    static const float dummyConstSuccess = 42.0f;
+
+    class AClass : public RefCounted {
+      public:
+        int a = 0;
+    };
+
+    // Tests using the following overload of TestSuccess make
+    // local Ref instances to dummySuccessObj. Tests should
+    // ensure any local Ref objects made along the way continue
+    // to point to dummySuccessObj.
+    template <typename T, typename E>
+    void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
+        EXPECT_FALSE(result->IsError());
+        EXPECT_TRUE(result->IsSuccess());
+
+        // AClass starts with a reference count of 1 and stored
+        // on the stack in the caller. The result parameter should
+        // hold the only other reference to the object.
+        EXPECT_EQ(expectedSuccess->GetRefCountForTesting(), 2u);
+
+        const Ref<T> storedSuccess = result->AcquireSuccess();
+        EXPECT_EQ(storedSuccess.Get(), expectedSuccess);
+
+        // Once the success is acquired, result has an empty
+        // payload and is neither in the success nor error state.
+        EXPECT_FALSE(result->IsError());
+        EXPECT_FALSE(result->IsSuccess());
+
+        // Once we call AcquireSuccess, result no longer stores
+        // the object. storedSuccess should contain the only other
+        // reference to the object.
+        EXPECT_EQ(storedSuccess->GetRefCountForTesting(), 2u);
+    }
+
+    // Result<void, E*>
+
+    // Test constructing an error Result<void, E>
+    TEST(ResultOnlyPointerError, ConstructingError) {
+        Result<void, int> result(std::make_unique<int>(dummyError));
+        TestError(&result, dummyError);
+    }
+
+    // Test moving an error Result<void, E>
+    TEST(ResultOnlyPointerError, MovingError) {
+        Result<void, int> result(std::make_unique<int>(dummyError));
+        Result<void, int> movedResult(std::move(result));
+        TestError(&movedResult, dummyError);
+    }
+
+    // Test returning an error Result<void, E>
+    TEST(ResultOnlyPointerError, ReturningError) {
+        auto CreateError = []() -> Result<void, int> {
+            return {std::make_unique<int>(dummyError)};
+        };
+
+        Result<void, int> result = CreateError();
+        TestError(&result, dummyError);
+    }
+
+    // Test constructing a success Result<void, E>
+    TEST(ResultOnlyPointerError, ConstructingSuccess) {
+        Result<void, int> result;
+        EXPECT_TRUE(result.IsSuccess());
+        EXPECT_FALSE(result.IsError());
+    }
+
+    // Test moving a success Result<void, E>
+    TEST(ResultOnlyPointerError, MovingSuccess) {
+        Result<void, int> result;
+        Result<void, int> movedResult(std::move(result));
+        EXPECT_TRUE(movedResult.IsSuccess());
+        EXPECT_FALSE(movedResult.IsError());
+    }
+
+    // Test returning a success Result<void, E>
+    TEST(ResultOnlyPointerError, ReturningSuccess) {
+        auto CreateError = []() -> Result<void, int> { return {}; };
+
+        Result<void, int> result = CreateError();
+        EXPECT_TRUE(result.IsSuccess());
+        EXPECT_FALSE(result.IsError());
+    }
+
+    // Result<T*, E*>
+
+    // Test constructing an error Result<T*, E>
+    TEST(ResultBothPointer, ConstructingError) {
+        Result<float*, int> result(std::make_unique<int>(dummyError));
+        TestError(&result, dummyError);
+    }
+
+    // Test moving an error Result<T*, E>
+    TEST(ResultBothPointer, MovingError) {
+        Result<float*, int> result(std::make_unique<int>(dummyError));
+        Result<float*, int> movedResult(std::move(result));
+        TestError(&movedResult, dummyError);
+    }
+
+    // Test returning an error Result<T*, E>
+    TEST(ResultBothPointer, ReturningError) {
+        auto CreateError = []() -> Result<float*, int> {
+            return {std::make_unique<int>(dummyError)};
+        };
+
+        Result<float*, int> result = CreateError();
+        TestError(&result, dummyError);
+    }
+
+    // Test constructing a success Result<T*, E>
+    TEST(ResultBothPointer, ConstructingSuccess) {
+        Result<float*, int> result(&dummySuccess);
+        TestSuccess(&result, &dummySuccess);
+    }
+
+    // Test moving a success Result<T*, E>
+    TEST(ResultBothPointer, MovingSuccess) {
+        Result<float*, int> result(&dummySuccess);
+        Result<float*, int> movedResult(std::move(result));
+        TestSuccess(&movedResult, &dummySuccess);
+    }
+
+    // Test returning a success Result<T*, E>
+    TEST(ResultBothPointer, ReturningSuccess) {
+        auto CreateSuccess = []() -> Result<float*, int*> { return {&dummySuccess}; };
+
+        Result<float*, int*> result = CreateSuccess();
+        TestSuccess(&result, &dummySuccess);
+    }
+
+    // Tests converting from a Result<TChild*, E>
+    TEST(ResultBothPointer, ConversionFromChildClass) {
+        struct T {
+            int a;
+        };
+        struct TChild : T {};
+
+        TChild child;
+        T* childAsT = &child;
+        {
+            Result<T*, int> result(&child);
+            TestSuccess(&result, childAsT);
+        }
+        {
+            Result<TChild*, int> resultChild(&child);
+            Result<T*, int> result(std::move(resultChild));
+            TestSuccess(&result, childAsT);
+        }
+        {
+            Result<TChild*, int> resultChild(&child);
+            Result<T*, int> result = std::move(resultChild);
+            TestSuccess(&result, childAsT);
+        }
+    }
+
+    // Result<const T*, E>
+
+    // Test constructing an error Result<const T*, E>
+    TEST(ResultBothPointerWithConstResult, ConstructingError) {
+        Result<const float*, int> result(std::make_unique<int>(dummyError));
+        TestError(&result, dummyError);
+    }
+
+    // Test moving an error Result<const T*, E>
+    TEST(ResultBothPointerWithConstResult, MovingError) {
+        Result<const float*, int> result(std::make_unique<int>(dummyError));
+        Result<const float*, int> movedResult(std::move(result));
+        TestError(&movedResult, dummyError);
+    }
+
+    // Test returning an error Result<const T*, E*>
+    TEST(ResultBothPointerWithConstResult, ReturningError) {
+        auto CreateError = []() -> Result<const float*, int> {
+            return {std::make_unique<int>(dummyError)};
+        };
+
+        Result<const float*, int> result = CreateError();
+        TestError(&result, dummyError);
+    }
+
+    // Test constructing a success Result<const T*, E*>
+    TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
+        Result<const float*, int> result(&dummyConstSuccess);
+        TestSuccess(&result, &dummyConstSuccess);
+    }
+
+    // Test moving a success Result<const T*, E*>
+    TEST(ResultBothPointerWithConstResult, MovingSuccess) {
+        Result<const float*, int> result(&dummyConstSuccess);
+        Result<const float*, int> movedResult(std::move(result));
+        TestSuccess(&movedResult, &dummyConstSuccess);
+    }
+
+    // Test returning a success Result<const T*, E*>
+    TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
+        auto CreateSuccess = []() -> Result<const float*, int> { return {&dummyConstSuccess}; };
+
+        Result<const float*, int> result = CreateSuccess();
+        TestSuccess(&result, &dummyConstSuccess);
+    }
+
+    // Result<Ref<T>, E>
+
+    // Test constructing an error Result<Ref<T>, E>
+    TEST(ResultRefT, ConstructingError) {
+        Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError));
+        TestError(&result, dummyError);
+    }
+
+    // Test moving an error Result<Ref<T>, E>
+    TEST(ResultRefT, MovingError) {
+        Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError));
+        Result<Ref<AClass>, int> movedResult(std::move(result));
+        TestError(&movedResult, dummyError);
+    }
+
+    // Test returning an error Result<Ref<T>, E>
+    TEST(ResultRefT, ReturningError) {
+        auto CreateError = []() -> Result<Ref<AClass>, int> {
+            return {std::make_unique<int>(dummyError)};
+        };
+
+        Result<Ref<AClass>, int> result = CreateError();
+        TestError(&result, dummyError);
+    }
+
+    // Test constructing a success Result<Ref<T>, E>
+    TEST(ResultRefT, ConstructingSuccess) {
+        AClass success;
+
+        Ref<AClass> refObj(&success);
+        Result<Ref<AClass>, int> result(std::move(refObj));
+        TestSuccess(&result, &success);
+    }
+
+    // Test moving a success Result<Ref<T>, E>
+    TEST(ResultRefT, MovingSuccess) {
+        AClass success;
+
+        Ref<AClass> refObj(&success);
+        Result<Ref<AClass>, int> result(std::move(refObj));
+        Result<Ref<AClass>, int> movedResult(std::move(result));
+        TestSuccess(&movedResult, &success);
+    }
+
+    // Test returning a success Result<Ref<T>, E>
+    TEST(ResultRefT, ReturningSuccess) {
+        AClass success;
+        auto CreateSuccess = [&success]() -> Result<Ref<AClass>, int> {
+            return Ref<AClass>(&success);
+        };
+
+        Result<Ref<AClass>, int> result = CreateSuccess();
+        TestSuccess(&result, &success);
+    }
+
+    class OtherClass {
+      public:
+        int a = 0;
+    };
+    class Base : public RefCounted {};
+    class Child : public OtherClass, public Base {};
+
+    // Test constructing a Result<Ref<TChild>, E>
+    TEST(ResultRefT, ConversionFromChildConstructor) {
+        Child child;
+        Ref<Child> refChild(&child);
+
+        Result<Ref<Base>, int> result(std::move(refChild));
+        TestSuccess<Base>(&result, &child);
+    }
+
+    // Test copy constructing Result<Ref<TChild>, E>
+    TEST(ResultRefT, ConversionFromChildCopyConstructor) {
+        Child child;
+        Ref<Child> refChild(&child);
+
+        Result<Ref<Child>, int> resultChild(std::move(refChild));
+        Result<Ref<Base>, int> result(std::move(resultChild));
+        TestSuccess<Base>(&result, &child);
+    }
+
+    // Test assignment operator for Result<Ref<TChild>, E>
+    TEST(ResultRefT, ConversionFromChildAssignmentOperator) {
+        Child child;
+        Ref<Child> refChild(&child);
+
+        Result<Ref<Child>, int> resultChild(std::move(refChild));
+        Result<Ref<Base>, int> result = std::move(resultChild);
+        TestSuccess<Base>(&result, &child);
+    }
+
+    // Result<T, E>
+
+    // Test constructing an error Result<T, E>
+    TEST(ResultGeneric, ConstructingError) {
+        Result<std::vector<float>, int> result(std::make_unique<int>(dummyError));
+        TestError(&result, dummyError);
+    }
+
+    // Test moving an error Result<T, E>
+    TEST(ResultGeneric, MovingError) {
+        Result<std::vector<float>, int> result(std::make_unique<int>(dummyError));
+        Result<std::vector<float>, int> movedResult(std::move(result));
+        TestError(&movedResult, dummyError);
+    }
+
+    // Test returning an error Result<T, E>
+    TEST(ResultGeneric, ReturningError) {
+        auto CreateError = []() -> Result<std::vector<float>, int> {
+            return {std::make_unique<int>(dummyError)};
+        };
+
+        Result<std::vector<float>, int> result = CreateError();
+        TestError(&result, dummyError);
+    }
+
+    // Test constructing a success Result<T, E>
+    TEST(ResultGeneric, ConstructingSuccess) {
+        Result<std::vector<float>, int> result({1.0f});
+        TestSuccess(&result, {1.0f});
+    }
+
+    // Test moving a success Result<T, E>
+    TEST(ResultGeneric, MovingSuccess) {
+        Result<std::vector<float>, int> result({1.0f});
+        Result<std::vector<float>, int> movedResult(std::move(result));
+        TestSuccess(&movedResult, {1.0f});
+    }
+
+    // Test returning a success Result<T, E>
+    TEST(ResultGeneric, ReturningSuccess) {
+        auto CreateSuccess = []() -> Result<std::vector<float>, int> { return {{1.0f}}; };
+
+        Result<std::vector<float>, int> result = CreateSuccess();
+        TestSuccess(&result, {1.0f});
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
new file mode 100644
index 0000000..0a10375
--- /dev/null
+++ b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
@@ -0,0 +1,174 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/RingBufferAllocator.h"
+
+using namespace dawn::native;
+
+constexpr uint64_t RingBufferAllocator::kInvalidOffset;
+
+// Number of basic tests for Ringbuffer
+TEST(RingBufferAllocatorTests, BasicTest) {
+    constexpr uint64_t sizeInBytes = 64000;
+    RingBufferAllocator allocator(sizeInBytes);
+
+    // Ensure no requests exist on empty buffer.
+    EXPECT_TRUE(allocator.Empty());
+
+    ASSERT_EQ(allocator.GetSize(), sizeInBytes);
+
+    // Ensure failure upon sub-allocating an oversized request.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
+              RingBufferAllocator::kInvalidOffset);
+
+    // Fill the entire buffer with two requests of equal size.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
+
+    // Ensure the buffer is full.
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
+}
+
+// Tests that several ringbuffer allocations do not fail.
+TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
+    constexpr uint64_t maxNumOfFrames = 64000;
+    constexpr uint64_t frameSizeInBytes = 4;
+
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+
+    size_t offset = 0;
+    for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
+        offset = allocator.Allocate(frameSizeInBytes, i);
+        ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
+    }
+}
+
+// Tests ringbuffer sub-allocations of the same serial are correctly tracked.
+TEST(RingBufferAllocatorTests, AllocInSameFrame) {
+    constexpr uint64_t maxNumOfFrames = 3;
+    constexpr uint64_t frameSizeInBytes = 4;
+
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+
+    //    F1
+    //  [xxxx|--------]
+    size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
+
+    //    F1   F2
+    //  [xxxx|xxxx|----]
+
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+
+    //    F1     F2
+    //  [xxxx|xxxxxxxx]
+
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+
+    ASSERT_EQ(offset, 8u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
+
+    allocator.Deallocate(ExecutionSerial(2));
+
+    ASSERT_EQ(allocator.GetUsedSize(), 0u);
+    EXPECT_TRUE(allocator.Empty());
+}
+
+// Tests ringbuffer sub-allocation at various offsets.
+TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
+    constexpr uint64_t maxNumOfFrames = 10;
+    constexpr uint64_t frameSizeInBytes = 4;
+
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+
+    // Sub-alloc the first eight frames.
+    ExecutionSerial serial(0);
+    while (serial < ExecutionSerial(8)) {
+        allocator.Allocate(frameSizeInBytes, serial);
+        serial++;
+    }
+
+    // Each frame corrresponds to the serial number (for simplicity).
+    //
+    //    F1   F2   F3   F4   F5   F6   F7   F8
+    //  [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
+
+    // Ensure an oversized allocation fails (only 8 bytes left)
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
+              RingBufferAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Reclaim the first 3 frames.
+    allocator.Deallocate(ExecutionSerial(2));
+
+    //                 F4   F5   F6   F7   F8
+    //  [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
+
+    // Re-try the over-sized allocation.
+    size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //                                         ^^^^^^^^ wasted
+
+    // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
+    // were added to F9's sub-allocation.
+    // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
+
+    ASSERT_EQ(offset, 0u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim the next two frames.
+    allocator.Deallocate(ExecutionSerial(4));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Sub-alloc the chunk in the middle.
+    serial++;
+    offset = allocator.Allocate(frameSizeInBytes * 2, serial);
+
+    ASSERT_EQ(offset, frameSizeInBytes * 3);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    //        F9         F10      F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim all.
+    allocator.Deallocate(kMaxExecutionSerial);
+
+    EXPECT_TRUE(allocator.Empty());
+}
+
+// Checks if ringbuffer sub-allocation does not overflow.
+TEST(RingBufferAllocatorTests, RingBufferOverflow) {
+    RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
+
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
+              RingBufferAllocator::kInvalidOffset);
+}
diff --git a/src/dawn/tests/unittests/SerialMapTests.cpp b/src/dawn/tests/unittests/SerialMapTests.cpp
new file mode 100644
index 0000000..a4ac592
--- /dev/null
+++ b/src/dawn/tests/unittests/SerialMapTests.cpp
@@ -0,0 +1,183 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/SerialMap.h"
+#include "dawn/common/TypedInteger.h"
+
+using TestSerialMap = SerialMap<uint64_t, int>;
+
+// A number of basic tests for SerialMap that are difficult to split from one another
+TEST(SerialMap, BasicTest) {
+    TestSerialMap map;
+
+    // Map starts empty
+    ASSERT_TRUE(map.Empty());
+
+    // Iterating on empty map 1) works 2) doesn't produce any values
+    for (int value : map.IterateAll()) {
+        DAWN_UNUSED(value);
+        ASSERT_TRUE(false);
+    }
+
+    // Enqueuing values as const ref or rvalue ref
+    map.Enqueue(1, 0);
+    map.Enqueue(2, 0);
+    map.Enqueue(std::move(3), 1);
+
+    // Iterating over a non-empty map produces the expected result
+    std::vector<int> expectedValues = {1, 2, 3};
+    for (int value : map.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+
+    // Clear works and makes the map empty and iteration does nothing.
+    map.Clear();
+    ASSERT_TRUE(map.Empty());
+
+    for (int value : map.IterateAll()) {
+        DAWN_UNUSED(value);
+        ASSERT_TRUE(false);
+    }
+}
+
+// Test that items can be enqueued in an arbitrary order
+TEST(SerialMap, EnqueueOrder) {
+    TestSerialMap map;
+
+    // Enqueue values in an arbitrary order
+    map.Enqueue(3, 1);
+    map.Enqueue(1, 0);
+    map.Enqueue(4, 2);
+    map.Enqueue(5, 2);
+    map.Enqueue(2, 0);
+
+    // Iterating over a non-empty map produces the expected result
+    std::vector<int> expectedValues = {1, 2, 3, 4, 5};
+    for (int value : map.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test enqueuing vectors works
+TEST(SerialMap, EnqueueVectors) {
+    TestSerialMap map;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    map.Enqueue(vector1, 0);
+    map.Enqueue(std::move(vector2), 0);
+    map.Enqueue(vector3, 1);
+
+    std::vector<int> expectedValues = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
+    for (int value : map.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test IterateUpTo
+TEST(SerialMap, IterateUpTo) {
+    TestSerialMap map;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    map.Enqueue(vector1, 0);
+    map.Enqueue(std::move(vector2), 1);
+    map.Enqueue(vector3, 2);
+
+    std::vector<int> expectedValues = {1, 2, 3, 4, 5, 6, 7, 8};
+    for (int value : map.IterateUpTo(1)) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test ClearUpTo
+TEST(SerialMap, ClearUpTo) {
+    TestSerialMap map;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    map.Enqueue(vector1, 0);
+    map.Enqueue(std::move(vector2), 0);
+    map.Enqueue(vector3, 1);
+
+    map.ClearUpTo(0);
+
+    std::vector<int> expectedValues = {9, 0};
+    for (int value : map.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test FirstSerial
+TEST(SerialMap, FirstSerial) {
+    TestSerialMap map;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    map.Enqueue(vector1, 0);
+    map.Enqueue(std::move(vector2), 1);
+    map.Enqueue(vector3, 2);
+
+    EXPECT_EQ(map.FirstSerial(), 0u);
+
+    map.ClearUpTo(1);
+    EXPECT_EQ(map.FirstSerial(), 2u);
+
+    map.Clear();
+    map.Enqueue(vector1, 6);
+    EXPECT_EQ(map.FirstSerial(), 6u);
+}
+
+// Test basic functionality with type integers
+TEST(SerialMap, TypedInteger) {
+    using MySerial = TypedInteger<struct MySerialT, uint64_t>;
+    using MySerialMap = SerialMap<MySerial, int>;
+
+    MySerialMap map;
+    map.Enqueue(1, MySerial(0));
+    map.Enqueue(2, MySerial(0));
+
+    std::vector<int> expectedValues = {1, 2};
+    for (int value : map.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
diff --git a/src/dawn/tests/unittests/SerialQueueTests.cpp b/src/dawn/tests/unittests/SerialQueueTests.cpp
new file mode 100644
index 0000000..56ce97e
--- /dev/null
+++ b/src/dawn/tests/unittests/SerialQueueTests.cpp
@@ -0,0 +1,175 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/TypedInteger.h"
+
+using TestSerialQueue = SerialQueue<uint64_t, int>;
+
+// A number of basic tests for SerialQueue that are difficult to split from one another
+TEST(SerialQueue, BasicTest) {
+    TestSerialQueue queue;
+
+    // Queue starts empty
+    ASSERT_TRUE(queue.Empty());
+
+    // Iterating on empty queue 1) works 2) doesn't produce any values
+    for (int value : queue.IterateAll()) {
+        DAWN_UNUSED(value);
+        ASSERT_TRUE(false);
+    }
+
+    // Enqueuing values as const ref or rvalue ref
+    queue.Enqueue(1, 0);
+    queue.Enqueue(2, 0);
+    queue.Enqueue(std::move(3), 1);
+
+    // Iterating over a non-empty queue produces the expected result
+    std::vector<int> expectedValues = {1, 2, 3};
+    for (int value : queue.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+
+    // Clear works and makes the queue empty and iteration does nothing.
+    queue.Clear();
+    ASSERT_TRUE(queue.Empty());
+
+    for (int value : queue.IterateAll()) {
+        DAWN_UNUSED(value);
+        ASSERT_TRUE(false);
+    }
+}
+
+// Test enqueuing vectors works
+TEST(SerialQueue, EnqueueVectors) {
+    TestSerialQueue queue;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    queue.Enqueue(vector1, 0);
+    queue.Enqueue(std::move(vector2), 0);
+    queue.Enqueue(vector3, 1);
+
+    std::vector<int> expectedValues = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
+    for (int value : queue.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test IterateUpTo
+TEST(SerialQueue, IterateUpTo) {
+    TestSerialQueue queue;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    queue.Enqueue(vector1, 0);
+    queue.Enqueue(std::move(vector2), 1);
+    queue.Enqueue(vector3, 2);
+
+    std::vector<int> expectedValues = {1, 2, 3, 4, 5, 6, 7, 8};
+    for (int value : queue.IterateUpTo(1)) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+    EXPECT_EQ(queue.LastSerial(), 2u);
+}
+
+// Test ClearUpTo
+TEST(SerialQueue, ClearUpTo) {
+    TestSerialQueue queue;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    queue.Enqueue(vector1, 0);
+    queue.Enqueue(std::move(vector2), 0);
+    queue.Enqueue(vector3, 1);
+
+    queue.ClearUpTo(0);
+    EXPECT_EQ(queue.LastSerial(), 1u);
+
+    std::vector<int> expectedValues = {9, 0};
+    for (int value : queue.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
+
+// Test FirstSerial
+TEST(SerialQueue, FirstSerial) {
+    TestSerialQueue queue;
+
+    std::vector<int> vector1 = {1, 2, 3, 4};
+    std::vector<int> vector2 = {5, 6, 7, 8};
+    std::vector<int> vector3 = {9, 0};
+
+    queue.Enqueue(vector1, 0);
+    queue.Enqueue(std::move(vector2), 1);
+    queue.Enqueue(vector3, 2);
+
+    EXPECT_EQ(queue.FirstSerial(), 0u);
+
+    queue.ClearUpTo(1);
+    EXPECT_EQ(queue.FirstSerial(), 2u);
+
+    queue.Clear();
+    queue.Enqueue(vector1, 6);
+    EXPECT_EQ(queue.FirstSerial(), 6u);
+}
+
+// Test LastSerial
+TEST(SerialQueue, LastSerial) {
+    TestSerialQueue queue;
+
+    queue.Enqueue({1}, 0);
+    EXPECT_EQ(queue.LastSerial(), 0u);
+
+    queue.Enqueue({2}, 1);
+    EXPECT_EQ(queue.LastSerial(), 1u);
+}
+
+// Test basic functionality with type integers
+TEST(SerialQueue, TypedInteger) {
+    using MySerial = TypedInteger<struct MySerialT, uint64_t>;
+    using MySerialQueue = SerialQueue<MySerial, int>;
+
+    MySerialQueue queue;
+    queue.Enqueue(1, MySerial(0));
+    queue.Enqueue(2, MySerial(0));
+
+    std::vector<int> expectedValues = {1, 2};
+    for (int value : queue.IterateAll()) {
+        EXPECT_EQ(expectedValues.front(), value);
+        ASSERT_FALSE(expectedValues.empty());
+        expectedValues.erase(expectedValues.begin());
+    }
+    ASSERT_TRUE(expectedValues.empty());
+}
diff --git a/src/dawn/tests/unittests/SlabAllocatorTests.cpp b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
new file mode 100644
index 0000000..5139172
--- /dev/null
+++ b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
@@ -0,0 +1,180 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/Math.h"
+#include "dawn/common/SlabAllocator.h"
+
+namespace {
+
+    struct Foo : public PlacementAllocated {
+        Foo(int value) : value(value) {
+        }
+
+        int value;
+    };
+
+    struct alignas(256) AlignedFoo : public Foo {
+        using Foo::Foo;
+    };
+
+}  // namespace
+
+// Test that a slab allocator of a single object works.
+TEST(SlabAllocatorTests, Single) {
+    SlabAllocator<Foo> allocator(1 * sizeof(Foo));
+
+    Foo* obj = allocator.Allocate(4);
+    EXPECT_EQ(obj->value, 4);
+
+    allocator.Deallocate(obj);
+}
+
+// Allocate multiple objects and check their data is correct.
+TEST(SlabAllocatorTests, AllocateSequential) {
+    // Check small alignment
+    {
+        SlabAllocator<Foo> allocator(5 * sizeof(Foo));
+
+        std::vector<Foo*> objects;
+        for (int i = 0; i < 10; ++i) {
+            auto* ptr = allocator.Allocate(i);
+            EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) == objects.end());
+            objects.push_back(ptr);
+        }
+
+        for (int i = 0; i < 10; ++i) {
+            // Check that the value is correct and hasn't been trampled.
+            EXPECT_EQ(objects[i]->value, i);
+
+            // Check that the alignment is correct.
+            EXPECT_TRUE(IsPtrAligned(objects[i], alignof(Foo)));
+        }
+
+        // Deallocate all of the objects.
+        for (Foo* object : objects) {
+            allocator.Deallocate(object);
+        }
+    }
+
+    // Check large alignment
+    {
+        SlabAllocator<AlignedFoo> allocator(9 * sizeof(AlignedFoo));
+
+        std::vector<AlignedFoo*> objects;
+        for (int i = 0; i < 21; ++i) {
+            auto* ptr = allocator.Allocate(i);
+            EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) == objects.end());
+            objects.push_back(ptr);
+        }
+
+        for (int i = 0; i < 21; ++i) {
+            // Check that the value is correct and hasn't been trampled.
+            EXPECT_EQ(objects[i]->value, i);
+
+            // Check that the alignment is correct.
+            EXPECT_TRUE(IsPtrAligned(objects[i], 256));
+        }
+
+        // Deallocate all of the objects.
+        for (AlignedFoo* object : objects) {
+            allocator.Deallocate(object);
+        }
+    }
+}
+
+// Test that when reallocating a number of objects <= pool size, all memory is reused.
+TEST(SlabAllocatorTests, ReusesFreedMemory) {
+    SlabAllocator<Foo> allocator(17 * sizeof(Foo));
+
+    // Allocate a number of objects.
+    std::set<Foo*> objects;
+    for (int i = 0; i < 17; ++i) {
+        EXPECT_TRUE(objects.insert(allocator.Allocate(i)).second);
+    }
+
+    // Deallocate all of the objects.
+    for (Foo* object : objects) {
+        allocator.Deallocate(object);
+    }
+
+    std::set<Foo*> reallocatedObjects;
+    // Allocate objects again. All of the pointers should be the same.
+    for (int i = 0; i < 17; ++i) {
+        Foo* ptr = allocator.Allocate(i);
+        EXPECT_TRUE(reallocatedObjects.insert(ptr).second);
+        EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) != objects.end());
+    }
+
+    // Deallocate all of the objects.
+    for (Foo* object : objects) {
+        allocator.Deallocate(object);
+    }
+}
+
+// Test many allocations and deallocations. Meant to catch corner cases with partially
+// empty slabs.
+TEST(SlabAllocatorTests, AllocateDeallocateMany) {
+    SlabAllocator<Foo> allocator(17 * sizeof(Foo));
+
+    std::set<Foo*> objects;
+    std::set<Foo*> set3;
+    std::set<Foo*> set7;
+
+    // Allocate many objects.
+    for (uint32_t i = 0; i < 800; ++i) {
+        Foo* object = allocator.Allocate(i);
+        EXPECT_TRUE(objects.insert(object).second);
+
+        if (i % 3 == 0) {
+            set3.insert(object);
+        } else if (i % 7 == 0) {
+            set7.insert(object);
+        }
+    }
+
+    // Deallocate every 3rd object.
+    for (Foo* object : set3) {
+        allocator.Deallocate(object);
+        objects.erase(object);
+    }
+
+    // Allocate many more objects
+    for (uint32_t i = 0; i < 800; ++i) {
+        Foo* object = allocator.Allocate(i);
+        EXPECT_TRUE(objects.insert(object).second);
+
+        if (i % 7 == 0) {
+            set7.insert(object);
+        }
+    }
+
+    // Deallocate every 7th object from the first and second rounds of allocation.
+    for (Foo* object : set7) {
+        allocator.Deallocate(object);
+        objects.erase(object);
+    }
+
+    // Allocate objects again
+    for (uint32_t i = 0; i < 800; ++i) {
+        Foo* object = allocator.Allocate(i);
+        EXPECT_TRUE(objects.insert(object).second);
+    }
+
+    // Deallocate the rest of the objects
+    for (Foo* object : objects) {
+        allocator.Deallocate(object);
+    }
+}
diff --git a/src/dawn/tests/unittests/StackContainerTests.cpp b/src/dawn/tests/unittests/StackContainerTests.cpp
new file mode 100644
index 0000000..a427215
--- /dev/null
+++ b/src/dawn/tests/unittests/StackContainerTests.cpp
@@ -0,0 +1,171 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a modified copy of Chromium's /src/base/containers/stack_container_unittest.cc
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/StackContainer.h"
+
+#include <algorithm>
+#include <cstddef>
+
+namespace {
+
+    class Dummy : public RefCounted {
+      public:
+        explicit Dummy(int* alive) : mAlive(alive) {
+            ++*mAlive;
+        }
+
+      private:
+        ~Dummy() {
+            --*mAlive;
+        }
+
+        int* const mAlive;
+    };
+
+}  // namespace
+
+TEST(StackContainer, Vector) {
+    const int stack_size = 3;
+    StackVector<int, stack_size> vect;
+    const int* stack_buffer = &vect.stack_data().stack_buffer()[0];
+
+    // The initial |stack_size| elements should appear in the stack buffer.
+    EXPECT_EQ(static_cast<size_t>(stack_size), vect.container().capacity());
+    for (int i = 0; i < stack_size; i++) {
+        vect.container().push_back(i);
+        EXPECT_EQ(stack_buffer, &vect.container()[0]);
+        EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
+    }
+
+    // Adding more elements should push the array onto the heap.
+    for (int i = 0; i < stack_size; i++) {
+        vect.container().push_back(i + stack_size);
+        EXPECT_NE(stack_buffer, &vect.container()[0]);
+        EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
+    }
+
+    // The array should still be in order.
+    for (int i = 0; i < stack_size * 2; i++)
+        EXPECT_EQ(i, vect.container()[i]);
+
+    // Resize to smaller. Our STL implementation won't reallocate in this case,
+    // otherwise it might use our stack buffer. We reserve right after the resize
+    // to guarantee it isn't using the stack buffer, even though it doesn't have
+    // much data.
+    vect.container().resize(stack_size);
+    vect.container().reserve(stack_size * 2);
+    EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
+
+    // Copying the small vector to another should use the same allocator and use
+    // the now-unused stack buffer. GENERALLY CALLERS SHOULD NOT DO THIS since
+    // they have to get the template types just right and it can cause errors.
+    std::vector<int, StackAllocator<int, stack_size>> other(vect.container());
+    EXPECT_EQ(stack_buffer, &other.front());
+    EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
+    for (int i = 0; i < stack_size; i++)
+        EXPECT_EQ(i, other[i]);
+}
+
+TEST(StackContainer, VectorDoubleDelete) {
+    // Regression testing for double-delete.
+    typedef StackVector<Ref<Dummy>, 2> Vector;
+    Vector vect;
+
+    int alive = 0;
+    Ref<Dummy> dummy = AcquireRef(new Dummy(&alive));
+    EXPECT_EQ(alive, 1);
+
+    vect->push_back(dummy);
+    EXPECT_EQ(alive, 1);
+
+    Dummy* dummy_unref = dummy.Get();
+    dummy = nullptr;
+    EXPECT_EQ(alive, 1);
+
+    auto itr = std::find(vect->begin(), vect->end(), dummy_unref);
+    EXPECT_EQ(itr->Get(), dummy_unref);
+    vect->erase(itr);
+    EXPECT_EQ(alive, 0);
+
+    // Shouldn't crash at exit.
+}
+
+namespace {
+
+    template <size_t alignment>
+    class AlignedData {
+      public:
+        AlignedData() {
+            memset(data_, 0, alignment);
+        }
+        ~AlignedData() = default;
+        AlignedData(const AlignedData&) = default;
+        AlignedData& operator=(const AlignedData&) = default;
+        alignas(alignment) char data_[alignment];
+    };
+
+}  // anonymous namespace
+
+#define EXPECT_ALIGNED(ptr, align) EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST(StackContainer, BufferAlignment) {
+    StackVector<wchar_t, 16> text;
+    text->push_back(L'A');
+    EXPECT_ALIGNED(&text[0], alignof(wchar_t));
+
+    StackVector<double, 1> doubles;
+    doubles->push_back(0.0);
+    EXPECT_ALIGNED(&doubles[0], alignof(double));
+
+    StackVector<AlignedData<16>, 1> aligned16;
+    aligned16->push_back(AlignedData<16>());
+    EXPECT_ALIGNED(&aligned16[0], 16);
+
+#if !defined(DAWN_COMPILER_GCC) || defined(__x86_64__) || defined(__i386__)
+    // It seems that non-X86 gcc doesn't respect greater than 16 byte alignment.
+    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33721 for details.
+    // TODO(sbc):re-enable this if GCC starts respecting higher alignments.
+    StackVector<AlignedData<256>, 1> aligned256;
+    aligned256->push_back(AlignedData<256>());
+    EXPECT_ALIGNED(&aligned256[0], 256);
+#endif
+}
+
+template class StackVector<int, 2>;
+template class StackVector<Ref<Dummy>, 2>;
+
+template <typename T, size_t size>
+void CheckStackVectorElements(const StackVector<T, size>& vec, std::initializer_list<T> expected) {
+    auto expected_it = expected.begin();
+    EXPECT_EQ(vec->size(), expected.size());
+    for (T t : vec) {
+        EXPECT_NE(expected.end(), expected_it);
+        EXPECT_EQ(*expected_it, t);
+        ++expected_it;
+    }
+    EXPECT_EQ(expected.end(), expected_it);
+}
+
+TEST(StackContainer, Iteration) {
+    StackVector<int, 3> vect;
+    vect->push_back(7);
+    vect->push_back(11);
+
+    CheckStackVectorElements(vect, {7, 11});
+    for (int& i : vect) {
+        ++i;
+    }
+    CheckStackVectorElements(vect, {8, 12});
+    vect->push_back(13);
+    CheckStackVectorElements(vect, {8, 12, 13});
+    vect->resize(5);
+    CheckStackVectorElements(vect, {8, 12, 13, 0, 0});
+    vect->resize(1);
+    CheckStackVectorElements(vect, {8});
+}
diff --git a/src/dawn/tests/unittests/SubresourceStorageTests.cpp b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
new file mode 100644
index 0000000..ef4926d
--- /dev/null
+++ b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
@@ -0,0 +1,677 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/SubresourceStorage.h"
+
+#include "dawn/common/Log.h"
+
+using namespace dawn::native;
+
+// A fake class that replicates the behavior of SubresourceStorage but without any compression and
+// is used to compare the results of operations on SubresourceStorage against the "ground truth" of
+// FakeStorage.
+template <typename T>
+struct FakeStorage {
+    FakeStorage(Aspect aspects,
+                uint32_t arrayLayerCount,
+                uint32_t mipLevelCount,
+                T initialValue = {})
+        : mAspects(aspects),
+          mArrayLayerCount(arrayLayerCount),
+          mMipLevelCount(mipLevelCount),
+          mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) {
+    }
+
+    template <typename F>
+    void Update(const SubresourceRange& range, F&& updateFunc) {
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]);
+                }
+            }
+        }
+    }
+
+    template <typename U, typename F>
+    void Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+        for (Aspect aspect : IterateEnumMask(mAspects)) {
+            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+                for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)],
+                              other.Get(aspect, layer, level));
+                }
+            }
+        }
+    }
+
+    const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
+        return mData[GetDataIndex(aspect, arrayLayer, mipLevel)];
+    }
+
+    size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
+        return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
+    }
+
+    // Method that checks that this and real have exactly the same content. It does so via looping
+    // on all subresources and calling Get() (hence testing Get()). It also calls Iterate()
+    // checking that every subresource is mentioned exactly once and that its content is correct
+    // (hence testing Iterate()).
+    // Its implementation requires the RangeTracker below that itself needs FakeStorage<int> so it
+    // cannot be define inline with the other methods.
+    void CheckSameAs(const SubresourceStorage<T>& real);
+
+    Aspect mAspects;
+    uint32_t mArrayLayerCount;
+    uint32_t mMipLevelCount;
+
+    std::vector<T> mData;
+};
+
+// Track a set of ranges that have been seen and can assert that in aggregate they make exactly
+// a single range (and that each subresource was seen only once).
+struct RangeTracker {
+    template <typename T>
+    RangeTracker(const SubresourceStorage<T>& s)
+        : mTracked(s.GetAspectsForTesting(),
+                   s.GetArrayLayerCountForTesting(),
+                   s.GetMipLevelCountForTesting(),
+                   0) {
+    }
+
+    void Track(const SubresourceRange& range) {
+        // Add +1 to the subresources tracked.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 0u);
+            *counter += 1;
+        });
+    }
+
+    void CheckTrackedExactly(const SubresourceRange& range) {
+        // Check that all subresources in the range were tracked once and set the counter back to 0.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 1u);
+            *counter = 0;
+        });
+
+        // Now all subresources should be at 0.
+        for (int counter : mTracked.mData) {
+            ASSERT_EQ(counter, 0);
+        }
+    }
+
+    FakeStorage<uint32_t> mTracked;
+};
+
+template <typename T>
+void FakeStorage<T>::CheckSameAs(const SubresourceStorage<T>& real) {
+    EXPECT_EQ(real.GetAspectsForTesting(), mAspects);
+    EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount);
+    EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount);
+
+    RangeTracker tracker(real);
+    real.Iterate([&](const SubresourceRange& range, const T& data) {
+        // Check that the range is sensical.
+        EXPECT_TRUE(IsSubset(range.aspects, mAspects));
+
+        EXPECT_LT(range.baseArrayLayer, mArrayLayerCount);
+        EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount);
+
+        EXPECT_LT(range.baseMipLevel, mMipLevelCount);
+        EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount);
+
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    EXPECT_EQ(data, Get(aspect, layer, level));
+                    EXPECT_EQ(data, real.Get(aspect, layer, level));
+                }
+            }
+        }
+
+        tracker.Track(range);
+    });
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount));
+}
+
+template <typename T>
+void CheckAspectCompressed(const SubresourceStorage<T>& s, Aspect aspect, bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+    uint32_t layerCount = s.GetArrayLayerCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == layerCount &&
+            range.levelCount == levelCount && range.baseArrayLayer == 0 &&
+            range.baseMipLevel == 0) {
+            seen = true;
+        }
+    });
+
+    ASSERT_EQ(seen, expected);
+
+    // Check that the internal state of SubresourceStorage matches what we expect.
+    // If an aspect is compressed, all its layers should be internally tagged as compressed.
+    ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected);
+    if (expected) {
+        for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) {
+            ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer));
+        }
+    }
+}
+
+template <typename T>
+void CheckLayerCompressed(const SubresourceStorage<T>& s,
+                          Aspect aspect,
+                          uint32_t layer,
+                          bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == 1 && range.levelCount == levelCount &&
+            range.baseArrayLayer == layer && range.baseMipLevel == 0) {
+            seen = true;
+        }
+    });
+
+    ASSERT_EQ(seen, expected);
+    ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected);
+}
+
+struct SmallData {
+    uint32_t value = 0xF00;
+};
+
+bool operator==(const SmallData& a, const SmallData& b) {
+    return a.value == b.value;
+}
+
+// Test that the default value is correctly set.
+TEST(SubresourceStorageTest, DefaultValue) {
+    // Test setting no default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0);
+
+        FakeStorage<int> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
+    }
+
+    // Test setting a default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5, 42);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42);
+
+        FakeStorage<int> f(Aspect::Color, 3, 5, 42);
+        f.CheckSameAs(s);
+    }
+
+    // Test setting no default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u);
+
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
+    }
+    // Test setting a default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5, {007u});
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u);
+
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5, {007u});
+        f.CheckSameAs(s);
+    }
+}
+
+// The tests for Update() all follow the same pattern of setting up a real and a fake storage then
+// performing one or multiple Update()s on them and checking:
+//  - They have the same content.
+//  - The Update() range was correct.
+//  - The aspects and layers have the expected "compressed" status.
+
+// Calls Update both on the read storage and the fake storage but intercepts the call to updateFunc
+// done by the real storage to check their ranges argument aggregate to exactly the update range.
+template <typename T, typename F>
+void CallUpdateOnBoth(SubresourceStorage<T>* s,
+                      FakeStorage<T>* f,
+                      const SubresourceRange& range,
+                      F&& updateFunc) {
+    RangeTracker tracker(*s);
+
+    s->Update(range, [&](const SubresourceRange& range, T* data) {
+        tracker.Track(range);
+        updateFunc(range, data);
+    });
+    f->Update(range, updateFunc);
+
+    tracker.CheckTrackedExactly(range);
+    f->CheckSameAs(*s);
+}
+
+// Test updating a single subresource on a single-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 5, 7);
+    FakeStorage<int> f(Aspect::Color, 5, 7);
+
+    // Update a single subresource.
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, true);
+    CheckLayerCompressed(s, Aspect::Color, 3, false);
+    CheckLayerCompressed(s, Aspect::Color, 4, true);
+}
+
+// Test updating a single subresource on a multi-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 5, 3);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 5, 3);
+
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 0, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+}
+
+// Test updating as a stipple pattern on one of two aspects then updating it completely.
+TEST(SubresourceStorageTest, UpdateStipple) {
+    const uint32_t kLayers = 10;
+    const uint32_t kLevels = 7;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update with a stipple.
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
+            }
+        }
+    }
+
+    // The depth should be fully uncompressed while the stencil stayed compressed.
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        CheckLayerCompressed(s, Aspect::Depth, layer, false);
+    }
+
+    // Update completely with a single value. Recompression should happen!
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 31; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
+
+// Test updating as a crossing band pattern:
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// Then updating completely.
+TEST(SubresourceStorageTest, UpdateTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+
+    // The layers were fully updated so they should stay compressed.
+    CheckLayerCompressed(s, Aspect::Depth, 2, true);
+    CheckLayerCompressed(s, Aspect::Depth, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
+    }
+
+    // The layers had to be decompressed in depth
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+
+    // Update completely. Without a single value recompression shouldn't happen.
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange,
+                         [](const SubresourceRange&, int* data) { *data += 12; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+}
+
+// Test updating with extremal subresources
+//    - Then half of the array layers in full.
+//    - Then updating completely.
+TEST(SubresourceStorageTest, UpdateExtremas) {
+    const uint32_t kLayers = 6;
+    const uint32_t kLevels = 4;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+
+    // Update the two extrema
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update half of the layers in full with constant values. Some recompression should happen.
+    {
+        SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 123; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update completely. Recompression should happen!
+    {
+        SubresourceRange fullRange = SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 35; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// A regression test for an issue found while reworking the implementation where RecompressAspect
+// didn't correctly check that each each layer was compressed but only that their 0th value was
+// the same.
+TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
+    SubresourceStorage<int> s(Aspect::Color, 2, 2);
+    FakeStorage<int> f(Aspect::Color, 2, 2);
+
+    // Update 0th mip levels to some value, it should decompress the aspect and both layers.
+    {
+        SubresourceRange range(Aspect::Color, {0, 2}, {0, 1});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+
+    // Update the whole resource by doing +1. The aspects and layers should stay decompressed.
+    {
+        SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+}
+
+// The tests for Merge() all follow the same as the Update() tests except that they use Update()
+// to set up the test storages.
+
+// Similar to CallUpdateOnBoth but for Merge
+template <typename T, typename U, typename F>
+void CallMergeOnBoth(SubresourceStorage<T>* s,
+                     FakeStorage<T>* f,
+                     const SubresourceStorage<U>& other,
+                     F&& mergeFunc) {
+    RangeTracker tracker(*s);
+
+    s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) {
+        tracker.Track(range);
+        mergeFunc(range, data, otherData);
+    });
+    f->Merge(other, mergeFunc);
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount));
+    f->CheckSameAs(*s);
+}
+
+// Test merging two fully compressed single-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 4, 6);
+    FakeStorage<int> f(Aspect::Color, 4, 6);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Color, 4, 6, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// Test merging two fully compressed multi-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 6, 7);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 6, 7);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Depth | Aspect::Stencil, 6, 7, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
+
+// Test merging a fully compressed resource in a resource with the "cross band" pattern.
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// This provides coverage of using a single piece of data from `other` to update all of `s`
+TEST(SubresourceStorageTest, MergeFullInTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; });
+    }
+
+    // Merge the fully compressed resource.
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17);
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
+
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the mip
+    // band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+// Test the reverse, mergign two-bands in a full resource. This provides coverage for decompressing
+// aspects / and partilly layers to match the compression of `other`
+TEST(SubresourceStorageTest, MergeTwoBandInFull) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+
+    // Update the two bands
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; });
+    }
+
+    // Merge the fully compressed resource.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
+
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the mip
+    // band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+
+// Test merging storage with a layer band in a stipple patterned storage. This provide coverage
+// for the code path that uses the same layer data for other multiple times.
+TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
+    const uint32_t kLayers = 3;
+    const uint32_t kLevels = 5;
+
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
+
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
+            }
+        }
+        if (layer % 2 == 0) {
+            other.Update({Aspect::Color, {layer, 1}, {0, kLevels}},
+                         [](const SubresourceRange&, int* data) { *data += 8; });
+        }
+    }
+
+    // Merge the band in the stipple.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
+
+    // None of the resulting layers are compressed.
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, false);
+}
+
+// Regression test for a missing check that layer 0 is compressed when recompressing.
+TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+
+    // Set up s with zeros except (0, 1) which is garbage.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
+    }
+
+    // Other is 2x2 of zeroes
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
+
+    // Fake updating F with other which is fully compressed and will trigger recompression.
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {});
+
+    // The Color aspect should not have been recompressed.
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+}
+
+// Regression test for aspect decompression not copying to layer 0
+TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels, 3);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels, 3);
+
+    // Cause decompression by writing to a single subresource.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
+    }
+
+    // Check that the aspect's value of 3 was correctly decompressed in layer 0.
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0));
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1));
+}
+
+// Bugs found while testing:
+//  - mLayersCompressed not initialized to true.
+//  - DecompressLayer setting Compressed to true instead of false.
+//  - Get() checking for !compressed instead of compressed for the early exit.
+//  - ASSERT in RecompressLayers was inverted.
+//  - Two != being converted to == during a rework.
+//  - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
+//  - Missing decompression of layer 0 after introducing mInlineAspectData.
diff --git a/src/dawn/tests/unittests/SystemUtilsTests.cpp b/src/dawn/tests/unittests/SystemUtilsTests.cpp
new file mode 100644
index 0000000..15dff17
--- /dev/null
+++ b/src/dawn/tests/unittests/SystemUtilsTests.cpp
@@ -0,0 +1,117 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock-matchers.h>
+#include <gtest/gtest.h>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/SystemUtils.h"
+
+using ::testing::_;
+using ::testing::Pair;
+
+// Tests for GetEnvironmentVar
+TEST(SystemUtilsTests, GetEnvironmentVar) {
+    // Test nonexistent environment variable
+    EXPECT_THAT(GetEnvironmentVar("NonexistentEnvironmentVar"), Pair("", false));
+}
+
+// Tests for SetEnvironmentVar
+TEST(SystemUtilsTests, SetEnvironmentVar) {
+    // Test new environment variable
+    EXPECT_TRUE(SetEnvironmentVar("EnvironmentVarForTest", "NewEnvironmentVarValue"));
+    EXPECT_THAT(GetEnvironmentVar("EnvironmentVarForTest"), Pair("NewEnvironmentVarValue", true));
+    // Test override environment variable
+    EXPECT_TRUE(SetEnvironmentVar("EnvironmentVarForTest", "OverrideEnvironmentVarValue"));
+    EXPECT_THAT(GetEnvironmentVar("EnvironmentVarForTest"),
+                Pair("OverrideEnvironmentVarValue", true));
+}
+
+// Tests for GetExecutableDirectory
+TEST(SystemUtilsTests, GetExecutableDirectory) {
+    auto dir = GetExecutableDirectory();
+    // Test returned value is non-empty string
+    EXPECT_NE(dir, std::optional{std::string("")});
+    ASSERT_NE(dir, std::nullopt);
+    // Test last character in path
+    EXPECT_EQ(dir->back(), *GetPathSeparator());
+}
+
+// Tests for ScopedEnvironmentVar
+TEST(SystemUtilsTests, ScopedEnvironmentVar) {
+    SetEnvironmentVar("ScopedEnvironmentVarForTest", "original");
+
+    // Test empty environment variable doesn't crash
+    { ScopedEnvironmentVar var; }
+
+    // Test setting empty environment variable
+    {
+        ScopedEnvironmentVar var;
+        var.Set("ScopedEnvironmentVarForTest", "NewEnvironmentVarValue");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"),
+                    Pair("NewEnvironmentVarValue", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("original", true));
+
+    // Test that the environment variable can be set, and it is unset at the end of the scope.
+    {
+        ScopedEnvironmentVar var("ScopedEnvironmentVarForTest", "NewEnvironmentVarValue");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"),
+                    Pair("NewEnvironmentVarValue", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("original", true));
+
+    // Test nested scopes
+    {
+        ScopedEnvironmentVar outer("ScopedEnvironmentVarForTest", "outer");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("outer", true));
+        {
+            ScopedEnvironmentVar inner("ScopedEnvironmentVarForTest", "inner");
+            EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("inner", true));
+        }
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("outer", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("original", true));
+
+    // Test redundantly setting scoped variables
+    {
+        ScopedEnvironmentVar var1("ScopedEnvironmentVarForTest", "var1");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("var1", true));
+
+        ScopedEnvironmentVar var2("ScopedEnvironmentVarForTest", "var2");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("var2", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("original", true));
+}
+
+// Test that restoring a scoped environment variable to the empty string.
+TEST(SystemUtilsTests, ScopedEnvironmentVarRestoresEmptyString) {
+    ScopedEnvironmentVar empty("ScopedEnvironmentVarForTest", "");
+    {
+        ScopedEnvironmentVar var1("ScopedEnvironmentVarForTest", "var1");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("var1", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("", true));
+}
+
+// Test that restoring a scoped environment variable to not set (distuishable from empty string)
+// works.
+TEST(SystemUtilsTests, ScopedEnvironmentVarRestoresNotSet) {
+    ScopedEnvironmentVar null("ScopedEnvironmentVarForTest", nullptr);
+    {
+        ScopedEnvironmentVar var1("ScopedEnvironmentVarForTest", "var1");
+        EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("var1", true));
+    }
+    EXPECT_THAT(GetEnvironmentVar("ScopedEnvironmentVarForTest"), Pair("", false));
+}
diff --git a/src/dawn/tests/unittests/ToBackendTests.cpp b/src/dawn/tests/unittests/ToBackendTests.cpp
new file mode 100644
index 0000000..c143acd
--- /dev/null
+++ b/src/dawn/tests/unittests/ToBackendTests.cpp
@@ -0,0 +1,87 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/ToBackend.h"
+
+#include <type_traits>
+
+// Make our own Base - Backend object pair, reusing the AdapterBase name
+namespace dawn::native {
+    class AdapterBase : public RefCounted {};
+}  // namespace dawn::native
+
+using namespace dawn::native;
+
+class MyAdapter : public AdapterBase {};
+
+struct MyBackendTraits {
+    using AdapterType = MyAdapter;
+};
+
+// Instanciate ToBackend for our "backend"
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
+    return ToBackendBase<MyBackendTraits>(common);
+}
+
+// Test that ToBackend correctly converts pointers to base classes.
+TEST(ToBackend, Pointers) {
+    {
+        MyAdapter* adapter = new MyAdapter;
+        const AdapterBase* base = adapter;
+
+        auto backendAdapter = ToBackend(base);
+        static_assert(std::is_same<decltype(backendAdapter), const MyAdapter*>::value);
+        ASSERT_EQ(adapter, backendAdapter);
+
+        adapter->Release();
+    }
+    {
+        MyAdapter* adapter = new MyAdapter;
+        AdapterBase* base = adapter;
+
+        auto backendAdapter = ToBackend(base);
+        static_assert(std::is_same<decltype(backendAdapter), MyAdapter*>::value);
+        ASSERT_EQ(adapter, backendAdapter);
+
+        adapter->Release();
+    }
+}
+
+// Test that ToBackend correctly converts Refs to base classes.
+TEST(ToBackend, Ref) {
+    {
+        MyAdapter* adapter = new MyAdapter;
+        const Ref<AdapterBase> base(adapter);
+
+        const auto& backendAdapter = ToBackend(base);
+        static_assert(std::is_same<decltype(ToBackend(base)), const Ref<MyAdapter>&>::value);
+        ASSERT_EQ(adapter, backendAdapter.Get());
+
+        adapter->Release();
+    }
+    {
+        MyAdapter* adapter = new MyAdapter;
+        Ref<AdapterBase> base(adapter);
+
+        auto backendAdapter = ToBackend(base);
+        static_assert(std::is_same<decltype(ToBackend(base)), Ref<MyAdapter>&>::value);
+        ASSERT_EQ(adapter, backendAdapter.Get());
+
+        adapter->Release();
+    }
+}
diff --git a/src/dawn/tests/unittests/TypedIntegerTests.cpp b/src/dawn/tests/unittests/TypedIntegerTests.cpp
new file mode 100644
index 0000000..8e4d142
--- /dev/null
+++ b/src/dawn/tests/unittests/TypedIntegerTests.cpp
@@ -0,0 +1,234 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+class TypedIntegerTest : public testing::Test {
+  protected:
+    using Unsigned = TypedInteger<struct UnsignedT, uint32_t>;
+    using Signed = TypedInteger<struct SignedT, int32_t>;
+};
+
+// Test that typed integers can be created and cast and the internal values are identical
+TEST_F(TypedIntegerTest, ConstructionAndCast) {
+    Signed svalue(2);
+    EXPECT_EQ(static_cast<int32_t>(svalue), 2);
+
+    Unsigned uvalue(7);
+    EXPECT_EQ(static_cast<uint32_t>(uvalue), 7u);
+
+    static_assert(static_cast<int32_t>(Signed(3)) == 3);
+    static_assert(static_cast<uint32_t>(Unsigned(28)) == 28);
+}
+
+// Test typed integer comparison operators
+TEST_F(TypedIntegerTest, Comparison) {
+    Unsigned value(8);
+
+    // Truthy usages of comparison operators
+    EXPECT_TRUE(value < Unsigned(9));
+    EXPECT_TRUE(value <= Unsigned(9));
+    EXPECT_TRUE(value <= Unsigned(8));
+    EXPECT_TRUE(value == Unsigned(8));
+    EXPECT_TRUE(value >= Unsigned(8));
+    EXPECT_TRUE(value >= Unsigned(7));
+    EXPECT_TRUE(value > Unsigned(7));
+    EXPECT_TRUE(value != Unsigned(7));
+
+    // Falsy usages of comparison operators
+    EXPECT_FALSE(value >= Unsigned(9));
+    EXPECT_FALSE(value > Unsigned(9));
+    EXPECT_FALSE(value > Unsigned(8));
+    EXPECT_FALSE(value != Unsigned(8));
+    EXPECT_FALSE(value < Unsigned(8));
+    EXPECT_FALSE(value < Unsigned(7));
+    EXPECT_FALSE(value <= Unsigned(7));
+    EXPECT_FALSE(value == Unsigned(7));
+}
+
+TEST_F(TypedIntegerTest, Arithmetic) {
+    // Postfix Increment
+    {
+        Signed value(0);
+        EXPECT_EQ(value++, Signed(0));
+        EXPECT_EQ(value, Signed(1));
+    }
+
+    // Prefix Increment
+    {
+        Signed value(0);
+        EXPECT_EQ(++value, Signed(1));
+        EXPECT_EQ(value, Signed(1));
+    }
+
+    // Postfix Decrement
+    {
+        Signed value(0);
+        EXPECT_EQ(value--, Signed(0));
+        EXPECT_EQ(value, Signed(-1));
+    }
+
+    // Prefix Decrement
+    {
+        Signed value(0);
+        EXPECT_EQ(--value, Signed(-1));
+        EXPECT_EQ(value, Signed(-1));
+    }
+
+    // Signed addition
+    {
+        Signed a(3);
+        Signed b(-4);
+        Signed c = a + b;
+        EXPECT_EQ(a, Signed(3));
+        EXPECT_EQ(b, Signed(-4));
+        EXPECT_EQ(c, Signed(-1));
+    }
+
+    // Signed subtraction
+    {
+        Signed a(3);
+        Signed b(-4);
+        Signed c = a - b;
+        EXPECT_EQ(a, Signed(3));
+        EXPECT_EQ(b, Signed(-4));
+        EXPECT_EQ(c, Signed(7));
+    }
+
+    // Unsigned addition
+    {
+        Unsigned a(9);
+        Unsigned b(3);
+        Unsigned c = a + b;
+        EXPECT_EQ(a, Unsigned(9));
+        EXPECT_EQ(b, Unsigned(3));
+        EXPECT_EQ(c, Unsigned(12));
+    }
+
+    // Unsigned subtraction
+    {
+        Unsigned a(9);
+        Unsigned b(2);
+        Unsigned c = a - b;
+        EXPECT_EQ(a, Unsigned(9));
+        EXPECT_EQ(b, Unsigned(2));
+        EXPECT_EQ(c, Unsigned(7));
+    }
+
+    // Negation
+    {
+        Signed a(5);
+        Signed b = -a;
+        EXPECT_EQ(a, Signed(5));
+        EXPECT_EQ(b, Signed(-5));
+    }
+}
+
+TEST_F(TypedIntegerTest, NumericLimits) {
+    EXPECT_EQ(std::numeric_limits<Unsigned>::max(), Unsigned(std::numeric_limits<uint32_t>::max()));
+    EXPECT_EQ(std::numeric_limits<Unsigned>::min(), Unsigned(std::numeric_limits<uint32_t>::min()));
+    EXPECT_EQ(std::numeric_limits<Signed>::max(), Signed(std::numeric_limits<int32_t>::max()));
+    EXPECT_EQ(std::numeric_limits<Signed>::min(), Signed(std::numeric_limits<int32_t>::min()));
+}
+
+TEST_F(TypedIntegerTest, UnderlyingType) {
+    static_assert(std::is_same<UnderlyingType<Unsigned>, uint32_t>::value);
+    static_assert(std::is_same<UnderlyingType<Signed>, int32_t>::value);
+}
+
+// Tests for bounds assertions on arithmetic overflow and underflow.
+#if defined(DAWN_ENABLE_ASSERTS)
+
+TEST_F(TypedIntegerTest, IncrementUnsignedOverflow) {
+    Unsigned value(std::numeric_limits<uint32_t>::max() - 1);
+
+    value++;                    // Doesn't overflow.
+    EXPECT_DEATH(value++, "");  // Overflows.
+}
+
+TEST_F(TypedIntegerTest, IncrementSignedOverflow) {
+    Signed value(std::numeric_limits<int32_t>::max() - 1);
+
+    value++;                    // Doesn't overflow.
+    EXPECT_DEATH(value++, "");  // Overflows.
+}
+
+TEST_F(TypedIntegerTest, DecrementUnsignedUnderflow) {
+    Unsigned value(std::numeric_limits<uint32_t>::min() + 1);
+
+    value--;                    // Doesn't underflow.
+    EXPECT_DEATH(value--, "");  // Underflows.
+}
+
+TEST_F(TypedIntegerTest, DecrementSignedUnderflow) {
+    Signed value(std::numeric_limits<int32_t>::min() + 1);
+
+    value--;                    // Doesn't underflow.
+    EXPECT_DEATH(value--, "");  // Underflows.
+}
+
+TEST_F(TypedIntegerTest, UnsignedAdditionOverflow) {
+    Unsigned value(std::numeric_limits<uint32_t>::max() - 1);
+
+    value + Unsigned(1);                    // Doesn't overflow.
+    EXPECT_DEATH(value + Unsigned(2), "");  // Overflows.
+}
+
+TEST_F(TypedIntegerTest, UnsignedSubtractionUnderflow) {
+    Unsigned value(1);
+
+    value - Unsigned(1);                    // Doesn't underflow.
+    EXPECT_DEATH(value - Unsigned(2), "");  // Underflows.
+}
+
+TEST_F(TypedIntegerTest, SignedAdditionOverflow) {
+    Signed value(std::numeric_limits<int32_t>::max() - 1);
+
+    value + Signed(1);                    // Doesn't overflow.
+    EXPECT_DEATH(value + Signed(2), "");  // Overflows.
+}
+
+TEST_F(TypedIntegerTest, SignedAdditionUnderflow) {
+    Signed value(std::numeric_limits<int32_t>::min() + 1);
+
+    value + Signed(-1);                    // Doesn't underflow.
+    EXPECT_DEATH(value + Signed(-2), "");  // Underflows.
+}
+
+TEST_F(TypedIntegerTest, SignedSubtractionOverflow) {
+    Signed value(std::numeric_limits<int32_t>::max() - 1);
+
+    value - Signed(-1);                    // Doesn't overflow.
+    EXPECT_DEATH(value - Signed(-2), "");  // Overflows.
+}
+
+TEST_F(TypedIntegerTest, SignedSubtractionUnderflow) {
+    Signed value(std::numeric_limits<int32_t>::min() + 1);
+
+    value - Signed(1);                    // Doesn't underflow.
+    EXPECT_DEATH(value - Signed(2), "");  // Underflows.
+}
+
+TEST_F(TypedIntegerTest, NegationOverflow) {
+    Signed maxValue(std::numeric_limits<int32_t>::max());
+    -maxValue;  // Doesn't underflow.
+
+    Signed minValue(std::numeric_limits<int32_t>::min());
+    EXPECT_DEATH(-minValue, "");  // Overflows.
+}
+
+#endif  // defined(DAWN_ENABLE_ASSERTS)
diff --git a/src/dawn/tests/unittests/VersionTests.cpp b/src/dawn/tests/unittests/VersionTests.cpp
new file mode 100644
index 0000000..60172a7
--- /dev/null
+++ b/src/dawn/tests/unittests/VersionTests.cpp
@@ -0,0 +1,29 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "dawn/common/Version_autogen.h"
+
+namespace dawn { namespace {
+
+    using ::testing::SizeIs;
+
+    TEST(VersionTests, GitCommitHashLength) {
+        // Git hashes should be 40 characters long.
+        EXPECT_THAT(std::string(kGitHash), SizeIs(40));
+    }
+
+}}  // namespace dawn::
diff --git a/src/dawn/tests/unittests/WindowsUtilsTests.cpp b/src/dawn/tests/unittests/WindowsUtilsTests.cpp
new file mode 100644
index 0000000..70b209e
--- /dev/null
+++ b/src/dawn/tests/unittests/WindowsUtilsTests.cpp
@@ -0,0 +1,51 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/WindowsUtils.h"
+
+TEST(WindowsUtilsTests, WCharToUTF8) {
+    // Test the empty string
+    ASSERT_EQ("", WCharToUTF8(L""));
+
+    // Test ASCII characters
+    ASSERT_EQ("abc", WCharToUTF8(L"abc"));
+
+    // Test ASCII characters
+    ASSERT_EQ("abc", WCharToUTF8(L"abc"));
+
+    // Test two-byte utf8 character
+    ASSERT_EQ("\xd1\x90", WCharToUTF8(L"\x450"));
+
+    // Test three-byte utf8 codepoint
+    ASSERT_EQ("\xe1\x81\x90", WCharToUTF8(L"\x1050"));
+}
+
+TEST(WindowsUtilsTests, UTF8ToWStr) {
+    // Test the empty string
+    ASSERT_EQ(L"", UTF8ToWStr(""));
+
+    // Test ASCII characters
+    ASSERT_EQ(L"abc", UTF8ToWStr("abc"));
+
+    // Test ASCII characters
+    ASSERT_EQ(L"abc", UTF8ToWStr("abc"));
+
+    // Test two-byte utf8 character
+    ASSERT_EQ(L"\x450", UTF8ToWStr("\xd1\x90"));
+
+    // Test three-byte utf8 codepoint
+    ASSERT_EQ(L"\x1050", UTF8ToWStr("\xe1\x81\x90"));
+}
diff --git a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
new file mode 100644
index 0000000..75f088b
--- /dev/null
+++ b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
@@ -0,0 +1,529 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/webgpu_cpp_print.h"
+
+using namespace dawn::native::d3d12;
+
+namespace {
+
+    struct TextureSpec {
+        uint32_t x;
+        uint32_t y;
+        uint32_t z;
+        uint32_t width;
+        uint32_t height;
+        uint32_t depthOrArrayLayers;
+        uint32_t texelBlockSizeInBytes;
+        uint32_t blockWidth = 1;
+        uint32_t blockHeight = 1;
+    };
+
+    struct BufferSpec {
+        uint64_t offset;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+    };
+
+    // Check that each copy region fits inside the buffer footprint
+    void ValidateFootprints(const TextureSpec& textureSpec,
+                            const BufferSpec& bufferSpec,
+                            const TextureCopySubresource& copySplit,
+                            wgpu::TextureDimension dimension) {
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            const auto& copy = copySplit.copies[i];
+            ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
+            ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
+            ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
+                      copy.bufferSize.depthOrArrayLayers);
+
+            // If there are multiple layers, 2D texture splitter actually splits each layer
+            // independently. See the details in Compute2DTextureCopySplits(). As a result,
+            // if we simply expand a copy region generated by 2D texture splitter to all
+            // layers, the copy region might be OOB. But that is not the approach that the current
+            // 2D texture splitter is doing, although Compute2DTextureCopySubresource forwards
+            // "copySize.depthOrArrayLayers" to the copy region it generated. So skip the test
+            // below for 2D textures with multiple layers.
+            if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) {
+                uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
+                uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
+                uint64_t minimumRequiredBufferSize =
+                    bufferSpec.offset +
+                    utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage,
+                                               widthInBlocks, heightInBlocks,
+                                               textureSpec.depthOrArrayLayers,
+                                               textureSpec.texelBlockSizeInBytes);
+
+                // The last pixel (buffer footprint) of each copy region depends on its bufferOffset
+                // and copySize. It is not the last pixel where the bufferSize ends.
+                ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
+                ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
+                uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
+                ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
+                uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
+
+                ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
+                ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
+                uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
+                ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
+                uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
+
+                uint64_t bufferSizeForFootprint =
+                    copy.alignedOffset +
+                    utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
+                                               footprintWidthInBlocks, footprintHeightInBlocks,
+                                               copy.bufferSize.depthOrArrayLayers,
+                                               textureSpec.texelBlockSizeInBytes);
+
+                // The buffer footprint of each copy region should not exceed the minimum required
+                // buffer size. Otherwise, pixels accessed by copy may be OOB.
+                ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
+            }
+        }
+    }
+
+    // Check that the offset is aligned
+    void ValidateOffset(const TextureCopySubresource& copySplit) {
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            ASSERT_TRUE(
+                Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
+                copySplit.copies[i].alignedOffset);
+        }
+    }
+
+    bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
+        return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
+    }
+
+    // Check that no pair of copy regions intersect each other
+    void ValidateDisjoint(const TextureCopySubresource& copySplit) {
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            const auto& a = copySplit.copies[i];
+            for (uint32_t j = i + 1; j < copySplit.count; ++j) {
+                const auto& b = copySplit.copies[j];
+                // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
+                // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
+                // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
+                // included.
+                bool overlapX = InclusiveRangesOverlap(
+                    a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1, b.textureOffset.x,
+                    b.textureOffset.x + b.copySize.width - 1);
+                bool overlapY = InclusiveRangesOverlap(
+                    a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y,
+                    b.textureOffset.y + b.copySize.height - 1);
+                bool overlapZ = InclusiveRangesOverlap(
+                    a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
+                    b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
+                ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
+            }
+        }
+    }
+
+    // Check that the union of the copy regions exactly covers the texture region
+    void ValidateTextureBounds(const TextureSpec& textureSpec,
+                               const TextureCopySubresource& copySplit) {
+        ASSERT_TRUE(copySplit.count > 0);
+
+        uint32_t minX = copySplit.copies[0].textureOffset.x;
+        uint32_t minY = copySplit.copies[0].textureOffset.y;
+        uint32_t minZ = copySplit.copies[0].textureOffset.z;
+        uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
+        uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
+        uint32_t maxZ =
+            copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers;
+
+        for (uint32_t i = 1; i < copySplit.count; ++i) {
+            const auto& copy = copySplit.copies[i];
+            minX = std::min(minX, copy.textureOffset.x);
+            minY = std::min(minY, copy.textureOffset.y);
+            minZ = std::min(minZ, copy.textureOffset.z);
+            maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
+            maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
+            maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
+        }
+
+        ASSERT_EQ(minX, textureSpec.x);
+        ASSERT_EQ(minY, textureSpec.y);
+        ASSERT_EQ(minZ, textureSpec.z);
+        ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
+        ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
+        ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
+    }
+
+    // Validate that the number of pixels copied is exactly equal to the number of pixels in the
+    // texture region
+    void ValidatePixelCount(const TextureSpec& textureSpec,
+                            const TextureCopySubresource& copySplit) {
+        uint32_t count = 0;
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            const auto& copy = copySplit.copies[i];
+            uint32_t copiedPixels =
+                copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
+            ASSERT_GT(copiedPixels, 0u);
+            count += copiedPixels;
+        }
+        ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
+    }
+
+    // Check that every buffer offset is at the correct pixel location
+    void ValidateBufferOffset(const TextureSpec& textureSpec,
+                              const BufferSpec& bufferSpec,
+                              const TextureCopySubresource& copySplit,
+                              wgpu::TextureDimension dimension) {
+        ASSERT_TRUE(copySplit.count > 0);
+
+        uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            const auto& copy = copySplit.copies[i];
+
+            uint32_t bytesPerRowInTexels =
+                bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+            uint32_t slicePitchInTexels =
+                bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
+            uint32_t absoluteTexelOffset =
+                copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
+                copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
+                copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
+
+            // There is one empty row at most in a 2D copy region. However, it is not true for
+            // a 3D texture copy region when we are copying the last row of each slice. We may
+            // need to offset a lot rows and copy.bufferOffset.y may be big.
+            if (dimension == wgpu::TextureDimension::e2D) {
+                ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
+            }
+            ASSERT_EQ(copy.bufferOffset.z, 0u);
+
+            ASSERT_GE(absoluteTexelOffset,
+                      bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
+            uint32_t relativeTexelOffset =
+                absoluteTexelOffset -
+                bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+
+            uint32_t z = relativeTexelOffset / slicePitchInTexels;
+            uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
+            uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
+
+            ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
+            ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
+            ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
+        }
+    }
+
+    void ValidateCopySplit(const TextureSpec& textureSpec,
+                           const BufferSpec& bufferSpec,
+                           const TextureCopySubresource& copySplit,
+                           wgpu::TextureDimension dimension) {
+        ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
+        ValidateOffset(copySplit);
+        ValidateDisjoint(copySplit);
+        ValidateTextureBounds(textureSpec, copySplit);
+        ValidatePixelCount(textureSpec, copySplit);
+        ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
+    }
+
+    std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
+        os << "TextureSpec("
+           << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
+           << textureSpec.width << ", " << textureSpec.height << ", "
+           << textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes << ")";
+        return os;
+    }
+
+    std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
+        os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
+           << bufferSpec.rowsPerImage << ")";
+        return os;
+    }
+
+    std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
+        os << "CopySplit" << std::endl;
+        for (uint32_t i = 0; i < copySplit.count; ++i) {
+            const auto& copy = copySplit.copies[i];
+            os << "  " << i << ": Texture at (" << copy.textureOffset.x << ", "
+               << copy.textureOffset.y << ", " << copy.textureOffset.z << "), size ("
+               << copy.copySize.width << ", " << copy.copySize.height << ", "
+               << copy.copySize.depthOrArrayLayers << ")" << std::endl;
+            os << "  " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y
+               << ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", "
+               << copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")"
+               << std::endl;
+        }
+        return os;
+    }
+
+    // Define base texture sizes and offsets to test with: some aligned, some unaligned
+    constexpr TextureSpec kBaseTextureSpecs[] = {
+        {0, 0, 0, 1, 1, 1, 4},
+        {0, 0, 0, 64, 1, 1, 4},
+        {0, 0, 0, 128, 1, 1, 4},
+        {0, 0, 0, 192, 1, 1, 4},
+        {31, 16, 0, 1, 1, 1, 4},
+        {64, 16, 0, 1, 1, 1, 4},
+        {64, 16, 8, 1, 1, 1, 4},
+
+        {0, 0, 0, 64, 2, 1, 4},
+        {0, 0, 0, 64, 1, 2, 4},
+        {0, 0, 0, 64, 2, 2, 4},
+        {0, 0, 0, 128, 2, 1, 4},
+        {0, 0, 0, 128, 1, 2, 4},
+        {0, 0, 0, 128, 2, 2, 4},
+        {0, 0, 0, 192, 2, 1, 4},
+        {0, 0, 0, 192, 1, 2, 4},
+        {0, 0, 0, 192, 2, 2, 4},
+
+        {0, 0, 0, 1024, 1024, 1, 4},
+        {256, 512, 0, 1024, 1024, 1, 4},
+        {64, 48, 0, 1024, 1024, 1, 4},
+        {64, 48, 16, 1024, 1024, 1024, 4},
+
+        {0, 0, 0, 257, 31, 1, 4},
+        {0, 0, 0, 17, 93, 1, 4},
+        {59, 13, 0, 257, 31, 1, 4},
+        {17, 73, 0, 17, 93, 1, 4},
+        {17, 73, 59, 17, 93, 99, 4},
+
+        {0, 0, 0, 4, 4, 1, 8, 4, 4},
+        {64, 16, 0, 4, 4, 1, 8, 4, 4},
+        {64, 16, 8, 4, 4, 1, 8, 4, 4},
+        {0, 0, 0, 4, 4, 1, 16, 4, 4},
+        {64, 16, 0, 4, 4, 1, 16, 4, 4},
+        {64, 16, 8, 4, 4, 1, 16, 4, 4},
+
+        {0, 0, 0, 1024, 1024, 1, 8, 4, 4},
+        {256, 512, 0, 1024, 1024, 1, 8, 4, 4},
+        {64, 48, 0, 1024, 1024, 1, 8, 4, 4},
+        {64, 48, 16, 1024, 1024, 1, 8, 4, 4},
+        {0, 0, 0, 1024, 1024, 1, 16, 4, 4},
+        {256, 512, 0, 1024, 1024, 1, 16, 4, 4},
+        {64, 48, 0, 1024, 1024, 1, 4, 16, 4},
+        {64, 48, 16, 1024, 1024, 1, 16, 4, 4},
+    };
+
+    // Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow is
+    // the minimum required
+    std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
+        uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width,
+                                     kTextureBytesPerRowAlignment);
+
+        auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
+            return value == 0 ? 0 : ((value - 1) / size + 1) * size;
+        };
+
+        return {
+            BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height * 2},
+
+            BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height * 2},
+
+            BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height},
+            BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                       textureSpec.height * 2},
+        };
+    }
+
+    // Define a list of values to set properties in the spec structs
+    constexpr uint32_t kCheckValues[] = {1,  2,  3,  4,   5,   6,   7,    8,     // small values
+                                         16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
+                                         15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
+                                         17, 33, 65, 129, 257, 513, 1025, 2049};
+
+}  // namespace
+
+class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
+  protected:
+    void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
+        ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
+               textureSpec.height % textureSpec.blockHeight == 0);
+
+        wgpu::TextureDimension dimension = GetParam();
+        TextureCopySubresource copySplit;
+        switch (dimension) {
+            case wgpu::TextureDimension::e2D: {
+                copySplit = Compute2DTextureCopySubresource(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow);
+                break;
+            }
+            case wgpu::TextureDimension::e3D: {
+                copySplit = Compute3DTextureCopySplits(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
+                break;
+            }
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
+
+        if (HasFatalFailure()) {
+            std::ostringstream message;
+            message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
+                    << std::endl
+                    << dimension << " " << copySplit << std::endl;
+            FAIL() << message.str();
+        }
+    }
+};
+
+TEST_P(CopySplitTest, General) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            DoTest(textureSpec, bufferSpec);
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureWidth) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockWidth != 0) {
+                continue;
+            }
+            textureSpec.width = val;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockHeight != 0) {
+                continue;
+            }
+            textureSpec.height = val;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureX) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.x = val;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureY) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.y = val;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TexelSize) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
+            textureSpec.texelBlockSizeInBytes = texelSize;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, BufferOffset) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            for (uint32_t val : kCheckValues) {
+                bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, RowPitch) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseRowPitch = bufferSpec.bytesPerRow;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.bytesPerRow = baseRowPitch + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, ImageHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseImageHeight = bufferSpec.rowsPerImage;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.rowsPerImage = baseImageHeight + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+                         CopySplitTest,
+                         testing::Values(wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D));
diff --git a/src/dawn/tests/unittests/native/CacheKeyTests.cpp b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
new file mode 100644
index 0000000..45fd360
--- /dev/null
+++ b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
@@ -0,0 +1,184 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <cstring>
+#include <iomanip>
+#include <string>
+
+#include "dawn/native/CacheKey.h"
+
+namespace dawn::native {
+
+    // Testing classes with mock serializing implemented for testing.
+    class A {
+      public:
+        MOCK_METHOD(void, SerializeMock, (CacheKey*, const A&), (const));
+    };
+    template <>
+    void CacheKeySerializer<A>::Serialize(CacheKey* key, const A& t) {
+        t.SerializeMock(key, t);
+    }
+
+    // Custom printer for CacheKey for clearer debug testing messages.
+    void PrintTo(const CacheKey& key, std::ostream* stream) {
+        *stream << std::hex;
+        for (const int b : key) {
+            *stream << std::setfill('0') << std::setw(2) << b << " ";
+        }
+        *stream << std::dec;
+    }
+
+    namespace {
+
+        using ::testing::InSequence;
+        using ::testing::NotNull;
+        using ::testing::PrintToString;
+        using ::testing::Ref;
+
+        // Matcher to compare CacheKeys for easier testing.
+        MATCHER_P(CacheKeyEq, key, PrintToString(key)) {
+            return memcmp(arg.data(), key.data(), arg.size()) == 0;
+        }
+
+        TEST(CacheKeyTests, RecordSingleMember) {
+            CacheKey key;
+
+            A a;
+            EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+            EXPECT_THAT(key.Record(a), CacheKeyEq(CacheKey()));
+        }
+
+        TEST(CacheKeyTests, RecordManyMembers) {
+            constexpr size_t kNumMembers = 100;
+
+            CacheKey key;
+            for (size_t i = 0; i < kNumMembers; ++i) {
+                A a;
+                EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                key.Record(a);
+            }
+            EXPECT_THAT(key, CacheKeyEq(CacheKey()));
+        }
+
+        TEST(CacheKeyTests, RecordIterable) {
+            constexpr size_t kIterableSize = 100;
+
+            // Expecting the size of the container.
+            CacheKey expected;
+            expected.Record(kIterableSize);
+
+            std::vector<A> iterable(kIterableSize);
+            {
+                InSequence seq;
+                for (const auto& a : iterable) {
+                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                }
+                for (const auto& a : iterable) {
+                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                }
+            }
+
+            EXPECT_THAT(CacheKey().RecordIterable(iterable), CacheKeyEq(expected));
+            EXPECT_THAT(CacheKey().RecordIterable(iterable.data(), kIterableSize),
+                        CacheKeyEq(expected));
+        }
+
+        TEST(CacheKeyTests, RecordNested) {
+            CacheKey expected;
+            CacheKey actual;
+            {
+                // Recording a single member.
+                A a;
+                EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                actual.Record(CacheKey().Record(a));
+            }
+            {
+                // Recording multiple members.
+                constexpr size_t kNumMembers = 2;
+                CacheKey sub;
+                for (size_t i = 0; i < kNumMembers; ++i) {
+                    A a;
+                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                    sub.Record(a);
+                }
+                actual.Record(sub);
+            }
+            {
+                // Record an iterable.
+                constexpr size_t kIterableSize = 2;
+                expected.Record(kIterableSize);
+                std::vector<A> iterable(kIterableSize);
+                {
+                    InSequence seq;
+                    for (const auto& a : iterable) {
+                        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+                    }
+                }
+                actual.Record(CacheKey().RecordIterable(iterable));
+            }
+            EXPECT_THAT(actual, CacheKeyEq(expected));
+        }
+
+        TEST(CacheKeySerializerTests, IntegralTypes) {
+            // Only testing explicitly sized types for simplicity, and using 0s for larger types to
+            // avoid dealing with endianess.
+            EXPECT_THAT(CacheKey().Record('c'), CacheKeyEq(CacheKey({'c'})));
+            EXPECT_THAT(CacheKey().Record(uint8_t(255)), CacheKeyEq(CacheKey({255})));
+            EXPECT_THAT(CacheKey().Record(uint16_t(0)), CacheKeyEq(CacheKey({0, 0})));
+            EXPECT_THAT(CacheKey().Record(uint32_t(0)), CacheKeyEq(CacheKey({0, 0, 0, 0})));
+        }
+
+        TEST(CacheKeySerializerTests, FloatingTypes) {
+            // Using 0s to avoid dealing with implementation specific float details.
+            EXPECT_THAT(CacheKey().Record(float(0)), CacheKeyEq(CacheKey(sizeof(float), 0)));
+            EXPECT_THAT(CacheKey().Record(double(0)), CacheKeyEq(CacheKey(sizeof(double), 0)));
+        }
+
+        TEST(CacheKeySerializerTests, LiteralStrings) {
+            // Using a std::string here to help with creating the expected result.
+            std::string str = "string";
+
+            CacheKey expected;
+            expected.Record(size_t(7));
+            expected.insert(expected.end(), str.begin(), str.end());
+            expected.push_back('\0');
+
+            EXPECT_THAT(CacheKey().Record("string"), CacheKeyEq(expected));
+        }
+
+        TEST(CacheKeySerializerTests, StdStrings) {
+            std::string str = "string";
+
+            CacheKey expected;
+            expected.Record((size_t)6);
+            expected.insert(expected.end(), str.begin(), str.end());
+
+            EXPECT_THAT(CacheKey().Record(str), CacheKeyEq(expected));
+        }
+
+        TEST(CacheKeySerializerTests, CacheKeys) {
+            CacheKey data = {'d', 'a', 't', 'a'};
+
+            CacheKey expected;
+            expected.insert(expected.end(), data.begin(), data.end());
+
+            EXPECT_THAT(CacheKey().Record(data), CacheKeyEq(expected));
+        }
+
+    }  // namespace
+
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
new file mode 100644
index 0000000..eb94dcc
--- /dev/null
+++ b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
@@ -0,0 +1,310 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnNativeTest.h"
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class CommandBufferEncodingTests : public DawnNativeTest {
+  protected:
+    void ExpectCommands(dawn::native::CommandIterator* commands,
+                        std::vector<std::pair<dawn::native::Command,
+                                              std::function<void(dawn::native::CommandIterator*)>>>
+                            expectedCommands) {
+        dawn::native::Command commandId;
+        for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
+            ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
+            ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
+                << "at command " << commandIndex;
+            expectedCommands[commandIndex].second(commands);
+        }
+    }
+};
+
+// Indirect dispatch validation changes the bind groups in the middle
+// of a pass. Test that bindings are restored after the validation runs.
+TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
+    using namespace dawn::native;
+
+    wgpu::BindGroupLayout staticLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                           }});
+
+    wgpu::BindGroupLayout dynamicLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                               true,
+                                           }});
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1, 1, 1)
+        fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
+    csDesc.layout = pl0;
+    wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
+
+    wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
+    csDesc.layout = pl1;
+    wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
+
+    // Create buffers to use for both the indirect buffer and the bind groups.
+    wgpu::Buffer indirectBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
+
+    wgpu::BufferDescriptor uniformBufferDesc = {};
+    uniformBufferDesc.size = 512;
+    uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
+
+    wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
+
+    wgpu::BindGroup dynamicBG =
+        utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
+
+    uint32_t dynamicOffset = 256;
+    std::vector<uint32_t> emptyDynamicOffsets = {};
+    std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
+
+    // Begin encoding commands.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Perform a dispatch indirect which will be preceded by a validation dispatch.
+    pass.SetPipeline(pipeline0);
+    pass.SetBindGroup(0, staticBG);
+    pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+
+    pass.DispatchIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Dispatch again to check that the restored state can be used.
+    // Also pass an indirect offset which should get replaced with the offset
+    // into the scratch indirect buffer (0).
+    pass.DispatchIndirect(indirectBuffer, 4);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Change the pipeline
+    pass.SetPipeline(pipeline1);
+    pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
+    pass.SetBindGroup(1, staticBG);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+
+    pass.DispatchIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
+
+    pass.End();
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
+        return [pipeline](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+            EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
+        };
+    };
+
+    auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
+                                 std::vector<uint32_t> offsets = {}) {
+        return [index, bg, offsets](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+            uint32_t* dynamicOffsets = nullptr;
+            if (cmd->dynamicOffsetCount > 0) {
+                dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+            }
+
+            ASSERT_EQ(cmd->index, BindGroupIndex(index));
+            ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
+            ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
+            for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
+                ASSERT_EQ(dynamicOffsets[i], offsets[i]);
+            }
+        };
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same buffer every time.
+    WGPUBuffer indirectScratchBuffer = nullptr;
+    auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
+        if (indirectScratchBuffer == nullptr) {
+            indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
+        }
+        ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
+        ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same pipeline every time.
+    WGPUComputePipeline validationPipeline = nullptr;
+    auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+        WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
+        if (validationPipeline != nullptr) {
+            EXPECT_EQ(pipeline, validationPipeline);
+        } else {
+            EXPECT_NE(pipeline, nullptr);
+            validationPipeline = pipeline;
+        }
+    };
+
+    auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+        ASSERT_EQ(cmd->index, BindGroupIndex(0));
+        ASSERT_NE(cmd->group.Get(), nullptr);
+        ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
+    };
+
+    auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchCmd>();
+        ASSERT_EQ(cmd->x, 1u);
+        ASSERT_EQ(cmd->y, 1u);
+        ASSERT_EQ(cmd->z, 1u);
+    };
+
+    ExpectCommands(
+        FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
+        {
+            {Command::BeginComputePass,
+             [&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }},
+            // Expect the state to be set.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the state to be set (new pipeline).
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            {Command::EndComputePass,
+             [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
+        });
+}
+
+// Test that after restoring state, it is fully applied to the state tracker
+// and does not leak state changes that occured between a snapshot and the
+// state restoration.
+TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
+    using namespace dawn::native;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Snapshot the state.
+    CommandBufferStateTracker snapshot = *stateTracker;
+    // Expect no pipeline in the snapshot
+    EXPECT_FALSE(snapshot.HasPipeline());
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1, 1, 1)
+        fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+
+    // Set the pipeline.
+    pass.SetPipeline(pipeline);
+
+    // Expect the pipeline to be set.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
+
+    // Restore the state.
+    FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
+
+    // Expect no pipeline
+    EXPECT_FALSE(stateTracker->HasPipeline());
+}
diff --git a/src/dawn/tests/unittests/native/CreatePipelineAsyncTaskTests.cpp b/src/dawn/tests/unittests/native/CreatePipelineAsyncTaskTests.cpp
new file mode 100644
index 0000000..3583b79
--- /dev/null
+++ b/src/dawn/tests/unittests/native/CreatePipelineAsyncTaskTests.cpp
@@ -0,0 +1,73 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnNativeTest.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "mocks/ComputePipelineMock.h"
+#include "mocks/RenderPipelineMock.h"
+
+class CreatePipelineAsyncTaskTests : public DawnNativeTest {};
+
+// A regression test for a null pointer issue in CreateRenderPipelineAsyncTask::Run().
+// See crbug.com/dawn/1310 for more details.
+TEST_F(CreatePipelineAsyncTaskTests, InitializationErrorInCreateRenderPipelineAsync) {
+    dawn::native::DeviceBase* deviceBase =
+        reinterpret_cast<dawn::native::DeviceBase*>(device.Get());
+    Ref<dawn::native::RenderPipelineMock> renderPipelineMock =
+        AcquireRef(new dawn::native::RenderPipelineMock(deviceBase));
+
+    ON_CALL(*renderPipelineMock.Get(), Initialize)
+        .WillByDefault(testing::Return(testing::ByMove(
+            DAWN_MAKE_ERROR(dawn::native::InternalErrorType::Validation, "Initialization Error"))));
+
+    dawn::native::CreateRenderPipelineAsyncTask asyncTask(
+        renderPipelineMock,
+        [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Error, status);
+        },
+        nullptr);
+
+    asyncTask.Run();
+    device.Tick();
+
+    EXPECT_CALL(*renderPipelineMock.Get(), DestroyImpl).Times(1);
+}
+
+// A regression test for a null pointer issue in CreateComputePipelineAsyncTask::Run().
+// See crbug.com/dawn/1310 for more details.
+TEST_F(CreatePipelineAsyncTaskTests, InitializationErrorInCreateComputePipelineAsync) {
+    dawn::native::DeviceBase* deviceBase =
+        reinterpret_cast<dawn::native::DeviceBase*>(device.Get());
+    Ref<dawn::native::ComputePipelineMock> computePipelineMock =
+        AcquireRef(new dawn::native::ComputePipelineMock(deviceBase));
+
+    ON_CALL(*computePipelineMock.Get(), Initialize)
+        .WillByDefault(testing::Return(testing::ByMove(
+            DAWN_MAKE_ERROR(dawn::native::InternalErrorType::Validation, "Initialization Error"))));
+
+    dawn::native::CreateComputePipelineAsyncTask asyncTask(
+        computePipelineMock,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline returnPipeline,
+           const char* message, void* userdata) {
+            EXPECT_EQ(WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Error, status);
+        },
+        nullptr);
+
+    asyncTask.Run();
+    device.Tick();
+
+    EXPECT_CALL(*computePipelineMock.Get(), DestroyImpl).Times(1);
+}
diff --git a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
new file mode 100644
index 0000000..01c09ad
--- /dev/null
+++ b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
@@ -0,0 +1,766 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "dawn/native/Toggles.h"
+#include "dawn/tests/DawnNativeTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "mocks/BindGroupLayoutMock.h"
+#include "mocks/BindGroupMock.h"
+#include "mocks/BufferMock.h"
+#include "mocks/CommandBufferMock.h"
+#include "mocks/ComputePipelineMock.h"
+#include "mocks/DeviceMock.h"
+#include "mocks/ExternalTextureMock.h"
+#include "mocks/PipelineLayoutMock.h"
+#include "mocks/QuerySetMock.h"
+#include "mocks/RenderPipelineMock.h"
+#include "mocks/SamplerMock.h"
+#include "mocks/ShaderModuleMock.h"
+#include "mocks/SwapChainMock.h"
+#include "mocks/TextureMock.h"
+
+namespace dawn::native { namespace {
+
+    using ::testing::_;
+    using ::testing::ByMove;
+    using ::testing::InSequence;
+    using ::testing::Return;
+    using ::testing::Test;
+
+    class DestroyObjectTests : public Test {
+      public:
+        DestroyObjectTests() : Test() {
+            // Skipping validation on descriptors as coverage for validation is already present.
+            mDevice.SetToggle(Toggle::SkipValidation, true);
+        }
+
+        Ref<TextureMock> GetTexture() {
+            if (mTexture != nullptr) {
+                return mTexture;
+            }
+            mTexture =
+                AcquireRef(new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal));
+            EXPECT_CALL(*mTexture.Get(), DestroyImpl).Times(1);
+            return mTexture;
+        }
+
+        Ref<PipelineLayoutMock> GetPipelineLayout() {
+            if (mPipelineLayout != nullptr) {
+                return mPipelineLayout;
+            }
+            mPipelineLayout = AcquireRef(new PipelineLayoutMock(&mDevice));
+            EXPECT_CALL(*mPipelineLayout.Get(), DestroyImpl).Times(1);
+            return mPipelineLayout;
+        }
+
+        Ref<ShaderModuleMock> GetVertexShaderModule() {
+            if (mVsModule != nullptr) {
+                return mVsModule;
+            }
+            DAWN_TRY_ASSIGN_WITH_CLEANUP(
+                mVsModule, ShaderModuleMock::Create(&mDevice, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })"),
+                { ASSERT(false); }, mVsModule);
+            EXPECT_CALL(*mVsModule.Get(), DestroyImpl).Times(1);
+            return mVsModule;
+        }
+
+        Ref<ShaderModuleMock> GetComputeShaderModule() {
+            if (mCsModule != nullptr) {
+                return mCsModule;
+            }
+            DAWN_TRY_ASSIGN_WITH_CLEANUP(
+                mCsModule, ShaderModuleMock::Create(&mDevice, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })"),
+                { ASSERT(false); }, mCsModule);
+            EXPECT_CALL(*mCsModule.Get(), DestroyImpl).Times(1);
+            return mCsModule;
+        }
+
+      protected:
+        DeviceMock mDevice;
+
+        // The following lazy-initialized objects are used to facilitate creation of dependent
+        // objects under test.
+        Ref<TextureMock> mTexture;
+        Ref<PipelineLayoutMock> mPipelineLayout;
+        Ref<ShaderModuleMock> mVsModule;
+        Ref<ShaderModuleMock> mCsModule;
+    };
+
+    TEST_F(DestroyObjectTests, BindGroupExplicit) {
+        BindGroupMock bindGroupMock(&mDevice);
+        EXPECT_CALL(bindGroupMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(bindGroupMock.IsAlive());
+        bindGroupMock.Destroy();
+        EXPECT_FALSE(bindGroupMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, BindGroupImplicit) {
+        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+        EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+        {
+            BindGroupDescriptor desc = {};
+            Ref<BindGroupBase> bindGroup;
+            EXPECT_CALL(mDevice, CreateBindGroupImpl)
+                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+
+            EXPECT_TRUE(bindGroup->IsAlive());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, BindGroupLayoutExplicit) {
+        BindGroupLayoutMock bindGroupLayoutMock(&mDevice);
+        EXPECT_CALL(bindGroupLayoutMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(bindGroupLayoutMock.IsAlive());
+        bindGroupLayoutMock.Destroy();
+        EXPECT_FALSE(bindGroupLayoutMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, BindGroupLayoutImplicit) {
+        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+        EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+        {
+            BindGroupLayoutDescriptor desc = {};
+            Ref<BindGroupLayoutBase> bindGroupLayout;
+            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+
+            EXPECT_TRUE(bindGroupLayout->IsAlive());
+            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, BufferExplicit) {
+        {
+            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
+
+            EXPECT_TRUE(bufferMock.IsAlive());
+            bufferMock.Destroy();
+            EXPECT_FALSE(bufferMock.IsAlive());
+        }
+        {
+            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Mapped);
+            {
+                InSequence seq;
+                EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
+                EXPECT_CALL(bufferMock, UnmapImpl).Times(1);
+            }
+
+            EXPECT_TRUE(bufferMock.IsAlive());
+            bufferMock.Destroy();
+            EXPECT_FALSE(bufferMock.IsAlive());
+        }
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, BufferImplicit) {
+        {
+            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
+            {
+                BufferDescriptor desc = {};
+                Ref<BufferBase> buffer;
+                EXPECT_CALL(mDevice, CreateBufferImpl)
+                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+
+                EXPECT_TRUE(buffer->IsAlive());
+            }
+        }
+        {
+            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Mapped);
+            {
+                InSequence seq;
+                EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
+                EXPECT_CALL(*bufferMock, UnmapImpl).Times(1);
+            }
+            {
+                BufferDescriptor desc = {};
+                Ref<BufferBase> buffer;
+                EXPECT_CALL(mDevice, CreateBufferImpl)
+                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+
+                EXPECT_TRUE(buffer->IsAlive());
+            }
+        }
+    }
+
+    TEST_F(DestroyObjectTests, CommandBufferExplicit) {
+        CommandBufferMock commandBufferMock(&mDevice);
+        EXPECT_CALL(commandBufferMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(commandBufferMock.IsAlive());
+        commandBufferMock.Destroy();
+        EXPECT_FALSE(commandBufferMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, CommandBufferImplicit) {
+        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+        EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
+        {
+            CommandBufferDescriptor desc = {};
+            Ref<CommandBufferBase> commandBuffer;
+            EXPECT_CALL(mDevice, CreateCommandBuffer)
+                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+
+            EXPECT_TRUE(commandBuffer->IsAlive());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, ComputePipelineExplicit) {
+        ComputePipelineMock computePipelineMock(&mDevice);
+        EXPECT_CALL(computePipelineMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(computePipelineMock.IsAlive());
+        computePipelineMock.Destroy();
+        EXPECT_FALSE(computePipelineMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, ComputePipelineImplicit) {
+        // ComputePipelines usually set their hash values at construction, but the mock does not, so
+        // we set it here.
+        constexpr size_t hash = 0x12345;
+        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+        computePipelineMock->SetContentHash(hash);
+        ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+        // Compute pipelines are initialized during their creation via the device.
+        EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
+        EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+
+        {
+            ComputePipelineDescriptor desc = {};
+            desc.layout = GetPipelineLayout().Get();
+            desc.compute.module = GetComputeShaderModule().Get();
+
+            Ref<ComputePipelineBase> computePipeline;
+            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+
+            EXPECT_TRUE(computePipeline->IsAlive());
+            EXPECT_TRUE(computePipeline->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, ExternalTextureExplicit) {
+        ExternalTextureMock externalTextureMock(&mDevice);
+        EXPECT_CALL(externalTextureMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(externalTextureMock.IsAlive());
+        externalTextureMock.Destroy();
+        EXPECT_FALSE(externalTextureMock.IsAlive());
+    }
+
+    TEST_F(DestroyObjectTests, ExternalTextureImplicit) {
+        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+        EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+        {
+            ExternalTextureDescriptor desc = {};
+            Ref<ExternalTextureBase> externalTexture;
+            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+
+            EXPECT_TRUE(externalTexture->IsAlive());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, PipelineLayoutExplicit) {
+        PipelineLayoutMock pipelineLayoutMock(&mDevice);
+        EXPECT_CALL(pipelineLayoutMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(pipelineLayoutMock.IsAlive());
+        pipelineLayoutMock.Destroy();
+        EXPECT_FALSE(pipelineLayoutMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, PipelineLayoutImplicit) {
+        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+        EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+        {
+            PipelineLayoutDescriptor desc = {};
+            Ref<PipelineLayoutBase> pipelineLayout;
+            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+
+            EXPECT_TRUE(pipelineLayout->IsAlive());
+            EXPECT_TRUE(pipelineLayout->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, QuerySetExplicit) {
+        QuerySetMock querySetMock(&mDevice);
+        EXPECT_CALL(querySetMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(querySetMock.IsAlive());
+        querySetMock.Destroy();
+        EXPECT_FALSE(querySetMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, QuerySetImplicit) {
+        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+        EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+        {
+            QuerySetDescriptor desc = {};
+            Ref<QuerySetBase> querySet;
+            EXPECT_CALL(mDevice, CreateQuerySetImpl)
+                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+
+            EXPECT_TRUE(querySet->IsAlive());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, RenderPipelineExplicit) {
+        RenderPipelineMock renderPipelineMock(&mDevice);
+        EXPECT_CALL(renderPipelineMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(renderPipelineMock.IsAlive());
+        renderPipelineMock.Destroy();
+        EXPECT_FALSE(renderPipelineMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, RenderPipelineImplicit) {
+        // RenderPipelines usually set their hash values at construction, but the mock does not, so
+        // we set it here.
+        constexpr size_t hash = 0x12345;
+        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+        renderPipelineMock->SetContentHash(hash);
+        ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+        // Render pipelines are initialized during their creation via the device.
+        EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
+        EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+
+        {
+            RenderPipelineDescriptor desc = {};
+            desc.layout = GetPipelineLayout().Get();
+            desc.vertex.module = GetVertexShaderModule().Get();
+
+            Ref<RenderPipelineBase> renderPipeline;
+            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+
+            EXPECT_TRUE(renderPipeline->IsAlive());
+            EXPECT_TRUE(renderPipeline->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, SamplerExplicit) {
+        SamplerMock samplerMock(&mDevice);
+        EXPECT_CALL(samplerMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(samplerMock.IsAlive());
+        samplerMock.Destroy();
+        EXPECT_FALSE(samplerMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, SamplerImplicit) {
+        SamplerMock* samplerMock = new SamplerMock(&mDevice);
+        EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+        {
+            SamplerDescriptor desc = {};
+            Ref<SamplerBase> sampler;
+            EXPECT_CALL(mDevice, CreateSamplerImpl)
+                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+
+            EXPECT_TRUE(sampler->IsAlive());
+            EXPECT_TRUE(sampler->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, ShaderModuleExplicit) {
+        ShaderModuleMock shaderModuleMock(&mDevice);
+        EXPECT_CALL(shaderModuleMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(shaderModuleMock.IsAlive());
+        shaderModuleMock.Destroy();
+        EXPECT_FALSE(shaderModuleMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, ShaderModuleImplicit) {
+        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+        EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+        {
+            ShaderModuleWGSLDescriptor wgslDesc;
+            wgslDesc.source = R"(
+                @stage(compute) @workgroup_size(1) fn main() {
+                }
+            )";
+            ShaderModuleDescriptor desc = {};
+            desc.nextInChain = &wgslDesc;
+            Ref<ShaderModuleBase> shaderModule;
+            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+
+            EXPECT_TRUE(shaderModule->IsAlive());
+            EXPECT_TRUE(shaderModule->IsCachedReference());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, SwapChainExplicit) {
+        SwapChainMock swapChainMock(&mDevice);
+        EXPECT_CALL(swapChainMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(swapChainMock.IsAlive());
+        swapChainMock.Destroy();
+        EXPECT_FALSE(swapChainMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, SwapChainImplicit) {
+        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+        EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+        {
+            SwapChainDescriptor desc = {};
+            Ref<SwapChainBase> swapChain;
+            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+
+            EXPECT_TRUE(swapChain->IsAlive());
+        }
+    }
+
+    TEST_F(DestroyObjectTests, TextureExplicit) {
+        {
+            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+            EXPECT_TRUE(textureMock.IsAlive());
+            textureMock.Destroy();
+            EXPECT_FALSE(textureMock.IsAlive());
+        }
+        {
+            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+            EXPECT_TRUE(textureMock.IsAlive());
+            textureMock.Destroy();
+            EXPECT_FALSE(textureMock.IsAlive());
+        }
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, TextureImplicit) {
+        {
+            TextureMock* textureMock =
+                new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+            {
+                TextureDescriptor desc = {};
+                Ref<TextureBase> texture;
+                EXPECT_CALL(mDevice, CreateTextureImpl)
+                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+                EXPECT_TRUE(texture->IsAlive());
+            }
+        }
+        {
+            TextureMock* textureMock =
+                new TextureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+            {
+                TextureDescriptor desc = {};
+                Ref<TextureBase> texture;
+                EXPECT_CALL(mDevice, CreateTextureImpl)
+                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+                EXPECT_TRUE(texture->IsAlive());
+            }
+        }
+    }
+
+    TEST_F(DestroyObjectTests, TextureViewExplicit) {
+        TextureViewMock textureViewMock(GetTexture().Get());
+        EXPECT_CALL(textureViewMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(textureViewMock.IsAlive());
+        textureViewMock.Destroy();
+        EXPECT_FALSE(textureViewMock.IsAlive());
+    }
+
+    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+    // will also complain if there is a memory leak.
+    TEST_F(DestroyObjectTests, TextureViewImplicit) {
+        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+        EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+        {
+            TextureViewDescriptor desc = {};
+            Ref<TextureViewBase> textureView;
+            EXPECT_CALL(mDevice, CreateTextureViewImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+            DAWN_ASSERT_AND_ASSIGN(textureView,
+                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
+
+            EXPECT_TRUE(textureView->IsAlive());
+        }
+    }
+
+    // Destroying the objects on the mDevice should result in all created objects being destroyed in
+    // order.
+    TEST_F(DestroyObjectTests, DestroyObjects) {
+        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+        SamplerMock* samplerMock = new SamplerMock(&mDevice);
+        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+        TextureMock* textureMock =
+            new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+        {
+            InSequence seq;
+            EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+            EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
+        }
+
+        Ref<BindGroupBase> bindGroup;
+        {
+            BindGroupDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateBindGroupImpl)
+                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+            EXPECT_TRUE(bindGroup->IsAlive());
+        }
+
+        Ref<BindGroupLayoutBase> bindGroupLayout;
+        {
+            BindGroupLayoutDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+            EXPECT_TRUE(bindGroupLayout->IsAlive());
+            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+        }
+
+        Ref<BufferBase> buffer;
+        {
+            BufferDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+            EXPECT_TRUE(buffer->IsAlive());
+        }
+
+        Ref<CommandBufferBase> commandBuffer;
+        {
+            CommandBufferDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateCommandBuffer)
+                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+            EXPECT_TRUE(commandBuffer->IsAlive());
+        }
+
+        Ref<ComputePipelineBase> computePipeline;
+        {
+            // Compute pipelines usually set their hash values at construction, but the mock does
+            // not, so we set it here.
+            constexpr size_t hash = 0x12345;
+            computePipelineMock->SetContentHash(hash);
+            ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+            // Compute pipelines are initialized during their creation via the device.
+            EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
+
+            ComputePipelineDescriptor desc = {};
+            desc.layout = GetPipelineLayout().Get();
+            desc.compute.module = GetComputeShaderModule().Get();
+            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+            EXPECT_TRUE(computePipeline->IsAlive());
+            EXPECT_TRUE(computePipeline->IsCachedReference());
+        }
+
+        Ref<ExternalTextureBase> externalTexture;
+        {
+            ExternalTextureDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+            EXPECT_TRUE(externalTexture->IsAlive());
+        }
+
+        Ref<PipelineLayoutBase> pipelineLayout;
+        {
+            PipelineLayoutDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+            EXPECT_TRUE(pipelineLayout->IsAlive());
+            EXPECT_TRUE(pipelineLayout->IsCachedReference());
+        }
+
+        Ref<QuerySetBase> querySet;
+        {
+            QuerySetDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateQuerySetImpl)
+                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+            EXPECT_TRUE(querySet->IsAlive());
+        }
+
+        Ref<RenderPipelineBase> renderPipeline;
+        {
+            // Render pipelines usually set their hash values at construction, but the mock does
+            // not, so we set it here.
+            constexpr size_t hash = 0x12345;
+            renderPipelineMock->SetContentHash(hash);
+            ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+            // Render pipelines are initialized during their creation via the device.
+            EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
+
+            RenderPipelineDescriptor desc = {};
+            desc.layout = GetPipelineLayout().Get();
+            desc.vertex.module = GetVertexShaderModule().Get();
+            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+            EXPECT_TRUE(renderPipeline->IsAlive());
+            EXPECT_TRUE(renderPipeline->IsCachedReference());
+        }
+
+        Ref<SamplerBase> sampler;
+        {
+            SamplerDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateSamplerImpl)
+                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+            EXPECT_TRUE(sampler->IsAlive());
+            EXPECT_TRUE(sampler->IsCachedReference());
+        }
+
+        Ref<ShaderModuleBase> shaderModule;
+        {
+            ShaderModuleWGSLDescriptor wgslDesc;
+            wgslDesc.source = R"(
+                @stage(compute) @workgroup_size(1) fn main() {
+                }
+            )";
+            ShaderModuleDescriptor desc = {};
+            desc.nextInChain = &wgslDesc;
+
+            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+            EXPECT_TRUE(shaderModule->IsAlive());
+            EXPECT_TRUE(shaderModule->IsCachedReference());
+        }
+
+        Ref<SwapChainBase> swapChain;
+        {
+            SwapChainDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+            EXPECT_TRUE(swapChain->IsAlive());
+        }
+
+        Ref<TextureBase> texture;
+        {
+            TextureDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+            EXPECT_TRUE(texture->IsAlive());
+        }
+
+        Ref<TextureViewBase> textureView;
+        {
+            TextureViewDescriptor desc = {};
+            EXPECT_CALL(mDevice, CreateTextureViewImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+            DAWN_ASSERT_AND_ASSIGN(textureView,
+                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
+            EXPECT_TRUE(textureView->IsAlive());
+        }
+
+        mDevice.DestroyObjects();
+        EXPECT_FALSE(bindGroup->IsAlive());
+        EXPECT_FALSE(bindGroupLayout->IsAlive());
+        EXPECT_FALSE(buffer->IsAlive());
+        EXPECT_FALSE(commandBuffer->IsAlive());
+        EXPECT_FALSE(computePipeline->IsAlive());
+        EXPECT_FALSE(externalTexture->IsAlive());
+        EXPECT_FALSE(pipelineLayout->IsAlive());
+        EXPECT_FALSE(querySet->IsAlive());
+        EXPECT_FALSE(renderPipeline->IsAlive());
+        EXPECT_FALSE(sampler->IsAlive());
+        EXPECT_FALSE(shaderModule->IsAlive());
+        EXPECT_FALSE(swapChain->IsAlive());
+        EXPECT_FALSE(texture->IsAlive());
+        EXPECT_FALSE(textureView->IsAlive());
+    }
+
+}}  // namespace dawn::native::
diff --git a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
new file mode 100644
index 0000000..20c0fac
--- /dev/null
+++ b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
@@ -0,0 +1,171 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/dawn_platform.h"
+#include "dawn/tests/MockCallback.h"
+#include "dawn/utils/SystemUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <gtest/gtest.h>
+
+namespace {
+
+    using namespace testing;
+
+    class DeviceCreationTest : public Test {
+      protected:
+        void SetUp() override {
+            dawnProcSetProcs(&dawn::native::GetProcs());
+
+            instance = std::make_unique<dawn::native::Instance>();
+            instance->DiscoverDefaultAdapters();
+            for (dawn::native::Adapter& nativeAdapter : instance->GetAdapters()) {
+                wgpu::AdapterProperties properties;
+                nativeAdapter.GetProperties(&properties);
+
+                if (properties.backendType == wgpu::BackendType::Null) {
+                    adapter = wgpu::Adapter(nativeAdapter.Get());
+                    break;
+                }
+            }
+            ASSERT_NE(adapter, nullptr);
+        }
+
+        void TearDown() override {
+            adapter = nullptr;
+            instance = nullptr;
+            dawnProcSetProcs(nullptr);
+        }
+
+        std::unique_ptr<dawn::native::Instance> instance;
+        wgpu::Adapter adapter;
+    };
+
+    // Test successful call to CreateDevice with no descriptor
+    TEST_F(DeviceCreationTest, CreateDeviceNoDescriptorSuccess) {
+        wgpu::Device device = adapter.CreateDevice();
+        EXPECT_NE(device, nullptr);
+    }
+
+    // Test successful call to CreateDevice with descriptor.
+    TEST_F(DeviceCreationTest, CreateDeviceSuccess) {
+        wgpu::DeviceDescriptor desc = {};
+        wgpu::Device device = adapter.CreateDevice(&desc);
+        EXPECT_NE(device, nullptr);
+    }
+
+    // Test successful call to CreateDevice with toggle descriptor.
+    TEST_F(DeviceCreationTest, CreateDeviceWithTogglesSuccess) {
+        wgpu::DeviceDescriptor desc = {};
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
+        desc.nextInChain = &togglesDesc;
+
+        const char* toggle = "skip_validation";
+        togglesDesc.forceEnabledToggles = &toggle;
+        togglesDesc.forceEnabledTogglesCount = 1;
+
+        wgpu::Device device = adapter.CreateDevice(&desc);
+        EXPECT_NE(device, nullptr);
+
+        auto toggles = dawn::native::GetTogglesUsed(device.Get());
+        EXPECT_THAT(toggles, testing::Contains(testing::StrEq(toggle)));
+    }
+
+    TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {
+        // Default device descriptor should have an empty cache isolation key.
+        {
+            wgpu::DeviceDescriptor desc = {};
+            wgpu::Device device = adapter.CreateDevice(&desc);
+            EXPECT_NE(device, nullptr);
+
+            EXPECT_THAT(dawn::native::FromAPI(device.Get())->GetCacheIsolationKey(),
+                        testing::StrEq(""));
+        }
+        // Device descriptor with empty cache descriptor should have an empty cache isolation key.
+        {
+            wgpu::DeviceDescriptor desc = {};
+            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+            desc.nextInChain = &cacheDesc;
+
+            wgpu::Device device = adapter.CreateDevice(&desc);
+            EXPECT_NE(device, nullptr);
+
+            EXPECT_THAT(dawn::native::FromAPI(device.Get())->GetCacheIsolationKey(),
+                        testing::StrEq(""));
+        }
+        // Specified cache isolation key should be retained.
+        {
+            wgpu::DeviceDescriptor desc = {};
+            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+            desc.nextInChain = &cacheDesc;
+
+            const char* isolationKey = "isolation key";
+            cacheDesc.isolationKey = isolationKey;
+
+            wgpu::Device device = adapter.CreateDevice(&desc);
+            EXPECT_NE(device, nullptr);
+
+            EXPECT_THAT(dawn::native::FromAPI(device.Get())->GetCacheIsolationKey(),
+                        testing::StrEq(isolationKey));
+        }
+    }
+
+    // Test successful call to RequestDevice with descriptor
+    TEST_F(DeviceCreationTest, RequestDeviceSuccess) {
+        WGPUDevice cDevice;
+        {
+            MockCallback<WGPURequestDeviceCallback> cb;
+            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+                .WillOnce(SaveArg<1>(&cDevice));
+
+            wgpu::DeviceDescriptor desc = {};
+            adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
+        }
+
+        wgpu::Device device = wgpu::Device::Acquire(cDevice);
+        EXPECT_NE(device, nullptr);
+    }
+
+    // Test successful call to RequestDevice with a null descriptor
+    TEST_F(DeviceCreationTest, RequestDeviceNullDescriptorSuccess) {
+        WGPUDevice cDevice;
+        {
+            MockCallback<WGPURequestDeviceCallback> cb;
+            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+                .WillOnce(SaveArg<1>(&cDevice));
+
+            adapter.RequestDevice(nullptr, cb.Callback(), cb.MakeUserdata(this));
+        }
+
+        wgpu::Device device = wgpu::Device::Acquire(cDevice);
+        EXPECT_NE(device, nullptr);
+    }
+
+    // Test failing call to RequestDevice with invalid feature
+    TEST_F(DeviceCreationTest, RequestDeviceFailure) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+
+        wgpu::DeviceDescriptor desc = {};
+        wgpu::FeatureName invalidFeature = static_cast<wgpu::FeatureName>(WGPUFeatureName_Force32);
+        desc.requiredFeatures = &invalidFeature;
+        desc.requiredFeaturesCount = 1;
+
+        adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
new file mode 100644
index 0000000..8135f0c
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUPLAYOUT_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUPLAYOUT_MOCK_H_
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class BindGroupLayoutMock final : public BindGroupLayoutBase {
+      public:
+        BindGroupLayoutMock(DeviceBase* device) : BindGroupLayoutBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->BindGroupLayoutBase::DestroyImpl();
+            });
+        }
+        ~BindGroupLayoutMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUPLAYOUT_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
new file mode 100644
index 0000000..f6a2370
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUP_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUP_MOCK_H_
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Device.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class BindGroupMock : public BindGroupBase {
+      public:
+        BindGroupMock(DeviceBase* device) : BindGroupBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->BindGroupBase::DestroyImpl();
+            });
+        }
+        ~BindGroupMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUP_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/BufferMock.h b/src/dawn/tests/unittests/native/mocks/BufferMock.h
new file mode 100644
index 0000000..8f17d1d
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/BufferMock.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_BUFFER_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_BUFFER_MOCK_H_
+
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class BufferMock : public BufferBase {
+      public:
+        BufferMock(DeviceBase* device, BufferBase::BufferState state) : BufferBase(device, state) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->BufferBase::DestroyImpl();
+            });
+        }
+        ~BufferMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+
+        MOCK_METHOD(MaybeError, MapAtCreationImpl, (), (override));
+        MOCK_METHOD(MaybeError,
+                    MapAsyncImpl,
+                    (wgpu::MapMode mode, size_t offset, size_t size),
+                    (override));
+        MOCK_METHOD(void, UnmapImpl, (), (override));
+        MOCK_METHOD(void*, GetMappedPointerImpl, (), (override));
+
+        MOCK_METHOD(bool, IsCPUWritableAtCreation, (), (const, override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_BINDGROUP_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
new file mode 100644
index 0000000..7f0198b
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_COMMANDBUFFER_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_COMMANDBUFFER_MOCK_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Device.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class CommandBufferMock : public CommandBufferBase {
+      public:
+        CommandBufferMock(DeviceBase* device) : CommandBufferBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->CommandBufferBase::DestroyImpl();
+            });
+        }
+        ~CommandBufferMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_COMMANDBUFFER_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
new file mode 100644
index 0000000..6071051
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_COMPUTEPIPELINE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_COMPUTEPIPELINE_MOCK_H_
+
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class ComputePipelineMock : public ComputePipelineBase {
+      public:
+        ComputePipelineMock(DeviceBase* device) : ComputePipelineBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->ComputePipelineBase::DestroyImpl();
+            });
+        }
+        ~ComputePipelineMock() override = default;
+
+        MOCK_METHOD(MaybeError, Initialize, (), (override));
+        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_COMPUTEPIPELINE_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/DeviceMock.h b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
new file mode 100644
index 0000000..d0d5c31
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
@@ -0,0 +1,121 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_DEVICE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_DEVICE_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class DeviceMock : public DeviceBase {
+      public:
+        // Exposes some protected functions for testing purposes.
+        using DeviceBase::DestroyObjects;
+        using DeviceBase::SetToggle;
+
+        MOCK_METHOD(ResultOrError<Ref<CommandBufferBase>>,
+                    CreateCommandBuffer,
+                    (CommandEncoder*, const CommandBufferDescriptor*),
+                    (override));
+
+        MOCK_METHOD(ResultOrError<std::unique_ptr<StagingBufferBase>>,
+                    CreateStagingBuffer,
+                    (size_t),
+                    (override));
+        MOCK_METHOD(MaybeError,
+                    CopyFromStagingToBuffer,
+                    (StagingBufferBase*, uint64_t, BufferBase*, uint64_t, uint64_t),
+                    (override));
+        MOCK_METHOD(
+            MaybeError,
+            CopyFromStagingToTexture,
+            (const StagingBufferBase*, const TextureDataLayout&, TextureCopy*, const Extent3D&),
+            (override));
+
+        MOCK_METHOD(uint32_t, GetOptimalBytesPerRowAlignment, (), (const, override));
+        MOCK_METHOD(uint64_t, GetOptimalBufferToTextureCopyOffsetAlignment, (), (const, override));
+
+        MOCK_METHOD(float, GetTimestampPeriodInNS, (), (const, override));
+
+        MOCK_METHOD(ResultOrError<Ref<BindGroupBase>>,
+                    CreateBindGroupImpl,
+                    (const BindGroupDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<BindGroupLayoutBase>>,
+                    CreateBindGroupLayoutImpl,
+                    (const BindGroupLayoutDescriptor*, PipelineCompatibilityToken),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<BufferBase>>,
+                    CreateBufferImpl,
+                    (const BufferDescriptor*),
+                    (override));
+        MOCK_METHOD(Ref<ComputePipelineBase>,
+                    CreateUninitializedComputePipelineImpl,
+                    (const ComputePipelineDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<ExternalTextureBase>>,
+                    CreateExternalTextureImpl,
+                    (const ExternalTextureDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<PipelineLayoutBase>>,
+                    CreatePipelineLayoutImpl,
+                    (const PipelineLayoutDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<QuerySetBase>>,
+                    CreateQuerySetImpl,
+                    (const QuerySetDescriptor*),
+                    (override));
+        MOCK_METHOD(Ref<RenderPipelineBase>,
+                    CreateUninitializedRenderPipelineImpl,
+                    (const RenderPipelineDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<SamplerBase>>,
+                    CreateSamplerImpl,
+                    (const SamplerDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<ShaderModuleBase>>,
+                    CreateShaderModuleImpl,
+                    (const ShaderModuleDescriptor*, ShaderModuleParseResult*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<SwapChainBase>>,
+                    CreateSwapChainImpl,
+                    (const SwapChainDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<NewSwapChainBase>>,
+                    CreateSwapChainImpl,
+                    (Surface*, NewSwapChainBase*, const SwapChainDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<TextureBase>>,
+                    CreateTextureImpl,
+                    (const TextureDescriptor*),
+                    (override));
+        MOCK_METHOD(ResultOrError<Ref<TextureViewBase>>,
+                    CreateTextureViewImpl,
+                    (TextureBase*, const TextureViewDescriptor*),
+                    (override));
+
+        MOCK_METHOD(MaybeError, TickImpl, (), (override));
+
+        MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+        MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_DEVICE_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
new file mode 100644
index 0000000..c40276b
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_EXTERNALTEXTURE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_EXTERNALTEXTURE_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ExternalTexture.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class ExternalTextureMock : public ExternalTextureBase {
+      public:
+        ExternalTextureMock(DeviceBase* device) : ExternalTextureBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->ExternalTextureBase::DestroyImpl();
+            });
+        }
+        ~ExternalTextureMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_EXTERNALTEXTURE_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
new file mode 100644
index 0000000..7e82a40
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_PIPELINELAYOUT_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_PIPELINELAYOUT_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/PipelineLayout.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class PipelineLayoutMock : public PipelineLayoutBase {
+      public:
+        PipelineLayoutMock(DeviceBase* device) : PipelineLayoutBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->PipelineLayoutBase::DestroyImpl();
+            });
+        }
+        ~PipelineLayoutMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_PIPELINELAYOUT_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/QuerySetMock.h b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
new file mode 100644
index 0000000..c4b8349
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_QUERYSET_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_QUERYSET_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/QuerySet.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class QuerySetMock : public QuerySetBase {
+      public:
+        QuerySetMock(DeviceBase* device) : QuerySetBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->QuerySetBase::DestroyImpl();
+            });
+        }
+        ~QuerySetMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_QUERYSET_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
new file mode 100644
index 0000000..71e0287
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_RENDERPIPELINE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_RENDERPIPELINE_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class RenderPipelineMock : public RenderPipelineBase {
+      public:
+        RenderPipelineMock(DeviceBase* device) : RenderPipelineBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->RenderPipelineBase::DestroyImpl();
+            });
+        }
+        ~RenderPipelineMock() override = default;
+
+        MOCK_METHOD(MaybeError, Initialize, (), (override));
+        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_RENDERPIPELINE_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/SamplerMock.h b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
new file mode 100644
index 0000000..48ab087
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_SAMPLER_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_SAMPLER_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Sampler.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class SamplerMock : public SamplerBase {
+      public:
+        SamplerMock(DeviceBase* device) : SamplerBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->SamplerBase::DestroyImpl();
+            });
+        }
+        ~SamplerMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_SAMPLER_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
new file mode 100644
index 0000000..4c7da22
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "ShaderModuleMock.h"
+
+namespace dawn::native {
+
+    ShaderModuleMock::ShaderModuleMock(DeviceBase* device) : ShaderModuleBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->ShaderModuleBase::DestroyImpl();
+        });
+    }
+
+    ResultOrError<Ref<ShaderModuleMock>> ShaderModuleMock::Create(DeviceBase* device,
+                                                                  const char* source) {
+        ShaderModuleMock* mock = new ShaderModuleMock(device);
+
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = source;
+        ShaderModuleDescriptor desc;
+        desc.nextInChain = &wgslDesc;
+
+        ShaderModuleParseResult parseResult;
+        DAWN_TRY(ValidateShaderModuleDescriptor(device, &desc, &parseResult, nullptr));
+        DAWN_TRY(mock->InitializeBase(&parseResult));
+        return AcquireRef(mock);
+    }
+
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
new file mode 100644
index 0000000..8beeabf
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_SHADERMODULE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_SHADERMODULE_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ShaderModule.h"
+
+#include <memory>
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class ShaderModuleMock : public ShaderModuleBase {
+      public:
+        ShaderModuleMock(DeviceBase* device);
+        ~ShaderModuleMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+
+        // Creates a shader module mock based on the wgsl source.
+        static ResultOrError<Ref<ShaderModuleMock>> Create(DeviceBase* device, const char* source);
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_SHADERMODULE_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/SwapChainMock.h b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
new file mode 100644
index 0000000..711f6ff
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_SWAPCHAIN_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_SWAPCHAIN_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/SwapChain.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class SwapChainMock : public SwapChainBase {
+      public:
+        SwapChainMock(DeviceBase* device) : SwapChainBase(device) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->SwapChainBase::DestroyImpl();
+            });
+        }
+        ~SwapChainMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+
+        MOCK_METHOD(void,
+                    APIConfigure,
+                    (wgpu::TextureFormat, wgpu::TextureUsage, uint32_t, uint32_t),
+                    (override));
+        MOCK_METHOD(TextureViewBase*, APIGetCurrentTextureView, (), (override));
+        MOCK_METHOD(void, APIPresent, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_SWAPCHAIN_MOCK_H_
diff --git a/src/dawn/tests/unittests/native/mocks/TextureMock.h b/src/dawn/tests/unittests/native/mocks/TextureMock.h
new file mode 100644
index 0000000..10aa2e9
--- /dev/null
+++ b/src/dawn/tests/unittests/native/mocks/TextureMock.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_NATIVE_MOCKS_TEXTURE_MOCK_H_
+#define TESTS_UNITTESTS_NATIVE_MOCKS_TEXTURE_MOCK_H_
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Texture.h"
+
+#include <gmock/gmock.h>
+
+namespace dawn::native {
+
+    class TextureMock : public TextureBase {
+      public:
+        TextureMock(DeviceBase* device, TextureBase::TextureState state)
+            : TextureBase(device, state) {
+            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+                this->TextureBase::DestroyImpl();
+            });
+        }
+        ~TextureMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+    class TextureViewMock : public TextureViewBase {
+      public:
+        TextureViewMock(TextureBase* texture) : TextureViewBase(texture) {
+        }
+        ~TextureViewMock() override = default;
+
+        MOCK_METHOD(void, DestroyImpl, (), (override));
+    };
+
+}  // namespace dawn::native
+
+#endif  // TESTS_UNITTESTS_NATIVE_MOCKS_TEXTURE_MOCK_H_
diff --git a/src/dawn/tests/unittests/validation/BindGroupValidationTests.cpp b/src/dawn/tests/unittests/validation/BindGroupValidationTests.cpp
new file mode 100644
index 0000000..ef60ba4
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/BindGroupValidationTests.cpp
@@ -0,0 +1,2746 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class BindGroupValidationTest : public ValidationTest {
+  public:
+    wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
+                                wgpu::TextureFormat format,
+                                uint32_t layerCount) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size = {16, 16, layerCount};
+        descriptor.sampleCount = 1;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = usage;
+        descriptor.format = format;
+
+        return device.CreateTexture(&descriptor);
+    }
+
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        // Create objects to use as resources inside test bind groups.
+        {
+            wgpu::BufferDescriptor descriptor;
+            descriptor.size = 1024;
+            descriptor.usage = wgpu::BufferUsage::Uniform;
+            mUBO = device.CreateBuffer(&descriptor);
+        }
+        {
+            wgpu::BufferDescriptor descriptor;
+            descriptor.size = 1024;
+            descriptor.usage = wgpu::BufferUsage::Storage;
+            mSSBO = device.CreateBuffer(&descriptor);
+        }
+        { mSampler = device.CreateSampler(); }
+        {
+            mSampledTexture =
+                CreateTexture(wgpu::TextureUsage::TextureBinding, kDefaultTextureFormat, 1);
+            mSampledTextureView = mSampledTexture.CreateView();
+
+            wgpu::ExternalTextureDescriptor externalTextureDesc;
+            externalTextureDesc.plane0 = mSampledTextureView;
+            mExternalTexture = device.CreateExternalTexture(&externalTextureDesc);
+            mExternalTextureBindingEntry.externalTexture = mExternalTexture;
+        }
+    }
+
+  protected:
+    wgpu::Buffer mUBO;
+    wgpu::Buffer mSSBO;
+    wgpu::Sampler mSampler;
+    wgpu::Texture mSampledTexture;
+    wgpu::TextureView mSampledTextureView;
+    wgpu::ExternalTextureBindingEntry mExternalTextureBindingEntry;
+
+    static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+  private:
+    wgpu::ExternalTexture mExternalTexture;
+};
+
+// Test the validation of BindGroupDescriptor::nextInChain
+TEST_F(BindGroupValidationTest, NextInChainNullptr) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(device, {});
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 0;
+    descriptor.entries = nullptr;
+
+    // Control case: check that nextInChain = nullptr is valid
+    descriptor.nextInChain = nullptr;
+    device.CreateBindGroup(&descriptor);
+
+    // Check that nextInChain != nullptr is an error.
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::Invalid;
+    descriptor.nextInChain = &chainedDescriptor;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+}
+
+// Check constraints on entryCount
+TEST_F(BindGroupValidationTest, EntryCountMismatch) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+    // Control case: check that a descriptor with one binding is ok
+    utils::MakeBindGroup(device, layout, {{0, mSampler}});
+
+    // Check that entryCount != layout.entryCount fails.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {}));
+}
+
+// Check constraints on BindGroupEntry::binding
+TEST_F(BindGroupValidationTest, WrongBindings) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+    // Control case: check that a descriptor with a binding matching the layout's is ok
+    utils::MakeBindGroup(device, layout, {{0, mSampler}});
+
+    // Check that binding must be present in the layout
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{1, mSampler}}));
+}
+
+// Check that the same binding cannot be set twice
+TEST_F(BindGroupValidationTest, BindingSetTwice) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+                 {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+    // Control case: check that different bindings work
+    utils::MakeBindGroup(device, layout, {{0, mSampler}, {1, mSampler}});
+
+    // Check that setting the same binding twice is invalid
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mSampler}, {0, mSampler}}));
+}
+
+// Check that a sampler binding must contain exactly one sampler
+TEST_F(BindGroupValidationTest, SamplerBindingType) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+    wgpu::BindGroupEntry binding;
+    binding.binding = 0;
+    binding.sampler = nullptr;
+    binding.textureView = nullptr;
+    binding.buffer = nullptr;
+    binding.offset = 0;
+    binding.size = 0;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 1;
+    descriptor.entries = &binding;
+
+    // Not setting anything fails
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+
+    // Control case: setting just the sampler works
+    binding.sampler = mSampler;
+    device.CreateBindGroup(&descriptor);
+
+    // Setting the texture view as well is an error
+    binding.textureView = mSampledTextureView;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.textureView = nullptr;
+
+    // Setting the buffer as well is an error
+    binding.buffer = mUBO;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.buffer = nullptr;
+
+    // Setting the external texture view as well is an error
+    binding.nextInChain = &mExternalTextureBindingEntry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.nextInChain = nullptr;
+
+    // Setting the sampler to an error sampler is an error.
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.minFilter = static_cast<wgpu::FilterMode>(0xFFFFFFFF);
+
+        wgpu::Sampler errorSampler;
+        ASSERT_DEVICE_ERROR(errorSampler = device.CreateSampler(&samplerDesc));
+
+        binding.sampler = errorSampler;
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+        binding.sampler = nullptr;
+    }
+}
+
+// Check that a texture binding must contain exactly a texture view
+TEST_F(BindGroupValidationTest, TextureBindingType) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+    wgpu::BindGroupEntry binding;
+    binding.binding = 0;
+    binding.sampler = nullptr;
+    binding.textureView = nullptr;
+    binding.buffer = nullptr;
+    binding.offset = 0;
+    binding.size = 0;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 1;
+    descriptor.entries = &binding;
+
+    // Not setting anything fails
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+
+    // Control case: setting just the texture view works
+    binding.textureView = mSampledTextureView;
+    device.CreateBindGroup(&descriptor);
+
+    // Setting the sampler as well is an error
+    binding.sampler = mSampler;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.sampler = nullptr;
+
+    // Setting the buffer as well is an error
+    binding.buffer = mUBO;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.buffer = nullptr;
+
+    // Setting the external texture view as well is an error
+    binding.nextInChain = &mExternalTextureBindingEntry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.nextInChain = nullptr;
+
+    // Setting the texture view to an error texture view is an error.
+    {
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.format = kDefaultTextureFormat;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        viewDesc.baseMipLevel = 0;
+        viewDesc.mipLevelCount = 0;
+        viewDesc.baseArrayLayer = 0;
+        viewDesc.arrayLayerCount = 1000;
+
+        wgpu::TextureView errorView;
+        ASSERT_DEVICE_ERROR(errorView = mSampledTexture.CreateView(&viewDesc));
+
+        binding.textureView = errorView;
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+        binding.textureView = nullptr;
+    }
+}
+
+// Check that a buffer binding must contain exactly a buffer
+TEST_F(BindGroupValidationTest, BufferBindingType) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+
+    wgpu::BindGroupEntry binding;
+    binding.binding = 0;
+    binding.sampler = nullptr;
+    binding.textureView = nullptr;
+    binding.buffer = nullptr;
+    binding.offset = 0;
+    binding.size = 1024;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 1;
+    descriptor.entries = &binding;
+
+    // Not setting anything fails
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+
+    // Control case: setting just the buffer works
+    binding.buffer = mUBO;
+    device.CreateBindGroup(&descriptor);
+
+    // Setting the texture view as well is an error
+    binding.textureView = mSampledTextureView;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.textureView = nullptr;
+
+    // Setting the sampler as well is an error
+    binding.sampler = mSampler;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.sampler = nullptr;
+
+    // Setting the external texture view as well is an error
+    binding.nextInChain = &mExternalTextureBindingEntry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.nextInChain = nullptr;
+
+    // Setting the buffer to an error buffer is an error.
+    {
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = 1024;
+        bufferDesc.usage = static_cast<wgpu::BufferUsage>(0xFFFFFFFF);
+
+        wgpu::Buffer errorBuffer;
+        ASSERT_DEVICE_ERROR(errorBuffer = device.CreateBuffer(&bufferDesc));
+
+        binding.buffer = errorBuffer;
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+        binding.buffer = nullptr;
+    }
+}
+
+// Check that an external texture binding must contain exactly an external texture
+TEST_F(BindGroupValidationTest, ExternalTextureBindingType) {
+    // Create an external texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding, kDefaultTextureFormat, 1);
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group layout for a single external texture
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+
+    wgpu::BindGroupEntry binding;
+    binding.binding = 0;
+    binding.sampler = nullptr;
+    binding.textureView = nullptr;
+    binding.buffer = nullptr;
+    binding.offset = 0;
+    binding.size = 0;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 1;
+    descriptor.entries = &binding;
+
+    // Not setting anything fails
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+
+    // Control case: setting just the external texture works
+    wgpu::ExternalTextureBindingEntry externalBindingEntry;
+    externalBindingEntry.externalTexture = externalTexture;
+    binding.nextInChain = &externalBindingEntry;
+    device.CreateBindGroup(&descriptor);
+
+    // Setting the texture view as well is an error
+    binding.textureView = mSampledTextureView;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.textureView = nullptr;
+
+    // Setting the sampler as well is an error
+    binding.sampler = mSampler;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.sampler = nullptr;
+
+    // Setting the buffer as well is an error
+    binding.buffer = mUBO;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.buffer = nullptr;
+
+    // Setting the external texture to an error external texture is an error.
+    {
+        wgpu::Texture errorTexture = CreateTexture(wgpu::TextureUsage::TextureBinding,
+                                                   wgpu::TextureFormat::RGBA8UnormSrgb, 1);
+        wgpu::ExternalTextureDescriptor errorExternalDesciptor;
+        errorExternalDesciptor.plane0 = errorTexture.CreateView();
+
+        wgpu::ExternalTexture errorExternalTexture;
+        ASSERT_DEVICE_ERROR(errorExternalTexture =
+                                device.CreateExternalTexture(&errorExternalDesciptor));
+
+        wgpu::ExternalTextureBindingEntry errorExternalBindingEntry;
+        errorExternalBindingEntry.externalTexture = errorExternalTexture;
+        binding.nextInChain = &errorExternalBindingEntry;
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+        binding.nextInChain = nullptr;
+    }
+
+    // Setting an external texture with another external texture chained is an error.
+    {
+        wgpu::ExternalTexture externalTexture2 = device.CreateExternalTexture(&externalDesc);
+        wgpu::ExternalTextureBindingEntry externalBindingEntry2;
+        externalBindingEntry2.externalTexture = externalTexture2;
+        externalBindingEntry.nextInChain = &externalBindingEntry2;
+
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    }
+
+    // Chaining a struct that isn't an external texture binding entry is an error.
+    {
+        wgpu::ExternalTextureBindingLayout externalBindingLayout;
+        binding.nextInChain = &externalBindingLayout;
+        ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    }
+}
+
+// Check that a texture binding must have the correct usage
+TEST_F(BindGroupValidationTest, TextureUsage) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+    // Control case: setting a sampleable texture view works.
+    utils::MakeBindGroup(device, layout, {{0, mSampledTextureView}});
+
+    // Make an render attachment texture and try to set it for a SampledTexture binding
+    wgpu::Texture outputTexture =
+        CreateTexture(wgpu::TextureUsage::RenderAttachment, wgpu::TextureFormat::RGBA8Unorm, 1);
+    wgpu::TextureView outputTextureView = outputTexture.CreateView();
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, outputTextureView}}));
+}
+
+// Check that a storage texture binding must have the correct usage
+TEST_F(BindGroupValidationTest, StorageTextureUsage) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly,
+                  wgpu::TextureFormat::RGBA8Uint}});
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {16, 16, 1};
+    descriptor.sampleCount = 1;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::StorageBinding;
+    descriptor.format = wgpu::TextureFormat::RGBA8Uint;
+
+    wgpu::TextureView view = device.CreateTexture(&descriptor).CreateView();
+
+    // Control case: setting a storage texture view works.
+    utils::MakeBindGroup(device, layout, {{0, view}});
+
+    // Sampled texture is invalid with storage buffer binding
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    view = device.CreateTexture(&descriptor).CreateView();
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}}));
+
+    // Multisampled texture is invalid with storage buffer binding
+    // Regression case for crbug.com/dawn/614 where this hit an ASSERT.
+    descriptor.sampleCount = 4;
+    view = device.CreateTexture(&descriptor).CreateView();
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}}));
+}
+
+// Check that a texture must have the correct sample type
+TEST_F(BindGroupValidationTest, TextureSampleType) {
+    auto DoTest = [this](bool success, wgpu::TextureFormat format,
+                         wgpu::TextureSampleType sampleType) {
+        wgpu::BindGroupLayout layout =
+            utils::MakeBindGroupLayout(device, {{0, wgpu::ShaderStage::Fragment, sampleType}});
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {4, 4, 1};
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+        descriptor.format = format;
+
+        wgpu::TextureView view = device.CreateTexture(&descriptor).CreateView();
+
+        if (success) {
+            utils::MakeBindGroup(device, layout, {{0, view}});
+        } else {
+            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}}));
+        }
+    };
+
+    // Test that RGBA8Unorm is only compatible with float/unfilterable-float
+    DoTest(true, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Float);
+    DoTest(true, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::UnfilterableFloat);
+    DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Depth);
+    DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Uint);
+    DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Sint);
+
+    // Test that R32Float is only compatible with unfilterable-float
+    DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Float);
+    DoTest(true, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::UnfilterableFloat);
+    DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Depth);
+    DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Uint);
+    DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Sint);
+
+    // Test that Depth32Float is only compatible with depth.
+    DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Float);
+    DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::UnfilterableFloat);
+    DoTest(true, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Depth);
+    DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Uint);
+    DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Sint);
+
+    // Test that RG8Uint is only compatible with uint
+    DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Float);
+    DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::UnfilterableFloat);
+    DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Depth);
+    DoTest(true, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Uint);
+    DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Sint);
+
+    // Test that R16Sint is only compatible with sint
+    DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Float);
+    DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::UnfilterableFloat);
+    DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Depth);
+    DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Uint);
+    DoTest(true, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Sint);
+}
+
+// Test which depth-stencil formats are allowed to be sampled (all).
+TEST_F(BindGroupValidationTest, SamplingDepthStencilTexture) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+
+    wgpu::TextureDescriptor desc;
+    desc.size = {1, 1, 1};
+    desc.usage = wgpu::TextureUsage::TextureBinding;
+
+    // Depth32Float is allowed to be sampled.
+    {
+        desc.format = wgpu::TextureFormat::Depth32Float;
+        wgpu::Texture texture = device.CreateTexture(&desc);
+
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView()}});
+    }
+
+    // Depth24Plus is allowed to be sampled.
+    {
+        desc.format = wgpu::TextureFormat::Depth24Plus;
+        wgpu::Texture texture = device.CreateTexture(&desc);
+
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView()}});
+    }
+
+    // Depth24PlusStencil8 is allowed to be sampled, if the depth or stencil aspect is selected.
+    {
+        desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture texture = device.CreateTexture(&desc);
+        wgpu::TextureViewDescriptor viewDesc = {};
+
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&viewDesc)}});
+
+        layout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Uint}});
+
+        viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&viewDesc)}});
+    }
+}
+
+// Check that a texture must have the correct dimension
+TEST_F(BindGroupValidationTest, TextureDimension) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+    // Control case: setting a 2D texture view works.
+    utils::MakeBindGroup(device, layout, {{0, mSampledTextureView}});
+
+    // Make a 2DArray texture and try to set it to a 2D binding.
+    wgpu::Texture arrayTexture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding, wgpu::TextureFormat::RGBA8Uint, 2);
+    wgpu::TextureView arrayTextureView = arrayTexture.CreateView();
+
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, arrayTextureView}}));
+}
+
+// Check that a storage texture binding must have a texture view with a mipLevelCount of 1
+TEST_F(BindGroupValidationTest, StorageTextureViewLayerCount) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly,
+                  wgpu::TextureFormat::RGBA8Uint}});
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {16, 16, 1};
+    descriptor.sampleCount = 1;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::StorageBinding;
+    descriptor.format = wgpu::TextureFormat::RGBA8Uint;
+
+    wgpu::Texture textureNoMip = device.CreateTexture(&descriptor);
+
+    descriptor.mipLevelCount = 3;
+    wgpu::Texture textureMip = device.CreateTexture(&descriptor);
+
+    // Control case: setting a storage texture view on a texture with only one mip level works
+    {
+        wgpu::TextureView view = textureNoMip.CreateView();
+        utils::MakeBindGroup(device, layout, {{0, view}});
+    }
+
+    // Setting a storage texture view with mipLevelCount=1 on a texture of multiple mip levels is
+    // valid
+    {
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.aspect = wgpu::TextureAspect::All;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        viewDesc.format = wgpu::TextureFormat::RGBA8Uint;
+        viewDesc.baseMipLevel = 0;
+        viewDesc.mipLevelCount = 1;
+
+        // Setting texture view with lod 0 is valid
+        wgpu::TextureView view = textureMip.CreateView(&viewDesc);
+        utils::MakeBindGroup(device, layout, {{0, view}});
+
+        // Setting texture view with other lod is also valid
+        viewDesc.baseMipLevel = 2;
+        view = textureMip.CreateView(&viewDesc);
+        utils::MakeBindGroup(device, layout, {{0, view}});
+    }
+
+    // Texture view with mipLevelCount > 1 is invalid
+    {
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.aspect = wgpu::TextureAspect::All;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        viewDesc.format = wgpu::TextureFormat::RGBA8Uint;
+        viewDesc.baseMipLevel = 0;
+        viewDesc.mipLevelCount = 2;
+
+        // Setting texture view with lod 0 and 1 is invalid
+        wgpu::TextureView view = textureMip.CreateView(&viewDesc);
+        ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}}));
+
+        // Setting texture view with lod 1 and 2 is invalid
+        viewDesc.baseMipLevel = 1;
+        view = textureMip.CreateView(&viewDesc);
+        ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}}));
+    }
+}
+
+// Check that a UBO must have the correct usage
+TEST_F(BindGroupValidationTest, BufferUsageUBO) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+
+    // Control case: using a buffer with the uniform usage works
+    utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}});
+
+    // Using a buffer without the uniform usage fails
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}}));
+}
+
+// Check that a SSBO must have the correct usage
+TEST_F(BindGroupValidationTest, BufferUsageSSBO) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    // Control case: using a buffer with the storage usage works
+    utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}});
+
+    // Using a buffer without the storage usage fails
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}}));
+}
+
+// Check that a readonly SSBO must have the correct usage
+TEST_F(BindGroupValidationTest, BufferUsageReadonlySSBO) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+
+    // Control case: using a buffer with the storage usage works
+    utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}});
+
+    // Using a buffer without the storage usage fails
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}}));
+}
+
+// Check that a resolve buffer with internal storge usage cannot be used as SSBO
+TEST_F(BindGroupValidationTest, BufferUsageQueryResolve) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    // Control case: using a buffer with the storage usage works
+    utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}});
+
+    // Using a resolve buffer with the internal storage usage fails
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 1024;
+    descriptor.usage = wgpu::BufferUsage::QueryResolve;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256}}));
+}
+
+// Tests constraints on the buffer offset for bind groups.
+TEST_F(BindGroupValidationTest, BufferOffsetAlignment) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                });
+
+    // Check that offset 0 is valid
+    utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 512}});
+
+    // Check that offset 256 (aligned) is valid
+    utils::MakeBindGroup(device, layout, {{0, mUBO, 256, 256}});
+
+    // Check cases where unaligned buffer offset is invalid
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 1, 256}}));
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 128, 256}}));
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 255, 256}}));
+}
+
+// Tests constraints on the texture for MultisampledTexture bindings
+TEST_F(BindGroupValidationTest, MultisampledTexture) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                  wgpu::TextureViewDimension::e2D, true}});
+
+    wgpu::BindGroupEntry binding;
+    binding.binding = 0;
+    binding.sampler = nullptr;
+    binding.textureView = nullptr;
+    binding.buffer = nullptr;
+    binding.offset = 0;
+    binding.size = 0;
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 1;
+    descriptor.entries = &binding;
+
+    // Not setting anything fails
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+
+    // Control case: setting a multisampled 2D texture works
+    wgpu::TextureDescriptor textureDesc;
+    textureDesc.sampleCount = 4;
+    textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+    textureDesc.dimension = wgpu::TextureDimension::e2D;
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.size = {1, 1, 1};
+    wgpu::Texture msTexture = device.CreateTexture(&textureDesc);
+
+    binding.textureView = msTexture.CreateView();
+    device.CreateBindGroup(&descriptor);
+    binding.textureView = nullptr;
+
+    // Error case: setting a single sampled 2D texture is an error.
+    binding.textureView = mSampledTextureView;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor));
+    binding.textureView = nullptr;
+}
+
+// Tests constraints to be sure the buffer binding fits in the buffer
+TEST_F(BindGroupValidationTest, BufferBindingOOB) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                });
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 1024;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    // Success case, touching the start of the buffer works
+    utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256}});
+
+    // Success case, touching the end of the buffer works
+    utils::MakeBindGroup(device, layout, {{0, buffer, 3 * 256, 256}});
+
+    // Error case, zero size is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 1024, 0}}));
+
+    // Success case, touching the full buffer works
+    utils::MakeBindGroup(device, layout, {{0, buffer, 0, 1024}});
+    utils::MakeBindGroup(device, layout, {{0, buffer, 0, wgpu::kWholeSize}});
+
+    // Success case, whole size causes the rest of the buffer to be used but not beyond.
+    utils::MakeBindGroup(device, layout, {{0, buffer, 256, wgpu::kWholeSize}});
+
+    // Error case, offset is OOB
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 256 * 5, 0}}));
+
+    // Error case, size is OOB
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256 * 5}}));
+
+    // Error case, offset+size is OOB
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 1024, 256}}));
+
+    // Error case, offset+size overflows to be 0
+    ASSERT_DEVICE_ERROR(
+        utils::MakeBindGroup(device, layout, {{0, buffer, 256, uint32_t(0) - uint32_t(256)}}));
+}
+
+// Tests constraints to be sure the uniform buffer binding isn't too large
+TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 2 * supportedLimits.maxUniformBufferBindingSize;
+    descriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    // Success case, this is exactly the limit
+    utils::MakeBindGroup(device, uniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize}});
+
+    wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                 {1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    // Success case, individual bindings don't exceed the limit
+    utils::MakeBindGroup(device, doubleUniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize},
+                          {1, buffer, supportedLimits.maxUniformBufferBindingSize,
+                           supportedLimits.maxUniformBufferBindingSize}});
+
+    // Error case, this is above the limit
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(
+        device, uniformLayout, {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize + 1}}));
+
+    // Making sure the constraint doesn't apply to storage buffers
+    wgpu::BindGroupLayout readonlyStorageLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout storageLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    // Success case, storage buffer can still be created.
+    utils::MakeBindGroup(device, readonlyStorageLayout,
+                         {{0, buffer, 0, 2 * supportedLimits.maxUniformBufferBindingSize}});
+    utils::MakeBindGroup(device, storageLayout,
+                         {{0, buffer, 0, 2 * supportedLimits.maxUniformBufferBindingSize}});
+}
+
+// Tests constraints to be sure the storage buffer binding isn't too large
+TEST_F(BindGroupValidationTest, MaxStorageBufferBindingSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 2 * supportedLimits.maxStorageBufferBindingSize;
+    descriptor.usage = wgpu::BufferUsage::Storage;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    // Success case, this is exactly the limit
+    utils::MakeBindGroup(device, uniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize}});
+
+    // Success case, this is one less than the limit (check it is not an alignment constraint)
+    utils::MakeBindGroup(device, uniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize - 1}});
+
+    wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
+                 {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    // Success case, individual bindings don't exceed the limit
+    utils::MakeBindGroup(device, doubleUniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize},
+                          {1, buffer, supportedLimits.maxStorageBufferBindingSize,
+                           supportedLimits.maxStorageBufferBindingSize}});
+
+    // Error case, this is above the limit
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(
+        device, uniformLayout, {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize + 1}}));
+}
+
+// Test what happens when the layout is an error.
+TEST_F(BindGroupValidationTest, ErrorLayout) {
+    wgpu::BindGroupLayout goodLayout = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                });
+
+    wgpu::BindGroupLayout errorLayout;
+    ASSERT_DEVICE_ERROR(
+        errorLayout = utils::MakeBindGroupLayout(
+            device, {
+                        {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                        {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                    }));
+
+    // Control case, creating with the good layout works
+    utils::MakeBindGroup(device, goodLayout, {{0, mUBO, 0, 256}});
+
+    // Creating with an error layout fails
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, errorLayout, {{0, mUBO, 0, 256}}));
+}
+
+class BindGroupLayoutValidationTest : public ValidationTest {
+  public:
+    wgpu::BindGroupLayout MakeBindGroupLayout(wgpu::BindGroupLayoutEntry* binding, uint32_t count) {
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = count;
+        descriptor.entries = binding;
+        return device.CreateBindGroupLayout(&descriptor);
+    }
+
+    void TestCreateBindGroupLayout(wgpu::BindGroupLayoutEntry* binding,
+                                   uint32_t count,
+                                   bool expected) {
+        wgpu::BindGroupLayoutDescriptor descriptor;
+
+        descriptor.entryCount = count;
+        descriptor.entries = binding;
+
+        if (!expected) {
+            ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor));
+        } else {
+            device.CreateBindGroupLayout(&descriptor);
+        }
+    }
+
+    void TestCreatePipelineLayout(wgpu::BindGroupLayout* bgl, uint32_t count, bool expected) {
+        wgpu::PipelineLayoutDescriptor descriptor;
+
+        descriptor.bindGroupLayoutCount = count;
+        descriptor.bindGroupLayouts = bgl;
+
+        if (!expected) {
+            ASSERT_DEVICE_ERROR(device.CreatePipelineLayout(&descriptor));
+        } else {
+            device.CreatePipelineLayout(&descriptor);
+        }
+    }
+};
+
+// Tests setting storage buffer and readonly storage buffer bindings in vertex and fragment shader.
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutStorageBindingsInVertexShader) {
+    // Checks that storage buffer binding is not supported in vertex shader.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Storage}}));
+
+    utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::ReadOnlyStorage}});
+
+    utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+
+    utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+}
+
+// Tests setting that bind group layout bindings numbers may be very large.
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutEntryMax) {
+    // Check that up to kMaxBindingNumber is valid.
+    utils::MakeBindGroupLayout(
+        device, {{kMaxBindingNumber, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+
+    // But after is an error.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device,
+        {{kMaxBindingNumber + 1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}));
+}
+
+// This test verifies that the BindGroupLayout bindings are correctly validated, even if the
+// binding ids are out-of-order.
+TEST_F(BindGroupLayoutValidationTest, BindGroupEntry) {
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                                   {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                               });
+}
+
+// Check that dynamic = true is only allowed buffer bindings.
+TEST_F(BindGroupLayoutValidationTest, DynamicAndTypeCompatibility) {
+    utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true},
+                });
+
+    utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage, true},
+                });
+
+    utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true},
+                });
+}
+
+// Test that it is invalid to create a BGL with more than one binding type set.
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutEntryTooManySet) {
+    wgpu::BindGroupLayoutEntry entry = {};
+    entry.binding = 0;
+    entry.visibility = wgpu::ShaderStage::Fragment;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    entry.sampler.type = wgpu::SamplerBindingType::Filtering;
+
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor),
+                        testing::HasSubstr("had more than one of"));
+}
+
+// Test that it is invalid to create a BGL with none one of buffer,
+// sampler, texture, storageTexture, or externalTexture set.
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutEntryNoneSet) {
+    wgpu::BindGroupLayoutEntry entry = {};
+
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor),
+                        testing::HasSubstr("had none of"));
+}
+
+// This test verifies that visibility of bindings in BindGroupLayout can be none
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutVisibilityNone) {
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                               });
+
+    wgpu::BindGroupLayoutEntry entry;
+    entry.binding = 0;
+    entry.visibility = wgpu::ShaderStage::None;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = 1;
+    descriptor.entries = &entry;
+    device.CreateBindGroupLayout(&descriptor);
+}
+
+// This test verifies that binding with none visibility in bind group layout can be supported in
+// bind group
+TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutVisibilityNoneExpectsBindGroupEntry) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+                    {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Uniform},
+                });
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, buffer}}));
+}
+
+#define BGLEntryType(...) \
+    utils::BindingLayoutEntryInitializationHelper(0, wgpu::ShaderStage::Compute, __VA_ARGS__)
+
+TEST_F(BindGroupLayoutValidationTest, PerStageLimits) {
+    struct TestInfo {
+        uint32_t maxCount;
+        wgpu::BindGroupLayoutEntry entry;
+        wgpu::BindGroupLayoutEntry otherEntry;
+    };
+
+    std::array<TestInfo, 7> kTestInfos = {
+        TestInfo{kMaxSampledTexturesPerShaderStage, BGLEntryType(wgpu::TextureSampleType::Float),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxSamplersPerShaderStage, BGLEntryType(wgpu::SamplerBindingType::Filtering),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxSamplersPerShaderStage, BGLEntryType(wgpu::SamplerBindingType::Comparison),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxStorageBuffersPerShaderStage, BGLEntryType(wgpu::BufferBindingType::Storage),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{
+            kMaxStorageTexturesPerShaderStage,
+            BGLEntryType(wgpu::StorageTextureAccess::WriteOnly, wgpu::TextureFormat::RGBA8Unorm),
+            BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxUniformBuffersPerShaderStage, BGLEntryType(wgpu::BufferBindingType::Uniform),
+                 BGLEntryType(wgpu::TextureSampleType::Float)},
+        // External textures use multiple bindings (3 sampled textures, 1 sampler, 1 uniform buffer)
+        // that count towards the per stage binding limits. The number of external textures are
+        // currently restricted by the maximum number of sampled textures.
+        TestInfo{kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+                 BGLEntryType(&utils::kExternalTextureBindingLayout),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)}};
+
+    for (TestInfo info : kTestInfos) {
+        wgpu::BindGroupLayout bgl[2];
+        std::vector<utils::BindingLayoutEntryInitializationHelper> maxBindings;
+
+        for (uint32_t i = 0; i < info.maxCount; ++i) {
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = i;
+            maxBindings.push_back(entry);
+        }
+
+        // Creating with the maxes works.
+        bgl[0] = MakeBindGroupLayout(maxBindings.data(), maxBindings.size());
+
+        // Adding an extra binding of a different type works.
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.otherEntry;
+            entry.binding = info.maxCount;
+            bindings.push_back(entry);
+            MakeBindGroupLayout(bindings.data(), bindings.size());
+        }
+
+        // Adding an extra binding of the maxed type in a different stage works
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = info.maxCount;
+            entry.visibility = wgpu::ShaderStage::Fragment;
+            bindings.push_back(entry);
+            MakeBindGroupLayout(bindings.data(), bindings.size());
+        }
+
+        // Adding an extra binding of the maxed type and stage exceeds the per stage limit.
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = info.maxCount;
+            bindings.push_back(entry);
+            ASSERT_DEVICE_ERROR(MakeBindGroupLayout(bindings.data(), bindings.size()));
+        }
+
+        // Creating a pipeline layout from the valid BGL works.
+        TestCreatePipelineLayout(bgl, 1, true);
+
+        // Adding an extra binding of a different type in a different BGL works
+        bgl[1] = utils::MakeBindGroupLayout(device, {info.otherEntry});
+        TestCreatePipelineLayout(bgl, 2, true);
+
+        {
+            // Adding an extra binding of the maxed type in a different stage works
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.visibility = wgpu::ShaderStage::Fragment;
+            bgl[1] = utils::MakeBindGroupLayout(device, {entry});
+            TestCreatePipelineLayout(bgl, 2, true);
+        }
+
+        // Adding an extra binding of the maxed type in a different BGL exceeds the per stage limit.
+        bgl[1] = utils::MakeBindGroupLayout(device, {info.entry});
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+}
+
+// External textures require multiple binding slots (3 sampled texture, 1 uniform buffer, 1
+// sampler), so ensure that these count towards the limit when combined non-external texture
+// bindings.
+TEST_F(BindGroupLayoutValidationTest, PerStageLimitsWithExternalTexture) {
+    struct TestInfo {
+        uint32_t maxCount;
+        uint32_t bindingsPerExternalTexture;
+        wgpu::BindGroupLayoutEntry entry;
+        wgpu::BindGroupLayoutEntry otherEntry;
+    };
+
+    std::array<TestInfo, 3> kTestInfos = {
+        TestInfo{kMaxSampledTexturesPerShaderStage, kSampledTexturesPerExternalTexture,
+                 BGLEntryType(wgpu::TextureSampleType::Float),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxSamplersPerShaderStage, kSamplersPerExternalTexture,
+                 BGLEntryType(wgpu::SamplerBindingType::Filtering),
+                 BGLEntryType(wgpu::BufferBindingType::Uniform)},
+        TestInfo{kMaxUniformBuffersPerShaderStage, kUniformsPerExternalTexture,
+                 BGLEntryType(wgpu::BufferBindingType::Uniform),
+                 BGLEntryType(wgpu::TextureSampleType::Float)},
+    };
+
+    for (TestInfo info : kTestInfos) {
+        wgpu::BindGroupLayout bgl[2];
+        std::vector<utils::BindingLayoutEntryInitializationHelper> maxBindings;
+
+        // Create an external texture binding layout entry
+        wgpu::BindGroupLayoutEntry entry = BGLEntryType(&utils::kExternalTextureBindingLayout);
+        entry.binding = 0;
+        maxBindings.push_back(entry);
+
+        // Create the other bindings such that we reach the max bindings per stage when including
+        // the external texture.
+        for (uint32_t i = 1; i <= info.maxCount - info.bindingsPerExternalTexture; ++i) {
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = i;
+            maxBindings.push_back(entry);
+        }
+
+        // Ensure that creation without the external texture works.
+        bgl[0] = MakeBindGroupLayout(maxBindings.data(), maxBindings.size());
+
+        // Adding an extra binding of a different type works.
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.otherEntry;
+            entry.binding = info.maxCount;
+            bindings.push_back(entry);
+            MakeBindGroupLayout(bindings.data(), bindings.size());
+        }
+
+        // Adding an extra binding of the maxed type in a different stage works
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = info.maxCount;
+            entry.visibility = wgpu::ShaderStage::Fragment;
+            bindings.push_back(entry);
+            MakeBindGroupLayout(bindings.data(), bindings.size());
+        }
+
+        // Adding an extra binding of the maxed type and stage exceeds the per stage limit.
+        {
+            std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings;
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.binding = info.maxCount;
+            bindings.push_back(entry);
+            ASSERT_DEVICE_ERROR(MakeBindGroupLayout(bindings.data(), bindings.size()));
+        }
+
+        // Creating a pipeline layout from the valid BGL works.
+        TestCreatePipelineLayout(bgl, 1, true);
+
+        // Adding an extra binding of a different type in a different BGL works
+        bgl[1] = utils::MakeBindGroupLayout(device, {info.otherEntry});
+        TestCreatePipelineLayout(bgl, 2, true);
+
+        {
+            // Adding an extra binding of the maxed type in a different stage works
+            wgpu::BindGroupLayoutEntry entry = info.entry;
+            entry.visibility = wgpu::ShaderStage::Fragment;
+            bgl[1] = utils::MakeBindGroupLayout(device, {entry});
+            TestCreatePipelineLayout(bgl, 2, true);
+        }
+
+        // Adding an extra binding of the maxed type in a different BGL exceeds the per stage limit.
+        bgl[1] = utils::MakeBindGroupLayout(device, {info.entry});
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+}
+
+// Check that dynamic buffer numbers exceed maximum value in one bind group layout.
+TEST_F(BindGroupLayoutValidationTest, DynamicBufferNumberLimit) {
+    wgpu::BindGroupLayout bgl[2];
+    std::vector<wgpu::BindGroupLayoutEntry> maxUniformDB;
+    std::vector<wgpu::BindGroupLayoutEntry> maxStorageDB;
+    std::vector<wgpu::BindGroupLayoutEntry> maxReadonlyStorageDB;
+
+    // In this test, we use all the same shader stage. Ensure that this does not exceed the
+    // per-stage limit.
+    static_assert(kMaxDynamicUniformBuffersPerPipelineLayout <= kMaxUniformBuffersPerShaderStage);
+    static_assert(kMaxDynamicStorageBuffersPerPipelineLayout <= kMaxStorageBuffersPerShaderStage);
+
+    for (uint32_t i = 0; i < kMaxDynamicUniformBuffersPerPipelineLayout; ++i) {
+        maxUniformDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true));
+    }
+
+    for (uint32_t i = 0; i < kMaxDynamicStorageBuffersPerPipelineLayout; ++i) {
+        maxStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage, true));
+    }
+
+    for (uint32_t i = 0; i < kMaxDynamicStorageBuffersPerPipelineLayout; ++i) {
+        maxReadonlyStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true));
+    }
+
+    // Test creating with the maxes works
+    {
+        bgl[0] = MakeBindGroupLayout(maxUniformDB.data(), maxUniformDB.size());
+        TestCreatePipelineLayout(bgl, 1, true);
+
+        bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size());
+        TestCreatePipelineLayout(bgl, 1, true);
+
+        bgl[0] = MakeBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size());
+        TestCreatePipelineLayout(bgl, 1, true);
+    }
+
+    // The following tests exceed the per-pipeline layout limits. We use the Fragment stage to
+    // ensure we don't hit the per-stage limit.
+
+    // Check dynamic uniform buffers exceed maximum in pipeline layout.
+    {
+        bgl[0] = MakeBindGroupLayout(maxUniformDB.data(), maxUniformDB.size());
+        bgl[1] = utils::MakeBindGroupLayout(
+            device, {
+                        {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true},
+                    });
+
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+
+    // Check dynamic storage buffers exceed maximum in pipeline layout
+    {
+        bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size());
+        bgl[1] = utils::MakeBindGroupLayout(
+            device, {
+                        {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage, true},
+                    });
+
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+
+    // Check dynamic readonly storage buffers exceed maximum in pipeline layout
+    {
+        bgl[0] = MakeBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size());
+        bgl[1] = utils::MakeBindGroupLayout(
+            device,
+            {
+                {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true},
+            });
+
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+
+    // Check dynamic storage buffers + dynamic readonly storage buffers exceed maximum storage
+    // buffers in pipeline layout
+    {
+        bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size());
+        bgl[1] = utils::MakeBindGroupLayout(
+            device,
+            {
+                {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true},
+            });
+
+        TestCreatePipelineLayout(bgl, 2, false);
+    }
+
+    // Check dynamic uniform buffers exceed maximum in bind group layout.
+    {
+        maxUniformDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            kMaxDynamicUniformBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment,
+            wgpu::BufferBindingType::Uniform, true));
+        TestCreateBindGroupLayout(maxUniformDB.data(), maxUniformDB.size(), false);
+    }
+
+    // Check dynamic storage buffers exceed maximum in bind group layout.
+    {
+        maxStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            kMaxDynamicStorageBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment,
+            wgpu::BufferBindingType::Storage, true));
+        TestCreateBindGroupLayout(maxStorageDB.data(), maxStorageDB.size(), false);
+    }
+
+    // Check dynamic readonly storage buffers exceed maximum in bind group layout.
+    {
+        maxReadonlyStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper(
+            kMaxDynamicStorageBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment,
+            wgpu::BufferBindingType::ReadOnlyStorage, true));
+        TestCreateBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size(), false);
+    }
+}
+
+// Test that multisampled textures must be 2D sampled textures
+TEST_F(BindGroupLayoutValidationTest, MultisampledTextureViewDimension) {
+    // Multisampled 2D texture works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                    wgpu::TextureViewDimension::e2D, true},
+                               });
+
+    // Multisampled 2D (defaulted) texture works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                    wgpu::TextureViewDimension::Undefined, true},
+                               });
+
+    // Multisampled 2D array texture is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                     wgpu::TextureViewDimension::e2DArray, true},
+                }));
+
+    // Multisampled cube texture is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                     wgpu::TextureViewDimension::Cube, true},
+                }));
+
+    // Multisampled cube array texture is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                     wgpu::TextureViewDimension::CubeArray, true},
+                }));
+
+    // Multisampled 3D texture is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                     wgpu::TextureViewDimension::e3D, true},
+                }));
+
+    // Multisampled 1D texture is invalid.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+        device, {
+                    {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                     wgpu::TextureViewDimension::e1D, true},
+                }));
+}
+
+// Test that multisampled texture bindings are valid
+TEST_F(BindGroupLayoutValidationTest, MultisampledTextureSampleType) {
+    // Multisampled float sample type works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                    wgpu::TextureViewDimension::e2D, true},
+                               });
+
+    // Multisampled uint sample type works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Uint,
+                                    wgpu::TextureViewDimension::e2D, true},
+                               });
+
+    // Multisampled sint sample type works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Sint,
+                                    wgpu::TextureViewDimension::e2D, true},
+                               });
+
+    // Multisampled depth sample type works.
+    utils::MakeBindGroupLayout(device,
+                               {
+                                   {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Depth,
+                                    wgpu::TextureViewDimension::e2D, true},
+                               });
+}
+
+constexpr uint32_t kBindingSize = 9;
+
+class SetBindGroupValidationTest : public ValidationTest {
+  public:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        mBindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform, true},
+                     {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform, false},
+                     {2, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Storage, true},
+                     {3, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::ReadOnlyStorage, true}});
+        mMinUniformBufferOffsetAlignment =
+            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+        mBufferSize = 3 * mMinUniformBufferOffsetAlignment + 8;
+    }
+
+    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = bufferSize;
+        bufferDescriptor.usage = usage;
+
+        return device.CreateBuffer(&bufferDescriptor);
+    }
+
+    wgpu::BindGroupLayout mBindGroupLayout;
+
+    wgpu::RenderPipeline CreateRenderPipeline() {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                struct S {
+                    value : vec2<f32>
+                }
+
+                @group(0) @binding(0) var<uniform> uBufferDynamic : S;
+                @group(0) @binding(1) var<uniform> uBuffer : S;
+                @group(0) @binding(2) var<storage, read_write> sBufferDynamic : S;
+                @group(0) @binding(3) var<storage, read> sReadonlyBufferDynamic : S;
+
+                @stage(fragment) fn main() {
+                })");
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        wgpu::PipelineLayout pipelineLayout =
+            utils::MakeBasicPipelineLayout(device, &mBindGroupLayout);
+        pipelineDescriptor.layout = pipelineLayout;
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::ComputePipeline CreateComputePipeline() {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+                struct S {
+                    value : vec2<f32>
+                }
+
+                @group(0) @binding(0) var<uniform> uBufferDynamic : S;
+                @group(0) @binding(1) var<uniform> uBuffer : S;
+                @group(0) @binding(2) var<storage, read_write> sBufferDynamic : S;
+                @group(0) @binding(3) var<storage, read> sReadonlyBufferDynamic : S;
+
+                @stage(compute) @workgroup_size(4, 4, 1) fn main() {
+                })");
+
+        wgpu::PipelineLayout pipelineLayout =
+            utils::MakeBasicPipelineLayout(device, &mBindGroupLayout);
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.layout = pipelineLayout;
+        csDesc.compute.module = csModule;
+        csDesc.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&csDesc);
+    }
+
+    void TestRenderPassBindGroup(wgpu::BindGroup bindGroup,
+                                 uint32_t* offsets,
+                                 uint32_t count,
+                                 bool expectation) {
+        wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
+        DummyRenderPass renderPass(device);
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.SetPipeline(renderPipeline);
+        if (bindGroup != nullptr) {
+            renderPassEncoder.SetBindGroup(0, bindGroup, count, offsets);
+        }
+        renderPassEncoder.Draw(3);
+        renderPassEncoder.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+        } else {
+            commandEncoder.Finish();
+        }
+    }
+
+    void TestComputePassBindGroup(wgpu::BindGroup bindGroup,
+                                  uint32_t* offsets,
+                                  uint32_t count,
+                                  bool expectation) {
+        wgpu::ComputePipeline computePipeline = CreateComputePipeline();
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetPipeline(computePipeline);
+        if (bindGroup != nullptr) {
+            computePassEncoder.SetBindGroup(0, bindGroup, count, offsets);
+        }
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+        } else {
+            commandEncoder.Finish();
+        }
+    }
+
+  protected:
+    uint32_t mMinUniformBufferOffsetAlignment;
+    uint64_t mBufferSize;
+};
+
+// This is the test case that should work.
+TEST_F(SetBindGroupValidationTest, Basic) {
+    // Set up the bind group.
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    std::array<uint32_t, 3> offsets = {512, 256, 0};
+
+    TestRenderPassBindGroup(bindGroup, offsets.data(), 3, true);
+
+    TestComputePassBindGroup(bindGroup, offsets.data(), 3, true);
+}
+
+// Draw/dispatch with a bind group missing is invalid
+TEST_F(SetBindGroupValidationTest, MissingBindGroup) {
+    TestRenderPassBindGroup(nullptr, nullptr, 0, false);
+    TestComputePassBindGroup(nullptr, nullptr, 0, false);
+}
+
+// Setting bind group after a draw / dispatch should re-verify the layout is compatible
+TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
+    // Set up the bind group
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    std::array<uint32_t, 3> offsets = {512, 256, 0};
+
+    // Set up bind group that is incompatible
+    wgpu::BindGroupLayout invalidLayout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup invalidGroup =
+        utils::MakeBindGroup(device, invalidLayout, {{0, storageBuffer, 0, kBindingSize}});
+
+    {
+        wgpu::ComputePipeline computePipeline = CreateComputePipeline();
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetPipeline(computePipeline);
+        computePassEncoder.SetBindGroup(0, bindGroup, 3, offsets.data());
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.SetBindGroup(0, invalidGroup, 0, nullptr);
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+    {
+        wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
+        DummyRenderPass renderPass(device);
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.SetPipeline(renderPipeline);
+        renderPassEncoder.SetBindGroup(0, bindGroup, 3, offsets.data());
+        renderPassEncoder.Draw(3);
+        renderPassEncoder.SetBindGroup(0, invalidGroup, 0, nullptr);
+        renderPassEncoder.Draw(3);
+        renderPassEncoder.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test cases that test dynamic offsets count mismatch with bind group layout.
+TEST_F(SetBindGroupValidationTest, DynamicOffsetsMismatch) {
+    // Set up bind group.
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    // Number of offsets mismatch.
+    std::array<uint32_t, 4> mismatchOffsets = {768, 512, 256, 0};
+
+    TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 1, false);
+    TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 2, false);
+    TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 4, false);
+
+    TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 1, false);
+    TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 2, false);
+    TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 4, false);
+}
+
+// Test cases that test dynamic offsets not aligned
+TEST_F(SetBindGroupValidationTest, DynamicOffsetsNotAligned) {
+    // Set up bind group.
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    // Dynamic offsets are not aligned.
+    std::array<uint32_t, 3> notAlignedOffsets = {512, 128, 0};
+
+    TestRenderPassBindGroup(bindGroup, notAlignedOffsets.data(), 3, false);
+
+    TestComputePassBindGroup(bindGroup, notAlignedOffsets.data(), 3, false);
+}
+
+// Test cases that test dynamic uniform buffer out of bound situation.
+TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicUniformBuffer) {
+    // Set up bind group.
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    // Dynamic offset + offset is larger than buffer size.
+    std::array<uint32_t, 3> overFlowOffsets = {1024, 256, 0};
+
+    TestRenderPassBindGroup(bindGroup, overFlowOffsets.data(), 3, false);
+
+    TestComputePassBindGroup(bindGroup, overFlowOffsets.data(), 3, false);
+}
+
+// Test cases that test dynamic storage buffer out of bound situation.
+TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicStorageBuffer) {
+    // Set up bind group.
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    // Dynamic offset + offset is larger than buffer size.
+    std::array<uint32_t, 3> overFlowOffsets = {0, 256, 1024};
+
+    TestRenderPassBindGroup(bindGroup, overFlowOffsets.data(), 3, false);
+
+    TestComputePassBindGroup(bindGroup, overFlowOffsets.data(), 3, false);
+}
+
+// Test cases that test dynamic uniform buffer out of bound situation because of binding size.
+TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicUniformBuffer) {
+    // Set up bind group, but binding size is larger than
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+
+    // Dynamic offset + offset isn't larger than buffer size.
+    // But with binding size, it will trigger OOB error.
+    std::array<uint32_t, 3> offsets = {768, 256, 0};
+
+    TestRenderPassBindGroup(bindGroup, offsets.data(), 3, false);
+
+    TestComputePassBindGroup(bindGroup, offsets.data(), 3, false);
+}
+
+// Test cases that test dynamic storage buffer out of bound situation because of binding size.
+TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicStorageBuffer) {
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
+                                                     {{0, uniformBuffer, 0, kBindingSize},
+                                                      {1, uniformBuffer, 0, kBindingSize},
+                                                      {2, storageBuffer, 0, kBindingSize},
+                                                      {3, readonlyStorageBuffer, 0, kBindingSize}});
+    // Dynamic offset + offset isn't larger than buffer size.
+    // But with binding size, it will trigger OOB error.
+    std::array<uint32_t, 3> offsets = {0, 256, 768};
+
+    TestRenderPassBindGroup(bindGroup, offsets.data(), 3, false);
+
+    TestComputePassBindGroup(bindGroup, offsets.data(), 3, false);
+}
+
+// Regression test for crbug.com/dawn/408 where dynamic offsets were applied in the wrong order.
+// Dynamic offsets should be applied in increasing order of binding number.
+TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
+    // Note: The order of the binding numbers of the bind group and bind group layout are
+    // intentionally different and not in increasing order.
+    // This test uses both storage and uniform buffers to ensure buffer bindings are sorted first by
+    // binding number before type.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {
+                    {3, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true},
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true},
+                    {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true},
+                });
+
+    // Create buffers which are 3x, 2x, and 1x the size of the minimum buffer offset, plus 4 bytes
+    // to spare (to avoid zero-sized bindings). We will offset the bindings so they reach the very
+    // end of the buffer. Any mismatch applying too-large of an offset to a smaller buffer will hit
+    // the out-of-bounds condition during validation.
+    wgpu::Buffer buffer3x =
+        CreateBuffer(3 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
+    wgpu::Buffer buffer2x =
+        CreateBuffer(2 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
+    wgpu::Buffer buffer1x =
+        CreateBuffer(1 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Uniform);
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl,
+                                                     {
+                                                         {0, buffer3x, 0, 4},
+                                                         {3, buffer2x, 0, 4},
+                                                         {2, buffer1x, 0, 4},
+                                                     });
+
+    std::array<uint32_t, 3> offsets;
+    {
+        // Base case works.
+        offsets = {/* binding 0 */ 0,
+                   /* binding 2 */ 0,
+                   /* binding 3 */ 0};
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.End();
+        commandEncoder.Finish();
+    }
+    {
+        // Offset the first binding to touch the end of the buffer. Should succeed.
+        // Will fail if the offset is applied to the first or second bindings since their buffers
+        // are too small.
+        offsets = {/* binding 0 */ 3 * mMinUniformBufferOffsetAlignment,
+                   /* binding 2 */ 0,
+                   /* binding 3 */ 0};
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.End();
+        commandEncoder.Finish();
+    }
+    {
+        // Offset the second binding to touch the end of the buffer. Should succeed.
+        offsets = {/* binding 0 */ 0,
+                   /* binding 2 */ 1 * mMinUniformBufferOffsetAlignment,
+                   /* binding 3 */ 0};
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.End();
+        commandEncoder.Finish();
+    }
+    {
+        // Offset the third binding to touch the end of the buffer. Should succeed.
+        // Will fail if the offset is applied to the second binding since its buffer
+        // is too small.
+        offsets = {/* binding 0 */ 0,
+                   /* binding 2 */ 0,
+                   /* binding 3 */ 2 * mMinUniformBufferOffsetAlignment};
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.End();
+        commandEncoder.Finish();
+    }
+    {
+        // Offset each binding to touch the end of their buffer. Should succeed.
+        offsets = {/* binding 0 */ 3 * mMinUniformBufferOffsetAlignment,
+                   /* binding 2 */ 1 * mMinUniformBufferOffsetAlignment,
+                   /* binding 3 */ 2 * mMinUniformBufferOffsetAlignment};
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
+        computePassEncoder.End();
+        commandEncoder.Finish();
+    }
+}
+
+// Test that an error is produced (and no ASSERTs fired) when using an error bindgroup in
+// SetBindGroup
+TEST_F(SetBindGroupValidationTest, ErrorBindGroup) {
+    // Bindgroup creation fails because not all bindings are specified.
+    wgpu::BindGroup bindGroup;
+    ASSERT_DEVICE_ERROR(bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, {}));
+
+    TestRenderPassBindGroup(bindGroup, nullptr, 0, false);
+
+    TestComputePassBindGroup(bindGroup, nullptr, 0, false);
+}
+
+class SetBindGroupPersistenceValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        mVsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+        mBufferSize = 3 * GetSupportedLimits().limits.minUniformBufferOffsetAlignment + 8;
+    }
+
+    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = bufferSize;
+        bufferDescriptor.usage = usage;
+
+        return device.CreateBuffer(&bufferDescriptor);
+    }
+
+    // Generates bind group layouts and a pipeline from a 2D list of binding types.
+    std::tuple<std::vector<wgpu::BindGroupLayout>, wgpu::RenderPipeline> SetUpLayoutsAndPipeline(
+        std::vector<std::vector<wgpu::BufferBindingType>> layouts) {
+        std::vector<wgpu::BindGroupLayout> bindGroupLayouts(layouts.size());
+
+        // Iterate through the desired bind group layouts.
+        for (uint32_t l = 0; l < layouts.size(); ++l) {
+            const auto& layout = layouts[l];
+            std::vector<wgpu::BindGroupLayoutEntry> bindings(layout.size());
+
+            // Iterate through binding types and populate a list of BindGroupLayoutEntrys.
+            for (uint32_t b = 0; b < layout.size(); ++b) {
+                bindings[b] = utils::BindingLayoutEntryInitializationHelper(
+                    b, wgpu::ShaderStage::Fragment, layout[b]);
+            }
+
+            // Create the bind group layout.
+            wgpu::BindGroupLayoutDescriptor bglDescriptor;
+            bglDescriptor.entryCount = static_cast<uint32_t>(bindings.size());
+            bglDescriptor.entries = bindings.data();
+            bindGroupLayouts[l] = device.CreateBindGroupLayout(&bglDescriptor);
+        }
+
+        // Create a pipeline layout from the list of bind group layouts.
+        wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor;
+        pipelineLayoutDescriptor.bindGroupLayoutCount =
+            static_cast<uint32_t>(bindGroupLayouts.size());
+        pipelineLayoutDescriptor.bindGroupLayouts = bindGroupLayouts.data();
+
+        wgpu::PipelineLayout pipelineLayout =
+            device.CreatePipelineLayout(&pipelineLayoutDescriptor);
+
+        std::stringstream ss;
+        ss << "struct S { value : vec2<f32> }";
+
+        // Build a shader which has bindings that match the pipeline layout.
+        for (uint32_t l = 0; l < layouts.size(); ++l) {
+            const auto& layout = layouts[l];
+
+            for (uint32_t b = 0; b < layout.size(); ++b) {
+                wgpu::BufferBindingType binding = layout[b];
+                ss << "@group(" << l << ") @binding(" << b << ") ";
+                switch (binding) {
+                    case wgpu::BufferBindingType::Storage:
+                        ss << "var<storage, read_write> set" << l << "_binding" << b << " : S;";
+                        break;
+                    case wgpu::BufferBindingType::Uniform:
+                        ss << "var<uniform> set" << l << "_binding" << b << " : S;";
+                        break;
+                    default:
+                        UNREACHABLE();
+                }
+            }
+        }
+
+        ss << "@stage(fragment) fn main() {}";
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, ss.str().c_str());
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = mVsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = pipelineLayout;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+        return std::make_tuple(bindGroupLayouts, pipeline);
+    }
+
+  protected:
+    uint32_t mBufferSize;
+
+  private:
+    wgpu::ShaderModule mVsModule;
+};
+
+// Test it is valid to set bind groups before setting the pipeline.
+TEST_F(SetBindGroupPersistenceValidationTest, BindGroupBeforePipeline) {
+    auto [bindGroupLayouts, pipeline] = SetUpLayoutsAndPipeline({{
+        {{
+            wgpu::BufferBindingType::Uniform,
+            wgpu::BufferBindingType::Uniform,
+        }},
+        {{
+            wgpu::BufferBindingType::Storage,
+            wgpu::BufferBindingType::Uniform,
+        }},
+    }});
+
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroup bindGroup0 = utils::MakeBindGroup(
+        device, bindGroupLayouts[0],
+        {{0, uniformBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
+
+    wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(
+        device, bindGroupLayouts[1],
+        {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
+
+    DummyRenderPass renderPass(device);
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+
+    renderPassEncoder.SetBindGroup(0, bindGroup0);
+    renderPassEncoder.SetBindGroup(1, bindGroup1);
+    renderPassEncoder.SetPipeline(pipeline);
+    renderPassEncoder.Draw(3);
+
+    renderPassEncoder.End();
+    commandEncoder.Finish();
+}
+
+// Dawn does not have a concept of bind group inheritance though the backing APIs may.
+// Test that it is valid to draw with bind groups that are not "inherited". They persist
+// after a pipeline change.
+TEST_F(SetBindGroupPersistenceValidationTest, NotVulkanInheritance) {
+    auto [bindGroupLayoutsA, pipelineA] = SetUpLayoutsAndPipeline({{
+        {{
+            wgpu::BufferBindingType::Uniform,
+            wgpu::BufferBindingType::Storage,
+        }},
+        {{
+            wgpu::BufferBindingType::Uniform,
+            wgpu::BufferBindingType::Uniform,
+        }},
+    }});
+
+    auto [bindGroupLayoutsB, pipelineB] = SetUpLayoutsAndPipeline({{
+        {{
+            wgpu::BufferBindingType::Storage,
+            wgpu::BufferBindingType::Uniform,
+        }},
+        {{
+            wgpu::BufferBindingType::Uniform,
+            wgpu::BufferBindingType::Uniform,
+        }},
+    }});
+
+    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroup bindGroupA0 = utils::MakeBindGroup(
+        device, bindGroupLayoutsA[0],
+        {{0, uniformBuffer, 0, kBindingSize}, {1, storageBuffer, 0, kBindingSize}});
+
+    wgpu::BindGroup bindGroupA1 = utils::MakeBindGroup(
+        device, bindGroupLayoutsA[1],
+        {{0, uniformBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
+
+    wgpu::BindGroup bindGroupB0 = utils::MakeBindGroup(
+        device, bindGroupLayoutsB[0],
+        {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
+
+    DummyRenderPass renderPass(device);
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+
+    renderPassEncoder.SetPipeline(pipelineA);
+    renderPassEncoder.SetBindGroup(0, bindGroupA0);
+    renderPassEncoder.SetBindGroup(1, bindGroupA1);
+    renderPassEncoder.Draw(3);
+
+    renderPassEncoder.SetPipeline(pipelineB);
+    renderPassEncoder.SetBindGroup(0, bindGroupB0);
+    // This draw is valid.
+    // Bind group 1 persists even though it is not "inherited".
+    renderPassEncoder.Draw(3);
+
+    renderPassEncoder.End();
+    commandEncoder.Finish();
+}
+
+class BindGroupLayoutCompatibilityTest : public ValidationTest {
+  public:
+    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = bufferSize;
+        bufferDescriptor.usage = usage;
+
+        return device.CreateBuffer(&bufferDescriptor);
+    }
+
+    wgpu::RenderPipeline CreateFSRenderPipeline(
+        const char* fsShader,
+        std::vector<wgpu::BindGroupLayout> bindGroupLayout) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fsShader);
+
+        wgpu::PipelineLayoutDescriptor descriptor;
+        descriptor.bindGroupLayoutCount = bindGroupLayout.size();
+        descriptor.bindGroupLayouts = bindGroupLayout.data();
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor);
+        pipelineDescriptor.layout = pipelineLayout;
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    wgpu::RenderPipeline CreateRenderPipeline(std::vector<wgpu::BindGroupLayout> bindGroupLayouts) {
+        return CreateFSRenderPipeline(R"(
+            struct S {
+                value : vec2<f32>
+            }
+
+            @group(0) @binding(0) var<storage, read_write> sBufferDynamic : S;
+            @group(1) @binding(0) var<storage, read> sReadonlyBufferDynamic : S;
+
+            @stage(fragment) fn main() {
+                var val : vec2<f32> = sBufferDynamic.value;
+                val = sReadonlyBufferDynamic.value;
+            })",
+                                      std::move(bindGroupLayouts));
+    }
+
+    wgpu::ComputePipeline CreateComputePipeline(
+        const char* shader,
+        std::vector<wgpu::BindGroupLayout> bindGroupLayout) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, shader);
+
+        wgpu::PipelineLayoutDescriptor descriptor;
+        descriptor.bindGroupLayoutCount = bindGroupLayout.size();
+        descriptor.bindGroupLayouts = bindGroupLayout.data();
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor);
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.layout = pipelineLayout;
+        csDesc.compute.module = csModule;
+        csDesc.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&csDesc);
+    }
+
+    wgpu::ComputePipeline CreateComputePipeline(
+        std::vector<wgpu::BindGroupLayout> bindGroupLayouts) {
+        return CreateComputePipeline(R"(
+            struct S {
+                value : vec2<f32>
+            }
+
+            @group(0) @binding(0) var<storage, read_write> sBufferDynamic : S;
+            @group(1) @binding(0) var<storage, read> sReadonlyBufferDynamic : S;
+
+            @stage(compute) @workgroup_size(4, 4, 1) fn main() {
+                var val : vec2<f32> = sBufferDynamic.value;
+                val = sReadonlyBufferDynamic.value;
+            })",
+                                     std::move(bindGroupLayouts));
+    }
+};
+
+// Test that it is invalid to pass a writable storage buffer in the pipeline layout when the shader
+// uses the binding as a readonly storage buffer.
+TEST_F(BindGroupLayoutCompatibilityTest, RWStorageInBGLWithROStorageInShader) {
+    // Set up the bind group layout.
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage}});
+
+    ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1}));
+
+    ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1}));
+}
+
+// Test that it is invalid to pass a readonly storage buffer in the pipeline layout when the shader
+// uses the binding as a writable storage buffer.
+TEST_F(BindGroupLayoutCompatibilityTest, ROStorageInBGLWithRWStorageInShader) {
+    // Set up the bind group layout.
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::ReadOnlyStorage}});
+
+    ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1}));
+
+    ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1}));
+}
+
+TEST_F(BindGroupLayoutCompatibilityTest, TextureViewDimension) {
+    constexpr char kTexture2DShaderFS[] = R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @stage(fragment) fn main() {
+            textureDimensions(myTexture);
+        })";
+    constexpr char kTexture2DShaderCS[] = R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @stage(compute) @workgroup_size(1) fn main() {
+            textureDimensions(myTexture);
+        })";
+
+    // Render: Test that 2D texture with 2D view dimension works
+    CreateFSRenderPipeline(
+        kTexture2DShaderFS,
+        {utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                      wgpu::TextureViewDimension::e2D}})});
+
+    // Render: Test that 2D texture with 2D array view dimension is invalid
+    ASSERT_DEVICE_ERROR(CreateFSRenderPipeline(
+        kTexture2DShaderFS,
+        {utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                      wgpu::TextureViewDimension::e2DArray}})}));
+
+    // Compute: Test that 2D texture with 2D view dimension works
+    CreateComputePipeline(
+        kTexture2DShaderCS,
+        {utils::MakeBindGroupLayout(device,
+                                    {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                      wgpu::TextureViewDimension::e2D}})});
+
+    // Compute: Test that 2D texture with 2D array view dimension is invalid
+    ASSERT_DEVICE_ERROR(CreateComputePipeline(
+        kTexture2DShaderCS,
+        {utils::MakeBindGroupLayout(device,
+                                    {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                      wgpu::TextureViewDimension::e2DArray}})}));
+
+    constexpr char kTexture2DArrayShaderFS[] = R"(
+        @group(0) @binding(0) var myTexture : texture_2d_array<f32>;
+        @stage(fragment) fn main() {
+            textureDimensions(myTexture);
+        })";
+    constexpr char kTexture2DArrayShaderCS[] = R"(
+        @group(0) @binding(0) var myTexture : texture_2d_array<f32>;
+        @stage(compute) @workgroup_size(1) fn main() {
+            textureDimensions(myTexture);
+        })";
+
+    // Render: Test that 2D texture array with 2D array view dimension works
+    CreateFSRenderPipeline(
+        kTexture2DArrayShaderFS,
+        {utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                      wgpu::TextureViewDimension::e2DArray}})});
+
+    // Render: Test that 2D texture array with 2D view dimension is invalid
+    ASSERT_DEVICE_ERROR(CreateFSRenderPipeline(
+        kTexture2DArrayShaderFS,
+        {utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                      wgpu::TextureViewDimension::e2D}})}));
+
+    // Compute: Test that 2D texture array with 2D array view dimension works
+    CreateComputePipeline(
+        kTexture2DArrayShaderCS,
+        {utils::MakeBindGroupLayout(device,
+                                    {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                      wgpu::TextureViewDimension::e2DArray}})});
+
+    // Compute: Test that 2D texture array with 2D view dimension is invalid
+    ASSERT_DEVICE_ERROR(CreateComputePipeline(
+        kTexture2DArrayShaderCS,
+        {utils::MakeBindGroupLayout(device,
+                                    {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float,
+                                      wgpu::TextureViewDimension::e2D}})}));
+}
+
+// Test that a bgl with an external texture is compatible with texture_external in a shader and that
+// an error is returned when the binding in the shader does not match.
+TEST_F(BindGroupLayoutCompatibilityTest, ExternalTextureBindGroupLayoutCompatibility) {
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+
+    // Test that an external texture binding works with a texture_external in the shader.
+    CreateFSRenderPipeline(R"(
+            @group(0) @binding(0) var myExternalTexture: texture_external;
+            @stage(fragment) fn main() {
+                _ = myExternalTexture;
+            })",
+                           {bgl});
+
+    // Test that an external texture binding doesn't work with a texture_2d<f32> in the shader.
+    ASSERT_DEVICE_ERROR(CreateFSRenderPipeline(R"(
+            @group(0) @binding(0) var myTexture: texture_2d<f32>;
+            @stage(fragment) fn main() {
+                _ = myTexture;
+            })",
+                                               {bgl}));
+}
+
+class BindingsValidationTest : public BindGroupLayoutCompatibilityTest {
+  public:
+    void SetUp() override {
+        BindGroupLayoutCompatibilityTest::SetUp();
+        mBufferSize = 3 * GetSupportedLimits().limits.minUniformBufferOffsetAlignment + 8;
+    }
+
+    void TestRenderPassBindings(const wgpu::BindGroup* bg,
+                                uint32_t count,
+                                wgpu::RenderPipeline pipeline,
+                                bool expectation) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        DummyRenderPass dummyRenderPass(device);
+        wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&dummyRenderPass);
+        for (uint32_t i = 0; i < count; ++i) {
+            rp.SetBindGroup(i, bg[i]);
+        }
+        rp.SetPipeline(pipeline);
+        rp.Draw(3);
+        rp.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        } else {
+            encoder.Finish();
+        }
+    }
+
+    void TestComputePassBindings(const wgpu::BindGroup* bg,
+                                 uint32_t count,
+                                 wgpu::ComputePipeline pipeline,
+                                 bool expectation) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder cp = encoder.BeginComputePass();
+        for (uint32_t i = 0; i < count; ++i) {
+            cp.SetBindGroup(i, bg[i]);
+        }
+        cp.SetPipeline(pipeline);
+        cp.Dispatch(1);
+        cp.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        } else {
+            encoder.Finish();
+        }
+    }
+
+    uint32_t mBufferSize;
+    static constexpr uint32_t kBindingNum = 3;
+};
+
+// Test that it is valid to set a pipeline layout with bindings unused by the pipeline.
+TEST_F(BindingsValidationTest, PipelineLayoutWithMoreBindingsThanPipeline) {
+    // Set up bind group layouts.
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage},
+                 {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Uniform}});
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout bgl2 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage}});
+
+    // pipelineLayout has unused binding set (bgl2) and unused entry in a binding set (bgl0).
+    CreateRenderPipeline({bgl0, bgl1, bgl2});
+
+    CreateComputePipeline({bgl0, bgl1, bgl2});
+}
+
+// Test that it is invalid to set a pipeline layout that doesn't have all necessary bindings
+// required by the pipeline.
+TEST_F(BindingsValidationTest, PipelineLayoutWithLessBindingsThanPipeline) {
+    // Set up bind group layout.
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Storage}});
+
+    // missing a binding set (bgl1) in pipeline layout
+    {
+        ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0}));
+
+        ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0}));
+    }
+
+    // bgl1 is not missing, but it is empty
+    {
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(device, {});
+
+        ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1}));
+
+        ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1}));
+    }
+
+    // bgl1 is neither missing nor empty, but it doesn't contain the necessary binding
+    {
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      wgpu::BufferBindingType::Uniform}});
+
+        ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1}));
+
+        ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1}));
+    }
+}
+
+// Test that it is valid to set bind groups whose layout is not set in the pipeline layout.
+// But it's invalid to set extra entry for a given bind group's layout if that layout is set in
+// the pipeline layout.
+TEST_F(BindingsValidationTest, BindGroupsWithMoreBindingsThanPipelineLayout) {
+    // Set up bind group layouts, buffers, bind groups, pipeline layouts and pipelines.
+    std::array<wgpu::BindGroupLayout, kBindingNum + 1> bgl;
+    std::array<wgpu::BindGroup, kBindingNum + 1> bg;
+    std::array<wgpu::Buffer, kBindingNum + 1> buffer;
+    for (uint32_t i = 0; i < kBindingNum + 1; ++i) {
+        bgl[i] = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      i == 1 ? wgpu::BufferBindingType::ReadOnlyStorage
+                             : wgpu::BufferBindingType::Storage}});
+        buffer[i] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+        bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}});
+    }
+
+    // Set 3 bindings (and 3 pipeline layouts) in pipeline.
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({bgl[0], bgl[1], bgl[2]});
+    wgpu::ComputePipeline computePipeline = CreateComputePipeline({bgl[0], bgl[1], bgl[2]});
+
+    // Comprared to pipeline layout, there is an extra bind group (bg[3])
+    TestRenderPassBindings(bg.data(), kBindingNum + 1, renderPipeline, true);
+
+    TestComputePassBindings(bg.data(), kBindingNum + 1, computePipeline, true);
+
+    // If a bind group has entry (like bgl1_1 below) unused by the pipeline layout, it is invalid.
+    // Bind groups associated layout should exactly match bind group layout if that layout is
+    // set in pipeline layout.
+    bgl[1] = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::ReadOnlyStorage},
+                 {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Uniform}});
+    buffer[1] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
+    bg[1] = utils::MakeBindGroup(device, bgl[1], {{0, buffer[1]}, {1, buffer[1]}});
+
+    TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false);
+
+    TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false);
+}
+
+// Test that it is invalid to set bind groups that don't have all necessary bindings required
+// by the pipeline layout. Note that both pipeline layout and bind group have enough bindings for
+// pipeline in the following test.
+TEST_F(BindingsValidationTest, BindGroupsWithLessBindingsThanPipelineLayout) {
+    // Set up bind group layouts, buffers, bind groups, pipeline layouts and pipelines.
+    std::array<wgpu::BindGroupLayout, kBindingNum> bgl;
+    std::array<wgpu::BindGroup, kBindingNum> bg;
+    std::array<wgpu::Buffer, kBindingNum> buffer;
+    for (uint32_t i = 0; i < kBindingNum; ++i) {
+        bgl[i] = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                      i == 1 ? wgpu::BufferBindingType::ReadOnlyStorage
+                             : wgpu::BufferBindingType::Storage}});
+        buffer[i] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
+        bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}});
+    }
+
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({bgl[0], bgl[1], bgl[2]});
+    wgpu::ComputePipeline computePipeline = CreateComputePipeline({bgl[0], bgl[1], bgl[2]});
+
+    // Compared to pipeline layout, a binding set (bgl2) related bind group is missing
+    TestRenderPassBindings(bg.data(), kBindingNum - 1, renderPipeline, false);
+
+    TestComputePassBindings(bg.data(), kBindingNum - 1, computePipeline, false);
+
+    // bgl[2] related bind group is not missing, but its bind group is empty
+    bgl[2] = utils::MakeBindGroupLayout(device, {});
+    bg[2] = utils::MakeBindGroup(device, bgl[2], {});
+
+    TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false);
+
+    TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false);
+
+    // bgl[2] related bind group is neither missing nor empty, but it doesn't contain the necessary
+    // binding
+    bgl[2] = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
+                  wgpu::BufferBindingType::Uniform}});
+    buffer[2] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
+    bg[2] = utils::MakeBindGroup(device, bgl[2], {{1, buffer[2]}});
+
+    TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false);
+
+    TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false);
+}
+
+class SamplerTypeBindingTest : public ValidationTest {
+  protected:
+    wgpu::RenderPipeline CreateFragmentPipeline(wgpu::BindGroupLayout* bindGroupLayout,
+                                                const char* fragmentSource) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>();
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentSource);
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        wgpu::PipelineLayout pipelineLayout =
+            utils::MakeBasicPipelineLayout(device, bindGroupLayout);
+        pipelineDescriptor.layout = pipelineLayout;
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+};
+
+// Test that the use of sampler and comparison_sampler in the shader must match the bind group
+// layout.
+TEST_F(SamplerTypeBindingTest, ShaderAndBGLMatches) {
+    // Test that a filtering sampler binding works with normal sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })");
+    }
+
+    // Test that a non-filtering sampler binding works with normal sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })");
+    }
+
+    // Test that comparison sampler binding works with comparison sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler_comparison;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })");
+    }
+
+    // Test that filtering sampler binding does not work with comparison sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+        ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler_comparison;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })"));
+    }
+
+    // Test that non-filtering sampler binding does not work with comparison sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}});
+
+        ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler_comparison;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })"));
+    }
+
+    // Test that comparison sampler binding does not work with normal sampler in the shader.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}});
+
+        ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })"));
+    }
+
+    // Test that a filtering sampler can be used to sample a float texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_2d<f32>;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })");
+    }
+
+    // Test that a non-filtering sampler can be used to sample a float texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_2d<f32>;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })");
+    }
+
+    // Test that a filtering sampler can be used to sample a depth texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_depth_2d;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })");
+    }
+
+    // Test that a non-filtering sampler can be used to sample a depth texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_depth_2d;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })");
+    }
+
+    // Test that a comparison sampler can be used to sample a depth texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler_comparison;
+            @group(0) @binding(1) var myTexture: texture_depth_2d;
+            @stage(fragment) fn main() {
+                textureSampleCompare(myTexture, mySampler, vec2<f32>(0.0, 0.0), 0.0);
+            })");
+    }
+
+    // Test that a filtering sampler cannot be used to sample an unfilterable-float texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::UnfilterableFloat}});
+
+        ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_2d<f32>;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })"));
+    }
+
+    // Test that a non-filtering sampler can be used to sample an unfilterable-float texture.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::UnfilterableFloat}});
+
+        CreateFragmentPipeline(&bindGroupLayout, R"(
+            @group(0) @binding(0) var mySampler: sampler;
+            @group(0) @binding(1) var myTexture: texture_2d<f32>;
+            @stage(fragment) fn main() {
+                textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0));
+            })");
+    }
+}
+
+TEST_F(SamplerTypeBindingTest, SamplerAndBindGroupMatches) {
+    // Test that sampler binding works with normal sampler.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+        utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}});
+    }
+
+    // Test that comparison sampler binding works with sampler w/ compare function.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}});
+
+        wgpu::SamplerDescriptor desc = {};
+        desc.compare = wgpu::CompareFunction::Never;
+        utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}});
+    }
+
+    // Test that sampler binding does not work with sampler w/ compare function.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+        wgpu::SamplerDescriptor desc;
+        desc.compare = wgpu::CompareFunction::Never;
+        ASSERT_DEVICE_ERROR(
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}));
+    }
+
+    // Test that comparison sampler binding does not work with normal sampler.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}});
+
+        wgpu::SamplerDescriptor desc = {};
+        ASSERT_DEVICE_ERROR(
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}));
+    }
+
+    // Test that filtering sampler binding works with a filtering or non-filtering sampler.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}});
+
+        // Test each filter member
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.minFilter = wgpu::FilterMode::Linear;
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}});
+        }
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.magFilter = wgpu::FilterMode::Linear;
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}});
+        }
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.mipmapFilter = wgpu::FilterMode::Linear;
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}});
+        }
+
+        // Test non-filtering sampler
+        utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}});
+    }
+
+    // Test that non-filtering sampler binding does not work with a filtering sampler.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}});
+
+        // Test each filter member
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.minFilter = wgpu::FilterMode::Linear;
+            ASSERT_DEVICE_ERROR(
+                utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}));
+        }
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.magFilter = wgpu::FilterMode::Linear;
+            ASSERT_DEVICE_ERROR(
+                utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}));
+        }
+        {
+            wgpu::SamplerDescriptor desc;
+            desc.mipmapFilter = wgpu::FilterMode::Linear;
+            ASSERT_DEVICE_ERROR(
+                utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}));
+        }
+
+        // Test non-filtering sampler
+        utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}});
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
new file mode 100644
index 0000000..9ec4ae6
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
@@ -0,0 +1,911 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include <gmock/gmock.h>
+
+#include <memory>
+
+using namespace testing;
+
+class MockBufferMapAsyncCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockBufferMapAsyncCallback> mockBufferMapAsyncCallback;
+static void ToMockBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    mockBufferMapAsyncCallback->Call(status, userdata);
+}
+
+class BufferValidationTest : public ValidationTest {
+  protected:
+    wgpu::Buffer CreateMapReadBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::MapRead;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Buffer CreateMapWriteBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::MapWrite;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Buffer BufferMappedAtCreation(uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+        descriptor.mappedAtCreation = true;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    void AssertMapAsyncError(wgpu::Buffer buffer, wgpu::MapMode mode, size_t offset, size_t size) {
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+        ASSERT_DEVICE_ERROR(
+            buffer.MapAsync(mode, offset, size, ToMockBufferMapAsyncCallback, nullptr));
+    }
+
+    wgpu::Queue queue;
+
+  private:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        mockBufferMapAsyncCallback = std::make_unique<MockBufferMapAsyncCallback>();
+        queue = device.GetQueue();
+    }
+
+    void TearDown() override {
+        // Delete mocks so that expectations are checked
+        mockBufferMapAsyncCallback = nullptr;
+
+        ValidationTest::TearDown();
+    }
+};
+
+// Test case where creation should succeed
+TEST_F(BufferValidationTest, CreationSuccess) {
+    // Success
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::Uniform;
+
+        device.CreateBuffer(&descriptor);
+    }
+}
+
+// Test restriction on usages must not be None (0)
+TEST_F(BufferValidationTest, CreationMapUsageNotZero) {
+    // Zero (None) usage is an error
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::None;
+
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+    }
+}
+
+// Test restriction on usages allowed with MapRead and MapWrite
+TEST_F(BufferValidationTest, CreationMapUsageRestrictions) {
+    // MapRead with CopyDst is ok
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+
+        device.CreateBuffer(&descriptor);
+    }
+
+    // MapRead with something else is an error
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::Uniform;
+
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+    }
+
+    // MapWrite with CopySrc is ok
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+
+        device.CreateBuffer(&descriptor);
+    }
+
+    // MapWrite with something else is an error
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::Uniform;
+
+        ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+    }
+}
+
+// Test the success case for mapping buffer for reading
+TEST_F(BufferValidationTest, MapAsync_ReadSuccess) {
+    wgpu::Buffer buf = CreateMapReadBuffer(4);
+
+    buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+    EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+    WaitForAllOperations(device);
+
+    buf.Unmap();
+}
+
+// Test the success case for mapping buffer for writing
+TEST_F(BufferValidationTest, MapAsync_WriteSuccess) {
+    wgpu::Buffer buf = CreateMapWriteBuffer(4);
+
+    buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+    EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+    WaitForAllOperations(device);
+
+    buf.Unmap();
+}
+
+// Test map async with a buffer that's an error
+TEST_F(BufferValidationTest, MapAsync_ErrorBuffer) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buffer;
+    ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&desc));
+
+    AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+    AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+}
+
+// Test map async with an invalid offset and size alignment.
+TEST_F(BufferValidationTest, MapAsync_OffsetSizeAlignment) {
+    // Control case, offset aligned to 8 and size to 4 is valid
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(12);
+        buffer.MapAsync(wgpu::MapMode::Read, 8, 4, nullptr, nullptr);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+        buffer.MapAsync(wgpu::MapMode::Write, 8, 4, nullptr, nullptr);
+    }
+
+    // Error case, offset aligned to 4 is an error.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(12);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 4, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 4, 4);
+    }
+
+    // Error case, size aligned to 2 is an error.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(8);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 6);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 6);
+    }
+}
+
+// Test map async with an invalid offset and size OOB checks
+TEST_F(BufferValidationTest, MapAsync_OffsetSizeOOB) {
+    // Valid case: full buffer is ok.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Read, 0, 8, nullptr, nullptr);
+    }
+
+    // Valid case: range in the middle of the buffer is ok.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(16);
+        buffer.MapAsync(wgpu::MapMode::Read, 8, 4, nullptr, nullptr);
+    }
+
+    // Valid case: empty range at the end of the buffer is ok.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Read, 8, 0, nullptr, nullptr);
+    }
+
+    // Error case, offset is larger than the buffer size (even if size is 0).
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(12);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 16, 0);
+    }
+
+    // Error case, offset + size is larger than the buffer
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(12);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 8, 8);
+    }
+
+    // Error case, offset + size is larger than the buffer, overflow case.
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(12);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 8,
+                            std::numeric_limits<size_t>::max() & ~size_t(7));
+    }
+}
+
+// Test map async with a buffer that has the wrong usage
+TEST_F(BufferValidationTest, MapAsync_WrongUsage) {
+    {
+        wgpu::BufferDescriptor desc;
+        desc.usage = wgpu::BufferUsage::Vertex;
+        desc.size = 4;
+        wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(4);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+    }
+}
+
+// Test map async with a wrong mode
+TEST_F(BufferValidationTest, MapAsync_WrongMode) {
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(4);
+        AssertMapAsyncError(buffer, wgpu::MapMode::None, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(4);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read | wgpu::MapMode::Write, 0, 4);
+    }
+}
+
+// Test map async with a buffer that's already mapped
+TEST_F(BufferValidationTest, MapAsync_AlreadyMapped) {
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(4);
+        buffer.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::MapRead);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::MapWrite);
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+    }
+}
+
+// Test map async with a buffer that's destroyed
+TEST_F(BufferValidationTest, MapAsync_Destroy) {
+    {
+        wgpu::Buffer buffer = CreateMapReadBuffer(4);
+        buffer.Destroy();
+        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
+    }
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
+        buffer.Destroy();
+        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
+    }
+}
+
+// Test map async but unmapping before the result is ready.
+TEST_F(BufferValidationTest, MapAsync_UnmapBeforeResult) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+            .Times(1);
+        buf.Unmap();
+
+        // The callback shouldn't be called again.
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+            .Times(1);
+        buf.Unmap();
+
+        // The callback shouldn't be called again.
+        WaitForAllOperations(device);
+    }
+}
+
+// When a MapAsync is cancelled with Unmap it might still be in flight, test doing a new request
+// works as expected and we don't get the cancelled request's data.
+TEST_F(BufferValidationTest, MapAsync_UnmapBeforeResultAndMapAgain) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, this + 0);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, this + 0))
+            .Times(1);
+        buf.Unmap();
+
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, this + 1);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, this + 1))
+            .Times(1);
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, this + 0);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, this + 0))
+            .Times(1);
+        buf.Unmap();
+
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, this + 1);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, this + 1))
+            .Times(1);
+        WaitForAllOperations(device);
+    }
+}
+
+// Test map async but destroying before the result is ready.
+TEST_F(BufferValidationTest, MapAsync_DestroyBeforeResult) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+            .Times(1);
+        buf.Destroy();
+
+        // The callback shouldn't be called again.
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback,
+                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+            .Times(1);
+        buf.Destroy();
+
+        // The callback shouldn't be called again.
+        WaitForAllOperations(device);
+    }
+}
+
+// Test that the MapCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(BufferValidationTest, MapAsync_UnmapCalledInCallback) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .WillOnce(InvokeWithoutArgs([&]() { buf.Unmap(); }));
+
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .WillOnce(InvokeWithoutArgs([&]() { buf.Unmap(); }));
+
+        WaitForAllOperations(device);
+    }
+}
+
+// Test that the MapCallback isn't fired twice when destroy() is called inside the callback
+TEST_F(BufferValidationTest, MapAsync_DestroyCalledInCallback) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .WillOnce(InvokeWithoutArgs([&]() { buf.Destroy(); }));
+
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .WillOnce(InvokeWithoutArgs([&]() { buf.Destroy(); }));
+
+        WaitForAllOperations(device);
+    }
+}
+
+// Test the success case for mappedAtCreation
+TEST_F(BufferValidationTest, MappedAtCreationSuccess) {
+    BufferMappedAtCreation(4, wgpu::BufferUsage::MapWrite);
+}
+
+// Test the success case for mappedAtCreation for a non-mappable usage
+TEST_F(BufferValidationTest, NonMappableMappedAtCreationSuccess) {
+    BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+}
+
+// Test there is an error when mappedAtCreation is set but the size isn't aligned to 4.
+TEST_F(BufferValidationTest, MappedAtCreationSizeAlignment) {
+    ASSERT_DEVICE_ERROR(BufferMappedAtCreation(2, wgpu::BufferUsage::MapWrite));
+}
+
+// Test that it is valid to destroy an error buffer
+TEST_F(BufferValidationTest, DestroyErrorBuffer) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buf;
+    ASSERT_DEVICE_ERROR(buf = device.CreateBuffer(&desc));
+
+    buf.Destroy();
+}
+
+// Test that it is valid to Destroy an unmapped buffer
+TEST_F(BufferValidationTest, DestroyUnmappedBuffer) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.Destroy();
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.Destroy();
+    }
+}
+
+// Test that it is valid to Destroy a destroyed buffer
+TEST_F(BufferValidationTest, DestroyDestroyedBuffer) {
+    wgpu::Buffer buf = CreateMapWriteBuffer(4);
+    buf.Destroy();
+    buf.Destroy();
+}
+
+// Test that it is invalid to Unmap an error buffer
+TEST_F(BufferValidationTest, UnmapErrorBuffer) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buf;
+    ASSERT_DEVICE_ERROR(buf = device.CreateBuffer(&desc));
+
+    ASSERT_DEVICE_ERROR(buf.Unmap());
+}
+
+// Test that it is invalid to Unmap a destroyed buffer
+TEST_F(BufferValidationTest, UnmapDestroyedBuffer) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        buf.Destroy();
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.Destroy();
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+    }
+}
+
+// Test that it is valid to submit a buffer in a queue with a map usage if it is unmapped
+TEST_F(BufferValidationTest, SubmitBufferWithMapUsage) {
+    wgpu::BufferDescriptor descriptorA;
+    descriptorA.size = 4;
+    descriptorA.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+
+    wgpu::BufferDescriptor descriptorB;
+    descriptorB.size = 4;
+    descriptorB.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+
+    wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+    wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+}
+
+// Test that it is invalid to submit a mapped buffer in a queue
+TEST_F(BufferValidationTest, SubmitMappedBuffer) {
+    wgpu::BufferDescriptor descriptorA;
+    descriptorA.size = 4;
+    descriptorA.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+
+    wgpu::BufferDescriptor descriptorB;
+    descriptorB.size = 4;
+    descriptorB.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+    {
+        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+        bufA.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+        bufB.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::BufferDescriptor mappedBufferDesc = descriptorA;
+        mappedBufferDesc.mappedAtCreation = true;
+        wgpu::Buffer bufA = device.CreateBuffer(&mappedBufferDesc);
+        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        WaitForAllOperations(device);
+    }
+    {
+        wgpu::BufferDescriptor mappedBufferDesc = descriptorB;
+        mappedBufferDesc.mappedAtCreation = true;
+        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+        wgpu::Buffer bufB = device.CreateBuffer(&mappedBufferDesc);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        WaitForAllOperations(device);
+    }
+}
+
+// Test that it is invalid to submit a destroyed buffer in a queue
+TEST_F(BufferValidationTest, SubmitDestroyedBuffer) {
+    wgpu::BufferDescriptor descriptorA;
+    descriptorA.size = 4;
+    descriptorA.usage = wgpu::BufferUsage::CopySrc;
+
+    wgpu::BufferDescriptor descriptorB;
+    descriptorB.size = 4;
+    descriptorB.usage = wgpu::BufferUsage::CopyDst;
+
+    wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+    wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+    bufA.Destroy();
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test that a map usage is required to call Unmap
+TEST_F(BufferValidationTest, UnmapWithoutMapUsage) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buf = device.CreateBuffer(&descriptor);
+
+    ASSERT_DEVICE_ERROR(buf.Unmap());
+}
+
+// Test that it is valid to call Unmap on a buffer that is not mapped
+TEST_F(BufferValidationTest, UnmapUnmappedBuffer) {
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+        // Buffer starts unmapped. Unmap should fail.
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+        buf.Unmap();
+        // Unmapping a second time should fail.
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+    }
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        // Buffer starts unmapped. Unmap should fail.
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
+        buf.Unmap();
+        // Unmapping a second time should fail.
+        ASSERT_DEVICE_ERROR(buf.Unmap());
+    }
+}
+
+// Test that it is invalid to call GetMappedRange on an unmapped buffer.
+TEST_F(BufferValidationTest, GetMappedRange_OnUnmappedBuffer) {
+    // Unmapped at creation case.
+    {
+        wgpu::BufferDescriptor desc;
+        desc.size = 4;
+        desc.usage = wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer buf = device.CreateBuffer(&desc);
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Unmapped after mappedAtCreation case.
+    {
+        wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+        buf.Unmap();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Unmapped after MapAsync read case.
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .Times(1);
+        WaitForAllOperations(device);
+        buf.Unmap();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Unmapped after MapAsync write case.
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .Times(1);
+        WaitForAllOperations(device);
+        buf.Unmap();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+}
+
+// Test that it is invalid to call GetMappedRange on a destroyed buffer.
+TEST_F(BufferValidationTest, GetMappedRange_OnDestroyedBuffer) {
+    // Destroyed after creation case.
+    {
+        wgpu::BufferDescriptor desc;
+        desc.size = 4;
+        desc.usage = wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer buf = device.CreateBuffer(&desc);
+        buf.Destroy();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Destroyed after mappedAtCreation case.
+    {
+        wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+        buf.Destroy();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Destroyed after MapAsync read case.
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .Times(1);
+        WaitForAllOperations(device);
+        buf.Destroy();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+
+    // Destroyed after MapAsync write case.
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+            .Times(1);
+        WaitForAllOperations(device);
+        buf.Destroy();
+
+        ASSERT_EQ(nullptr, buf.GetMappedRange());
+        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+    }
+}
+
+// Test that it is invalid to call GetMappedRange on a buffer after MapAsync for reading
+TEST_F(BufferValidationTest, GetMappedRange_NonConstOnMappedForReading) {
+    wgpu::Buffer buf = CreateMapReadBuffer(4);
+
+    buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
+    EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+    WaitForAllOperations(device);
+
+    ASSERT_EQ(nullptr, buf.GetMappedRange());
+}
+
+// Test valid cases to call GetMappedRange on a buffer.
+TEST_F(BufferValidationTest, GetMappedRange_ValidBufferStateCases) {
+    // GetMappedRange after mappedAtCreation case.
+    {
+        wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+        ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+        ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+    }
+
+    // GetMappedRange after MapAsync for reading case.
+    {
+        wgpu::Buffer buf = CreateMapReadBuffer(4);
+
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+        WaitForAllOperations(device);
+
+        ASSERT_NE(buf.GetConstMappedRange(), nullptr);
+    }
+
+    // GetMappedRange after MapAsync for writing case.
+    {
+        wgpu::Buffer buf = CreateMapWriteBuffer(4);
+
+        buf.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
+        WaitForAllOperations(device);
+
+        ASSERT_NE(buf.GetConstMappedRange(), nullptr);
+        ASSERT_EQ(buf.GetConstMappedRange(), buf.GetMappedRange());
+    }
+}
+
+// Test valid cases to call GetMappedRange on an error buffer.
+TEST_F(BufferValidationTest, GetMappedRange_OnErrorBuffer) {
+    wgpu::BufferDescriptor desc;
+    desc.size = 4;
+    desc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead;
+
+    uint64_t kStupidLarge = uint64_t(1) << uint64_t(63);
+
+    // GetMappedRange after mappedAtCreation a zero-sized buffer returns a non-nullptr.
+    // This is to check we don't do a malloc(0).
+    {
+        wgpu::Buffer buffer;
+        ASSERT_DEVICE_ERROR(buffer = BufferMappedAtCreation(
+                                0, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+        ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+        ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+    }
+
+    // GetMappedRange after mappedAtCreation non-OOM returns a non-nullptr.
+    {
+        wgpu::Buffer buffer;
+        ASSERT_DEVICE_ERROR(buffer = BufferMappedAtCreation(
+                                4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+        ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+        ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+    }
+
+    // GetMappedRange after mappedAtCreation OOM case returns nullptr.
+    {
+        wgpu::Buffer buffer;
+        ASSERT_DEVICE_ERROR(
+            buffer = BufferMappedAtCreation(
+                kStupidLarge, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+        ASSERT_EQ(buffer.GetConstMappedRange(), nullptr);
+        ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+    }
+}
+
+// Test validation of the GetMappedRange parameters
+TEST_F(BufferValidationTest, GetMappedRange_OffsetSizeOOB) {
+    // Valid case: full range is ok
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 8, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_NE(buffer.GetMappedRange(0, 8), nullptr);
+    }
+
+    // Valid case: full range is ok with defaulted MapAsync size
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, wgpu::kWholeMapSize, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_NE(buffer.GetMappedRange(0, 8), nullptr);
+    }
+
+    // Valid case: empty range at the end is ok
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 8, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_NE(buffer.GetMappedRange(8, 0), nullptr);
+    }
+
+    // Valid case: range in the middle is ok.
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(16);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 16, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_NE(buffer.GetMappedRange(8, 4), nullptr);
+    }
+
+    // Error case: offset is larger than the mapped range (even with size = 0)
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 8, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_EQ(buffer.GetMappedRange(9, 0), nullptr);
+        EXPECT_EQ(buffer.GetMappedRange(16, 0), nullptr);
+    }
+
+    // Error case: offset + size is larger than the mapped range
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 12, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_EQ(buffer.GetMappedRange(8, 5), nullptr);
+        EXPECT_EQ(buffer.GetMappedRange(8, 8), nullptr);
+    }
+
+    // Error case: offset + size is larger than the mapped range, overflow case
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+        buffer.MapAsync(wgpu::MapMode::Write, 0, 12, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_EQ(buffer.GetMappedRange(8, std::numeric_limits<size_t>::max()), nullptr);
+    }
+
+    // Error case: offset is before the start of the range
+    {
+        wgpu::Buffer buffer = CreateMapWriteBuffer(12);
+        buffer.MapAsync(wgpu::MapMode::Write, 8, 4, nullptr, nullptr);
+        WaitForAllOperations(device);
+        EXPECT_EQ(buffer.GetMappedRange(7, 4), nullptr);
+        EXPECT_EQ(buffer.GetMappedRange(0, 4), nullptr);
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/CommandBufferValidationTests.cpp b/src/dawn/tests/unittests/validation/CommandBufferValidationTests.cpp
new file mode 100644
index 0000000..784c8aa
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/CommandBufferValidationTests.cpp
@@ -0,0 +1,371 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gmock/gmock.h>
+
+#include "dawn/native/CommandEncoder.h"
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+using ::testing::HasSubstr;
+
+class CommandBufferValidationTest : public ValidationTest {};
+
+// Test for an empty command buffer
+TEST_F(CommandBufferValidationTest, Empty) {
+    device.CreateCommandEncoder().Finish();
+}
+
+// Test that a command buffer cannot be ended mid render pass
+TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
+    DummyRenderPass dummyRenderPass(device);
+
+    // Control case, command buffer ended after the pass is ended.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case, command buffer ended mid-pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
+    }
+
+    // Error case, command buffer ended mid-pass. Trying to use encoders after Finish
+    // should fail too.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
+        ASSERT_DEVICE_ERROR(
+            pass.End(), HasSubstr("Recording in an error or already ended [RenderPassEncoder]."));
+    }
+}
+
+// Test that a command buffer cannot be ended mid compute pass
+TEST_F(CommandBufferValidationTest, EndedMidComputePass) {
+    // Control case, command buffer ended after the pass is ended.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case, command buffer ended mid-pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [ComputePassEncoder] was ended."));
+    }
+
+    // Error case, command buffer ended mid-pass. Trying to use encoders after Finish
+    // should fail too.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [ComputePassEncoder] was ended."));
+        ASSERT_DEVICE_ERROR(
+            pass.End(), HasSubstr("Recording in an error or already ended [ComputePassEncoder]."));
+    }
+}
+
+// Test that a render pass cannot be ended twice
+TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) {
+    DummyRenderPass dummyRenderPass(device);
+
+    // Control case, pass is ended once
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case, pass ended twice
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        pass.End();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Recording in an error or already ended [RenderPassEncoder]."));
+    }
+}
+
+// Test that a compute pass cannot be ended twice
+TEST_F(CommandBufferValidationTest, ComputePassEndedTwice) {
+    // Control case, pass is ended once.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case, pass ended twice
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.End();
+        pass.End();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Recording in an error or already ended [ComputePassEncoder]."));
+    }
+}
+
+// Test that beginning a compute pass before ending the previous pass causes an error.
+TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) {
+    DummyRenderPass dummyRenderPass(device);
+
+    // Beginning a compute pass before ending a render pass causes an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
+        wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
+        computePass.End();
+        renderPass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Beginning a compute pass before ending a compute pass causes an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePass1 = encoder.BeginComputePass();
+        wgpu::ComputePassEncoder computePass2 = encoder.BeginComputePass();
+        computePass2.End();
+        computePass1.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that beginning a render pass before ending the previous pass causes an error.
+TEST_F(CommandBufferValidationTest, BeginRenderPassBeforeEndPreviousPass) {
+    DummyRenderPass dummyRenderPass(device);
+
+    // Beginning a render pass before ending the render pass causes an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass1 = encoder.BeginRenderPass(&dummyRenderPass);
+        wgpu::RenderPassEncoder renderPass2 = encoder.BeginRenderPass(&dummyRenderPass);
+        renderPass2.End();
+        renderPass1.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Beginning a compute pass before ending a compute pass causes an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
+        renderPass.End();
+        computePass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that encoding command after a successful finish produces an error
+TEST_F(CommandBufferValidationTest, CallsAfterASuccessfulFinish) {
+    // A buffer that can be used in CopyBufferToBuffer
+    wgpu::BufferDescriptor copyBufferDesc;
+    copyBufferDesc.size = 16;
+    copyBufferDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer copyBuffer = device.CreateBuffer(&copyBufferDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.Finish();
+
+    ASSERT_DEVICE_ERROR(encoder.CopyBufferToBuffer(copyBuffer, 0, copyBuffer, 0, 0));
+}
+
+// Test that encoding command after a failed finish produces an error
+TEST_F(CommandBufferValidationTest, CallsAfterAFailedFinish) {
+    // A buffer that can be used in CopyBufferToBuffer
+    wgpu::BufferDescriptor copyBufferDesc;
+    copyBufferDesc.size = 16;
+    copyBufferDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer copyBuffer = device.CreateBuffer(&copyBufferDesc);
+
+    // A buffer that can't be used in CopyBufferToBuffer
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 16;
+    bufferDesc.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(buffer, 0, buffer, 0, 0);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    ASSERT_DEVICE_ERROR(encoder.CopyBufferToBuffer(copyBuffer, 0, copyBuffer, 0, 0));
+}
+
+// Test that passes which are de-referenced prior to ending still allow the correct errors to be
+// produced.
+TEST_F(CommandBufferValidationTest, PassDereferenced) {
+    DummyRenderPass dummyRenderPass(device);
+
+    // Control case, command buffer ended after the pass is ended.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case, no reference is kept to a render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.BeginRenderPass(&dummyRenderPass);
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
+    }
+
+    // Error case, no reference is kept to a compute pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.BeginComputePass();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [ComputePassEncoder] was ended."));
+    }
+
+    // Error case, beginning a new pass after failing to end a de-referenced pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.BeginRenderPass(&dummyRenderPass);
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.End();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
+    }
+
+    // Error case, deleting the pass after finishing the command encoder shouldn't generate an
+    // uncaptured error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        ASSERT_DEVICE_ERROR(
+            encoder.Finish(),
+            HasSubstr("Command buffer recording ended before [ComputePassEncoder] was ended."));
+
+        pass = nullptr;
+    }
+
+    // Valid case, command encoder is never finished so the de-referenced pass shouldn't
+    // generate an uncaptured error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.BeginComputePass();
+    }
+}
+
+// Test that calling inject validation error produces an error.
+TEST_F(CommandBufferValidationTest, InjectValidationError) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.InjectValidationError("my error");
+    ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("my error"));
+}
+
+TEST_F(CommandBufferValidationTest, DestroyEncoder) {
+    // Skip these tests if we are using wire because the destroy functionality is not exposed
+    // and needs to use a cast to call manually. We cannot test this in the wire case since the
+    // only way to trigger the destroy call is by losing all references which means we cannot
+    // call finish.
+    DAWN_SKIP_TEST_IF(UsesWire());
+    DummyRenderPass dummyRenderPass(device);
+
+    // Control case, command buffer ended after the pass is ended.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Destroyed encoder with encoded commands should emit error on finish.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
+    }
+
+    // Destroyed encoder with encoded commands shouldn't emit an error if never finished.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+    }
+
+    // Destroyed encoder should allow encoding, and emit error on finish.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
+    }
+
+    // Destroyed encoder should allow encoding and shouldn't emit an error if never finished.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+    }
+
+    // Destroying a finished encoder should not emit any errors.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.End();
+        encoder.Finish();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+    }
+
+    // Destroying an encoder twice should not emit any errors.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+    }
+
+    // Destroying an encoder twice and then calling finish should fail.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        dawn::native::FromAPI(encoder.Get())->Destroy();
+        ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/ComputeIndirectValidationTests.cpp b/src/dawn/tests/unittests/validation/ComputeIndirectValidationTests.cpp
new file mode 100644
index 0000000..6d62e34
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ComputeIndirectValidationTests.cpp
@@ -0,0 +1,97 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <initializer_list>
+#include <limits>
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ComputeIndirectValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        wgpu::ShaderModule computeModule = utils::CreateShaderModule(device, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })");
+
+        // Set up compute pipeline
+        wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, nullptr);
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.layout = pl;
+        csDesc.compute.module = computeModule;
+        csDesc.compute.entryPoint = "main";
+        pipeline = device.CreateComputePipeline(&csDesc);
+    }
+
+    void ValidateExpectation(wgpu::CommandEncoder encoder, utils::Expectation expectation) {
+        if (expectation == utils::Expectation::Success) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    void TestIndirectOffset(utils::Expectation expectation,
+                            std::initializer_list<uint32_t> bufferList,
+                            uint64_t indirectOffset,
+                            wgpu::BufferUsage usage = wgpu::BufferUsage::Indirect) {
+        wgpu::Buffer indirectBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, usage, bufferList);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.DispatchIndirect(indirectBuffer, indirectOffset);
+        pass.End();
+
+        ValidateExpectation(encoder, expectation);
+    }
+
+    wgpu::ComputePipeline pipeline;
+};
+
+// Verify out of bounds indirect dispatch calls are caught early
+TEST_F(ComputeIndirectValidationTest, IndirectOffsetBounds) {
+    // In bounds
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3}, 0);
+    // In bounds, bigger buffer
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3, 4, 5, 6}, 0);
+    // In bounds, bigger buffer, positive offset
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3, 4, 5, 6}, 3 * sizeof(uint32_t));
+
+    // In bounds, non-multiple of 4 offsets
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3, 4}, 1);
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3, 4}, 2);
+
+    // Out of bounds, buffer too small
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2}, 0);
+    // Out of bounds, index too big
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3}, 1 * sizeof(uint32_t));
+    // Out of bounds, index past buffer
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3}, 4 * sizeof(uint32_t));
+    // Out of bounds, index + size of command overflows
+    uint64_t offset = std::numeric_limits<uint64_t>::max();
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3, 4, 5, 6}, offset);
+}
+
+// Check that the buffer must have the indirect usage
+TEST_F(ComputeIndirectValidationTest, IndirectUsage) {
+    // Control case: using a buffer with the indirect usage is valid.
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3}, 0, wgpu::BufferUsage::Indirect);
+
+    // Error case: using a buffer with the vertex usage is an error.
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3}, 0, wgpu::BufferUsage::Vertex);
+}
diff --git a/src/dawn/tests/unittests/validation/ComputeValidationTests.cpp b/src/dawn/tests/unittests/validation/ComputeValidationTests.cpp
new file mode 100644
index 0000000..ec4a74d
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ComputeValidationTests.cpp
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+// TODO(cwallez@chromium.org): Add a regression test for Disptach validation trying to acces the
+// input state.
+
+class ComputeValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        wgpu::ShaderModule computeModule = utils::CreateShaderModule(device, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })");
+
+        // Set up compute pipeline
+        wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, nullptr);
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.layout = pl;
+        csDesc.compute.module = computeModule;
+        csDesc.compute.entryPoint = "main";
+        pipeline = device.CreateComputePipeline(&csDesc);
+    }
+
+    void TestDispatch(uint32_t x, uint32_t y, uint32_t z) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(pipeline);
+        pass.Dispatch(x, y, z);
+        pass.End();
+        encoder.Finish();
+    }
+
+    wgpu::ComputePipeline pipeline;
+};
+
+// Check that 1x1x1 dispatch is OK.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_SmallestValid) {
+    TestDispatch(1, 1, 1);
+}
+
+// Check that the largest allowed dispatch is OK.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_LargestValid) {
+    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+    TestDispatch(max, max, max);
+}
+
+// Check that exceeding the maximum on the X dimension results in validation failure.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidX) {
+    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+    ASSERT_DEVICE_ERROR(TestDispatch(max + 1, 1, 1));
+}
+
+// Check that exceeding the maximum on the Y dimension results in validation failure.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidY) {
+    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+    ASSERT_DEVICE_ERROR(TestDispatch(1, max + 1, 1));
+}
+
+// Check that exceeding the maximum on the Z dimension results in validation failure.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidZ) {
+    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+    ASSERT_DEVICE_ERROR(TestDispatch(1, 1, max + 1));
+}
+
+// Check that exceeding the maximum on all dimensions results in validation failure.
+TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidAll) {
+    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
+    ASSERT_DEVICE_ERROR(TestDispatch(max + 1, max + 1, max + 1));
+}
diff --git a/src/dawn/tests/unittests/validation/CopyCommandsValidationTests.cpp b/src/dawn/tests/unittests/validation/CopyCommandsValidationTests.cpp
new file mode 100644
index 0000000..a7d26f1
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/CopyCommandsValidationTests.cpp
@@ -0,0 +1,2653 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class CopyCommandTest : public ValidationTest {
+  protected:
+    wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Texture Create2DTexture(uint32_t width,
+                                  uint32_t height,
+                                  uint32_t mipLevelCount,
+                                  uint32_t arrayLayerCount,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage,
+                                  uint32_t sampleCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        wgpu::Texture tex = device.CreateTexture(&descriptor);
+        return tex;
+    }
+
+    wgpu::Texture Create3DTexture(uint32_t width,
+                                  uint32_t height,
+                                  uint32_t depth,
+                                  uint32_t mipLevelCount,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = depth;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        wgpu::Texture tex = device.CreateTexture(&descriptor);
+        return tex;
+    }
+
+    uint32_t BufferSizeForTextureCopy(
+        uint32_t width,
+        uint32_t height,
+        uint32_t depth,
+        wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
+        uint32_t bytesPerPixel = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t bytesPerRow = Align(width * bytesPerPixel, kTextureBytesPerRowAlignment);
+        return (bytesPerRow * (height - 1) + width * bytesPerPixel) * depth;
+    }
+
+    void ValidateExpectation(wgpu::CommandEncoder encoder, utils::Expectation expectation) {
+        if (expectation == utils::Expectation::Success) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    void TestB2TCopy(utils::Expectation expectation,
+                     wgpu::Buffer srcBuffer,
+                     uint64_t srcOffset,
+                     uint32_t srcBytesPerRow,
+                     uint32_t srcRowsPerImage,
+                     wgpu::Texture destTexture,
+                     uint32_t destLevel,
+                     wgpu::Origin3D destOrigin,
+                     wgpu::Extent3D extent3D,
+                     wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(srcBuffer, srcOffset, srcBytesPerRow, srcRowsPerImage);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(destTexture, destLevel, destOrigin, aspect);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &extent3D);
+
+        ValidateExpectation(encoder, expectation);
+    }
+
+    void TestT2BCopy(utils::Expectation expectation,
+                     wgpu::Texture srcTexture,
+                     uint32_t srcLevel,
+                     wgpu::Origin3D srcOrigin,
+                     wgpu::Buffer destBuffer,
+                     uint64_t destOffset,
+                     uint32_t destBytesPerRow,
+                     uint32_t destRowsPerImage,
+                     wgpu::Extent3D extent3D,
+                     wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+        wgpu::ImageCopyBuffer imageCopyBuffer =
+            utils::CreateImageCopyBuffer(destBuffer, destOffset, destBytesPerRow, destRowsPerImage);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, srcLevel, srcOrigin, aspect);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &extent3D);
+
+        ValidateExpectation(encoder, expectation);
+    }
+
+    void TestT2TCopy(utils::Expectation expectation,
+                     wgpu::Texture srcTexture,
+                     uint32_t srcLevel,
+                     wgpu::Origin3D srcOrigin,
+                     wgpu::Texture dstTexture,
+                     uint32_t dstLevel,
+                     wgpu::Origin3D dstOrigin,
+                     wgpu::Extent3D extent3D,
+                     wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, srcLevel, srcOrigin, aspect);
+        wgpu::ImageCopyTexture dstImageCopyTexture =
+            utils::CreateImageCopyTexture(dstTexture, dstLevel, dstOrigin, aspect);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+
+        ValidateExpectation(encoder, expectation);
+    }
+
+    void TestBothTBCopies(utils::Expectation expectation,
+                          wgpu::Buffer buffer,
+                          uint64_t bufferOffset,
+                          uint32_t bufferBytesPerRow,
+                          uint32_t rowsPerImage,
+                          wgpu::Texture texture,
+                          uint32_t level,
+                          wgpu::Origin3D origin,
+                          wgpu::Extent3D extent3D) {
+        TestB2TCopy(expectation, buffer, bufferOffset, bufferBytesPerRow, rowsPerImage, texture,
+                    level, origin, extent3D);
+        TestT2BCopy(expectation, texture, level, origin, buffer, bufferOffset, bufferBytesPerRow,
+                    rowsPerImage, extent3D);
+    }
+
+    void TestBothT2TCopies(utils::Expectation expectation,
+                           wgpu::Texture texture1,
+                           uint32_t level1,
+                           wgpu::Origin3D origin1,
+                           wgpu::Texture texture2,
+                           uint32_t level2,
+                           wgpu::Origin3D origin2,
+                           wgpu::Extent3D extent3D) {
+        TestT2TCopy(expectation, texture1, level1, origin1, texture2, level2, origin2, extent3D);
+        TestT2TCopy(expectation, texture2, level2, origin2, texture1, level1, origin1, extent3D);
+    }
+
+    void TestBothTBCopiesExactBufferSize(uint32_t bufferBytesPerRow,
+                                         uint32_t rowsPerImage,
+                                         wgpu::Texture texture,
+                                         wgpu::TextureFormat textureFormat,
+                                         wgpu::Origin3D origin,
+                                         wgpu::Extent3D extent3D) {
+        // Check the minimal valid bufferSize.
+        uint64_t bufferSize =
+            utils::RequiredBytesInCopy(bufferBytesPerRow, rowsPerImage, extent3D, textureFormat);
+        wgpu::Buffer source =
+            CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+        TestBothTBCopies(utils::Expectation::Success, source, 0, bufferBytesPerRow, rowsPerImage,
+                         texture, 0, origin, extent3D);
+
+        // Check bufferSize was indeed minimal.
+        uint64_t invalidSize = bufferSize - 1;
+        wgpu::Buffer invalidSource =
+            CreateBuffer(invalidSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+        TestBothTBCopies(utils::Expectation::Failure, invalidSource, 0, bufferBytesPerRow,
+                         rowsPerImage, texture, 0, origin, extent3D);
+    }
+};
+
+// Test copies between buffer and multiple array layers of an uncompressed texture
+TEST_F(CopyCommandTest, CopyToMultipleArrayLayers) {
+    wgpu::Texture destination =
+        CopyCommandTest::Create2DTexture(4, 2, 1, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                         wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+    // Copy to all array layers
+    TestBothTBCopiesExactBufferSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                    {4, 2, 5});
+
+    // Copy to the highest array layer
+    TestBothTBCopiesExactBufferSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 4},
+                                    {4, 2, 1});
+
+    // Copy to array layers in the middle
+    TestBothTBCopiesExactBufferSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                    {4, 2, 3});
+
+    // Copy with a non-packed rowsPerImage
+    TestBothTBCopiesExactBufferSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                    {4, 2, 5});
+
+    // Copy with bytesPerRow = 512
+    TestBothTBCopiesExactBufferSize(512, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                    {4, 2, 3});
+}
+
+class CopyCommandTest_B2B : public CopyCommandTest {};
+
+// TODO(cwallez@chromium.org): Test that copies are forbidden inside renderpasses
+
+// Test a successfull B2B copy
+TEST_F(CopyCommandTest_B2B, Success) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    // Copy different copies, including some that touch the OOB condition
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 0, destination, 0, 16);
+        encoder.CopyBufferToBuffer(source, 8, destination, 0, 8);
+        encoder.CopyBufferToBuffer(source, 0, destination, 8, 8);
+        encoder.Finish();
+    }
+
+    // Empty copies are valid
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 0, destination, 0, 0);
+        encoder.CopyBufferToBuffer(source, 0, destination, 16, 0);
+        encoder.CopyBufferToBuffer(source, 16, destination, 0, 0);
+        encoder.Finish();
+    }
+}
+
+// Test a successful B2B copy where the last external reference is dropped.
+// This is a regression test for crbug.com/1217741 where submitting a command
+// buffer with dropped resources when the copy size is 0 was a use-after-free.
+TEST_F(CopyCommandTest_B2B, DroppedBuffer) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(source, 0, destination, 0, 0);
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    source = nullptr;
+    destination = nullptr;
+    device.GetQueue().Submit(1, &commandBuffer);
+}
+
+// Test B2B copies with OOB
+TEST_F(CopyCommandTest_B2B, OutOfBounds) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    // OOB on the source
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 8, destination, 0, 12);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // OOB on the destination
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 0, destination, 8, 12);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test B2B copies with incorrect buffer usage
+TEST_F(CopyCommandTest_B2B, BadUsage) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+    wgpu::Buffer vertex = CreateBuffer(16, wgpu::BufferUsage::Vertex);
+
+    // Source with incorrect usage
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(vertex, 0, destination, 0, 16);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Destination with incorrect usage
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 0, vertex, 0, 16);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test B2B copies with unaligned data size
+TEST_F(CopyCommandTest_B2B, UnalignedSize) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToBuffer(source, 8, destination, 0, sizeof(uint8_t));
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test B2B copies with unaligned offset
+TEST_F(CopyCommandTest_B2B, UnalignedOffset) {
+    wgpu::Buffer source = CreateBuffer(16, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    // Unaligned source offset
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 9, destination, 0, 4);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Unaligned destination offset
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(source, 8, destination, 1, 4);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test B2B copies with buffers in error state cause errors.
+TEST_F(CopyCommandTest_B2B, BuffersInErrorState) {
+    wgpu::BufferDescriptor errorBufferDescriptor;
+    errorBufferDescriptor.size = 4;
+    errorBufferDescriptor.usage =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    ASSERT_DEVICE_ERROR(wgpu::Buffer errorBuffer = device.CreateBuffer(&errorBufferDescriptor));
+
+    constexpr uint64_t bufferSize = 4;
+    wgpu::Buffer validBuffer =
+        CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(errorBuffer, 0, validBuffer, 0, 4);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(validBuffer, 0, errorBuffer, 0, 4);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test it is not allowed to do B2B copies within same buffer.
+TEST_F(CopyCommandTest_B2B, CopyWithinSameBuffer) {
+    constexpr uint32_t kBufferSize = 16u;
+    wgpu::Buffer buffer =
+        CreateBuffer(kBufferSize, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    // srcOffset < dstOffset, and srcOffset + copySize > dstOffset (overlapping)
+    {
+        constexpr uint32_t kSrcOffset = 0u;
+        constexpr uint32_t kDstOffset = 4u;
+        constexpr uint32_t kCopySize = 8u;
+        ASSERT(kDstOffset > kSrcOffset && kDstOffset < kSrcOffset + kCopySize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, kSrcOffset, buffer, kDstOffset, kCopySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // srcOffset < dstOffset, and srcOffset + copySize == dstOffset (not overlapping)
+    {
+        constexpr uint32_t kSrcOffset = 0u;
+        constexpr uint32_t kDstOffset = 8u;
+        constexpr uint32_t kCopySize = kDstOffset - kSrcOffset;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, kSrcOffset, buffer, kDstOffset, kCopySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // srcOffset > dstOffset, and srcOffset < dstOffset + copySize (overlapping)
+    {
+        constexpr uint32_t kSrcOffset = 4u;
+        constexpr uint32_t kDstOffset = 0u;
+        constexpr uint32_t kCopySize = 8u;
+        ASSERT(kSrcOffset > kDstOffset && kSrcOffset < kDstOffset + kCopySize);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, kSrcOffset, buffer, kDstOffset, kCopySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // srcOffset > dstOffset, and srcOffset + copySize == dstOffset (not overlapping)
+    {
+        constexpr uint32_t kSrcOffset = 8u;
+        constexpr uint32_t kDstOffset = 0u;
+        constexpr uint32_t kCopySize = kSrcOffset - kDstOffset;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, kSrcOffset, buffer, kDstOffset, kCopySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+class CopyCommandTest_B2T : public CopyCommandTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[2] = {wgpu::FeatureName::Depth24UnormStencil8,
+                                                 wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 2;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test a successfull B2T copy
+TEST_F(CopyCommandTest_B2T, Success) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy 4x4 block in corner of first mip.
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 0, {0, 0, 0},
+                    {4, 4, 1});
+        // Copy 4x4 block in opposite corner of first mip.
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 0, {12, 12, 0},
+                    {4, 4, 1});
+        // Copy 4x4 block in the 4x4 mip.
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 2, {0, 0, 0},
+                    {4, 4, 1});
+        // Copy with a buffer offset
+        TestB2TCopy(utils::Expectation::Success, source, bufferSize - 4, 256, 1, destination, 0,
+                    {0, 0, 0}, {1, 1, 1});
+        TestB2TCopy(utils::Expectation::Success, source, bufferSize - 4, 256,
+                    wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0}, {1, 1, 1});
+    }
+
+    // Copies with a 256-byte aligned bytes per row but unaligned texture region
+    {
+        // Unaligned region
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 0, {0, 0, 0},
+                    {3, 4, 1});
+        // Unaligned region with texture offset
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 3, destination, 0, {5, 7, 0},
+                    {2, 3, 1});
+        // Unaligned region, with buffer offset
+        TestB2TCopy(utils::Expectation::Success, source, 31 * 4, 256, 3, destination, 0, {0, 0, 0},
+                    {3, 3, 1});
+    }
+
+    // bytesPerRow is undefined
+    {
+        TestB2TCopy(utils::Expectation::Success, source, 0, wgpu::kCopyStrideUndefined, 2,
+                    destination, 0, {0, 0, 0}, {1, 1, 1});
+        TestB2TCopy(utils::Expectation::Success, source, 0, wgpu::kCopyStrideUndefined, 2,
+                    destination, 0, {0, 0, 0}, {3, 1, 1});
+        // Fail because height or depth is greater than 1:
+        TestB2TCopy(utils::Expectation::Failure, source, 0, wgpu::kCopyStrideUndefined, 2,
+                    destination, 0, {0, 0, 0}, {1, 2, 1});
+        TestB2TCopy(utils::Expectation::Failure, source, 0, wgpu::kCopyStrideUndefined, 2,
+                    destination, 0, {0, 0, 0}, {1, 1, 2});
+    }
+
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestB2TCopy(utils::Expectation::Success, source, 0, 0, 0, destination, 0, {0, 0, 0},
+                    {0, 0, 1});
+        TestB2TCopy(utils::Expectation::Success, source, 0, wgpu::kCopyStrideUndefined, 0,
+                    destination, 0, {0, 0, 0}, {0, 0, 1});
+        // An empty copy with depth = 0
+        TestB2TCopy(utils::Expectation::Success, source, 0, 0, 0, destination, 0, {0, 0, 0},
+                    {0, 0, 0});
+        TestB2TCopy(utils::Expectation::Success, source, 0, wgpu::kCopyStrideUndefined, 0,
+                    destination, 0, {0, 0, 0}, {0, 0, 0});
+        // An empty copy touching the end of the buffer
+        TestB2TCopy(utils::Expectation::Success, source, bufferSize, 0, 0, destination, 0,
+                    {0, 0, 0}, {0, 0, 1});
+        TestB2TCopy(utils::Expectation::Success, source, bufferSize, wgpu::kCopyStrideUndefined, 0,
+                    destination, 0, {0, 0, 0}, {0, 0, 1});
+        // An empty copy touching the side of the texture
+        TestB2TCopy(utils::Expectation::Success, source, 0, 0, 0, destination, 0, {16, 16, 0},
+                    {0, 0, 1});
+        TestB2TCopy(utils::Expectation::Success, source, 0, wgpu::kCopyStrideUndefined, 0,
+                    destination, 0, {16, 16, 0}, {0, 0, 1});
+
+        // An empty copy with depth = 1 and bytesPerRow > 0
+        TestB2TCopy(utils::Expectation::Success, source, 0, kTextureBytesPerRowAlignment, 0,
+                    destination, 0, {0, 0, 0}, {0, 0, 1});
+        // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
+        TestB2TCopy(utils::Expectation::Success, source, 0, kTextureBytesPerRowAlignment, 3,
+                    destination, 0, {0, 0, 0}, {0, 1, 0});
+    }
+}
+
+// Test OOB conditions on the buffer
+TEST_F(CopyCommandTest_B2T, OutOfBoundsOnBuffer) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // OOB on the buffer because we copy too many pixels
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 5, destination, 0, {0, 0, 0},
+                {4, 5, 1});
+
+    // OOB on the buffer because of the offset
+    TestB2TCopy(utils::Expectation::Failure, source, 4, 256, 4, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+
+    // OOB on the buffer because (bytes per row * (height - 1) + width * bytesPerPixel) * depth
+    // overflows
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 512, 3, destination, 0, {0, 0, 0},
+                {4, 3, 1});
+
+    // Not OOB on the buffer although bytes per row * height overflows
+    // but (bytes per row * (height - 1) + width * bytesPerPixel) * depth does not overflow
+    {
+        uint32_t sourceBufferSize = BufferSizeForTextureCopy(7, 3, 1);
+        ASSERT_TRUE(256 * 3 > sourceBufferSize) << "bytes per row * height should overflow buffer";
+        wgpu::Buffer sourceBuffer = CreateBuffer(sourceBufferSize, wgpu::BufferUsage::CopySrc);
+
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 3, destination, 0, {0, 0, 0},
+                    {7, 3, 1});
+    }
+}
+
+// Test OOB conditions on the texture
+TEST_F(CopyCommandTest_B2T, OutOfBoundsOnTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // OOB on the texture because x + width overflows
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 4, destination, 0, {13, 12, 0},
+                {4, 4, 1});
+
+    // OOB on the texture because y + width overflows
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 4, destination, 0, {12, 13, 0},
+                {4, 4, 1});
+
+    // OOB on the texture because we overflow a non-zero mip
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 4, destination, 2, {1, 0, 0},
+                {4, 4, 1});
+
+    // OOB on the texture even on an empty copy when we copy to a non-existent mip.
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1});
+
+    // OOB on the texture because slice overflows
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1});
+}
+
+// Test that we force Depth=1 on copies to 2D textures
+TEST_F(CopyCommandTest_B2T, DepthConstraintFor2DTextures) {
+    wgpu::Buffer source = CreateBuffer(16 * 4, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Depth > 1 on an empty copy still errors
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2});
+}
+
+// Test B2T copies with incorrect buffer usage
+TEST_F(CopyCommandTest_B2T, IncorrectUsage) {
+    wgpu::Buffer source = CreateBuffer(16 * 4, wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer vertex = CreateBuffer(16 * 4, wgpu::BufferUsage::Vertex);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+    wgpu::Texture sampled = Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                            wgpu::TextureUsage::TextureBinding);
+
+    // Incorrect source usage
+    TestB2TCopy(utils::Expectation::Failure, vertex, 0, 256, 4, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+
+    // Incorrect destination usage
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1});
+}
+
+TEST_F(CopyCommandTest_B2T, BytesPerRowConstraints) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(128, 16, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination = Create2DTexture(128, 16, 5, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // bytes per row is 0
+    {
+        // copyHeight > 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 4, destination, 0, {0, 0, 0},
+                    {64, 4, 1});
+        TestB2TCopy(utils::Expectation::Success, source, 0, 0, 4, destination, 0, {0, 0, 0},
+                    {0, 4, 1});
+
+        // copyDepth > 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 1, destination, 0, {0, 0, 0},
+                    {64, 1, 4});
+        TestB2TCopy(utils::Expectation::Success, source, 0, 0, 1, destination, 0, {0, 0, 0},
+                    {0, 1, 4});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 0, 1, destination, 0, {0, 0, 0},
+                    {64, 1, 1});
+    }
+
+    // bytes per row is not 256-byte aligned
+    {
+        // copyHeight > 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 128, 4, destination, 0, {0, 0, 0},
+                    {4, 4, 1});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 128, 1, destination, 0, {0, 0, 0},
+                    {4, 1, 1});
+    }
+
+    // bytes per row is less than width * bytesPerPixel
+    {
+        // copyHeight > 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 2, destination, 0, {0, 0, 0},
+                    {65, 2, 1});
+        // copyHeight == 0
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 0, destination, 0, {0, 0, 0},
+                    {65, 0, 1});
+
+        // copyDepth > 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 0, {0, 0, 0},
+                    {65, 1, 2});
+        // copyDepth == 0
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 0, {0, 0, 0},
+                    {65, 1, 0});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 0, {0, 0, 0},
+                    {65, 1, 1});
+    }
+}
+
+TEST_F(CopyCommandTest_B2T, RowsPerImageConstraints) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(5, 5, 6);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 1, 5, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // rowsPerImage is zero
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 0, destination, 0, {0, 0, 0},
+                {1, 1, 1});
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 0, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+
+    // rowsPerImage is undefined
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, wgpu::kCopyStrideUndefined,
+                destination, 0, {0, 0, 0}, {4, 4, 1});
+    // Fail because depth > 1:
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, wgpu::kCopyStrideUndefined,
+                destination, 0, {0, 0, 0}, {4, 4, 2});
+
+    // rowsPerImage is equal to copy height (Valid)
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 4, destination, 0, {0, 0, 0},
+                {4, 4, 2});
+
+    // rowsPerImage is larger than copy height (Valid)
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 5, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 5, destination, 0, {0, 0, 0},
+                {4, 4, 2});
+
+    // rowsPerImage is less than copy height (Invalid)
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 3, destination, 0, {0, 0, 0},
+                {4, 4, 1});
+}
+
+// Test B2T copies with incorrect buffer offset usage for color texture
+TEST_F(CopyCommandTest_B2T, IncorrectBufferOffsetForColorTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Correct usage
+    TestB2TCopy(utils::Expectation::Success, source, bufferSize - 4, 256, 1, destination, 0,
+                {0, 0, 0}, {1, 1, 1});
+
+    // Incorrect usages
+    {
+        TestB2TCopy(utils::Expectation::Failure, source, bufferSize - 5, 256, 1, destination, 0,
+                    {0, 0, 0}, {1, 1, 1});
+        TestB2TCopy(utils::Expectation::Failure, source, bufferSize - 6, 256, 1, destination, 0,
+                    {0, 0, 0}, {1, 1, 1});
+        TestB2TCopy(utils::Expectation::Failure, source, bufferSize - 7, 256, 1, destination, 0,
+                    {0, 0, 0}, {1, 1, 1});
+    }
+}
+
+// Test B2T copies with incorrect buffer offset usage for depth-stencil texture
+TEST_F(CopyCommandTest_B2T, IncorrectBufferOffsetForDepthStencilTexture) {
+    // TODO(dawn:570, dawn:666): List other valid parameters after missing texture formats
+    // are implemented, e.g. Stencil8.
+    std::array<std::tuple<wgpu::TextureFormat, wgpu::TextureAspect>, 5> params = {
+        std::make_tuple(wgpu::TextureFormat::Depth16Unorm, wgpu::TextureAspect::DepthOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth16Unorm, wgpu::TextureAspect::All),
+        std::make_tuple(wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureAspect::StencilOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth24UnormStencil8,
+                        wgpu::TextureAspect::StencilOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth32FloatStencil8,
+                        wgpu::TextureAspect::StencilOnly),
+    };
+
+    uint64_t bufferSize = BufferSizeForTextureCopy(32, 32, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+    for (auto param : params) {
+        wgpu::TextureFormat textureFormat = std::get<0>(param);
+        wgpu::TextureAspect textureAspect = std::get<1>(param);
+
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 5, 1, textureFormat, wgpu::TextureUsage::CopyDst);
+
+        for (uint64_t srcOffset = 0; srcOffset < 8; srcOffset++) {
+            utils::Expectation expectation =
+                (srcOffset % 4 == 0) ? utils::Expectation::Success : utils::Expectation::Failure;
+            TestB2TCopy(expectation, source, srcOffset, 256, 16, destination, 0, {0, 0, 0},
+                        {16, 16, 1}, textureAspect);
+        }
+    }
+}
+
+// Test multisampled textures cannot be used in B2T copies.
+TEST_F(CopyCommandTest_B2T, CopyToMultisampledTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination = Create2DTexture(2, 2, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst, 4);
+
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 2, destination, 0, {0, 0, 0},
+                {2, 2, 1});
+}
+
+// Test B2T copies with buffer or texture in error state causes errors.
+TEST_F(CopyCommandTest_B2T, BufferOrTextureInErrorState) {
+    wgpu::BufferDescriptor errorBufferDescriptor;
+    errorBufferDescriptor.size = 4;
+    errorBufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc;
+    ASSERT_DEVICE_ERROR(wgpu::Buffer errorBuffer = device.CreateBuffer(&errorBufferDescriptor));
+
+    wgpu::TextureDescriptor errorTextureDescriptor;
+    errorTextureDescriptor.size.depthOrArrayLayers = 0;
+    ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture = device.CreateTexture(&errorTextureDescriptor));
+
+    wgpu::ImageCopyBuffer errorImageCopyBuffer = utils::CreateImageCopyBuffer(errorBuffer, 0, 0, 0);
+    wgpu::ImageCopyTexture errorImageCopyTexture =
+        utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D extent3D = {0, 0, 0};
+
+    {
+        wgpu::Texture destination = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&errorImageCopyBuffer, &imageCopyTexture, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+        wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(source, 0, 0, 0);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&imageCopyBuffer, &errorImageCopyTexture, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Regression tests for a bug in the computation of texture copy buffer size in Dawn.
+TEST_F(CopyCommandTest_B2T, TextureCopyBufferSizeLastRowComputation) {
+    constexpr uint32_t kBytesPerRow = 256;
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
+                                                             wgpu::TextureFormat::RG8Unorm};
+
+    {
+        // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid buffer size in
+        // this test because the buffer sizes in B2T copies are not in texels but in bytes.
+        constexpr uint32_t kInvalidBufferSize = kBytesPerRow * (kHeight - 1) + kWidth;
+
+        for (wgpu::TextureFormat format : kFormats) {
+            wgpu::Buffer source = CreateBuffer(kInvalidBufferSize, wgpu::BufferUsage::CopySrc);
+            wgpu::Texture destination =
+                Create2DTexture(kWidth, kHeight, 1, 1, format, wgpu::TextureUsage::CopyDst);
+            TestB2TCopy(utils::Expectation::Failure, source, 0, kBytesPerRow, kHeight, destination,
+                        0, {0, 0, 0}, {kWidth, kHeight, 1});
+        }
+    }
+
+    {
+        for (wgpu::TextureFormat format : kFormats) {
+            uint32_t validBufferSize = BufferSizeForTextureCopy(kWidth, kHeight, 1, format);
+            wgpu::Texture destination =
+                Create2DTexture(kWidth, kHeight, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+            // Verify the return value of BufferSizeForTextureCopy() is exactly the minimum valid
+            // buffer size in this test.
+            {
+                uint32_t invalidBuffferSize = validBufferSize - 1;
+                wgpu::Buffer source = CreateBuffer(invalidBuffferSize, wgpu::BufferUsage::CopySrc);
+                TestB2TCopy(utils::Expectation::Failure, source, 0, kBytesPerRow, kHeight,
+                            destination, 0, {0, 0, 0}, {kWidth, kHeight, 1});
+            }
+
+            {
+                wgpu::Buffer source = CreateBuffer(validBufferSize, wgpu::BufferUsage::CopySrc);
+                TestB2TCopy(utils::Expectation::Success, source, 0, kBytesPerRow, kHeight,
+                            destination, 0, {0, 0, 0}, {kWidth, kHeight, 1});
+            }
+        }
+    }
+}
+
+// Test copy from buffer to mip map of non square texture
+TEST_F(CopyCommandTest_B2T, CopyToMipmapOfNonSquareTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 2, 1);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+    uint32_t maxMipmapLevel = 3;
+    wgpu::Texture destination = Create2DTexture(
+        4, 2, maxMipmapLevel, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Copy to top level mip map
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 1, destination, maxMipmapLevel - 1,
+                {0, 0, 0}, {1, 1, 1});
+    // Copy to high level mip map
+    TestB2TCopy(utils::Expectation::Success, source, 0, 256, 1, destination, maxMipmapLevel - 2,
+                {0, 0, 0}, {2, 1, 1});
+    // Mip level out of range
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, maxMipmapLevel,
+                {0, 0, 0}, {1, 1, 1});
+    // Copy origin out of range
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, maxMipmapLevel - 2,
+                {1, 0, 0}, {2, 1, 1});
+    // Copy size out of range
+    TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 2, destination, maxMipmapLevel - 2,
+                {0, 0, 0}, {2, 2, 1});
+}
+
+// Test whether or not it is valid to copy to a depth texture
+TEST_F(CopyCommandTest_B2T, CopyToDepthAspect) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1, wgpu::TextureFormat::Depth32Float);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+    constexpr std::array<wgpu::TextureFormat, 1> kAllowBufferToDepthCopyFormats = {
+        wgpu::TextureFormat::Depth16Unorm};
+
+    for (wgpu::TextureFormat format : kAllowBufferToDepthCopyFormats) {
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+        // Test it is valid to copy this format from a buffer into a depth texture
+        TestB2TCopy(utils::Expectation::Success, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+        if (utils::IsDepthOnlyFormat(format)) {
+            // Test "all" of a depth texture which is only the depth aspect.
+            TestB2TCopy(utils::Expectation::Success, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                        {16, 16, 1}, wgpu::TextureAspect::All);
+        }
+    }
+
+    constexpr std::array<wgpu::TextureFormat, 5> kDisallowBufferToDepthCopyFormats = {
+        wgpu::TextureFormat::Depth32Float,         wgpu::TextureFormat::Depth24Plus,
+        wgpu::TextureFormat::Depth24PlusStencil8,  wgpu::TextureFormat::Depth24UnormStencil8,
+        wgpu::TextureFormat::Depth32FloatStencil8,
+    };
+
+    for (wgpu::TextureFormat format : kDisallowBufferToDepthCopyFormats) {
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+        // Test it is invalid to copy from a buffer into a depth texture
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+
+        if (utils::IsDepthOnlyFormat(format)) {
+            // Test "all" of a depth texture which is only the depth aspect.
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                        {16, 16, 1}, wgpu::TextureAspect::All);
+        }
+    }
+}
+
+// Test copy to only the stencil aspect of a texture
+TEST_F(CopyCommandTest_B2T, CopyToStencilAspect) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1, wgpu::TextureFormat::R8Uint);
+    wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+    for (wgpu::TextureFormat format : utils::kStencilFormats) {
+        // Test it is valid to copy from a buffer into the stencil aspect of a depth/stencil texture
+        {
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+            // TODO(dawn:666): Test "all" of Stencil8 format when it's implemented.
+
+            TestB2TCopy(utils::Expectation::Success, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                        {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // And that it fails if the buffer is one byte too small
+            wgpu::Buffer sourceSmall = CreateBuffer(bufferSize - 1, wgpu::BufferUsage::CopySrc);
+            TestB2TCopy(utils::Expectation::Failure, sourceSmall, 0, 256, 16, destination, 0,
+                        {0, 0, 0}, {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // A copy fails when using a depth/stencil texture, and the entire subresource isn't copied
+        {
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 1, 1, format,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 15, destination, 0, {0, 0, 0},
+                        {15, 15, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 0, {0, 0, 0},
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // Non-zero mip: A copy fails when using a depth/stencil texture, and the entire subresource
+        // isn't copied
+        {
+            uint64_t bufferSize = BufferSizeForTextureCopy(8, 8, 1, wgpu::TextureFormat::R8Uint);
+            wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 2, 1, format,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+            // Whole mip is success
+            TestB2TCopy(utils::Expectation::Success, source, 0, 256, 8, destination, 1, {0, 0, 0},
+                        {8, 8, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // Partial mip fails
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 7, destination, 1, {0, 0, 0},
+                        {7, 7, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 1, {0, 0, 0},
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // Non-zero mip, non-pow-2: A copy fails when using a depth/stencil texture, and the entire
+        // subresource isn't copied
+        {
+            uint64_t bufferSize = BufferSizeForTextureCopy(8, 8, 1, wgpu::TextureFormat::R8Uint);
+            wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+            wgpu::Texture destination =
+                Create2DTexture(17, 17, 2, 1, format,
+                                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+            // Whole mip is success
+            TestB2TCopy(utils::Expectation::Success, source, 0, 256, 8, destination, 1, {0, 0, 0},
+                        {8, 8, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // Partial mip fails
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 7, destination, 1, {0, 0, 0},
+                        {7, 7, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 1, destination, 1, {0, 0, 0},
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+    }
+
+    // Test it is invalid to copy from a buffer into the stencil aspect of Depth24Plus (no stencil)
+    {
+        wgpu::Texture destination = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::Depth24Plus,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+
+    // Test it is invalid to copy from a buffer into the stencil aspect of a color texture
+    {
+        wgpu::Texture destination = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Uint,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        TestB2TCopy(utils::Expectation::Failure, source, 0, 256, 16, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+}
+
+// Test that CopyB2T throws an error when requiredBytesInCopy overflows uint64_t
+TEST_F(CopyCommandTest_B2T, RequiredBytesInCopyOverflow) {
+    wgpu::Buffer source = CreateBuffer(10000, wgpu::BufferUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(1, 1, 1, 16, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Success
+    TestB2TCopy(utils::Expectation::Success, source, 0, (1 << 31), (1 << 31), destination, 0,
+                {0, 0, 0}, {1, 1, 1});
+    // Failure because bytesPerImage * (depth - 1) overflows
+    TestB2TCopy(utils::Expectation::Failure, source, 0, (1 << 31), (1 << 31), destination, 0,
+                {0, 0, 0}, {1, 1, 16});
+}
+
+class CopyCommandTest_T2B : public CopyCommandTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[2] = {wgpu::FeatureName::Depth24UnormStencil8,
+                                                 wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 2;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test a successfull T2B copy
+TEST_F(CopyCommandTest_T2B, Success) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy from 4x4 block in corner of first mip.
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 4,
+                    {4, 4, 1});
+        // Copy from 4x4 block in opposite corner of first mip.
+        TestT2BCopy(utils::Expectation::Success, source, 0, {12, 12, 0}, destination, 0, 256, 4,
+                    {4, 4, 1});
+        // Copy from 4x4 block in the 4x4 mip.
+        TestT2BCopy(utils::Expectation::Success, source, 2, {0, 0, 0}, destination, 0, 256, 4,
+                    {4, 4, 1});
+        // Copy with a buffer offset
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, bufferSize - 4,
+                    256, 1, {1, 1, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, bufferSize - 4,
+                    256, wgpu::kCopyStrideUndefined, {1, 1, 1});
+    }
+
+    // Copies with a 256-byte aligned bytes per row but unaligned texture region
+    {
+        // Unaligned region
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 4,
+                    {3, 4, 1});
+        // Unaligned region with texture offset
+        TestT2BCopy(utils::Expectation::Success, source, 0, {5, 7, 0}, destination, 0, 256, 3,
+                    {2, 3, 1});
+        // Unaligned region, with buffer offset
+        TestT2BCopy(utils::Expectation::Success, source, 2, {0, 0, 0}, destination, 31 * 4, 256, 3,
+                    {3, 3, 1});
+    }
+
+    // bytesPerRow is undefined
+    {
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 2, {1, 1, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 2, {3, 1, 1});
+        // Fail because height or depth is greater than 1:
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 2, {1, 2, 1});
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 2, {1, 1, 2});
+    }
+
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 0, 0,
+                    {0, 0, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 0, {0, 0, 1});
+        // An empty copy with depth = 0
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 0, 0,
+                    {0, 0, 0});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 0, {0, 0, 0});
+        // An empty copy touching the end of the buffer
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, bufferSize, 0,
+                    0, {0, 0, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, bufferSize,
+                    wgpu::kCopyStrideUndefined, 0, {0, 0, 1});
+        // An empty copy touching the side of the texture
+        TestT2BCopy(utils::Expectation::Success, source, 0, {16, 16, 0}, destination, 0, 0, 0,
+                    {0, 0, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {16, 16, 0}, destination, 0,
+                    wgpu::kCopyStrideUndefined, 0, {0, 0, 1});
+
+        // An empty copy with depth = 1 and bytesPerRow > 0
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    kTextureBytesPerRowAlignment, 0, {0, 0, 1});
+        // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                    kTextureBytesPerRowAlignment, 3, {0, 1, 0});
+    }
+}
+
+// Edge cases around requiredBytesInCopy computation for empty copies
+TEST_F(CopyCommandTest_T2B, Empty) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 1, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(0, wgpu::BufferUsage::CopyDst), 0, 256, 4, {0, 0, 0});
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(0, wgpu::BufferUsage::CopyDst), 0, 256, 4, {4, 0, 0});
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(0, wgpu::BufferUsage::CopyDst), 0, 256, 4, {4, 4, 0});
+
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(1024, wgpu::BufferUsage::CopyDst), 0, 256, 4, {4, 0, 2});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0},
+                CreateBuffer(1023, wgpu::BufferUsage::CopyDst), 0, 256, 4, {4, 0, 2});
+
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(1792, wgpu::BufferUsage::CopyDst), 0, 256, 4, {0, 4, 2});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0},
+                CreateBuffer(1791, wgpu::BufferUsage::CopyDst), 0, 256, 4, {0, 4, 2});
+
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0},
+                CreateBuffer(1024, wgpu::BufferUsage::CopyDst), 0, 256, 4, {0, 0, 2});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0},
+                CreateBuffer(1023, wgpu::BufferUsage::CopyDst), 0, 256, 4, {0, 0, 2});
+}
+
+// Test OOB conditions on the texture
+TEST_F(CopyCommandTest_T2B, OutOfBoundsOnTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // OOB on the texture because x + width overflows
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {13, 12, 0}, destination, 0, 256, 4,
+                {4, 4, 1});
+
+    // OOB on the texture because y + width overflows
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {12, 13, 0}, destination, 0, 256, 4,
+                {4, 4, 1});
+
+    // OOB on the texture because we overflow a non-zero mip
+    TestT2BCopy(utils::Expectation::Failure, source, 2, {1, 0, 0}, destination, 0, 256, 4,
+                {4, 4, 1});
+
+    // OOB on the texture even on an empty copy when we copy from a non-existent mip.
+    TestT2BCopy(utils::Expectation::Failure, source, 5, {0, 0, 0}, destination, 0, 0, 4, {0, 0, 1});
+}
+
+// Test OOB conditions on the buffer
+TEST_F(CopyCommandTest_T2B, OutOfBoundsOnBuffer) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // OOB on the buffer because we copy too many pixels
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 5,
+                {4, 5, 1});
+
+    // OOB on the buffer because of the offset
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 4, 256, 4,
+                {4, 4, 1});
+
+    // OOB on the buffer because (bytes per row * (height - 1) + width * bytesPerPixel) * depth
+    // overflows
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 512, 3,
+                {4, 3, 1});
+
+    // Not OOB on the buffer although bytes per row * height overflows
+    // but (bytes per row * (height - 1) + width * bytesPerPixel) * depth does not overflow
+    {
+        uint32_t destinationBufferSize = BufferSizeForTextureCopy(7, 3, 1);
+        ASSERT_TRUE(256 * 3 > destinationBufferSize)
+            << "bytes per row * height should overflow buffer";
+        wgpu::Buffer destinationBuffer =
+            CreateBuffer(destinationBufferSize, wgpu::BufferUsage::CopyDst);
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destinationBuffer, 0, 256, 3,
+                    {7, 3, 1});
+    }
+}
+
+// Test that we force Depth=1 on copies from to 2D textures
+TEST_F(CopyCommandTest_T2B, DepthConstraintFor2DTextures) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // Depth > 1 on an empty copy still errors
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 0, 0, {0, 0, 2});
+}
+
+// Test T2B copies with incorrect buffer usage
+TEST_F(CopyCommandTest_T2B, IncorrectUsage) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture sampled = Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                            wgpu::TextureUsage::TextureBinding);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+    wgpu::Buffer vertex = CreateBuffer(bufferSize, wgpu::BufferUsage::Vertex);
+
+    // Incorrect source usage
+    TestT2BCopy(utils::Expectation::Failure, sampled, 0, {0, 0, 0}, destination, 0, 256, 4,
+                {4, 4, 1});
+
+    // Incorrect destination usage
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, vertex, 0, 256, 4, {4, 4, 1});
+}
+
+TEST_F(CopyCommandTest_T2B, BytesPerRowConstraints) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(128, 16, 1);
+    wgpu::Texture source = Create2DTexture(128, 16, 5, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                           wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // bytes per row is 0
+    {
+        // copyHeight > 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 0, 4,
+                    {64, 4, 1});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 0, 4,
+                    {0, 4, 1});
+
+        // copyDepth > 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 0, 1,
+                    {64, 1, 4});
+        TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 0, 1,
+                    {0, 1, 4});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 0, 1,
+                    {64, 1, 1});
+    }
+
+    // bytes per row is not 256-byte aligned
+    {
+        // copyHeight > 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 128, 4,
+                    {4, 4, 1});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 128, 1,
+                    {4, 1, 1});
+    }
+
+    // bytes per row is less than width * bytesPerPixel
+    {
+        // copyHeight > 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 2,
+                    {65, 2, 1});
+        // copyHeight == 0
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 0,
+                    {65, 0, 1});
+
+        // copyDepth > 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 1,
+                    {65, 1, 2});
+        // copyDepth == 0
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 1,
+                    {65, 1, 0});
+
+        // copyHeight = 1 and copyDepth = 1
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 1,
+                    {65, 1, 1});
+    }
+}
+
+TEST_F(CopyCommandTest_T2B, RowsPerImageConstraints) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(5, 5, 6);
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 1, 5, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // rowsPerImage is zero (Valid)
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 0,
+                {1, 1, 1});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 0,
+                {4, 4, 1});
+
+    // rowsPerImage is undefined
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256,
+                wgpu::kCopyStrideUndefined, {4, 4, 1});
+    // Fail because depth > 1:
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256,
+                wgpu::kCopyStrideUndefined, {4, 4, 2});
+
+    // rowsPerImage is equal to copy height (Valid)
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 4,
+                {4, 4, 1});
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 4,
+                {4, 4, 2});
+
+    // rowsPerImage exceeds copy height (Valid)
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 5,
+                {4, 4, 1});
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 5,
+                {4, 4, 2});
+
+    // rowsPerImage is less than copy height (Invalid)
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 3,
+                {4, 4, 1});
+}
+
+// Test T2B copies with incorrect buffer offset usage for color texture
+TEST_F(CopyCommandTest_T2B, IncorrectBufferOffsetForColorTexture) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(128, 16, 1);
+    wgpu::Texture source = Create2DTexture(128, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                           wgpu::TextureUsage::CopySrc);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // Correct usage
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, bufferSize - 4, 256,
+                1, {1, 1, 1});
+
+    // Incorrect usages
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, bufferSize - 5, 256,
+                1, {1, 1, 1});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, bufferSize - 6, 256,
+                1, {1, 1, 1});
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, bufferSize - 7, 256,
+                1, {1, 1, 1});
+}
+
+// Test T2B copies with incorrect buffer offset usage for depth-stencil texture
+TEST_F(CopyCommandTest_T2B, IncorrectBufferOffsetForDepthStencilTexture) {
+    // TODO(dawn:570, dawn:666): List other valid parameters after missing texture formats
+    // are implemented, e.g. Stencil8.
+    std::array<std::tuple<wgpu::TextureFormat, wgpu::TextureAspect>, 8> params = {
+        std::make_tuple(wgpu::TextureFormat::Depth16Unorm, wgpu::TextureAspect::DepthOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth16Unorm, wgpu::TextureAspect::All),
+        std::make_tuple(wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureAspect::StencilOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth32Float, wgpu::TextureAspect::DepthOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth32Float, wgpu::TextureAspect::All),
+        std::make_tuple(wgpu::TextureFormat::Depth24UnormStencil8,
+                        wgpu::TextureAspect::StencilOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth32FloatStencil8, wgpu::TextureAspect::DepthOnly),
+        std::make_tuple(wgpu::TextureFormat::Depth32FloatStencil8,
+                        wgpu::TextureAspect::StencilOnly),
+    };
+
+    uint64_t bufferSize = BufferSizeForTextureCopy(32, 32, 1);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    for (auto param : params) {
+        wgpu::TextureFormat textureFormat = std::get<0>(param);
+        wgpu::TextureAspect textureAspect = std::get<1>(param);
+
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 5, 1, textureFormat, wgpu::TextureUsage::CopySrc);
+
+        for (uint64_t dstOffset = 0; dstOffset < 8; dstOffset++) {
+            utils::Expectation expectation =
+                (dstOffset % 4 == 0) ? utils::Expectation::Success : utils::Expectation::Failure;
+            TestT2BCopy(expectation, source, 0, {0, 0, 0}, destination, dstOffset, 256, 16,
+                        {16, 16, 1}, textureAspect);
+        }
+    }
+}
+
+// Test multisampled textures cannot be used in T2B copies.
+TEST_F(CopyCommandTest_T2B, CopyFromMultisampledTexture) {
+    wgpu::Texture source = Create2DTexture(2, 2, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                           wgpu::TextureUsage::CopySrc, 4);
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 2,
+                {2, 2, 1});
+}
+
+// Test T2B copies with buffer or texture in error state cause errors.
+TEST_F(CopyCommandTest_T2B, BufferOrTextureInErrorState) {
+    wgpu::BufferDescriptor errorBufferDescriptor;
+    errorBufferDescriptor.size = 4;
+    errorBufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc;
+    ASSERT_DEVICE_ERROR(wgpu::Buffer errorBuffer = device.CreateBuffer(&errorBufferDescriptor));
+
+    wgpu::TextureDescriptor errorTextureDescriptor;
+    errorTextureDescriptor.size.depthOrArrayLayers = 0;
+    ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture = device.CreateTexture(&errorTextureDescriptor));
+
+    wgpu::ImageCopyBuffer errorImageCopyBuffer = utils::CreateImageCopyBuffer(errorBuffer, 0, 0, 0);
+    wgpu::ImageCopyTexture errorImageCopyTexture =
+        utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D extent3D = {0, 0, 0};
+
+    {
+        uint64_t bufferSize = BufferSizeForTextureCopy(4, 4, 1);
+        wgpu::Buffer source = CreateBuffer(bufferSize, wgpu::BufferUsage::CopySrc);
+
+        wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(source, 0, 0, 0);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&errorImageCopyTexture, &imageCopyBuffer, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::Texture destination = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&imageCopyTexture, &errorImageCopyBuffer, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Regression tests for a bug in the computation of texture copy buffer size in Dawn.
+TEST_F(CopyCommandTest_T2B, TextureCopyBufferSizeLastRowComputation) {
+    constexpr uint32_t kBytesPerRow = 256;
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
+                                                             wgpu::TextureFormat::RG8Unorm};
+
+    {
+        // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid buffer size in
+        // this test because the buffer sizes in T2B copies are not in texels but in bytes.
+        constexpr uint32_t kInvalidBufferSize = kBytesPerRow * (kHeight - 1) + kWidth;
+
+        for (wgpu::TextureFormat format : kFormats) {
+            wgpu::Texture source =
+                Create2DTexture(kWidth, kHeight, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            wgpu::Buffer destination = CreateBuffer(kInvalidBufferSize, wgpu::BufferUsage::CopyDst);
+            TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                        kBytesPerRow, kHeight, {kWidth, kHeight, 1});
+        }
+    }
+
+    {
+        for (wgpu::TextureFormat format : kFormats) {
+            uint32_t validBufferSize = BufferSizeForTextureCopy(kWidth, kHeight, 1, format);
+            wgpu::Texture source =
+                Create2DTexture(kWidth, kHeight, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // Verify the return value of BufferSizeForTextureCopy() is exactly the minimum valid
+            // buffer size in this test.
+            {
+                uint32_t invalidBufferSize = validBufferSize - 1;
+                wgpu::Buffer destination =
+                    CreateBuffer(invalidBufferSize, wgpu::BufferUsage::CopyDst);
+                TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                            kBytesPerRow, kHeight, {kWidth, kHeight, 1});
+            }
+
+            {
+                wgpu::Buffer destination =
+                    CreateBuffer(validBufferSize, wgpu::BufferUsage::CopyDst);
+                TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                            kBytesPerRow, kHeight, {kWidth, kHeight, 1});
+            }
+        }
+    }
+}
+
+// Test copy from mip map of non square texture to buffer
+TEST_F(CopyCommandTest_T2B, CopyFromMipmapOfNonSquareTexture) {
+    uint32_t maxMipmapLevel = 3;
+    wgpu::Texture source = Create2DTexture(4, 2, maxMipmapLevel, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                           wgpu::TextureUsage::CopySrc);
+    uint64_t bufferSize = BufferSizeForTextureCopy(4, 2, 1);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    // Copy from top level mip map
+    TestT2BCopy(utils::Expectation::Success, source, maxMipmapLevel - 1, {0, 0, 0}, destination, 0,
+                256, 1, {1, 1, 1});
+    // Copy from high level mip map
+    TestT2BCopy(utils::Expectation::Success, source, maxMipmapLevel - 2, {0, 0, 0}, destination, 0,
+                256, 1, {2, 1, 1});
+    // Mip level out of range
+    TestT2BCopy(utils::Expectation::Failure, source, maxMipmapLevel, {0, 0, 0}, destination, 0, 256,
+                1, {2, 1, 1});
+    // Copy origin out of range
+    TestT2BCopy(utils::Expectation::Failure, source, maxMipmapLevel - 2, {2, 0, 0}, destination, 0,
+                256, 1, {2, 1, 1});
+    // Copy size out of range
+    TestT2BCopy(utils::Expectation::Failure, source, maxMipmapLevel - 2, {1, 0, 0}, destination, 0,
+                256, 1, {2, 1, 1});
+}
+
+// Test copy from only the depth aspect of a texture
+TEST_F(CopyCommandTest_T2B, CopyFromDepthAspect) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1, wgpu::TextureFormat::Depth32Float);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    constexpr std::array<wgpu::TextureFormat, 3> kAllowDepthCopyFormats = {
+        wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth32FloatStencil8};
+    for (wgpu::TextureFormat format : kAllowDepthCopyFormats) {
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // Test it is valid to copy the depth aspect of these depth/stencil texture
+            TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                        {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+
+            if (utils::IsDepthOnlyFormat(format)) {
+                // Test "all" of a depth texture which is only the depth aspect.
+                TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256,
+                            16, {16, 16, 1}, wgpu::TextureAspect::All);
+            }
+        }
+    }
+
+    constexpr std::array<wgpu::TextureFormat, 3> kDisallowDepthCopyFormats = {
+        wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24UnormStencil8};
+    for (wgpu::TextureFormat format : kDisallowDepthCopyFormats) {
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // Test it is invalid to copy from the depth aspect of these depth/stencil texture
+            TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                        {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+        }
+    }
+
+    {
+        wgpu::Texture source = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::R32Float,
+                                               wgpu::TextureUsage::CopySrc);
+
+        // Test it is invalid to copy from the depth aspect of a color texture
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                    {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+    }
+}
+
+// Test copy from only the stencil aspect of a texture
+TEST_F(CopyCommandTest_T2B, CopyFromStencilAspect) {
+    uint64_t bufferSize = BufferSizeForTextureCopy(16, 16, 1, wgpu::TextureFormat::R8Uint);
+    wgpu::Buffer destination = CreateBuffer(bufferSize, wgpu::BufferUsage::CopyDst);
+
+    for (wgpu::TextureFormat format : utils::kStencilFormats) {
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // TODO(dawn:666): Test "all" of Stencil8 format when it's implemented
+
+            // Test it is valid to copy from the stencil aspect of a depth/stencil texture
+            TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                        {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // Test it is invalid if the buffer is too small
+            wgpu::Buffer destinationSmall =
+                CreateBuffer(bufferSize - 1, wgpu::BufferUsage::CopyDst);
+            TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destinationSmall, 0, 256,
+                        16, {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // A copy fails when using a depth/stencil texture, and the entire subresource isn't
+        // copied
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+            TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 15,
+                        {15, 15, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 1,
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // Non-zero mip: A copy fails when using a depth/stencil texture, and the entire
+        // subresource isn't copied
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 2, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // Whole mip is success
+            TestT2BCopy(utils::Expectation::Success, source, 1, {0, 0, 0}, destination, 0, 256, 8,
+                        {8, 8, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // Partial mip fails
+            TestT2BCopy(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0, 256, 7,
+                        {7, 7, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestT2BCopy(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0, 256, 1,
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+
+        // Non-zero mip, non-pow-2: A copy fails when using a depth/stencil texture, and the
+        // entire subresource isn't copied
+        {
+            wgpu::Texture source =
+                Create2DTexture(17, 17, 2, 1, format, wgpu::TextureUsage::CopySrc);
+
+            // Whole mip is success
+            TestT2BCopy(utils::Expectation::Success, source, 1, {0, 0, 0}, destination, 0, 256, 8,
+                        {8, 8, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // Partial mip fails
+            TestT2BCopy(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0, 256, 7,
+                        {7, 7, 1}, wgpu::TextureAspect::StencilOnly);
+
+            TestT2BCopy(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0, 256, 1,
+                        {1, 1, 1}, wgpu::TextureAspect::StencilOnly);
+        }
+    }
+
+    {
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::R8Uint, wgpu::TextureUsage::CopySrc);
+
+        // Test it is invalid to copy from the stencil aspect of a color texture
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+    {
+        wgpu::Texture source = Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::Depth24Plus,
+                                               wgpu::TextureUsage::CopySrc);
+
+        // Test it is invalid to copy from the stencil aspect of a depth-only texture
+        TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, 256, 16,
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+}
+
+// Test that CopyT2B throws an error when requiredBytesInCopy overflows uint64_t
+TEST_F(CopyCommandTest_T2B, RequiredBytesInCopyOverflow) {
+    wgpu::Buffer destination = CreateBuffer(10000, wgpu::BufferUsage::CopyDst);
+    wgpu::Texture source =
+        Create2DTexture(1, 1, 1, 16, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+
+    // Success
+    TestT2BCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, (1 << 31),
+                (1 << 31), {1, 1, 1});
+    // Failure because bytesPerImage * (depth - 1) overflows
+    TestT2BCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, (1 << 31),
+                (1 << 31), {1, 1, 16});
+}
+
+class CopyCommandTest_T2T : public CopyCommandTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[2] = {wgpu::FeatureName::Depth24UnormStencil8,
+                                                 wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 2;
+        return adapter.CreateDevice(&descriptor);
+    }
+
+    wgpu::TextureFormat GetCopyCompatibleFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return wgpu::TextureFormat::BGRA8UnormSrgb;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return wgpu::TextureFormat::BGRA8Unorm;
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return wgpu::TextureFormat::RGBA8UnormSrgb;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return wgpu::TextureFormat::RGBA8Unorm;
+            default:
+                UNREACHABLE();
+        }
+    }
+};
+
+TEST_F(CopyCommandTest_T2T, Success) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy a region along top left boundary
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {4, 4, 1});
+
+        // Copy entire texture
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1});
+
+        // Copy a region along bottom right boundary
+        TestT2TCopy(utils::Expectation::Success, source, 0, {8, 8, 0}, destination, 0, {8, 8, 0},
+                    {8, 8, 1});
+
+        // Copy region into mip
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 2, {0, 0, 0},
+                    {4, 4, 1});
+
+        // Copy mip into region
+        TestT2TCopy(utils::Expectation::Success, source, 2, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {4, 4, 1});
+
+        // Copy between slices
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 1}, destination, 0, {0, 0, 1},
+                    {16, 16, 1});
+
+        // Copy multiple slices (srcImageCopyTexture.arrayLayer + copySize.depthOrArrayLayers ==
+        // srcImageCopyTexture.texture.arrayLayerCount)
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 2}, destination, 0, {0, 0, 0},
+                    {16, 16, 2});
+
+        // Copy multiple slices (dstImageCopyTexture.arrayLayer + copySize.depthOrArrayLayers ==
+        // dstImageCopyTexture.texture.arrayLayerCount)
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 2},
+                    {16, 16, 2});
+    }
+
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {0, 0, 1});
+
+        // An empty copy with depth = 0
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {0, 0, 0});
+
+        // An empty copy touching the side of the source texture
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {16, 16, 0},
+                    {0, 0, 1});
+
+        // An empty copy touching the side of the destination texture
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {16, 16, 0},
+                    {0, 0, 1});
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, IncorrectUsage) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Incorrect source usage causes failure
+    TestT2TCopy(utils::Expectation::Failure, destination, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                {16, 16, 1});
+
+    // Incorrect destination usage causes failure
+    TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, source, 0, {0, 0, 0},
+                {16, 16, 1});
+}
+
+TEST_F(CopyCommandTest_T2T, OutOfBounds) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // OOB on source
+    {
+        // x + width overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {1, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1});
+
+        // y + height overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 1, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1});
+
+        // non-zero mip overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {9, 9, 1});
+
+        // arrayLayer + depth OOB
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 3}, destination, 0, {0, 0, 0},
+                    {16, 16, 2});
+
+        // empty copy on non-existent mip fails
+        TestT2TCopy(utils::Expectation::Failure, source, 6, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {0, 0, 1});
+
+        // empty copy from non-existent slice fails
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 4}, destination, 0, {0, 0, 0},
+                    {0, 0, 1});
+    }
+
+    // OOB on destination
+    {
+        // x + width overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {1, 0, 0},
+                    {16, 16, 1});
+
+        // y + height overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 1, 0},
+                    {16, 16, 1});
+
+        // non-zero mip overflows
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 1, {0, 0, 0},
+                    {9, 9, 1});
+
+        // arrayLayer + depth OOB
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 3},
+                    {16, 16, 2});
+
+        // empty copy on non-existent mip fails
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 6, {0, 0, 0},
+                    {0, 0, 1});
+
+        // empty copy on non-existent slice fails
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 4},
+                    {0, 0, 1});
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, 2DTextureDepthStencil) {
+    for (wgpu::TextureFormat format : utils::kDepthAndStencilFormats) {
+        wgpu::Texture source = Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+        // Success when entire depth stencil subresource is copied
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1});
+
+        // Failure when depth stencil subresource is partially copied
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {15, 15, 1});
+
+        // Failure when selecting the depth aspect (not all)
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+
+        // Failure when selecting the stencil aspect (not all)
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, 2DTextureDepthOnly) {
+    constexpr std::array<wgpu::TextureFormat, 2> kDepthOnlyFormats = {
+        wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth32Float};
+
+    for (wgpu::TextureFormat format : kDepthOnlyFormats) {
+        wgpu::Texture source = Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+        // Success when entire subresource is copied
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1});
+
+        // Failure when depth subresource is partially copied
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {15, 15, 1});
+
+        // Success when selecting the depth aspect (not all)
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::DepthOnly);
+
+        // Failure when selecting the stencil aspect (not all)
+        TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {16, 16, 1}, wgpu::TextureAspect::StencilOnly);
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, 2DTextureArrayDepthStencil) {
+    for (wgpu::TextureFormat format : utils::kDepthAndStencilFormats) {
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 3, format, wgpu::TextureUsage::CopySrc);
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopyDst);
+
+            // Success when entire depth stencil subresource (layer) is the copy source
+            TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 1}, destination, 0,
+                        {0, 0, 0}, {16, 16, 1});
+        }
+
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 1, format, wgpu::TextureUsage::CopySrc);
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 1, 3, format, wgpu::TextureUsage::CopyDst);
+
+            // Success when entire depth stencil subresource (layer) is the copy destination
+            TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                        {0, 0, 1}, {16, 16, 1});
+        }
+
+        {
+            wgpu::Texture source =
+                Create2DTexture(16, 16, 1, 3, format, wgpu::TextureUsage::CopySrc);
+            wgpu::Texture destination =
+                Create2DTexture(16, 16, 1, 3, format, wgpu::TextureUsage::CopyDst);
+
+            // Success when src and dst are an entire depth stencil subresource (layer)
+            TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 2}, destination, 0,
+                        {0, 0, 1}, {16, 16, 1});
+
+            // Success when src and dst are an array of entire depth stencil subresources
+            TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 1}, destination, 0,
+                        {0, 0, 0}, {16, 16, 2});
+        }
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, FormatsMismatch) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Uint, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // Failure when formats don't match
+    TestT2TCopy(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                {0, 0, 1});
+}
+
+// Test copying between textures that have srgb compatible texture formats;
+TEST_F(CopyCommandTest_T2T, SrgbFormatsCompatibility) {
+    for (wgpu::TextureFormat srcTextureFormat :
+         {wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
+          wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureFormat::RGBA8UnormSrgb}) {
+        wgpu::TextureFormat dstTextureFormat = GetCopyCompatibleFormat(srcTextureFormat);
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 5, 2, srcTextureFormat, wgpu::TextureUsage::CopySrc);
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 5, 2, dstTextureFormat, wgpu::TextureUsage::CopyDst);
+
+        // Failure when formats don't match
+        TestT2TCopy(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0, {0, 0, 0},
+                    {0, 0, 1});
+    }
+}
+
+TEST_F(CopyCommandTest_T2T, MultisampledCopies) {
+    wgpu::Texture sourceMultiSampled1x = Create2DTexture(
+        16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc, 1);
+    wgpu::Texture sourceMultiSampled4x = Create2DTexture(
+        16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc, 4);
+    wgpu::Texture destinationMultiSampled4x = Create2DTexture(
+        16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst, 4);
+
+    // Success when entire multisampled subresource is copied
+    {
+        TestT2TCopy(utils::Expectation::Success, sourceMultiSampled4x, 0, {0, 0, 0},
+                    destinationMultiSampled4x, 0, {0, 0, 0}, {16, 16, 1});
+    }
+
+    // Failures
+    {
+        // An empty copy with mismatched samples fails
+        TestT2TCopy(utils::Expectation::Failure, sourceMultiSampled1x, 0, {0, 0, 0},
+                    destinationMultiSampled4x, 0, {0, 0, 0}, {0, 0, 1});
+
+        // A copy fails when samples are greater than 1, and entire subresource isn't copied
+        TestT2TCopy(utils::Expectation::Failure, sourceMultiSampled4x, 0, {0, 0, 0},
+                    destinationMultiSampled4x, 0, {0, 0, 0}, {15, 15, 1});
+    }
+}
+
+// Test copy to mip map of non square textures
+TEST_F(CopyCommandTest_T2T, CopyToMipmapOfNonSquareTexture) {
+    uint32_t maxMipmapLevel = 3;
+    wgpu::Texture source = Create2DTexture(4, 2, maxMipmapLevel, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                           wgpu::TextureUsage::CopySrc);
+    wgpu::Texture destination = Create2DTexture(
+        4, 2, maxMipmapLevel, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+    // Copy to top level mip map
+    TestT2TCopy(utils::Expectation::Success, source, maxMipmapLevel - 1, {0, 0, 0}, destination,
+                maxMipmapLevel - 1, {0, 0, 0}, {1, 1, 1});
+    // Copy to high level mip map
+    TestT2TCopy(utils::Expectation::Success, source, maxMipmapLevel - 2, {0, 0, 0}, destination,
+                maxMipmapLevel - 2, {0, 0, 0}, {2, 1, 1});
+    // Mip level out of range
+    TestT2TCopy(utils::Expectation::Failure, source, maxMipmapLevel, {0, 0, 0}, destination,
+                maxMipmapLevel, {0, 0, 0}, {2, 1, 1});
+    // Copy origin out of range
+    TestT2TCopy(utils::Expectation::Failure, source, maxMipmapLevel - 2, {2, 0, 0}, destination,
+                maxMipmapLevel - 2, {2, 0, 0}, {2, 1, 1});
+    // Copy size out of range
+    TestT2TCopy(utils::Expectation::Failure, source, maxMipmapLevel - 2, {1, 0, 0}, destination,
+                maxMipmapLevel - 2, {0, 0, 0}, {2, 1, 1});
+}
+
+// Test copy within the same texture
+TEST_F(CopyCommandTest_T2T, CopyWithinSameTexture) {
+    wgpu::Texture texture =
+        Create2DTexture(32, 32, 2, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst);
+
+    // The base array layer of the copy source being equal to that of the copy destination is not
+    // allowed.
+    {
+        constexpr uint32_t kBaseArrayLayer = 0;
+
+        // copyExtent.z == 1
+        {
+            constexpr uint32_t kCopyArrayLayerCount = 1;
+            TestT2TCopy(utils::Expectation::Failure, texture, 0, {0, 0, kBaseArrayLayer}, texture,
+                        0, {2, 2, kBaseArrayLayer}, {1, 1, kCopyArrayLayerCount});
+        }
+
+        // copyExtent.z > 1
+        {
+            constexpr uint32_t kCopyArrayLayerCount = 2;
+            TestT2TCopy(utils::Expectation::Failure, texture, 0, {0, 0, kBaseArrayLayer}, texture,
+                        0, {2, 2, kBaseArrayLayer}, {1, 1, kCopyArrayLayerCount});
+        }
+    }
+
+    // The array slices of the source involved in the copy have no overlap with those of the
+    // destination is allowed.
+    {
+        constexpr uint32_t kCopyArrayLayerCount = 2;
+
+        // srcBaseArrayLayer < dstBaseArrayLayer
+        {
+            constexpr uint32_t kSrcBaseArrayLayer = 0;
+            constexpr uint32_t kDstBaseArrayLayer = kSrcBaseArrayLayer + kCopyArrayLayerCount;
+
+            TestT2TCopy(utils::Expectation::Success, texture, 0, {0, 0, kSrcBaseArrayLayer},
+                        texture, 0, {0, 0, kDstBaseArrayLayer}, {1, 1, kCopyArrayLayerCount});
+        }
+
+        // srcBaseArrayLayer > dstBaseArrayLayer
+        {
+            constexpr uint32_t kSrcBaseArrayLayer = 2;
+            constexpr uint32_t kDstBaseArrayLayer = kSrcBaseArrayLayer - kCopyArrayLayerCount;
+            TestT2TCopy(utils::Expectation::Success, texture, 0, {0, 0, kSrcBaseArrayLayer},
+                        texture, 0, {0, 0, kDstBaseArrayLayer}, {1, 1, kCopyArrayLayerCount});
+        }
+    }
+
+    // Copy between different mipmap levels is allowed.
+    {
+        constexpr uint32_t kSrcMipLevel = 0;
+        constexpr uint32_t kDstMipLevel = 1;
+
+        // Copy one slice
+        {
+            constexpr uint32_t kCopyArrayLayerCount = 1;
+            TestT2TCopy(utils::Expectation::Success, texture, kSrcMipLevel, {0, 0, 0}, texture,
+                        kDstMipLevel, {1, 1, 0}, {1, 1, kCopyArrayLayerCount});
+        }
+
+        // The base array layer of the copy source is equal to that of the copy destination.
+        {
+            constexpr uint32_t kCopyArrayLayerCount = 2;
+            constexpr uint32_t kBaseArrayLayer = 0;
+
+            TestT2TCopy(utils::Expectation::Success, texture, kSrcMipLevel, {0, 0, kBaseArrayLayer},
+                        texture, kDstMipLevel, {1, 1, kBaseArrayLayer},
+                        {1, 1, kCopyArrayLayerCount});
+        }
+
+        // The array slices of the source involved in the copy have overlaps with those of the
+        // destination, and the copy areas have overlaps.
+        {
+            constexpr uint32_t kCopyArrayLayerCount = 2;
+
+            constexpr uint32_t kSrcBaseArrayLayer = 0;
+            constexpr uint32_t kDstBaseArrayLayer = 1;
+            ASSERT(kSrcBaseArrayLayer + kCopyArrayLayerCount > kDstBaseArrayLayer);
+
+            constexpr wgpu::Extent3D kCopyExtent = {1, 1, kCopyArrayLayerCount};
+
+            TestT2TCopy(utils::Expectation::Success, texture, kSrcMipLevel,
+                        {0, 0, kSrcBaseArrayLayer}, texture, kDstMipLevel,
+                        {0, 0, kDstBaseArrayLayer}, kCopyExtent);
+        }
+    }
+
+    // The array slices of the source involved in the copy have overlaps with those of the
+    // destination is not allowed.
+    {
+        constexpr uint32_t kMipmapLevel = 0;
+        constexpr uint32_t kMinBaseArrayLayer = 0;
+        constexpr uint32_t kMaxBaseArrayLayer = 1;
+        constexpr uint32_t kCopyArrayLayerCount = 3;
+        ASSERT(kMinBaseArrayLayer + kCopyArrayLayerCount > kMaxBaseArrayLayer);
+
+        constexpr wgpu::Extent3D kCopyExtent = {4, 4, kCopyArrayLayerCount};
+
+        const wgpu::Origin3D srcOrigin = {0, 0, kMinBaseArrayLayer};
+        const wgpu::Origin3D dstOrigin = {4, 4, kMaxBaseArrayLayer};
+        TestT2TCopy(utils::Expectation::Failure, texture, kMipmapLevel, srcOrigin, texture,
+                    kMipmapLevel, dstOrigin, kCopyExtent);
+    }
+
+    // Copy between different mipmap levels and array slices is allowed.
+    TestT2TCopy(utils::Expectation::Success, texture, 0, {0, 0, 1}, texture, 1, {1, 1, 0},
+                {1, 1, 1});
+
+    // Copy between 3D texture of both overlapping depth ranges is not allowed.
+    {
+        wgpu::Texture texture3D =
+            Create3DTexture(32, 32, 4, 2, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst);
+
+        constexpr uint32_t kMipmapLevel = 0;
+        constexpr wgpu::Origin3D kSrcOrigin = {0, 0, 0};
+        constexpr wgpu::Origin3D kDstOrigin = {0, 0, 1};
+        constexpr wgpu::Extent3D kCopyExtent = {4, 4, 2};
+
+        TestT2TCopy(utils::Expectation::Failure, texture3D, kMipmapLevel, kSrcOrigin, texture3D,
+                    kMipmapLevel, kDstOrigin, kCopyExtent);
+    }
+
+    // Copy between 3D texture of both non-overlapping depth ranges is not allowed.
+    {
+        wgpu::Texture texture3D =
+            Create3DTexture(32, 32, 4, 2, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst);
+
+        constexpr uint32_t kMipmapLevel = 0;
+        constexpr wgpu::Origin3D kSrcOrigin = {0, 0, 0};
+        constexpr wgpu::Origin3D kDstOrigin = {0, 0, 2};
+        constexpr wgpu::Extent3D kCopyExtent = {4, 4, 1};
+
+        TestT2TCopy(utils::Expectation::Failure, texture3D, kMipmapLevel, kSrcOrigin, texture3D,
+                    kMipmapLevel, kDstOrigin, kCopyExtent);
+    }
+}
+
+class CopyCommandTest_CompressedTextureFormats : public CopyCommandTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                 wgpu::FeatureName::TextureCompressionETC2,
+                                                 wgpu::FeatureName::TextureCompressionASTC};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 3;
+        return adapter.CreateDevice(&descriptor);
+    }
+
+    wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
+                                  uint32_t mipmapLevels,
+                                  uint32_t width,
+                                  uint32_t height) {
+        constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst |
+                                              wgpu::TextureUsage::CopySrc |
+                                              wgpu::TextureUsage::TextureBinding;
+        constexpr uint32_t kArrayLayers = 1;
+        return CopyCommandTest::Create2DTexture(width, height, mipmapLevels, kArrayLayers, format,
+                                                kUsage, 1);
+    }
+
+    // By default, we use a 4x4 tiling of the format block size.
+    wgpu::Texture Create2DTexture(wgpu::TextureFormat format) {
+        uint32_t width = utils::GetTextureFormatBlockWidth(format) * 4;
+        uint32_t height = utils::GetTextureFormatBlockHeight(format) * 4;
+        return Create2DTexture(format, 1, width, height);
+    }
+
+    wgpu::TextureFormat GetCopyCompatibleFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+                return wgpu::TextureFormat::BC1RGBAUnormSrgb;
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return wgpu::TextureFormat::BC1RGBAUnorm;
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+                return wgpu::TextureFormat::BC2RGBAUnormSrgb;
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return wgpu::TextureFormat::BC2RGBAUnorm;
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+                return wgpu::TextureFormat::BC3RGBAUnormSrgb;
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return wgpu::TextureFormat::BC3RGBAUnorm;
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return wgpu::TextureFormat::BC7RGBAUnormSrgb;
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return wgpu::TextureFormat::BC7RGBAUnorm;
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+                return wgpu::TextureFormat::ETC2RGB8UnormSrgb;
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+                return wgpu::TextureFormat::ETC2RGB8Unorm;
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+                return wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+                return wgpu::TextureFormat::ETC2RGB8A1Unorm;
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+                return wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+                return wgpu::TextureFormat::ETC2RGBA8Unorm;
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+                return wgpu::TextureFormat::ASTC4x4UnormSrgb;
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                return wgpu::TextureFormat::ASTC4x4Unorm;
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+                return wgpu::TextureFormat::ASTC5x4UnormSrgb;
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+                return wgpu::TextureFormat::ASTC5x4Unorm;
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+                return wgpu::TextureFormat::ASTC5x5UnormSrgb;
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                return wgpu::TextureFormat::ASTC5x5Unorm;
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+                return wgpu::TextureFormat::ASTC6x5UnormSrgb;
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+                return wgpu::TextureFormat::ASTC6x5Unorm;
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+                return wgpu::TextureFormat::ASTC6x6UnormSrgb;
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                return wgpu::TextureFormat::ASTC6x6Unorm;
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+                return wgpu::TextureFormat::ASTC8x5UnormSrgb;
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+                return wgpu::TextureFormat::ASTC8x5Unorm;
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+                return wgpu::TextureFormat::ASTC8x6UnormSrgb;
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+                return wgpu::TextureFormat::ASTC8x6Unorm;
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+                return wgpu::TextureFormat::ASTC8x8UnormSrgb;
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                return wgpu::TextureFormat::ASTC8x8Unorm;
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+                return wgpu::TextureFormat::ASTC10x5UnormSrgb;
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+                return wgpu::TextureFormat::ASTC10x5Unorm;
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+                return wgpu::TextureFormat::ASTC10x6UnormSrgb;
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+                return wgpu::TextureFormat::ASTC10x6Unorm;
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+                return wgpu::TextureFormat::ASTC10x8UnormSrgb;
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+                return wgpu::TextureFormat::ASTC10x8Unorm;
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+                return wgpu::TextureFormat::ASTC10x10UnormSrgb;
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                return wgpu::TextureFormat::ASTC10x10Unorm;
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+                return wgpu::TextureFormat::ASTC12x10UnormSrgb;
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+                return wgpu::TextureFormat::ASTC12x10Unorm;
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+                return wgpu::TextureFormat::ASTC12x12UnormSrgb;
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return wgpu::TextureFormat::ASTC12x12Unorm;
+            default:
+                UNREACHABLE();
+        }
+    }
+};
+
+// Tests to verify that bufferOffset must be a multiple of the compressed texture blocks in bytes
+// in buffer-to-texture or texture-to-buffer copies with compressed texture formats.
+TEST_F(CopyCommandTest_CompressedTextureFormats, BufferOffset) {
+    wgpu::Buffer buffer =
+        CreateBuffer(512, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid usages of BufferOffset in B2T and T2B copies with compressed texture formats.
+        {
+            uint32_t validBufferOffset = utils::GetTexelBlockSizeInBytes(format);
+            TestBothTBCopies(utils::Expectation::Success, buffer, validBufferOffset, 256, 4,
+                             texture, 0, {0, 0, 0}, {blockWidth, blockHeight, 1});
+        }
+
+        // Failures on invalid bufferOffset.
+        {
+            uint32_t kInvalidBufferOffset = utils::GetTexelBlockSizeInBytes(format) / 2;
+            TestBothTBCopies(utils::Expectation::Failure, buffer, kInvalidBufferOffset, 256, 4,
+                             texture, 0, {0, 0, 0}, {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// Tests to verify that bytesPerRow must not be less than (width / blockWidth) * blockSizeInBytes.
+// Note that in Dawn we require bytesPerRow be a multiple of 256, which ensures bytesPerRow will
+// always be the multiple of compressed texture block width in bytes.
+TEST_F(CopyCommandTest_CompressedTextureFormats, BytesPerRow) {
+    wgpu::Buffer buffer =
+        CreateBuffer(1024, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    // Used to compute test width and height. We choose 320 because it isn't divisible by 256 and
+    // hence will need to be aligned.
+    constexpr uint32_t kInvalidBytesPerRow = 320;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        // Compute the test width and height such that the smallest BytesPerRow is always equal to
+        // 320. We choose 320 because it isn't divisible by 256 and hence needs to be aligned.
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t testWidth = kInvalidBytesPerRow * blockWidth / blockByteSize;
+        uint32_t testHeight = kInvalidBytesPerRow * blockHeight / blockByteSize;
+        wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
+
+        // Failures on the BytesPerRow that is not large enough.
+        {
+            constexpr uint32_t kSmallBytesPerRow = 256;
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, kSmallBytesPerRow, 4, texture,
+                             0, {0, 0, 0}, {testWidth, blockHeight, 1});
+        }
+
+        // Test it is not valid to use a BytesPerRow that is not a multiple of 256.
+        {
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, kInvalidBytesPerRow, 4,
+                             texture, 0, {0, 0, 0}, {testWidth, blockHeight, 1});
+        }
+
+        // Test the smallest valid BytesPerRow should work.
+        {
+            uint32_t smallestValidBytesPerRow = Align(kInvalidBytesPerRow, 256);
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, smallestValidBytesPerRow, 4,
+                             texture, 0, {0, 0, 0}, {testWidth, blockHeight, 1});
+        }
+    }
+}
+
+// rowsPerImage must be >= heightInBlocks.
+TEST_F(CopyCommandTest_CompressedTextureFormats, RowsPerImage) {
+    wgpu::Buffer buffer =
+        CreateBuffer(1024, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid usages of rowsPerImage in B2T and T2B copies with compressed texture formats.
+        {
+            constexpr uint32_t kValidRowsPerImage = 5;
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, 256, kValidRowsPerImage,
+                             texture, 0, {0, 0, 0}, {blockWidth, blockHeight * 4, 1});
+        }
+        {
+            constexpr uint32_t kValidRowsPerImage = 4;
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, 256, kValidRowsPerImage,
+                             texture, 0, {0, 0, 0}, {blockWidth, blockHeight * 4, 1});
+        }
+
+        // rowsPerImage is smaller than height.
+        {
+            constexpr uint32_t kInvalidRowsPerImage = 3;
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, 256, kInvalidRowsPerImage,
+                             texture, 0, {0, 0, 0}, {blockWidth, blockHeight * 5, 1});
+        }
+    }
+}
+
+// Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width and
+// ImageOffset.y must be a multiple of the compressed texture block height in buffer-to-texture,
+// texture-to-buffer or texture-to-texture copies with compressed texture formats.
+TEST_F(CopyCommandTest_CompressedTextureFormats, ImageOffset) {
+    wgpu::Buffer buffer =
+        CreateBuffer(512, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        wgpu::Texture texture2 = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
+
+        // Valid usages of ImageOffset in B2T, T2B and T2T copies with compressed texture formats.
+        {
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, 256, 4, texture, 0,
+                             smallestValidOrigin3D, {blockWidth, blockHeight, 1});
+            TestBothT2TCopies(utils::Expectation::Success, texture, 0, {0, 0, 0}, texture2, 0,
+                              smallestValidOrigin3D, {blockWidth, blockHeight, 1});
+        }
+
+        // Failures on invalid ImageOffset.x.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1, smallestValidOrigin3D.y,
+                                              0};
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, 256, 4, texture, 0,
+                             invalidOrigin3D, {blockWidth, blockHeight, 1});
+            TestBothT2TCopies(utils::Expectation::Failure, texture, 0, invalidOrigin3D, texture2, 0,
+                              {0, 0, 0}, {blockWidth, blockHeight, 1});
+        }
+
+        // Failures on invalid ImageOffset.y.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x, smallestValidOrigin3D.y - 1,
+                                              0};
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, 256, 4, texture, 0,
+                             invalidOrigin3D, {blockWidth, blockHeight, 1});
+            TestBothT2TCopies(utils::Expectation::Failure, texture, 0, invalidOrigin3D, texture2, 0,
+                              {0, 0, 0}, {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width and
+// ImageExtent.y must be a multiple of the compressed texture block height in buffer-to-texture,
+// texture-to-buffer or texture-to-texture copies with compressed texture formats.
+TEST_F(CopyCommandTest_CompressedTextureFormats, ImageExtent) {
+    wgpu::Buffer buffer =
+        CreateBuffer(1024, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst);
+
+    constexpr uint32_t kMipmapLevels = 3;
+    // We choose a prime that is greater than the current max texel dimension size as a multiplier
+    // to compute the test texture size so that we can be certain that its level 2 mipmap (x4)
+    // cannot be a multiple of the dimension. This is useful for testing padding at the edges of
+    // the mipmaps.
+    constexpr uint32_t kBlockPerDim = 13;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = blockWidth * kBlockPerDim;
+        uint32_t testHeight = blockHeight * kBlockPerDim;
+        wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+        wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+
+        wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
+
+        // Valid usages of ImageExtent in B2T, T2B and T2T copies with compressed texture formats.
+        {
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, 256, 4, texture, 0, {0, 0, 0},
+                             smallestValidExtent3D);
+            TestBothT2TCopies(utils::Expectation::Success, texture, 0, {0, 0, 0}, texture2, 0,
+                              {0, 0, 0}, smallestValidExtent3D);
+        }
+
+        // Valid usages of ImageExtent in B2T, T2B and T2T copies with compressed texture formats
+        // and non-zero mipmap levels.
+        {
+            constexpr uint32_t kTestMipmapLevel = 2;
+            wgpu::Origin3D testOrigin = {
+                ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
+                ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
+            TestBothTBCopies(utils::Expectation::Success, buffer, 0, 256, 4, texture,
+                             kTestMipmapLevel, testOrigin, smallestValidExtent3D);
+            TestBothT2TCopies(utils::Expectation::Success, texture, kTestMipmapLevel, testOrigin,
+                              texture2, 0, {0, 0, 0}, smallestValidExtent3D);
+        }
+
+        // Failures on invalid ImageExtent.x.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
+                                              smallestValidExtent3D.height, 1};
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, 256, 4, texture, 0, {0, 0, 0},
+                             inValidExtent3D);
+            TestBothT2TCopies(utils::Expectation::Failure, texture, 0, {0, 0, 0}, texture2, 0,
+                              {0, 0, 0}, inValidExtent3D);
+        }
+
+        // Failures on invalid ImageExtent.y.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
+                                              smallestValidExtent3D.height - 1, 1};
+            TestBothTBCopies(utils::Expectation::Failure, buffer, 0, 256, 4, texture, 0, {0, 0, 0},
+                             inValidExtent3D);
+            TestBothT2TCopies(utils::Expectation::Failure, texture, 0, {0, 0, 0}, texture2, 0,
+                              {0, 0, 0}, inValidExtent3D);
+        }
+    }
+}
+
+// Test copies between buffer and multiple array layers of a compressed texture
+TEST_F(CopyCommandTest_CompressedTextureFormats, CopyToMultipleArrayLayers) {
+    constexpr uint32_t kWidthMultiplier = 3;
+    constexpr uint32_t kHeightMultiplier = 4;
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = kWidthMultiplier * blockWidth;
+        uint32_t testHeight = kHeightMultiplier * blockHeight;
+        wgpu::Texture texture = CopyCommandTest::Create2DTexture(
+            testWidth, testHeight, 1, 20, format,
+            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+        // Copy to all array layers
+        TestBothTBCopiesExactBufferSize(256, 4, texture, format, {0, 0, 0},
+                                        {testWidth, testHeight, 20});
+
+        // Copy to the highest array layer
+        TestBothTBCopiesExactBufferSize(256, 4, texture, format, {0, 0, 19},
+                                        {testWidth, testHeight, 1});
+
+        // Copy to array layers in the middle
+        TestBothTBCopiesExactBufferSize(256, 4, texture, format, {0, 0, 1},
+                                        {testWidth, testHeight, 18});
+
+        // Copy touching the texture corners with a non-packed rowsPerImage
+        TestBothTBCopiesExactBufferSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
+                                        {testWidth - blockWidth, testHeight - blockHeight, 16});
+    }
+}
+
+// Test copying between textures that have srgb compatible texture formats;
+TEST_F(CopyCommandTest_CompressedTextureFormats, SrgbFormatCompatibility) {
+    constexpr std::array<wgpu::TextureFormat, 42> srcFormats = {
+        wgpu::TextureFormat::BC1RGBAUnorm,    wgpu::TextureFormat::BC1RGBAUnormSrgb,
+        wgpu::TextureFormat::BC2RGBAUnorm,    wgpu::TextureFormat::BC2RGBAUnormSrgb,
+        wgpu::TextureFormat::BC3RGBAUnorm,    wgpu::TextureFormat::BC3RGBAUnormSrgb,
+        wgpu::TextureFormat::BC7RGBAUnorm,    wgpu::TextureFormat::BC7RGBAUnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+        wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+        wgpu::TextureFormat::ASTC4x4Unorm,    wgpu::TextureFormat::ASTC4x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x4Unorm,    wgpu::TextureFormat::ASTC5x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x5Unorm,    wgpu::TextureFormat::ASTC5x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x5Unorm,    wgpu::TextureFormat::ASTC6x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x6Unorm,    wgpu::TextureFormat::ASTC6x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x5Unorm,    wgpu::TextureFormat::ASTC8x5UnormSrgb,
+        wgpu::TextureFormat::ASTC8x6Unorm,    wgpu::TextureFormat::ASTC8x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x8Unorm,    wgpu::TextureFormat::ASTC8x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x5Unorm,   wgpu::TextureFormat::ASTC10x5UnormSrgb,
+        wgpu::TextureFormat::ASTC10x6Unorm,   wgpu::TextureFormat::ASTC10x6UnormSrgb,
+        wgpu::TextureFormat::ASTC10x8Unorm,   wgpu::TextureFormat::ASTC10x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x10Unorm,  wgpu::TextureFormat::ASTC10x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x10Unorm,  wgpu::TextureFormat::ASTC12x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x12Unorm,  wgpu::TextureFormat::ASTC12x12UnormSrgb};
+
+    constexpr uint32_t kBlockPerDim = 2;
+    constexpr uint32_t kMipmapLevels = 1;
+    for (wgpu::TextureFormat srcFormat : srcFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(srcFormat);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(srcFormat);
+        uint32_t testWidth = blockWidth * kBlockPerDim;
+        uint32_t testHeight = blockHeight * kBlockPerDim;
+        wgpu::Texture texture = Create2DTexture(srcFormat, kMipmapLevels, testWidth, testHeight);
+        wgpu::Texture texture2 = Create2DTexture(GetCopyCompatibleFormat(srcFormat), kMipmapLevels,
+                                                 testWidth, testHeight);
+        wgpu::Extent3D extent3D = {testWidth, testHeight, 1};
+
+        TestBothT2TCopies(utils::Expectation::Success, texture, 0, {0, 0, 0}, texture2, 0,
+                          {0, 0, 0}, extent3D);
+    }
+}
+
+class CopyCommandTest_ClearBuffer : public CopyCommandTest {};
+
+TEST_F(CopyCommandTest_ClearBuffer, Success) {
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    // Clear different ranges, including some that touch the OOB condition
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(destination, 0, 16);
+        encoder.ClearBuffer(destination, 0, 8);
+        encoder.ClearBuffer(destination, 8, 8);
+        encoder.Finish();
+    }
+
+    // Size is allowed to be omitted
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(destination, 0);
+        encoder.ClearBuffer(destination, 8);
+        encoder.Finish();
+    }
+
+    // Size and Offset are allowed to be omitted
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(destination);
+        encoder.Finish();
+    }
+}
+
+// Test a successful ClearBuffer where the last external reference is dropped.
+TEST_F(CopyCommandTest_ClearBuffer, DroppedBuffer) {
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ClearBuffer(destination, 0, 8);
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    destination = nullptr;
+    device.GetQueue().Submit(1, &commandBuffer);
+}
+
+// Test ClearBuffer copies with OOB
+TEST_F(CopyCommandTest_ClearBuffer, OutOfBounds) {
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(destination, 8, 12);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        // Despite being zero length, should still raise an error due to being out of bounds.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ClearBuffer(destination, 20, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test ClearBuffer with incorrect buffer usage
+TEST_F(CopyCommandTest_ClearBuffer, BadUsage) {
+    wgpu::Buffer vertex = CreateBuffer(16, wgpu::BufferUsage::Vertex);
+
+    // Destination with incorrect usage
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ClearBuffer(vertex, 0, 16);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test ClearBuffer with unaligned data size
+TEST_F(CopyCommandTest_ClearBuffer, UnalignedSize) {
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ClearBuffer(destination, 0, 2);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test ClearBuffer with unaligned offset
+TEST_F(CopyCommandTest_ClearBuffer, UnalignedOffset) {
+    wgpu::Buffer destination = CreateBuffer(16, wgpu::BufferUsage::CopyDst);
+
+    // Unaligned destination offset
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ClearBuffer(destination, 2, 4);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test ClearBuffer with buffers in error state cause errors.
+TEST_F(CopyCommandTest_ClearBuffer, BuffersInErrorState) {
+    wgpu::BufferDescriptor errorBufferDescriptor;
+    errorBufferDescriptor.size = 4;
+    errorBufferDescriptor.usage =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    ASSERT_DEVICE_ERROR(wgpu::Buffer errorBuffer = device.CreateBuffer(&errorBufferDescriptor));
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.ClearBuffer(errorBuffer, 0, 4);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
diff --git a/src/dawn/tests/unittests/validation/CopyTextureForBrowserTests.cpp b/src/dawn/tests/unittests/validation/CopyTextureForBrowserTests.cpp
new file mode 100644
index 0000000..489a802
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/CopyTextureForBrowserTests.cpp
@@ -0,0 +1,434 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class CopyTextureForBrowserTest : public ValidationTest {
+  protected:
+    wgpu::Texture Create2DTexture(uint32_t width,
+                                  uint32_t height,
+                                  uint32_t mipLevelCount,
+                                  uint32_t arrayLayerCount,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage,
+                                  uint32_t sampleCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        wgpu::Texture tex = device.CreateTexture(&descriptor);
+        return tex;
+    }
+
+    void TestCopyTextureForBrowser(utils::Expectation expectation,
+                                   wgpu::Texture srcTexture,
+                                   uint32_t srcLevel,
+                                   wgpu::Origin3D srcOrigin,
+                                   wgpu::Texture dstTexture,
+                                   uint32_t dstLevel,
+                                   wgpu::Origin3D dstOrigin,
+                                   wgpu::Extent3D extent3D,
+                                   wgpu::TextureAspect aspect = wgpu::TextureAspect::All,
+                                   wgpu::CopyTextureForBrowserOptions options = {}) {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcTexture, srcLevel, srcOrigin, aspect);
+        wgpu::ImageCopyTexture dstImageCopyTexture =
+            utils::CreateImageCopyTexture(dstTexture, dstLevel, dstOrigin, aspect);
+
+        if (expectation == utils::Expectation::Success) {
+            device.GetQueue().CopyTextureForBrowser(&srcImageCopyTexture, &dstImageCopyTexture,
+                                                    &extent3D, &options);
+        } else {
+            ASSERT_DEVICE_ERROR(device.GetQueue().CopyTextureForBrowser(
+                &srcImageCopyTexture, &dstImageCopyTexture, &extent3D, &options));
+        }
+    }
+};
+
+// Tests should be Success
+TEST_F(CopyTextureForBrowserTest, Success) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy a region along top left boundary
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1});
+
+        // Copy entire texture
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {16, 16, 1});
+
+        // Copy a region along bottom right boundary
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {8, 8, 0}, destination, 0,
+                                  {8, 8, 0}, {8, 8, 1});
+
+        // Copy region into mip
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 2,
+                                  {0, 0, 0}, {4, 4, 1});
+
+        // Copy mip into region
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 2, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1});
+
+        // Copy between slices
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 1}, {16, 16, 1});
+    }
+
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {0, 0, 1});
+
+        // An empty copy with depth = 0
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {0, 0, 0});
+
+        // An empty copy touching the side of the source texture
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {16, 16, 0}, {0, 0, 1});
+
+        // An empty copy touching the side of the destination texture
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {16, 16, 0}, {0, 0, 1});
+    }
+}
+
+// Test source or destination texture has wrong usages
+TEST_F(CopyTextureForBrowserTest, IncorrectUsage) {
+    wgpu::Texture validSource =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture validDestination =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+    wgpu::Texture noSampledUsageSource =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopySrc);
+    wgpu::Texture noRenderAttachmentUsageDestination =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+    wgpu::Texture noCopySrcUsageSource = Create2DTexture(
+        16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture noCopyDstUsageSource = Create2DTexture(
+        16, 16, 5, 2, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::RenderAttachment);
+
+    // Incorrect source usage causes failure : lack |Sampled| usage
+    TestCopyTextureForBrowser(utils::Expectation::Failure, noSampledUsageSource, 0, {0, 0, 0},
+                              validDestination, 0, {0, 0, 0}, {16, 16, 1});
+
+    // Incorrect destination usage causes failure: lack |RenderAttachement| usage.
+    TestCopyTextureForBrowser(utils::Expectation::Failure, validSource, 0, {0, 0, 0},
+                              noRenderAttachmentUsageDestination, 0, {0, 0, 0}, {16, 16, 1});
+
+    // Incorrect source usage causes failure : lack |CopySrc| usage
+    TestCopyTextureForBrowser(utils::Expectation::Failure, noCopySrcUsageSource, 0, {0, 0, 0},
+                              validDestination, 0, {0, 0, 0}, {16, 16, 1});
+
+    // Incorrect destination usage causes failure: lack |CopyDst| usage.
+    TestCopyTextureForBrowser(utils::Expectation::Failure, validSource, 0, {0, 0, 0},
+                              noCopyDstUsageSource, 0, {0, 0, 0}, {16, 16, 1});
+}
+
+// Test source or destination texture is destroyed.
+TEST_F(CopyTextureForBrowserTest, DestroyedTexture) {
+    wgpu::CopyTextureForBrowserOptions options = {};
+
+    // Valid src and dst textures.
+    {
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        // Check noop copy
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {0, 0, 0}, wgpu::TextureAspect::All, options);
+    }
+
+    // Destroyed src texture.
+    {
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+        source.Destroy();
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        // Check noop copy
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {0, 0, 0}, wgpu::TextureAspect::All, options);
+    }
+
+    // Destroyed dst texture.
+    {
+        wgpu::Texture source =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+        wgpu::Texture destination =
+            Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+        destination.Destroy();
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        // Check noop copy
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {0, 0, 0}, wgpu::TextureAspect::All, options);
+    }
+}
+
+// Test non-zero value origin in source and OOB copy rects.
+TEST_F(CopyTextureForBrowserTest, OutOfBounds) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+    // OOB on source
+    {
+        // x + width overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {1, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {16, 16, 1});
+
+        // y + height overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 1, 0}, destination, 0,
+                                  {0, 0, 0}, {16, 16, 1});
+
+        // non-zero mip overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 1, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {9, 9, 1});
+
+        // copy to multiple slices
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 2}, {16, 16, 2});
+
+        // copy origin z value is non-zero.
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 1}, destination, 0,
+                                  {0, 0, 2}, {16, 16, 1});
+    }
+
+    // OOB on destination
+    {
+        // x + width overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {1, 0, 0}, {16, 16, 1});
+
+        // y + height overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 1, 0}, {16, 16, 1});
+
+        // non-zero mip overflows
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 1,
+                                  {0, 0, 0}, {9, 9, 1});
+
+        // arrayLayer + depth OOB
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 4}, {16, 16, 1});
+
+        // empty copy on non-existent mip fails
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 6,
+                                  {0, 0, 0}, {0, 0, 1});
+
+        // empty copy on non-existent slice fails
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 4}, {0, 0, 1});
+    }
+}
+
+// Test destination texture has format that not supported by CopyTextureForBrowser().
+TEST_F(CopyTextureForBrowserTest, InvalidDstFormat) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 2, wgpu::TextureFormat::RG8Uint,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+    // Not supported dst texture format.
+    TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                              {0, 0, 0}, {0, 0, 1});
+}
+
+// Test source or destination texture are multisampled.
+TEST_F(CopyTextureForBrowserTest, InvalidSampleCount) {
+    wgpu::Texture sourceMultiSampled1x =
+        Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding, 1);
+    wgpu::Texture destinationMultiSampled1x =
+        Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment, 1);
+    wgpu::Texture sourceMultiSampled4x =
+        Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding, 4);
+    wgpu::Texture destinationMultiSampled4x =
+        Create2DTexture(16, 16, 1, 1, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment, 4);
+
+    // An empty copy with dst texture sample count > 1 failure.
+    TestCopyTextureForBrowser(utils::Expectation::Failure, sourceMultiSampled1x, 0, {0, 0, 0},
+                              destinationMultiSampled4x, 0, {0, 0, 0}, {0, 0, 1});
+
+    // A empty copy with source texture sample count > 1 failure
+    TestCopyTextureForBrowser(utils::Expectation::Failure, sourceMultiSampled4x, 0, {0, 0, 0},
+                              destinationMultiSampled1x, 0, {0, 0, 0}, {0, 0, 1});
+}
+
+// Test color space conversion related attributes in CopyTextureForBrowserOptions.
+TEST_F(CopyTextureForBrowserTest, ColorSpaceConversion_ColorSpace) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+    wgpu::CopyTextureForBrowserOptions options = {};
+    options.needsColorSpaceConversion = true;
+
+    // Valid cases
+    {
+        wgpu::CopyTextureForBrowserOptions validOptions = options;
+        std::array<float, 7> srcTransferFunctionParameters = {};
+        std::array<float, 7> dstTransferFunctionParameters = {};
+        std::array<float, 9> conversionMatrix = {};
+        validOptions.srcTransferFunctionParameters = srcTransferFunctionParameters.data();
+        validOptions.dstTransferFunctionParameters = dstTransferFunctionParameters.data();
+        validOptions.conversionMatrix = conversionMatrix.data();
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, validOptions);
+
+        // if no color space conversion, no need to validate related attributes
+        wgpu::CopyTextureForBrowserOptions noColorSpaceConversion = options;
+        noColorSpaceConversion.needsColorSpaceConversion = false;
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All,
+                                  noColorSpaceConversion);
+    }
+
+    // Invalid cases: srcTransferFunctionParameters, dstTransferFunctionParameters or
+    // conversionMatrix is nullptr or not set
+    {
+        // not set srcTransferFunctionParameters
+        wgpu::CopyTextureForBrowserOptions invalidOptions = options;
+        std::array<float, 7> dstTransferFunctionParameters = {};
+        std::array<float, 9> conversionMatrix = {};
+        invalidOptions.dstTransferFunctionParameters = dstTransferFunctionParameters.data();
+        invalidOptions.conversionMatrix = conversionMatrix.data();
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+
+        // set to nullptr
+        invalidOptions.srcTransferFunctionParameters = nullptr;
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+    }
+
+    {
+        // not set dstTransferFunctionParameters
+        wgpu::CopyTextureForBrowserOptions invalidOptions = options;
+        std::array<float, 7> srcTransferFunctionParameters = {};
+        std::array<float, 9> conversionMatrix = {};
+        invalidOptions.srcTransferFunctionParameters = srcTransferFunctionParameters.data();
+        invalidOptions.conversionMatrix = conversionMatrix.data();
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+
+        // set to nullptr
+        invalidOptions.dstTransferFunctionParameters = nullptr;
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+    }
+
+    {
+        // not set conversionMatrix
+        wgpu::CopyTextureForBrowserOptions invalidOptions = options;
+        std::array<float, 7> srcTransferFunctionParameters = {};
+        std::array<float, 7> dstTransferFunctionParameters = {};
+        invalidOptions.srcTransferFunctionParameters = srcTransferFunctionParameters.data();
+        invalidOptions.dstTransferFunctionParameters = dstTransferFunctionParameters.data();
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+
+        // set to nullptr
+        invalidOptions.conversionMatrix = nullptr;
+        TestCopyTextureForBrowser(utils::Expectation::Failure, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, invalidOptions);
+    }
+}
+
+// Test option.srcAlphaMode/dstAlphaMode
+TEST_F(CopyTextureForBrowserTest, ColorSpaceConversion_TextureAlphaState) {
+    wgpu::Texture source =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture destination =
+        Create2DTexture(16, 16, 5, 4, wgpu::TextureFormat::RGBA8Unorm,
+                        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::RenderAttachment);
+
+    wgpu::CopyTextureForBrowserOptions options = {};
+
+    // Valid src texture alpha state and valid dst texture alpha state
+    {
+        options.srcAlphaMode = wgpu::AlphaMode::Premultiplied;
+        options.dstAlphaMode = wgpu::AlphaMode::Premultiplied;
+
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        options.srcAlphaMode = wgpu::AlphaMode::Premultiplied;
+        options.dstAlphaMode = wgpu::AlphaMode::Unpremultiplied;
+
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        options.srcAlphaMode = wgpu::AlphaMode::Unpremultiplied;
+        options.dstAlphaMode = wgpu::AlphaMode::Premultiplied;
+
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+
+        options.srcAlphaMode = wgpu::AlphaMode::Unpremultiplied;
+        options.dstAlphaMode = wgpu::AlphaMode::Unpremultiplied;
+
+        TestCopyTextureForBrowser(utils::Expectation::Success, source, 0, {0, 0, 0}, destination, 0,
+                                  {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All, options);
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/DebugMarkerValidationTests.cpp b/src/dawn/tests/unittests/validation/DebugMarkerValidationTests.cpp
new file mode 100644
index 0000000..7fd283b
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/DebugMarkerValidationTests.cpp
@@ -0,0 +1,255 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class DebugMarkerValidationTest : public ValidationTest {};
+
+// Correct usage of debug markers should succeed in render pass.
+TEST_F(DebugMarkerValidationTest, RenderSuccess) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.PushDebugGroup("Event Start");
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    encoder.Finish();
+}
+
+// A PushDebugGroup call without a following PopDebugGroup produces an error in render pass.
+TEST_F(DebugMarkerValidationTest, RenderUnbalancedPush) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.PushDebugGroup("Event Start");
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// A PopDebugGroup call without a preceding PushDebugGroup produces an error in render pass.
+TEST_F(DebugMarkerValidationTest, RenderUnbalancedPop) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Correct usage of debug markers should succeed in render bundle.
+TEST_F(DebugMarkerValidationTest, RenderBundleSuccess) {
+    utils::ComboRenderBundleEncoderDescriptor desc;
+    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    desc.colorFormatsCount = 1;
+
+    wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&desc);
+    encoder.PushDebugGroup("Event Start");
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    encoder.PopDebugGroup();
+
+    encoder.Finish();
+}
+
+// A PushDebugGroup call without a following PopDebugGroup produces an error in render bundle.
+TEST_F(DebugMarkerValidationTest, RenderBundleUnbalancedPush) {
+    utils::ComboRenderBundleEncoderDescriptor desc;
+    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    desc.colorFormatsCount = 1;
+
+    wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&desc);
+    encoder.PushDebugGroup("Event Start");
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// A PopDebugGroup call without a preceding PushDebugGroup produces an error in render bundle.
+TEST_F(DebugMarkerValidationTest, RenderBundleUnbalancedPop) {
+    utils::ComboRenderBundleEncoderDescriptor desc;
+    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    desc.colorFormatsCount = 1;
+
+    wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&desc);
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    encoder.PopDebugGroup();
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Correct usage of debug markers should succeed in compute pass.
+TEST_F(DebugMarkerValidationTest, ComputeSuccess) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.PushDebugGroup("Event Start");
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    encoder.Finish();
+}
+
+// A PushDebugGroup call without a following PopDebugGroup produces an error in compute pass.
+TEST_F(DebugMarkerValidationTest, ComputeUnbalancedPush) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.PushDebugGroup("Event Start");
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// A PopDebugGroup call without a preceding PushDebugGroup produces an error in compute pass.
+TEST_F(DebugMarkerValidationTest, ComputeUnbalancedPop) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.PopDebugGroup();
+        pass.End();
+    }
+
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Correct usage of debug markers should succeed in command encoder.
+TEST_F(DebugMarkerValidationTest, CommandEncoderSuccess) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    encoder.PopDebugGroup();
+    encoder.Finish();
+}
+
+// A PushDebugGroup call without a following PopDebugGroup produces an error in command encoder.
+TEST_F(DebugMarkerValidationTest, CommandEncoderUnbalancedPush) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// A PopDebugGroup call without a preceding PushDebugGroup produces an error in command encoder.
+TEST_F(DebugMarkerValidationTest, CommandEncoderUnbalancedPop) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    encoder.InsertDebugMarker("Marker");
+    encoder.PopDebugGroup();
+    encoder.PopDebugGroup();
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// It is possible to nested pushes in a compute pass in a command encoder.
+TEST_F(DebugMarkerValidationTest, NestedComputeInCommandEncoder) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+    encoder.PopDebugGroup();
+    encoder.Finish();
+}
+
+// Command encoder and compute pass pushes must be balanced independently.
+TEST_F(DebugMarkerValidationTest, NestedComputeInCommandEncoderIndependent) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// It is possible to nested pushes in a render pass in a command encoder.
+TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoder) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.PushDebugGroup("Event Start");
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+    encoder.PopDebugGroup();
+    encoder.Finish();
+}
+
+// Command encoder and render pass pushes must be balanced independently.
+TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoderIndependent) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.PushDebugGroup("Event Start");
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.InsertDebugMarker("Marker");
+        pass.PopDebugGroup();
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
diff --git a/src/dawn/tests/unittests/validation/DeviceValidationTests.cpp b/src/dawn/tests/unittests/validation/DeviceValidationTests.cpp
new file mode 100644
index 0000000..cd297d3
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/DeviceValidationTests.cpp
@@ -0,0 +1,230 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/dawn_platform.h"
+
+using ::testing::HasSubstr;
+
+class RequestDeviceValidationTest : public ValidationTest {
+  protected:
+    void SetUp() {
+        DAWN_SKIP_TEST_IF(UsesWire());
+        ValidationTest::SetUp();
+    }
+
+    static void ExpectRequestDeviceSuccess(WGPURequestDeviceStatus status,
+                                           WGPUDevice cDevice,
+                                           const char* message,
+                                           void* userdata) {
+        wgpu::Device device = wgpu::Device::Acquire(cDevice);
+        EXPECT_EQ(status, WGPURequestDeviceStatus_Success);
+        EXPECT_NE(device, nullptr);
+        EXPECT_STREQ(message, nullptr);
+        if (userdata != nullptr) {
+            CallCheckDevice(static_cast<std::function<void(wgpu::Device)>*>(userdata),
+                            std::move(device));
+        }
+    }
+
+    static void ExpectRequestDeviceError(WGPURequestDeviceStatus status,
+                                         WGPUDevice cDevice,
+                                         const char* message,
+                                         void* userdata) {
+        wgpu::Device device = wgpu::Device::Acquire(cDevice);
+        EXPECT_EQ(status, WGPURequestDeviceStatus_Error);
+        EXPECT_EQ(device, nullptr);
+        EXPECT_STRNE(message, nullptr);
+    }
+
+    template <typename F>
+    static void* CheckDevice(F&& f) {
+        return new std::function<void(wgpu::Device)>(f);
+    }
+
+    static void CallCheckDevice(std::function<void(wgpu::Device)>* f, wgpu::Device d) {
+        (*f)(std::move(d));
+        delete f;
+    }
+};
+
+// Test that requesting a device without specifying limits is valid.
+TEST_F(RequestDeviceValidationTest, NoRequiredLimits) {
+    wgpu::DeviceDescriptor descriptor;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceSuccess,
+                          CheckDevice([](wgpu::Device device) {
+                              // Check one of the default limits.
+                              wgpu::SupportedLimits limits;
+                              device.GetLimits(&limits);
+                              EXPECT_EQ(limits.limits.maxBindGroups, 4u);
+                          }));
+}
+
+// Test that requesting a device with the default limits is valid.
+TEST_F(RequestDeviceValidationTest, DefaultLimits) {
+    wgpu::RequiredLimits limits = {};
+    wgpu::DeviceDescriptor descriptor;
+    descriptor.requiredLimits = &limits;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceSuccess,
+                          CheckDevice([](wgpu::Device device) {
+                              // Check one of the default limits.
+                              wgpu::SupportedLimits limits;
+                              device.GetLimits(&limits);
+                              EXPECT_EQ(limits.limits.maxTextureArrayLayers, 256u);
+                          }));
+}
+
+// Test that requesting a device where a required limit is above the maximum value.
+TEST_F(RequestDeviceValidationTest, HigherIsBetter) {
+    wgpu::RequiredLimits limits = {};
+    wgpu::DeviceDescriptor descriptor;
+    descriptor.requiredLimits = &limits;
+
+    wgpu::SupportedLimits supportedLimits;
+    EXPECT_TRUE(adapter.GetLimits(reinterpret_cast<WGPUSupportedLimits*>(&supportedLimits)));
+
+    // If we can support better than the default, test below the max.
+    if (supportedLimits.limits.maxBindGroups > 4u) {
+        limits.limits.maxBindGroups = supportedLimits.limits.maxBindGroups - 1;
+        adapter.RequestDevice(
+            &descriptor, ExpectRequestDeviceSuccess, CheckDevice([&](wgpu::Device device) {
+                wgpu::SupportedLimits limits;
+                device.GetLimits(&limits);
+
+                // Check we got exactly the request.
+                EXPECT_EQ(limits.limits.maxBindGroups, supportedLimits.limits.maxBindGroups - 1);
+                // Check another default limit.
+                EXPECT_EQ(limits.limits.maxTextureArrayLayers, 256u);
+            }));
+    }
+
+    // Test the max.
+    limits.limits.maxBindGroups = supportedLimits.limits.maxBindGroups;
+    adapter.RequestDevice(
+        &descriptor, ExpectRequestDeviceSuccess, CheckDevice([&](wgpu::Device device) {
+            wgpu::SupportedLimits limits;
+            device.GetLimits(&limits);
+
+            // Check we got exactly the request.
+            EXPECT_EQ(limits.limits.maxBindGroups, supportedLimits.limits.maxBindGroups);
+            // Check another default limit.
+            EXPECT_EQ(limits.limits.maxTextureArrayLayers, 256u);
+        }));
+
+    // Test above the max.
+    limits.limits.maxBindGroups = supportedLimits.limits.maxBindGroups + 1;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceError, nullptr);
+
+    // Test worse than the default
+    limits.limits.maxBindGroups = 3u;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceSuccess,
+                          CheckDevice([&](wgpu::Device device) {
+                              wgpu::SupportedLimits limits;
+                              device.GetLimits(&limits);
+
+                              // Check we got the default.
+                              EXPECT_EQ(limits.limits.maxBindGroups, 4u);
+                          }));
+}
+
+// Test that requesting a device where a required limit is below the minimum value.
+TEST_F(RequestDeviceValidationTest, LowerIsBetter) {
+    wgpu::RequiredLimits limits = {};
+    wgpu::DeviceDescriptor descriptor;
+    descriptor.requiredLimits = &limits;
+
+    wgpu::SupportedLimits supportedLimits;
+    EXPECT_TRUE(adapter.GetLimits(reinterpret_cast<WGPUSupportedLimits*>(&supportedLimits)));
+
+    // Test below the min.
+    limits.limits.minUniformBufferOffsetAlignment =
+        supportedLimits.limits.minUniformBufferOffsetAlignment / 2;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceError, nullptr);
+
+    // Test the min.
+    limits.limits.minUniformBufferOffsetAlignment =
+        supportedLimits.limits.minUniformBufferOffsetAlignment;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceSuccess,
+                          CheckDevice([&](wgpu::Device device) {
+                              wgpu::SupportedLimits limits;
+                              device.GetLimits(&limits);
+
+                              // Check we got exactly the request.
+                              EXPECT_EQ(limits.limits.minUniformBufferOffsetAlignment,
+                                        supportedLimits.limits.minUniformBufferOffsetAlignment);
+                              // Check another default limit.
+                              EXPECT_EQ(limits.limits.maxTextureArrayLayers, 256u);
+                          }));
+
+    // IF we can support better than the default, test above the min.
+    if (supportedLimits.limits.minUniformBufferOffsetAlignment > 256u) {
+        limits.limits.minUniformBufferOffsetAlignment =
+            supportedLimits.limits.minUniformBufferOffsetAlignment * 2;
+        adapter.RequestDevice(
+            &descriptor, ExpectRequestDeviceSuccess, CheckDevice([&](wgpu::Device device) {
+                wgpu::SupportedLimits limits;
+                device.GetLimits(&limits);
+
+                // Check we got exactly the request.
+                EXPECT_EQ(limits.limits.minUniformBufferOffsetAlignment,
+                          supportedLimits.limits.minUniformBufferOffsetAlignment * 2);
+                // Check another default limit.
+                EXPECT_EQ(limits.limits.maxTextureArrayLayers, 256u);
+            }));
+    }
+
+    // Test worse than the default
+    limits.limits.minUniformBufferOffsetAlignment = 2u * 256u;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceSuccess,
+                          CheckDevice([&](wgpu::Device device) {
+                              wgpu::SupportedLimits limits;
+                              device.GetLimits(&limits);
+
+                              // Check we got the default.
+                              EXPECT_EQ(limits.limits.minUniformBufferOffsetAlignment, 256u);
+                          }));
+}
+
+// Test that it is an error to request limits with an invalid chained struct
+TEST_F(RequestDeviceValidationTest, InvalidChainedStruct) {
+    wgpu::PrimitiveDepthClampingState depthClamp = {};
+    wgpu::RequiredLimits limits = {};
+    limits.nextInChain = &depthClamp;
+
+    wgpu::DeviceDescriptor descriptor;
+    descriptor.requiredLimits = &limits;
+    adapter.RequestDevice(&descriptor, ExpectRequestDeviceError, nullptr);
+}
+
+class DeviceTickValidationTest : public ValidationTest {};
+
+// Device destroy before API-level Tick should always result in no-op and false.
+TEST_F(DeviceTickValidationTest, DestroyDeviceBeforeAPITick) {
+    ExpectDeviceDestruction();
+    device.Destroy();
+    device.Tick();
+}
+
+// Device destroy before an internal Tick should return an error.
+TEST_F(DeviceTickValidationTest, DestroyDeviceBeforeInternalTick) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    ExpectDeviceDestruction();
+    device.Destroy();
+    dawn::native::DeviceBase* nativeDevice = dawn::native::FromAPI(device.Get());
+    ASSERT_DEVICE_ERROR(nativeDevice->ConsumedError(nativeDevice->Tick()),
+                        HasSubstr("[Device] is lost."));
+}
diff --git a/src/dawn/tests/unittests/validation/DrawIndirectValidationTests.cpp b/src/dawn/tests/unittests/validation/DrawIndirectValidationTests.cpp
new file mode 100644
index 0000000..d004e93
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/DrawIndirectValidationTests.cpp
@@ -0,0 +1,162 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <initializer_list>
+#include <limits>
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class DrawIndirectValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32>{
+                return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+            })");
+
+        // Set up render pipeline
+        wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, nullptr);
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.layout = pipelineLayout;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        pipeline = device.CreateRenderPipeline(&descriptor);
+    }
+
+    void ValidateExpectation(wgpu::CommandEncoder encoder, utils::Expectation expectation) {
+        if (expectation == utils::Expectation::Success) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    void TestIndirectOffsetDrawIndexed(utils::Expectation expectation,
+                                       std::initializer_list<uint32_t> bufferList,
+                                       uint64_t indirectOffset) {
+        TestIndirectOffset(expectation, bufferList, indirectOffset, true);
+    }
+
+    void TestIndirectOffsetDraw(utils::Expectation expectation,
+                                std::initializer_list<uint32_t> bufferList,
+                                uint64_t indirectOffset) {
+        TestIndirectOffset(expectation, bufferList, indirectOffset, false);
+    }
+
+    void TestIndirectOffset(utils::Expectation expectation,
+                            std::initializer_list<uint32_t> bufferList,
+                            uint64_t indirectOffset,
+                            bool indexed,
+                            wgpu::BufferUsage usage = wgpu::BufferUsage::Indirect) {
+        wgpu::Buffer indirectBuffer =
+            utils::CreateBufferFromData<uint32_t>(device, usage, bufferList);
+
+        DummyRenderPass renderPass(device);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline);
+        if (indexed) {
+            uint32_t zeros[100] = {};
+            wgpu::Buffer indexBuffer =
+                utils::CreateBufferFromData(device, zeros, sizeof(zeros), wgpu::BufferUsage::Index);
+            pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+            pass.DrawIndexedIndirect(indirectBuffer, indirectOffset);
+        } else {
+            pass.DrawIndirect(indirectBuffer, indirectOffset);
+        }
+        pass.End();
+
+        ValidateExpectation(encoder, expectation);
+    }
+
+    wgpu::RenderPipeline pipeline;
+};
+
+// Verify out of bounds indirect draw calls are caught early
+TEST_F(DrawIndirectValidationTest, DrawIndirectOffsetBounds) {
+    // In bounds
+    TestIndirectOffsetDraw(utils::Expectation::Success, {1, 2, 3, 4}, 0);
+    // In bounds, bigger buffer
+    TestIndirectOffsetDraw(utils::Expectation::Success, {1, 2, 3, 4, 5, 6, 7}, 0);
+    // In bounds, bigger buffer, positive offset
+    TestIndirectOffsetDraw(utils::Expectation::Success, {1, 2, 3, 4, 5, 6, 7, 8},
+                           4 * sizeof(uint32_t));
+
+    // In bounds, non-multiple of 4 offsets
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3, 4, 5}, 1);
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3, 4, 5}, 2);
+
+    // Out of bounds, buffer too small
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3}, 0);
+    // Out of bounds, index too big
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3, 4}, 1 * sizeof(uint32_t));
+    // Out of bounds, index past buffer
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3, 4}, 5 * sizeof(uint32_t));
+    // Out of bounds, index + size of command overflows
+    uint64_t offset = std::numeric_limits<uint64_t>::max();
+    TestIndirectOffsetDraw(utils::Expectation::Failure, {1, 2, 3, 4, 5, 6, 7}, offset);
+}
+
+// Verify out of bounds indirect draw indexed calls are caught early
+TEST_F(DrawIndirectValidationTest, DrawIndexedIndirectOffsetBounds) {
+    // In bounds
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Success, {1, 2, 3, 4, 5}, 0);
+    // In bounds, bigger buffer
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Success, {1, 2, 3, 4, 5, 6, 7, 8, 9}, 0);
+    // In bounds, bigger buffer, positive offset
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Success, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+                                  5 * sizeof(uint32_t));
+
+    // In bounds, non-multiple of 4 offsets
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4, 5, 6}, 1);
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4, 5, 6}, 2);
+
+    // Out of bounds, buffer too small
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4}, 0);
+    // Out of bounds, index too big
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4, 5},
+                                  1 * sizeof(uint32_t));
+    // Out of bounds, index past buffer
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4, 5},
+                                  5 * sizeof(uint32_t));
+    // Out of bounds, index + size of command overflows
+    uint64_t offset = std::numeric_limits<uint64_t>::max();
+    TestIndirectOffsetDrawIndexed(utils::Expectation::Failure, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+                                  offset);
+}
+
+// Check that the buffer must have the indirect usage
+TEST_F(DrawIndirectValidationTest, IndirectUsage) {
+    // Control cases: using a buffer with the indirect usage is valid.
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3, 4}, 0, false,
+                       wgpu::BufferUsage::Indirect);
+    TestIndirectOffset(utils::Expectation::Success, {1, 2, 3, 4, 5}, 0, true,
+                       wgpu::BufferUsage::Indirect);
+
+    // Error cases: using a buffer with the vertex usage is an error.
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3, 4}, 0, false,
+                       wgpu::BufferUsage::Vertex);
+    TestIndirectOffset(utils::Expectation::Failure, {1, 2, 3, 4, 5}, 0, true,
+                       wgpu::BufferUsage::Vertex);
+}
diff --git a/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
new file mode 100644
index 0000000..c0eacff
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
@@ -0,0 +1,747 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    constexpr uint32_t kRTSize = 4;
+    constexpr uint32_t kFloat32x2Stride = 2 * sizeof(float);
+    constexpr uint32_t kFloat32x4Stride = 4 * sizeof(float);
+
+    class DrawVertexAndIndexBufferOOBValidationTests : public ValidationTest {
+      public:
+        // Parameters for testing index buffer
+        struct IndexBufferParams {
+            wgpu::IndexFormat indexFormat;
+            uint64_t indexBufferSize;              // Size for creating index buffer
+            uint64_t indexBufferOffsetForEncoder;  // Offset for SetIndexBuffer in encoder
+            uint64_t indexBufferSizeForEncoder;    // Size for SetIndexBuffer in encoder
+            uint32_t maxValidIndexNumber;  // max number of {indexCount + firstIndex} for this set
+                                           // of parameters
+        };
+
+        // Parameters for testing vertex-step-mode and instance-step-mode vertex buffer
+        struct VertexBufferParams {
+            uint32_t bufferStride;
+            uint64_t bufferSize;              // Size for creating vertex buffer
+            uint64_t bufferOffsetForEncoder;  // Offset for SetVertexBuffer in encoder
+            uint64_t bufferSizeForEncoder;    // Size for SetVertexBuffer in encoder
+            uint32_t maxValidAccessNumber;    // max number of valid access time for this set of
+                                              // parameters, i.e. {vertexCount + firstVertex} for
+            // vertex-step-mode, and {instanceCount + firstInstance}
+            // for instance-step-mode
+        };
+
+        // Parameters for setIndexBuffer
+        struct IndexBufferDesc {
+            const wgpu::Buffer buffer;
+            wgpu::IndexFormat indexFormat;
+            uint64_t offset = 0;
+            uint64_t size = wgpu::kWholeSize;
+        };
+
+        // Parameters for setVertexBuffer
+        struct VertexBufferSpec {
+            uint32_t slot;
+            const wgpu::Buffer buffer;
+            uint64_t offset = 0;
+            uint64_t size = wgpu::kWholeSize;
+        };
+        using VertexBufferList = std::vector<VertexBufferSpec>;
+
+        // Buffer layout parameters for creating pipeline
+        struct PipelineVertexBufferAttributeDesc {
+            uint32_t shaderLocation;
+            wgpu::VertexFormat format;
+            uint64_t offset = 0;
+        };
+        struct PipelineVertexBufferDesc {
+            uint64_t arrayStride;
+            wgpu::VertexStepMode stepMode;
+            std::vector<PipelineVertexBufferAttributeDesc> attributes = {};
+        };
+
+        void SetUp() override {
+            ValidationTest::SetUp();
+
+            renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+            fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+        }
+
+        const wgpu::RenderPassDescriptor* GetBasicRenderPassDescriptor() const {
+            return &renderPass.renderPassInfo;
+        }
+
+        wgpu::Buffer CreateBuffer(uint64_t size,
+                                  wgpu::BufferUsage usage = wgpu::BufferUsage::Vertex) {
+            wgpu::BufferDescriptor descriptor;
+            descriptor.size = size;
+            descriptor.usage = usage;
+
+            return device.CreateBuffer(&descriptor);
+        }
+
+        wgpu::ShaderModule CreateVertexShaderModuleWithBuffer(
+            std::vector<PipelineVertexBufferDesc> bufferDescList) {
+            uint32_t attributeCount = 0;
+            std::stringstream inputStringStream;
+
+            for (auto buffer : bufferDescList) {
+                for (auto attr : buffer.attributes) {
+                    // @location({shaderLocation}) var_{id} : {typeString},
+                    inputStringStream << "@location(" << attr.shaderLocation << ") var_"
+                                      << attributeCount << " : vec4<f32>,";
+                    attributeCount++;
+                }
+            }
+
+            std::stringstream shaderStringStream;
+
+            shaderStringStream << R"(
+            @stage(vertex)
+            fn main()" << inputStringStream.str()
+                               << R"() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })";
+
+            return utils::CreateShaderModule(device, shaderStringStream.str().c_str());
+        }
+
+        // Create a render pipeline with given buffer layout description, using a vertex shader
+        // module automatically generated from the buffer description.
+        wgpu::RenderPipeline CreateRenderPipelineWithBufferDesc(
+            std::vector<PipelineVertexBufferDesc> bufferDescList) {
+            utils::ComboRenderPipelineDescriptor descriptor;
+
+            descriptor.vertex.module = CreateVertexShaderModuleWithBuffer(bufferDescList);
+            descriptor.cFragment.module = fsModule;
+            descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+
+            descriptor.vertex.bufferCount = bufferDescList.size();
+
+            size_t attributeCount = 0;
+
+            for (size_t bufferCount = 0; bufferCount < bufferDescList.size(); bufferCount++) {
+                auto bufferDesc = bufferDescList[bufferCount];
+                descriptor.cBuffers[bufferCount].arrayStride = bufferDesc.arrayStride;
+                descriptor.cBuffers[bufferCount].stepMode = bufferDesc.stepMode;
+                if (bufferDesc.attributes.size() > 0) {
+                    descriptor.cBuffers[bufferCount].attributeCount = bufferDesc.attributes.size();
+                    descriptor.cBuffers[bufferCount].attributes =
+                        &descriptor.cAttributes[attributeCount];
+                    for (auto attribute : bufferDesc.attributes) {
+                        descriptor.cAttributes[attributeCount].shaderLocation =
+                            attribute.shaderLocation;
+                        descriptor.cAttributes[attributeCount].format = attribute.format;
+                        descriptor.cAttributes[attributeCount].offset = attribute.offset;
+                        attributeCount++;
+                    }
+                } else {
+                    descriptor.cBuffers[bufferCount].attributeCount = 0;
+                    descriptor.cBuffers[bufferCount].attributes = nullptr;
+                }
+            }
+
+            descriptor.cTargets[0].format = renderPass.colorFormat;
+
+            return device.CreateRenderPipeline(&descriptor);
+        }
+
+        // Create a render pipeline using only one vertex-step-mode Float32x4 buffer
+        wgpu::RenderPipeline CreateBasicRenderPipeline(uint32_t bufferStride = kFloat32x4Stride) {
+            DAWN_ASSERT(bufferStride >= kFloat32x4Stride);
+
+            std::vector<PipelineVertexBufferDesc> bufferDescList = {
+                {bufferStride, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+            };
+
+            return CreateRenderPipelineWithBufferDesc(bufferDescList);
+        }
+
+        // Create a render pipeline using one vertex-step-mode Float32x4 buffer and one
+        // instance-step-mode Float32x2 buffer
+        wgpu::RenderPipeline CreateBasicRenderPipelineWithInstance(
+            uint32_t bufferStride1 = kFloat32x4Stride,
+            uint32_t bufferStride2 = kFloat32x2Stride) {
+            DAWN_ASSERT(bufferStride1 >= kFloat32x4Stride);
+            DAWN_ASSERT(bufferStride2 >= kFloat32x2Stride);
+
+            std::vector<PipelineVertexBufferDesc> bufferDescList = {
+                {bufferStride1, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+                {bufferStride2,
+                 wgpu::VertexStepMode::Instance,
+                 {{3, wgpu::VertexFormat::Float32x2}}},
+            };
+
+            return CreateRenderPipelineWithBufferDesc(bufferDescList);
+        }
+
+        // Create a render pipeline using one vertex-step-mode and one instance-step-mode buffer,
+        // both with a zero array stride. The minimal size of vertex step mode buffer should be 28,
+        // and the minimal size of instance step mode buffer should be 20.
+        wgpu::RenderPipeline CreateBasicRenderPipelineWithZeroArrayStride() {
+            std::vector<PipelineVertexBufferDesc> bufferDescList = {
+                {0,
+                 wgpu::VertexStepMode::Vertex,
+                 {{0, wgpu::VertexFormat::Float32x4, 0}, {1, wgpu::VertexFormat::Float32x2, 20}}},
+                {0,
+                 wgpu::VertexStepMode::Instance,
+                 // Two attributes are overlapped within this instance step mode vertex buffer
+                 {{3, wgpu::VertexFormat::Float32x4, 4}, {7, wgpu::VertexFormat::Float32x3, 0}}},
+            };
+
+            return CreateRenderPipelineWithBufferDesc(bufferDescList);
+        }
+
+        void TestRenderPassDraw(const wgpu::RenderPipeline& pipeline,
+                                VertexBufferList vertexBufferList,
+                                uint32_t vertexCount,
+                                uint32_t instanceCount,
+                                uint32_t firstVertex,
+                                uint32_t firstInstance,
+                                bool isSuccess) {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
+
+            for (auto vertexBufferParam : vertexBufferList) {
+                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                                  vertexBufferParam.offset, vertexBufferParam.size);
+            }
+            renderPassEncoder.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+            renderPassEncoder.End();
+
+            if (isSuccess) {
+                encoder.Finish();
+            } else {
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+
+        void TestRenderPassDrawIndexed(const wgpu::RenderPipeline& pipeline,
+                                       IndexBufferDesc indexBuffer,
+                                       VertexBufferList vertexBufferList,
+                                       uint32_t indexCount,
+                                       uint32_t instanceCount,
+                                       uint32_t firstIndex,
+                                       int32_t baseVertex,
+                                       uint32_t firstInstance,
+                                       bool isSuccess) {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
+
+            renderPassEncoder.SetIndexBuffer(indexBuffer.buffer, indexBuffer.indexFormat,
+                                             indexBuffer.offset, indexBuffer.size);
+
+            for (auto vertexBufferParam : vertexBufferList) {
+                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                                  vertexBufferParam.offset, vertexBufferParam.size);
+            }
+            renderPassEncoder.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex,
+                                          firstInstance);
+            renderPassEncoder.End();
+
+            if (isSuccess) {
+                encoder.Finish();
+            } else {
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+
+        // Parameters list for index buffer. Should cover all IndexFormat, and the zero/non-zero
+        // offset and size case in SetIndexBuffer
+        const std::vector<IndexBufferParams> kIndexParamsList = {
+            {wgpu::IndexFormat::Uint32, 12 * sizeof(uint32_t), 0, wgpu::kWholeSize, 12},
+            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), sizeof(uint32_t), wgpu::kWholeSize,
+             12},
+            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), 0, 12 * sizeof(uint32_t), 12},
+            {wgpu::IndexFormat::Uint32, 14 * sizeof(uint32_t), sizeof(uint32_t),
+             12 * sizeof(uint32_t), 12},
+
+            {wgpu::IndexFormat::Uint16, 12 * sizeof(uint16_t), 0, wgpu::kWholeSize, 12},
+            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), sizeof(uint16_t), wgpu::kWholeSize,
+             12},
+            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), 0, 12 * sizeof(uint16_t), 12},
+            {wgpu::IndexFormat::Uint16, 14 * sizeof(uint16_t), sizeof(uint16_t),
+             12 * sizeof(uint16_t), 12},
+        };
+        // Parameters list for vertex-step-mode buffer. These parameters should cover different
+        // stride, buffer size, SetVertexBuffer size and offset.
+        const std::vector<VertexBufferParams> kVertexParamsList = {
+            // For stride = kFloat32x4Stride
+            {kFloat32x4Stride, 3 * kFloat32x4Stride, 0, wgpu::kWholeSize, 3},
+            // Non-zero offset
+            {kFloat32x4Stride, 4 * kFloat32x4Stride, kFloat32x4Stride, wgpu::kWholeSize, 3},
+            // Non-default size
+            {kFloat32x4Stride, 4 * kFloat32x4Stride, 0, 3 * kFloat32x4Stride, 3},
+            // Non-zero offset and size
+            {kFloat32x4Stride, 5 * kFloat32x4Stride, kFloat32x4Stride, 3 * kFloat32x4Stride, 3},
+            // For stride = 2 * kFloat32x4Stride
+            {(2 * kFloat32x4Stride), 3 * (2 * kFloat32x4Stride), 0, wgpu::kWholeSize, 3},
+            // Non-zero offset
+            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+             wgpu::kWholeSize, 3},
+            // Non-default size
+            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), 0, 3 * (2 * kFloat32x4Stride), 3},
+            // Non-zero offset and size
+            {(2 * kFloat32x4Stride), 5 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+             3 * (2 * kFloat32x4Stride), 3},
+        };
+        // Parameters list for instance-step-mode buffer.
+        const std::vector<VertexBufferParams> kInstanceParamsList = {
+            // For stride = kFloat32x2Stride
+            {kFloat32x2Stride, 5 * kFloat32x2Stride, 0, wgpu::kWholeSize, 5},
+            // Non-zero offset
+            {kFloat32x2Stride, 6 * kFloat32x2Stride, kFloat32x2Stride, wgpu::kWholeSize, 5},
+            // Non-default size
+            {kFloat32x2Stride, 6 * kFloat32x2Stride, 0, 5 * kFloat32x2Stride, 5},
+            // Non-zero offset and size
+            {kFloat32x2Stride, 7 * kFloat32x2Stride, kFloat32x2Stride, 5 * kFloat32x2Stride, 5},
+            // For stride = 3 * kFloat32x2Stride
+            {(3 * kFloat32x2Stride), 5 * (3 * kFloat32x2Stride), 0, wgpu::kWholeSize, 5},
+            // Non-zero offset
+            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+             wgpu::kWholeSize, 5},
+            // Non-default size
+            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), 0, 5 * (3 * kFloat32x2Stride), 5},
+            // Non-zero offset and size
+            {(3 * kFloat32x2Stride), 7 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+             5 * (3 * kFloat32x2Stride), 5},
+        };
+
+      private:
+        wgpu::ShaderModule fsModule;
+        utils::BasicRenderPass renderPass;
+    };
+
+    // Control case for Draw
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawBasic) {
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+
+        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+
+        {
+            // Implicit size
+            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, true);
+        }
+
+        {
+            // Explicit zero size
+            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, 0}};
+            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, false);
+        }
+    }
+
+    // Verify vertex buffer OOB for non-instanced Draw are caught in command encoder
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithoutInstance) {
+        for (VertexBufferParams params : kVertexParamsList) {
+            // Create a render pipeline without instance step mode buffer
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline(params.bufferStride);
+
+            // Build vertex buffer for 3 vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(params.bufferSize);
+            VertexBufferList vertexBufferList = {
+                {0, vertexBuffer, params.bufferOffsetForEncoder, params.bufferSizeForEncoder}};
+
+            uint32_t n = params.maxValidAccessNumber;
+            // It is ok to draw n vertices with vertex buffer
+            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 0, 0, true);
+            // It is ok to draw n-1 vertices with offset 1
+            TestRenderPassDraw(pipeline, vertexBufferList, n - 1, 1, 1, 0, true);
+            // Drawing more vertices will cause OOB, even if not enough for another primitive
+            TestRenderPassDraw(pipeline, vertexBufferList, n + 1, 1, 0, 0, false);
+            // Drawing n vertices will non-zero offset will cause OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 1, 0, false);
+            // It is ok to draw any number of instances, as we have no instance-mode buffer
+            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 5, true);
+        }
+    }
+
+    // Verify vertex buffer OOB for instanced Draw are caught in command encoder
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithInstance) {
+        for (VertexBufferParams vertexParams : kVertexParamsList) {
+            for (VertexBufferParams instanceParams : kInstanceParamsList) {
+                // Create pipeline with given buffer stride
+                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                    vertexParams.bufferStride, instanceParams.bufferStride);
+
+                // Build vertex buffer
+                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+                VertexBufferList vertexBufferList = {
+                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                     vertexParams.bufferSizeForEncoder},
+                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                     instanceParams.bufferSizeForEncoder},
+                };
+
+                uint32_t vert = vertexParams.maxValidAccessNumber;
+                uint32_t inst = instanceParams.maxValidAccessNumber;
+                // It is ok to draw vert vertices
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 0, 0, true);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert - 1, 1, 1, 0, true);
+                // It is ok to draw vert vertices and inst instences
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 0, true);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst - 1, 0, 1, true);
+                // more vertices causing OOB
+                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, 1, 0, 0, false);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 1, 0, false);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, inst, 0, 0, false);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 0, false);
+                // more instances causing OOB
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 1, false);
+                // Both OOB
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 1, false);
+            }
+        }
+    }
+
+    // Control case for DrawIndexed
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedBasic) {
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+
+        // Build index buffer for 12 indexes
+        wgpu::Buffer indexBuffer = CreateBuffer(12 * sizeof(uint32_t), wgpu::BufferUsage::Index);
+
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+
+        IndexBufferDesc indexBufferDesc = {indexBuffer, wgpu::IndexFormat::Uint32};
+
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 1, 0, 0, 0,
+                                  true);
+    }
+
+    // Verify index buffer OOB for DrawIndexed are caught in command encoder
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedIndexBufferOOB) {
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+
+        for (IndexBufferParams params : kIndexParamsList) {
+            // Build index buffer use given params
+            wgpu::Buffer indexBuffer =
+                CreateBuffer(params.indexBufferSize, wgpu::BufferUsage::Index);
+            // Build vertex buffer for 3 vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+            // Build vertex buffer for 5 instances
+            wgpu::Buffer instanceBuffer = CreateBuffer(5 * kFloat32x2Stride);
+
+            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize},
+                                                 {1, instanceBuffer, 0, wgpu::kWholeSize}};
+
+            IndexBufferDesc indexBufferDesc = {indexBuffer, params.indexFormat,
+                                               params.indexBufferOffsetForEncoder,
+                                               params.indexBufferSizeForEncoder};
+
+            uint32_t n = params.maxValidIndexNumber;
+
+            // Control case
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 0, 0,
+                                      true);
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n - 1, 5, 1, 0,
+                                      0, true);
+            // Index buffer OOB, indexCount too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 0,
+                                      0, false);
+            // Index buffer OOB, indexCount + firstIndex too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 1, 0, 0,
+                                      false);
+
+            if (!HasToggleEnabled("disable_base_vertex")) {
+                // baseVertex is not considered in CPU validation and has no effect on validation
+                // Although baseVertex is too large, it will still pass
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 100,
+                                          0, true);
+                // Index buffer OOB, indexCount too large
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0,
+                                          100, 0, false);
+            }
+        }
+    }
+
+    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedVertexBufferOOB) {
+        for (VertexBufferParams vertexParams : kVertexParamsList) {
+            for (VertexBufferParams instanceParams : kInstanceParamsList) {
+                // Create pipeline with given buffer stride
+                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                    vertexParams.bufferStride, instanceParams.bufferStride);
+
+                auto indexFormat = wgpu::IndexFormat::Uint32;
+                auto indexStride = sizeof(uint32_t);
+
+                // Build index buffer for 12 indexes
+                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+                // Build vertex buffer for vertices
+                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+                // Build vertex buffer for instances
+                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+                VertexBufferList vertexBufferList = {
+                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                     vertexParams.bufferSizeForEncoder},
+                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                     instanceParams.bufferSizeForEncoder}};
+
+                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+                uint32_t inst = instanceParams.maxValidAccessNumber;
+                // Control case
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0,
+                                          0, 0, true);
+                // Vertex buffer (stepMode = instance) OOB, instanceCount too large
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst + 1,
+                                          0, 0, 0, false);
+
+                if (!HasToggleEnabled("disable_base_instance")) {
+                    // firstInstance is considered in CPU validation
+                    // Vertex buffer (stepMode = instance) in bound
+                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12,
+                                              inst - 1, 0, 0, 1, true);
+                    // Vertex buffer (stepMode = instance) OOB, instanceCount + firstInstance too
+                    // large
+                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst,
+                                              0, 0, 1, false);
+                }
+            }
+        }
+    }
+
+    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, ZeroArrayStrideVertexBufferOOB) {
+        // In this test, we use VertexBufferParams.maxValidAccessNumber > 0 to indicate that such
+        // buffer parameter meet the requirement of pipeline, and maxValidAccessNumber == 0 to
+        // indicate that such buffer parameter will cause OOB.
+        const std::vector<VertexBufferParams> kVertexParamsListForZeroStride = {
+            // Control case
+            {0, 28, 0, wgpu::kWholeSize, 1},
+            // Non-zero offset
+            {0, 28, 4, wgpu::kWholeSize, 0},
+            {0, 28, 28, wgpu::kWholeSize, 0},
+            // Non-default size
+            {0, 28, 0, 28, 1},
+            {0, 28, 0, 27, 0},
+            // Non-zero offset and size
+            {0, 32, 4, 28, 1},
+            {0, 31, 4, 27, 0},
+            {0, 31, 4, wgpu::kWholeSize, 0},
+        };
+
+        const std::vector<VertexBufferParams> kInstanceParamsListForZeroStride = {
+            // Control case
+            {0, 20, 0, wgpu::kWholeSize, 1},
+            // Non-zero offset
+            {0, 24, 4, wgpu::kWholeSize, 1},
+            {0, 23, 4, wgpu::kWholeSize, 0},
+            {0, 20, 4, wgpu::kWholeSize, 0},
+            {0, 20, 20, wgpu::kWholeSize, 0},
+            // Non-default size
+            {0, 21, 0, 20, 1},
+            {0, 20, 0, 19, 0},
+            // Non-zero offset and size
+            {0, 30, 4, 20, 1},
+            {0, 30, 4, 19, 0},
+        };
+
+        // Build a pipeline that require a vertex step mode vertex buffer no smaller than 28 bytes
+        // and an instance step mode buffer no smaller than 20 bytes
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithZeroArrayStride();
+
+        for (VertexBufferParams vertexParams : kVertexParamsListForZeroStride) {
+            for (VertexBufferParams instanceParams : kInstanceParamsListForZeroStride) {
+                auto indexFormat = wgpu::IndexFormat::Uint32;
+                auto indexStride = sizeof(uint32_t);
+
+                // Build index buffer for 12 indexes
+                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+                // Build vertex buffer for vertices
+                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+                // Build vertex buffer for instances
+                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+                VertexBufferList vertexBufferList = {
+                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                     vertexParams.bufferSizeForEncoder},
+                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                     instanceParams.bufferSizeForEncoder}};
+
+                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+                const bool isSuccess = (vertexParams.maxValidAccessNumber > 0) &&
+                                       (instanceParams.maxValidAccessNumber > 0);
+                // vertexCount and instanceCount doesn't matter, as array stride is zero and all
+                // vertex/instance access the same space of buffer
+                TestRenderPassDraw(pipeline, vertexBufferList, 100, 100, 0, 0, isSuccess);
+                // indexCount doesn't matter as long as no index buffer OOB happened
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 100, 0,
+                                          0, 0, isSuccess);
+            }
+        }
+    }
+
+    // Verify that if setVertexBuffer and/or setIndexBuffer for multiple times, only the last one is
+    // taken into account
+    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, SetBufferMultipleTime) {
+        wgpu::IndexFormat indexFormat = wgpu::IndexFormat::Uint32;
+        uint32_t indexStride = sizeof(uint32_t);
+
+        // Build index buffer for 11 indexes
+        wgpu::Buffer indexBuffer11 = CreateBuffer(11 * indexStride, wgpu::BufferUsage::Index);
+        // Build index buffer for 12 indexes
+        wgpu::Buffer indexBuffer12 = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+        // Build vertex buffer for 2 vertices
+        wgpu::Buffer vertexBuffer2 = CreateBuffer(2 * kFloat32x4Stride);
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer3 = CreateBuffer(3 * kFloat32x4Stride);
+        // Build vertex buffer for 4 instances
+        wgpu::Buffer instanceBuffer4 = CreateBuffer(4 * kFloat32x2Stride);
+        // Build vertex buffer for 5 instances
+        wgpu::Buffer instanceBuffer5 = CreateBuffer(5 * kFloat32x2Stride);
+
+        // Test for setting vertex buffer for multiple times
+        {
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+
+            // Set to vertexBuffer3 and instanceBuffer5 at last
+            VertexBufferList vertexBufferList = {{0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                                 {1, instanceBuffer4, 0, wgpu::kWholeSize},
+                                                 {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                                 {0, vertexBuffer3, 0, wgpu::kWholeSize}};
+
+            // For Draw, the max vertexCount is 3 and the max instanceCount is 5
+            TestRenderPassDraw(pipeline, vertexBufferList, 3, 5, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, 4, 5, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, 3, 6, 0, 0, false);
+            // For DrawIndex, the max instanceCount is 5
+            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
+                                      5, 0, 0, 0, true);
+            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
+                                      6, 0, 0, 0, false);
+
+            // Set to vertexBuffer2 and instanceBuffer4 at last
+            vertexBufferList = VertexBufferList{{0, vertexBuffer3, 0, wgpu::kWholeSize},
+                                                {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                                {0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                                {1, instanceBuffer4, 0, wgpu::kWholeSize}};
+
+            // For Draw, the max vertexCount is 2 and the max instanceCount is 4
+            TestRenderPassDraw(pipeline, vertexBufferList, 2, 4, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, 3, 4, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, 2, 5, 0, 0, false);
+            // For DrawIndex, the max instanceCount is 4
+            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
+                                      4, 0, 0, 0, true);
+            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
+                                      5, 0, 0, 0, false);
+        }
+
+        // Test for setIndexBuffer multiple times
+        {
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder renderPassEncoder =
+                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+                renderPassEncoder.SetPipeline(pipeline);
+
+                // Index buffer is set to indexBuffer12 at last
+                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+
+                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+                // It should be ok to draw 12 index
+                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+                renderPassEncoder.End();
+
+                // Expect success
+                encoder.Finish();
+            }
+
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder renderPassEncoder =
+                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+                renderPassEncoder.SetPipeline(pipeline);
+
+                // Index buffer is set to indexBuffer12 at last
+                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+
+                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+                // It should be index buffer OOB to draw 13 index
+                renderPassEncoder.DrawIndexed(13, 1, 0, 0, 0);
+                renderPassEncoder.End();
+
+                // Expect failure
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder renderPassEncoder =
+                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+                renderPassEncoder.SetPipeline(pipeline);
+
+                // Index buffer is set to indexBuffer11 at last
+                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+
+                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+                // It should be ok to draw 11 index
+                renderPassEncoder.DrawIndexed(11, 1, 0, 0, 0);
+                renderPassEncoder.End();
+
+                // Expect success
+                encoder.Finish();
+            }
+
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder renderPassEncoder =
+                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+                renderPassEncoder.SetPipeline(pipeline);
+
+                // Index buffer is set to indexBuffer11 at last
+                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+
+                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+                // It should be index buffer OOB to draw 12 index
+                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+                renderPassEncoder.End();
+
+                // Expect failure
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/DynamicStateCommandValidationTests.cpp b/src/dawn/tests/unittests/validation/DynamicStateCommandValidationTests.cpp
new file mode 100644
index 0000000..7268c00
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/DynamicStateCommandValidationTests.cpp
@@ -0,0 +1,258 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+class SetViewportTest : public ValidationTest {
+  protected:
+    void TestViewportCall(bool success,
+                          float x,
+                          float y,
+                          float width,
+                          float height,
+                          float minDepth,
+                          float maxDepth) {
+        utils::BasicRenderPass rp = utils::CreateBasicRenderPass(device, kWidth, kHeight);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rp.renderPassInfo);
+        pass.SetViewport(x, y, width, height, minDepth, maxDepth);
+        pass.End();
+
+        if (success) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    static constexpr uint32_t kWidth = 5;
+    static constexpr uint32_t kHeight = 3;
+};
+
+// Test to check basic use of SetViewport
+TEST_F(SetViewportTest, Success) {
+    TestViewportCall(true, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0);
+}
+
+// Test to check that NaN in viewport parameters is not allowed
+TEST_F(SetViewportTest, ViewportParameterNaN) {
+    TestViewportCall(false, NAN, 0.0, 1.0, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, NAN, 1.0, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, NAN, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, NAN, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, NAN, 1.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 0.0, NAN);
+}
+
+// Test to check that an empty viewport is allowed.
+TEST_F(SetViewportTest, EmptyViewport) {
+    // Width of viewport is zero.
+    TestViewportCall(true, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0);
+
+    // Height of viewport is zero.
+    TestViewportCall(true, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0);
+
+    // Both width and height of viewport are zero.
+    TestViewportCall(true, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0);
+}
+
+// Test to check that viewport larger than the framebuffer is disallowed
+TEST_F(SetViewportTest, ViewportLargerThanFramebuffer) {
+    // Control case: width and height are set to the render target size.
+    TestViewportCall(true, 0.0, 0.0, kWidth, kHeight, 0.0, 1.0);
+
+    // Width is larger than the rendertarget's width
+    TestViewportCall(false, 0.0, 0.0, kWidth + 1.0, kHeight, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, nextafter(float(kWidth), 1000.0f), kHeight, 0.0, 1.0);
+
+    // Height is larger than the rendertarget's height
+    TestViewportCall(false, 0.0, 0.0, kWidth, kHeight + 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, kWidth, nextafter(float(kHeight), 1000.0f), 0.0, 1.0);
+
+    // x + width is larger than the rendertarget's width
+    TestViewportCall(false, 2.0, 0.0, kWidth - 1.0, kHeight, 0.0, 1.0);
+    TestViewportCall(false, 1.0, 0.0, nextafter(float(kWidth - 1.0), 1000.0f), kHeight, 0.0, 1.0);
+
+    // Height is larger than the rendertarget's height
+    TestViewportCall(false, 0.0, 2.0, kWidth, kHeight - 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 1.0, kWidth, nextafter(float(kHeight - 1.0), 1000.0f), 0.0, 1.0);
+}
+
+// Test to check that negative x in viewport is disallowed
+TEST_F(SetViewportTest, NegativeXYWidthHeight) {
+    // Control case: everything set to 0 is allowed.
+    TestViewportCall(true, +0.0, +0.0, +0.0, +0.0, 0.0, 1.0);
+    TestViewportCall(true, -0.0, -0.0, -0.0, -0.0, 0.0, 1.0);
+
+    // Nonzero negative values are disallowed
+    TestViewportCall(false, -1.0, 0.0, 1.0, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, -1.0, 1.0, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, -1.0, 1.0, 0.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, -1.0, 0.0, 1.0);
+}
+
+// Test to check that minDepth out of range [0, 1] is disallowed
+TEST_F(SetViewportTest, MinDepthOutOfRange) {
+    // MinDepth is -1
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, -1.0, 1.0);
+
+    // MinDepth is 2 or 1 + epsilon
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 2.0, 1.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, nextafter(1.0f, 1000.0f), 1.0);
+}
+
+// Test to check that minDepth out of range [0, 1] is disallowed
+TEST_F(SetViewportTest, MaxDepthOutOfRange) {
+    // MaxDepth is -1
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 1.0, -1.0);
+
+    // MaxDepth is 2 or 1 + epsilon
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0);
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 1.0, nextafter(1.0f, 1000.0f));
+}
+
+// Test to check that minDepth equal or greater than maxDepth is disallowed
+TEST_F(SetViewportTest, MinDepthEqualOrGreaterThanMaxDepth) {
+    TestViewportCall(true, 0.0, 0.0, 1.0, 1.0, 0.5, 0.5);
+    TestViewportCall(false, 0.0, 0.0, 1.0, 1.0, 0.8, 0.5);
+}
+
+class SetScissorTest : public ValidationTest {
+  protected:
+    void TestScissorCall(bool success, uint32_t x, uint32_t y, uint32_t width, uint32_t height) {
+        utils::BasicRenderPass rp = utils::CreateBasicRenderPass(device, kWidth, kHeight);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&rp.renderPassInfo);
+        pass.SetScissorRect(x, y, width, height);
+        pass.End();
+
+        if (success) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    static constexpr uint32_t kWidth = 5;
+    static constexpr uint32_t kHeight = 3;
+};
+
+// Test to check basic use of SetScissor
+TEST_F(SetScissorTest, Success) {
+    TestScissorCall(true, 0, 0, kWidth, kHeight);
+    TestScissorCall(true, 0, 0, 1, 1);
+}
+
+// Test to check that an empty scissor is allowed
+TEST_F(SetScissorTest, EmptyScissor) {
+    // Scissor width is 0
+    TestScissorCall(true, 0, 0, 0, kHeight);
+
+    // Scissor height is 0
+    TestScissorCall(true, 0, 0, kWidth, 0);
+
+    // Both scissor width and height are 0
+    TestScissorCall(true, 0, 0, 0, 0);
+}
+
+// Test to check that various scissors contained in the framebuffer is allowed
+TEST_F(SetScissorTest, ScissorContainedInFramebuffer) {
+    // Width and height are set to the render target size.
+    TestScissorCall(true, 0, 0, kWidth, kHeight);
+
+    // Width/height at the limit with 0 x/y is valid.
+    TestScissorCall(true, kWidth, 0, 0, kHeight);
+    TestScissorCall(true, 0, kHeight, kWidth, 0);
+}
+
+// Test to check that a scissor larger than the framebuffer is disallowed
+TEST_F(SetScissorTest, ScissorLargerThanFramebuffer) {
+    // Width/height is larger than the rendertarget's width/height.
+    TestScissorCall(false, 0, 0, kWidth + 1, kHeight);
+    TestScissorCall(false, 0, 0, kWidth, kHeight + 1);
+
+    // x + width is larger than the rendertarget's width.
+    TestScissorCall(false, 2, 0, kWidth - 1, kHeight);
+    TestScissorCall(false, kWidth, 0, 1, kHeight);
+    TestScissorCall(false, std::numeric_limits<uint32_t>::max(), 0, kWidth, kHeight);
+
+    // x + height is larger than the rendertarget's height.
+    TestScissorCall(false, 0, 2, kWidth, kHeight - 1);
+    TestScissorCall(false, 0, kHeight, kWidth, 1);
+    TestScissorCall(false, 0, std::numeric_limits<uint32_t>::max(), kWidth, kHeight);
+}
+
+class SetBlendConstantTest : public ValidationTest {};
+
+// Test to check basic use of SetBlendConstantTest
+TEST_F(SetBlendConstantTest, Success) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        constexpr wgpu::Color kTransparentBlack{0.0f, 0.0f, 0.0f, 0.0f};
+        pass.SetBlendConstant(&kTransparentBlack);
+        pass.End();
+    }
+    encoder.Finish();
+}
+
+// Test that SetBlendConstant allows any value, large, small or negative
+TEST_F(SetBlendConstantTest, AnyValueAllowed) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        constexpr wgpu::Color kAnyColorValue{-1.0f, 42.0f, -0.0f, 0.0f};
+        pass.SetBlendConstant(&kAnyColorValue);
+        pass.End();
+    }
+    encoder.Finish();
+}
+
+class SetStencilReferenceTest : public ValidationTest {};
+
+// Test to check basic use of SetStencilReferenceTest
+TEST_F(SetStencilReferenceTest, Success) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetStencilReference(0);
+        pass.End();
+    }
+    encoder.Finish();
+}
+
+// Test that SetStencilReference allows any bit to be set
+TEST_F(SetStencilReferenceTest, AllBitsAllowed) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetStencilReference(0xFFFFFFFF);
+        pass.End();
+    }
+    encoder.Finish();
+}
diff --git a/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp b/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp
new file mode 100644
index 0000000..808eba4
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp
@@ -0,0 +1,233 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include <gmock/gmock.h>
+
+using namespace testing;
+
+class MockDevicePopErrorScopeCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+};
+
+static std::unique_ptr<MockDevicePopErrorScopeCallback> mockDevicePopErrorScopeCallback;
+static void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
+                                              const char* message,
+                                              void* userdata) {
+    mockDevicePopErrorScopeCallback->Call(type, message, userdata);
+}
+
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class ErrorScopeValidationTest : public ValidationTest {
+  private:
+    void SetUp() override {
+        ValidationTest::SetUp();
+        mockDevicePopErrorScopeCallback = std::make_unique<MockDevicePopErrorScopeCallback>();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
+    }
+
+    void TearDown() override {
+        ValidationTest::TearDown();
+
+        // Delete mocks so that expectations are checked
+        mockDevicePopErrorScopeCallback = nullptr;
+        mockQueueWorkDoneCallback = nullptr;
+    }
+};
+
+// Test the simple success case.
+TEST_F(ErrorScopeValidationTest, Success) {
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    FlushWire();
+}
+
+// Test the simple case where the error scope catches an error.
+TEST_F(ErrorScopeValidationTest, CatchesError) {
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+
+    wgpu::BufferDescriptor desc = {};
+    desc.usage = static_cast<wgpu::BufferUsage>(WGPUBufferUsage_Force32);
+    device.CreateBuffer(&desc);
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Validation, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    FlushWire();
+}
+
+// Test that errors bubble to the parent scope if not handled by the current scope.
+TEST_F(ErrorScopeValidationTest, ErrorBubbles) {
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+
+    wgpu::BufferDescriptor desc = {};
+    desc.usage = static_cast<wgpu::BufferUsage>(WGPUBufferUsage_Force32);
+    device.CreateBuffer(&desc);
+
+    // OutOfMemory does not match Validation error.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    FlushWire();
+
+    // Parent validation error scope captures the error.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Validation, _, this + 1))
+        .Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 1);
+    FlushWire();
+}
+
+// Test that if an error scope matches an error, it does not bubble to the parent scope.
+TEST_F(ErrorScopeValidationTest, HandledErrorsStopBubbling) {
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+
+    wgpu::BufferDescriptor desc = {};
+    desc.usage = static_cast<wgpu::BufferUsage>(WGPUBufferUsage_Force32);
+    device.CreateBuffer(&desc);
+
+    // Inner scope catches the error.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Validation, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    FlushWire();
+
+    // Parent scope does not see the error.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this + 1))
+        .Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 1);
+    FlushWire();
+}
+
+// Test that if no error scope handles an error, it goes to the device UncapturedError callback
+TEST_F(ErrorScopeValidationTest, UnhandledErrorsMatchUncapturedErrorCallback) {
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+
+    wgpu::BufferDescriptor desc = {};
+    desc.usage = static_cast<wgpu::BufferUsage>(WGPUBufferUsage_Force32);
+    ASSERT_DEVICE_ERROR(device.CreateBuffer(&desc));
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    FlushWire();
+}
+
+// Check that push/popping error scopes must be balanced.
+TEST_F(ErrorScopeValidationTest, PushPopBalanced) {
+    // No error scopes to pop.
+    {
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Unknown, _, this))
+            .Times(1);
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    }
+    // Too many pops
+    {
+        device.PushErrorScope(wgpu::ErrorFilter::Validation);
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this + 1))
+            .Times(1);
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 1);
+        FlushWire();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Unknown, _, this + 2))
+            .Times(1);
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 2);
+    }
+}
+
+// Test that parent error scopes also call their callbacks before an enclosed Queue::Submit
+// completes
+TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmitNested) {
+    wgpu::Queue queue = device.GetQueue();
+
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+
+    queue.Submit(0, nullptr);
+    queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);
+
+    testing::Sequence seq;
+
+    MockCallback<WGPUErrorCallback> errorScopeCallback2;
+    EXPECT_CALL(errorScopeCallback2, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq);
+    device.PopErrorScope(errorScopeCallback2.Callback(),
+                         errorScopeCallback2.MakeUserdata(this + 1));
+
+    MockCallback<WGPUErrorCallback> errorScopeCallback1;
+    EXPECT_CALL(errorScopeCallback1, Call(WGPUErrorType_NoError, _, this + 2)).InSequence(seq);
+    device.PopErrorScope(errorScopeCallback1.Callback(),
+                         errorScopeCallback1.MakeUserdata(this + 2));
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this))
+        .InSequence(seq);
+    WaitForAllOperations(device);
+}
+
+// Test that if the device is destroyed before the callback occurs, it is called with NoError
+// in dawn_native, but Unknown in dawn_wire because the device is destroyed before the callback
+// message happens.
+TEST_F(ErrorScopeValidationTest, DeviceDestroyedBeforeCallback) {
+    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
+    {
+        // Note: this is in its own scope to be clear the queue does not outlive the device.
+        wgpu::Queue queue = device.GetQueue();
+        queue.Submit(0, nullptr);
+    }
+
+    if (UsesWire()) {
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Unknown, _, this))
+            .Times(1);
+        ExpectDeviceDestruction();
+        device = nullptr;
+    } else {
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this))
+            .Times(1);
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+        ExpectDeviceDestruction();
+        device = nullptr;
+    }
+}
+
+// If the device is destroyed, pop error scope should callback with device lost.
+TEST_F(ErrorScopeValidationTest, DeviceDestroyedBeforePop) {
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+    ExpectDeviceDestruction();
+    device.Destroy();
+    FlushWire();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_DeviceLost, _, this)).Times(1);
+    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+}
+
+// Regression test that on device shutdown, we don't get a recursion in O(pushed error scope) that
+// would lead to a stack overflow
+TEST_F(ErrorScopeValidationTest, ShutdownStackOverflow) {
+    for (size_t i = 0; i < 1'000'000; i++) {
+        device.PushErrorScope(wgpu::ErrorFilter::Validation);
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
new file mode 100644
index 0000000..57cb301
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
@@ -0,0 +1,485 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    class ExternalTextureTest : public ValidationTest {
+      public:
+        wgpu::TextureDescriptor CreateTextureDescriptor(
+            wgpu::TextureFormat format = kDefaultTextureFormat) {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.size.width = kWidth;
+            descriptor.size.height = kHeight;
+            descriptor.size.depthOrArrayLayers = kDefaultDepth;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            descriptor.sampleCount = kDefaultSampleCount;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.format = format;
+            descriptor.usage = kDefaultUsage;
+            return descriptor;
+        }
+
+      protected:
+        void SetUp() override {
+            ValidationTest::SetUp();
+
+            queue = device.GetQueue();
+        }
+
+        static constexpr uint32_t kWidth = 32;
+        static constexpr uint32_t kHeight = 32;
+        static constexpr uint32_t kDefaultDepth = 1;
+        static constexpr uint32_t kDefaultMipLevels = 1;
+        static constexpr uint32_t kDefaultSampleCount = 1;
+        static constexpr wgpu::TextureUsage kDefaultUsage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
+            wgpu::TextureFormat::RGBA8Unorm;
+        static constexpr wgpu::TextureFormat kBiplanarPlane0Format = wgpu::TextureFormat::R8Unorm;
+        static constexpr wgpu::TextureFormat kBiplanarPlane1Format = wgpu::TextureFormat::RG8Unorm;
+
+        wgpu::Queue queue;
+    };
+
+    TEST_F(ExternalTextureTest, CreateExternalTextureValidation) {
+        // Creating an external texture from a 2D, single-subresource texture should succeed.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = texture.CreateView();
+            device.CreateExternalTexture(&externalDesc);
+        }
+
+        // Creating an external texture from a non-2D texture should fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            textureDescriptor.dimension = wgpu::TextureDimension::e3D;
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = internalTexture.CreateView();
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating an external texture from a texture with mip count > 1 should fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            textureDescriptor.mipLevelCount = 2;
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = internalTexture.CreateView();
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating an external texture from a texture without TextureUsage::TextureBinding should
+        // fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            textureDescriptor.mipLevelCount = 2;
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = internalTexture.CreateView();
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating an external texture with an unsupported format should fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            textureDescriptor.format = wgpu::TextureFormat::R8Uint;
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = internalTexture.CreateView();
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating an external texture with an multisampled texture should fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            textureDescriptor.sampleCount = 4;
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = internalTexture.CreateView();
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating an external texture with an error texture view should fail.
+        {
+            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+            wgpu::TextureViewDescriptor errorViewDescriptor;
+            errorViewDescriptor.format = kDefaultTextureFormat;
+            errorViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+            errorViewDescriptor.mipLevelCount = 1;
+            errorViewDescriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(wgpu::TextureView errorTextureView =
+                                    internalTexture.CreateView(&errorViewDescriptor));
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = errorTextureView;
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+    }
+
+    // Test that external texture creation works as expected in multiplane scenarios.
+    TEST_F(ExternalTextureTest, CreateMultiplanarExternalTextureValidation) {
+        // Creating an external texture from two 2D, single-subresource textures with a biplanar
+        // format should succeed.
+        {
+            wgpu::TextureDescriptor plane0TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane0Format);
+            wgpu::TextureDescriptor plane1TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane1Format);
+            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = texture0.CreateView();
+            externalDesc.plane1 = texture1.CreateView();
+
+            device.CreateExternalTexture(&externalDesc);
+        }
+
+        // Creating a multiplanar external texture with an unsupported format for plane0 should
+        // result in an error.
+        {
+            wgpu::TextureDescriptor plane0TextureDescriptor =
+                CreateTextureDescriptor(kDefaultTextureFormat);
+            wgpu::TextureDescriptor plane1TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane1Format);
+            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = texture0.CreateView();
+            externalDesc.plane1 = texture1.CreateView();
+
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating a multiplanar external texture with an unsupported format for plane1 should
+        // result in an error.
+        {
+            wgpu::TextureDescriptor plane0TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane0Format);
+            wgpu::TextureDescriptor plane1TextureDescriptor =
+                CreateTextureDescriptor(kDefaultTextureFormat);
+            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = texture0.CreateView();
+            externalDesc.plane1 = texture1.CreateView();
+
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+
+        // Creating a multiplanar external texture with a non-sRGB color space should fail.
+        {
+            wgpu::TextureDescriptor plane0TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane0Format);
+            wgpu::TextureDescriptor plane1TextureDescriptor =
+                CreateTextureDescriptor(kBiplanarPlane1Format);
+            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+            wgpu::ExternalTextureDescriptor externalDesc;
+            externalDesc.plane0 = texture0.CreateView();
+            externalDesc.plane1 = texture1.CreateView();
+            externalDesc.colorSpace = wgpu::PredefinedColorSpace::Undefined;
+            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+        }
+    }
+
+    // Test that submitting a render pass that contains a destroyed external texture results in
+    // an error.
+    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInRenderPass) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a bind group that contains the external texture.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+        // Create another texture to use as a color attachment.
+        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+        wgpu::TextureView renderView = renderTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+        // Control case should succeed.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            queue.Submit(1, &commands);
+        }
+
+        // Destroying the external texture should result in an error.
+        {
+            externalTexture.Destroy();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        }
+    }
+
+    // Test that submitting a render pass that contains a dereferenced external texture results in
+    // success
+    TEST_F(ExternalTextureTest, SubmitDereferencedExternalTextureInRenderPass) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a bind group that contains the external texture.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+        // Create another texture to use as a color attachment.
+        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+        wgpu::TextureView renderView = renderTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+        // Control case should succeed.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            queue.Submit(1, &commands);
+        }
+
+        // Dereferencing the external texture should not result in a use-after-free error.
+        {
+            externalTexture = nullptr;
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+        }
+    }
+
+    // Test that submitting a render pass that contains a destroyed external texture plane
+    // results in an error.
+    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInRenderPass) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a bind group that contains the external texture.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+        // Create another texture to use as a color attachment.
+        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+        wgpu::TextureView renderView = renderTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+        // Control case should succeed.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            queue.Submit(1, &commands);
+        }
+
+        // Destroying an external texture underlying plane should result in an error.
+        {
+            texture.Destroy();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        }
+    }
+
+    // Test that submitting a compute pass that contains a destroyed external texture results in
+    // an error.
+    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInComputePass) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a bind group that contains the external texture.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+        wgpu::ComputePassDescriptor computePass;
+
+        // Control case should succeed.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            queue.Submit(1, &commands);
+        }
+
+        // Destroying the external texture should result in an error.
+        {
+            externalTexture.Destroy();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        }
+    }
+
+    // Test that submitting a compute pass that contains a destroyed external texture plane
+    // results in an error.
+    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInComputePass) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Create a bind group that contains the external texture.
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+        wgpu::ComputePassDescriptor computePass;
+
+        // Control case should succeed.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+        }
+
+        // Destroying an external texture underlying plane should result in an error.
+        {
+            texture.Destroy();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+            {
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+            }
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        }
+    }
+
+    // Ensure that bind group validation catches external textures mimatched from the BGL.
+    TEST_F(ExternalTextureTest, BindGroupDoesNotMatchLayout) {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture.CreateView();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+        // Control case should succeed.
+        {
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+            utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+        }
+
+        // Bind group creation should fail when an external texture is not present in the
+        // corresponding slot of the bind group layout.
+        {
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, externalTexture}}));
+        }
+    }
+
+}  // namespace
diff --git a/src/dawn/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp b/src/dawn/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp
new file mode 100644
index 0000000..b976ed1
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp
@@ -0,0 +1,1125 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class GetBindGroupLayoutTests : public ValidationTest {
+  protected:
+    wgpu::RenderPipeline RenderPipelineFromFragmentShader(const char* shader) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, shader);
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.layout = nullptr;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+};
+
+// Test that GetBindGroupLayout returns the same object for the same index
+// and for matching layouts.
+TEST_F(GetBindGroupLayoutTests, SameObject) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniform0 : S;
+        @group(1) @binding(0) var<uniform> uniform1 : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : vec4<f32> = uniform0.pos;
+            pos = uniform1.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct S2 {
+            pos : vec4<f32>
+        }
+        @group(2) @binding(0) var<uniform> uniform2 : S2;
+
+        struct S3 {
+            pos : mat4x4<f32>
+        }
+        @group(3) @binding(0) var<storage, read_write> storage3 : S3;
+
+        @stage(fragment) fn main() {
+            var pos_u : vec4<f32> = uniform2.pos;
+            var pos_s : mat4x4<f32> = storage3.pos;
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+    // The same value is returned for the same index.
+    EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(0).Get());
+
+    // Matching bind group layouts at different indices are the same object.
+    EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(1).Get());
+
+    // BGLs with different bindings types are different objects.
+    EXPECT_NE(pipeline.GetBindGroupLayout(2).Get(), pipeline.GetBindGroupLayout(3).Get());
+
+    // BGLs with different visibilities are different objects.
+    EXPECT_NE(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(2).Get());
+}
+
+// Test that default BindGroupLayouts cannot be used in the creation of a new PipelineLayout
+TEST_F(GetBindGroupLayoutTests, DefaultBindGroupLayoutPipelineCompatibility) {
+    wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(fragment) fn main() {
+            var pos : vec4<f32> = uniforms.pos;
+        })");
+
+    ASSERT_DEVICE_ERROR(utils::MakePipelineLayout(device, {pipeline.GetBindGroupLayout(0)}));
+}
+
+// Test that getBindGroupLayout defaults are correct
+// - shader stage visibility is the stage that adds the binding.
+// - dynamic offsets is false
+TEST_F(GetBindGroupLayoutTests, DefaultShaderStageAndDynamicOffsets) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(fragment) fn main() {
+            var pos : vec4<f32> = uniforms.pos;
+        })");
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Uniform;
+    binding.buffer.minBindingSize = 4 * sizeof(float);
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    // Check that an otherwise compatible bind group layout doesn't match one created as part of a
+    // default pipeline layout.
+    binding.buffer.hasDynamicOffset = false;
+    binding.visibility = wgpu::ShaderStage::Fragment;
+    EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
+
+    // Check that any change in visibility doesn't match.
+    binding.visibility = wgpu::ShaderStage::Vertex;
+    EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
+
+    binding.visibility = wgpu::ShaderStage::Compute;
+    EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
+
+    // Check that any change in hasDynamicOffsets doesn't match.
+    binding.buffer.hasDynamicOffset = true;
+    binding.visibility = wgpu::ShaderStage::Fragment;
+    EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
+}
+
+TEST_F(GetBindGroupLayoutTests, DefaultTextureSampleType) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayout filteringBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
+                  wgpu::TextureSampleType::Float},
+                 {1, wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
+                  wgpu::SamplerBindingType::Filtering}});
+
+    wgpu::BindGroupLayout nonFilteringBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
+                  wgpu::TextureSampleType::UnfilterableFloat},
+                 {1, wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
+                  wgpu::SamplerBindingType::Filtering}});
+
+    wgpu::ShaderModule emptyVertexModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            _ = myTexture;
+            _ = mySampler;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule textureLoadVertexModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            textureLoad(myTexture, vec2<i32>(), 0);
+            _ = mySampler;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule textureSampleVertexModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            textureSampleLevel(myTexture, mySampler, vec2<f32>(), 0.0);
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule unusedTextureFragmentModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(fragment) fn main() {
+            _ = myTexture;
+            _ = mySampler;
+        })");
+
+    wgpu::ShaderModule textureLoadFragmentModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(fragment) fn main() {
+            textureLoad(myTexture, vec2<i32>(), 0);
+            _ = mySampler;
+        })");
+
+    wgpu::ShaderModule textureSampleFragmentModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+        @group(0) @binding(1) var mySampler : sampler;
+        @stage(fragment) fn main() {
+            textureSample(myTexture, mySampler, vec2<f32>());
+        })");
+
+    auto BGLFromModules = [this](wgpu::ShaderModule vertexModule,
+                                 wgpu::ShaderModule fragmentModule) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vertexModule;
+        descriptor.cFragment.module = fragmentModule;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        return device.CreateRenderPipeline(&descriptor).GetBindGroupLayout(0);
+    };
+
+    // Textures not used default to non-filtering
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, unusedTextureFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, unusedTextureFragmentModule).Get(), filteringBGL.Get()));
+
+    // Textures used with textureLoad default to non-filtering
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, textureLoadFragmentModule).Get(), nonFilteringBGL.Get()));
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, textureLoadFragmentModule).Get(), filteringBGL.Get()));
+
+    // Textures used with textureLoad on both stages default to non-filtering
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureLoadVertexModule, textureLoadFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureLoadVertexModule, textureLoadFragmentModule).Get(),
+        filteringBGL.Get()));
+
+    // Textures used with textureSample default to filtering
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, textureSampleFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(emptyVertexModule, textureSampleFragmentModule).Get(), filteringBGL.Get()));
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureSampleVertexModule, unusedTextureFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureSampleVertexModule, unusedTextureFragmentModule).Get(),
+        filteringBGL.Get()));
+
+    // Textures used with both textureLoad and textureSample default to filtering
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureLoadVertexModule, textureSampleFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureLoadVertexModule, textureSampleFragmentModule).Get(),
+        filteringBGL.Get()));
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureSampleVertexModule, textureLoadFragmentModule).Get(),
+        nonFilteringBGL.Get()));
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        BGLFromModules(textureSampleVertexModule, textureLoadFragmentModule).Get(),
+        filteringBGL.Get()));
+}
+
+// Test GetBindGroupLayout works with a compute pipeline
+TEST_F(GetBindGroupLayoutTests, ComputePipeline) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(compute) @workgroup_size(1) fn main() {
+            var pos : vec4<f32> = uniforms.pos;
+        })");
+
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Uniform;
+    binding.visibility = wgpu::ShaderStage::Compute;
+    binding.buffer.hasDynamicOffset = false;
+    binding.buffer.minBindingSize = 4 * sizeof(float);
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+}
+
+// Test that the binding type matches the shader.
+TEST_F(GetBindGroupLayoutTests, BindingType) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.hasDynamicOffset = false;
+    binding.buffer.minBindingSize = 4 * sizeof(float);
+    binding.visibility = wgpu::ShaderStage::Fragment;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    {
+        // Storage buffer binding is not supported in vertex shader.
+        binding.visibility = wgpu::ShaderStage::Fragment;
+        binding.buffer.type = wgpu::BufferBindingType::Storage;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(0) var<storage, read_write> ssbo : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = ssbo.pos;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+    {
+        binding.buffer.type = wgpu::BufferBindingType::Uniform;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> uniforms : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = uniforms.pos;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.buffer.type = wgpu::BufferBindingType::ReadOnlyStorage;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(0) var<storage, read> ssbo : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = ssbo.pos;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    binding.buffer.type = wgpu::BufferBindingType::Undefined;
+    binding.buffer.minBindingSize = 0;
+    {
+        binding.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.multisampled = true;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_multisampled_2d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    binding.texture.sampleType = wgpu::TextureSampleType::Undefined;
+    {
+        binding.sampler.type = wgpu::SamplerBindingType::Filtering;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var mySampler: sampler;
+
+            @stage(fragment) fn main() {
+                _ = mySampler;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+}
+
+// Tests that the external texture binding type matches with a texture_external declared in the
+// shader.
+TEST_F(GetBindGroupLayoutTests, ExternalTextureBindingType) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.visibility = wgpu::ShaderStage::Fragment;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    binding.nextInChain = &utils::kExternalTextureBindingLayout;
+    wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myExternalTexture: texture_external;
+
+            @stage(fragment) fn main() {
+               _ = myExternalTexture;
+            })");
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+}
+
+// Test that texture view dimension matches the shader.
+TEST_F(GetBindGroupLayoutTests, ViewDimension) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.visibility = wgpu::ShaderStage::Fragment;
+    binding.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::e1D;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_1d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::e2DArray;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d_array<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::e3D;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_3d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::Cube;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_cube<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.viewDimension = wgpu::TextureViewDimension::CubeArray;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_cube_array<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+}
+
+// Test that texture component type matches the shader.
+TEST_F(GetBindGroupLayoutTests, TextureComponentType) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.visibility = wgpu::ShaderStage::Fragment;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    {
+        binding.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.sampleType = wgpu::TextureSampleType::Sint;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d<i32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.texture.sampleType = wgpu::TextureSampleType::Uint;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            @group(0) @binding(0) var myTexture : texture_2d<u32>;
+
+            @stage(fragment) fn main() {
+                textureDimensions(myTexture);
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+}
+
+// Test that binding= indices match.
+TEST_F(GetBindGroupLayoutTests, BindingIndices) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.visibility = wgpu::ShaderStage::Fragment;
+    binding.buffer.type = wgpu::BufferBindingType::Uniform;
+    binding.buffer.hasDynamicOffset = false;
+    binding.buffer.minBindingSize = 4 * sizeof(float);
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    {
+        binding.binding = 0;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> uniforms : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = uniforms.pos;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.binding = 1;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(1) var<uniform> uniforms : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = uniforms.pos;
+            })");
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+
+    {
+        binding.binding = 2;
+        wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+            struct S {
+                pos : vec4<f32>
+            }
+            @group(0) @binding(1) var<uniform> uniforms : S;
+
+            @stage(fragment) fn main() {
+                var pos : vec4<f32> = uniforms.pos;
+            })");
+        EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get()));
+    }
+}
+
+// Test it is valid to have duplicate bindings in the shaders.
+TEST_F(GetBindGroupLayoutTests, DuplicateBinding) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniform0 : S;
+        @group(1) @binding(0) var<uniform> uniform1 : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : vec4<f32> = uniform0.pos;
+            pos = uniform1.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(1) @binding(0) var<uniform> uniforms : S;
+
+        @stage(fragment) fn main() {
+            var pos : vec4<f32> = uniforms.pos;
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    device.CreateRenderPipeline(&descriptor);
+}
+
+// Test that minBufferSize is set on the BGL and that the max of the min buffer sizes is used.
+TEST_F(GetBindGroupLayoutTests, MinBufferSize) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::ShaderModule vsModule4 = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : f32
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : f32 = uniforms.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule vsModule64 = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : mat4x4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : mat4x4<f32> = uniforms.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule4 = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : f32
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(fragment) fn main() {
+            var pos : f32 = uniforms.pos;
+        })");
+
+    wgpu::ShaderModule fsModule64 = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : mat4x4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(fragment) fn main() {
+            var pos : mat4x4<f32> = uniforms.pos;
+        })");
+
+    // Create BGLs with minBufferBindingSize 4 and 64.
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Uniform;
+    binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    binding.buffer.minBindingSize = 4;
+    wgpu::BindGroupLayout bgl4 = device.CreateBindGroupLayout(&desc);
+    binding.buffer.minBindingSize = 64;
+    wgpu::BindGroupLayout bgl64 = device.CreateBindGroupLayout(&desc);
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    // Check with both stages using 4 bytes.
+    {
+        descriptor.vertex.module = vsModule4;
+        descriptor.cFragment.module = fsModule4;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), bgl4.Get()));
+    }
+
+    // Check that the max is taken between 4 and 64.
+    {
+        descriptor.vertex.module = vsModule64;
+        descriptor.cFragment.module = fsModule4;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), bgl64.Get()));
+    }
+
+    // Check that the order doesn't change that the max is taken.
+    {
+        descriptor.vertex.module = vsModule4;
+        descriptor.cFragment.module = fsModule64;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), bgl64.Get()));
+    }
+}
+
+// Test that the visibility is correctly aggregated if two stages have the exact same binding.
+TEST_F(GetBindGroupLayoutTests, StageAggregation) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::ShaderModule vsModuleNoSampler = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule vsModuleSampler = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var mySampler: sampler;
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            _ = mySampler;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModuleNoSampler = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {
+        })");
+
+    wgpu::ShaderModule fsModuleSampler = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var mySampler: sampler;
+        @stage(fragment) fn main() {
+            _ = mySampler;
+        })");
+
+    // Create BGLs with minBufferBindingSize 4 and 64.
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.sampler.type = wgpu::SamplerBindingType::Filtering;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    // Check with only the vertex shader using the sampler
+    {
+        descriptor.vertex.module = vsModuleSampler;
+        descriptor.cFragment.module = fsModuleNoSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Vertex;
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get()));
+    }
+
+    // Check with only the fragment shader using the sampler
+    {
+        descriptor.vertex.module = vsModuleNoSampler;
+        descriptor.cFragment.module = fsModuleSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Fragment;
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get()));
+    }
+
+    // Check with both shaders using the sampler
+    {
+        descriptor.vertex.module = vsModuleSampler;
+        descriptor.cFragment.module = fsModuleSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
+        EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+            pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get()));
+    }
+}
+
+// Test it is invalid to have conflicting binding types in the shaders.
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingType) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> ubo : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : vec4<f32> = ubo.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<storage, read_write> ssbo : S;
+
+        @stage(fragment) fn main() {
+            var pos : vec4<f32> = ssbo.pos;
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test it is invalid to have conflicting binding texture multisampling in the shaders.
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingTextureMultisampling) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            textureDimensions(myTexture);
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_multisampled_2d<f32>;
+
+        @stage(fragment) fn main() {
+            textureDimensions(myTexture);
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test it is invalid to have conflicting binding texture dimension in the shaders.
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingViewDimension) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            textureDimensions(myTexture);
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_3d<f32>;
+
+        @stage(fragment) fn main() {
+            textureDimensions(myTexture);
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test it is invalid to have conflicting binding texture component type in the shaders.
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingTextureComponentType) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<f32>;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            textureDimensions(myTexture);
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var myTexture : texture_2d<i32>;
+
+        @stage(fragment) fn main() {
+            textureDimensions(myTexture);
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test it is an error to query an out of range bind group layout.
+TEST_F(GetBindGroupLayoutTests, OutOfRangeIndex) {
+    ASSERT_DEVICE_ERROR(RenderPipelineFromFragmentShader(R"(
+        @stage(fragment) fn main() {
+        })")
+                            .GetBindGroupLayout(kMaxBindGroups));
+
+    ASSERT_DEVICE_ERROR(RenderPipelineFromFragmentShader(R"(
+        @stage(fragment) fn main() {
+        })")
+                            .GetBindGroupLayout(kMaxBindGroups + 1));
+}
+
+// Test that unused indices return the empty bind group layout.
+TEST_F(GetBindGroupLayoutTests, UnusedIndex) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms0 : S;
+        @group(2) @binding(0) var<uniform> uniforms2 : S;
+
+        @stage(fragment) fn main() {
+            var pos : vec4<f32> = uniforms0.pos;
+            pos = uniforms2.pos;
+        })");
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 0;
+    desc.entries = nullptr;
+
+    wgpu::BindGroupLayout emptyBindGroupLayout = device.CreateBindGroupLayout(&desc);
+
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        pipeline.GetBindGroupLayout(0).Get(), emptyBindGroupLayout.Get()));  // Used
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        pipeline.GetBindGroupLayout(1).Get(), emptyBindGroupLayout.Get()));  // Not Used.
+    EXPECT_FALSE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        pipeline.GetBindGroupLayout(2).Get(), emptyBindGroupLayout.Get()));  // Used.
+    EXPECT_TRUE(dawn::native::BindGroupLayoutBindingsEqualForTesting(
+        pipeline.GetBindGroupLayout(3).Get(), emptyBindGroupLayout.Get()));  // Not used
+}
+
+// Test that after explicitly creating a pipeline with a pipeline layout, calling
+// GetBindGroupLayout reflects the same bind group layouts.
+TEST_F(GetBindGroupLayoutTests, Reflection) {
+    // This test works assuming Dawn Native's object deduplication.
+    // Getting the same pointer to equivalent bind group layouts is an implementation detail of Dawn
+    // Native.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.buffer.type = wgpu::BufferBindingType::Uniform;
+    binding.visibility = wgpu::ShaderStage::Vertex;
+
+    wgpu::BindGroupLayoutDescriptor bglDesc = {};
+    bglDesc.entryCount = 1;
+    bglDesc.entries = &binding;
+
+    wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&bglDesc);
+
+    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+    pipelineLayoutDesc.bindGroupLayoutCount = 1;
+    pipelineLayoutDesc.bindGroupLayouts = &bindGroupLayout;
+
+    wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        struct S {
+            pos : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> uniforms : S;
+
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            var pos : vec4<f32> = uniforms.pos;
+            return vec4<f32>();
+        })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {
+        })");
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.layout = pipelineLayout;
+    pipelineDesc.vertex.module = vsModule;
+    pipelineDesc.cFragment.module = fsModule;
+    pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bindGroupLayout.Get());
+
+    {
+        wgpu::BindGroupLayoutDescriptor emptyDesc = {};
+        emptyDesc.entryCount = 0;
+        emptyDesc.entries = nullptr;
+
+        wgpu::BindGroupLayout emptyBindGroupLayout = device.CreateBindGroupLayout(&emptyDesc);
+
+        // Check that the rest of the bind group layouts reflect the empty one.
+        EXPECT_EQ(pipeline.GetBindGroupLayout(1).Get(), emptyBindGroupLayout.Get());
+        EXPECT_EQ(pipeline.GetBindGroupLayout(2).Get(), emptyBindGroupLayout.Get());
+        EXPECT_EQ(pipeline.GetBindGroupLayout(3).Get(), emptyBindGroupLayout.Get());
+    }
+}
+
+// Test that fragment output validation is for the correct entryPoint
+TEST_F(GetBindGroupLayoutTests, FromCorrectEntryPoint) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Data {
+            data : f32
+        }
+        @group(0) @binding(0) var<storage, read_write> data0 : Data;
+        @group(0) @binding(1) var<storage, read_write> data1 : Data;
+
+        @stage(compute) @workgroup_size(1) fn compute0() {
+            data0.data = 0.0;
+        }
+
+        @stage(compute) @workgroup_size(1) fn compute1() {
+            data1.data = 0.0;
+        }
+    )");
+
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.compute.module = module;
+
+    // Get each entryPoint's BGL.
+    pipelineDesc.compute.entryPoint = "compute0";
+    wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&pipelineDesc);
+    wgpu::BindGroupLayout bgl0 = pipeline0.GetBindGroupLayout(0);
+
+    pipelineDesc.compute.entryPoint = "compute1";
+    wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&pipelineDesc);
+    wgpu::BindGroupLayout bgl1 = pipeline1.GetBindGroupLayout(0);
+
+    // Create the buffer used in the bindgroups.
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 4;
+    bufferDesc.usage = wgpu::BufferUsage::Storage;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    // Success case, the BGL matches the descriptor for the bindgroup.
+    utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+    utils::MakeBindGroup(device, bgl1, {{1, buffer}});
+
+    // Error case, the BGL doesn't match the descriptor for the bindgroup.
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl0, {{1, buffer}}));
+    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl1, {{0, buffer}}));
+}
diff --git a/src/dawn/tests/unittests/validation/IndexBufferValidationTests.cpp b/src/dawn/tests/unittests/validation/IndexBufferValidationTests.cpp
new file mode 100644
index 0000000..404db88
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/IndexBufferValidationTests.cpp
@@ -0,0 +1,304 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class IndexBufferValidationTest : public ValidationTest {
+  protected:
+    wgpu::RenderPipeline MakeTestPipeline(wgpu::IndexFormat format,
+                                          wgpu::PrimitiveTopology primitiveTopology) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = primitiveTopology;
+        descriptor.primitive.stripIndexFormat = format;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+};
+
+// Test that IndexFormat::Undefined is disallowed.
+TEST_F(IndexBufferValidationTest, UndefinedIndexFormat) {
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.usage = wgpu::BufferUsage::Index;
+    bufferDesc.size = 256;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    DummyRenderPass renderPass(device);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+    pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Undefined);
+    pass.End();
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test that an invalid index format is disallowed.
+TEST_F(IndexBufferValidationTest, InvalidIndexFormat) {
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.usage = wgpu::BufferUsage::Index;
+    bufferDesc.size = 256;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    DummyRenderPass renderPass(device);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+    pass.SetIndexBuffer(buffer, static_cast<wgpu::IndexFormat>(404));
+    pass.End();
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test that for OOB validation of index buffer offset and size.
+TEST_F(IndexBufferValidationTest, IndexBufferOffsetOOBValidation) {
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.usage = wgpu::BufferUsage::Index;
+    bufferDesc.size = 256;
+    wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
+
+    DummyRenderPass renderPass(device);
+    // Control case, using the full buffer, with or without an explicit size is valid.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        // Explicit size
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 0, 256);
+        // Implicit size
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 0, wgpu::kWholeSize);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256 - 4, wgpu::kWholeSize);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 4, wgpu::kWholeSize);
+        // Implicit size of zero
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256, wgpu::kWholeSize);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Bad case, offset + size is larger than the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 4, 256);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Bad case, size is 0 but the offset is larger than the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256 + 4, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Control case, using the full buffer, with or without an explicit size is valid.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        // Explicit size
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 0, 256);
+        // Implicit size
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 0, wgpu::kWholeSize);
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256 - 4, wgpu::kWholeSize);
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 4, wgpu::kWholeSize);
+        // Implicit size of zero
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256, wgpu::kWholeSize);
+        encoder.Finish();
+    }
+
+    // Bad case, offset + size is larger than the buffer
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 4, 256);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Bad case, size is 0 but the offset is larger than the buffer
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32, 256 + 4, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that formats given when setting an index buffers must match the format specified on the
+// pipeline for strip primitive topologies.
+TEST_F(IndexBufferValidationTest, IndexBufferFormatMatchesPipelineStripFormat) {
+    wgpu::RenderPipeline pipeline32 =
+        MakeTestPipeline(wgpu::IndexFormat::Uint32, wgpu::PrimitiveTopology::TriangleStrip);
+    wgpu::RenderPipeline pipeline16 =
+        MakeTestPipeline(wgpu::IndexFormat::Uint16, wgpu::PrimitiveTopology::LineStrip);
+    wgpu::RenderPipeline pipelineUndef =
+        MakeTestPipeline(wgpu::IndexFormat::Undefined, wgpu::PrimitiveTopology::LineStrip);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Expected to fail because pipeline and index formats don't match.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        encoder.SetPipeline(pipeline32);
+        encoder.DrawIndexed(3);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        encoder.SetPipeline(pipeline16);
+        encoder.DrawIndexed(3);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Expected to succeed because pipeline and index formats match.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        encoder.SetPipeline(pipeline16);
+        encoder.DrawIndexed(3);
+        encoder.Finish();
+    }
+
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        encoder.SetPipeline(pipeline32);
+        encoder.DrawIndexed(3);
+        encoder.Finish();
+    }
+
+    // Expected to fail because pipeline doesn't specify an index format.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16);
+        encoder.SetPipeline(pipelineUndef);
+        encoder.DrawIndexed(3);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        encoder.SetPipeline(pipelineUndef);
+        encoder.DrawIndexed(3);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Expected to succeed because non-indexed draw calls don't require a pipeline index format.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetPipeline(pipelineUndef);
+        encoder.Draw(3);
+        encoder.Finish();
+    }
+}
+
+// Check that the index buffer must have the Index usage.
+TEST_F(IndexBufferValidationTest, InvalidUsage) {
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+    wgpu::Buffer copyBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::CopySrc, {0, 1, 2});
+
+    DummyRenderPass renderPass(device);
+    // Control case: using the index buffer is valid.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.End();
+        encoder.Finish();
+    }
+    // Error case: using the copy buffer is an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(copyBuffer, wgpu::IndexFormat::Uint32);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    // Control case: using the index buffer is valid.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        encoder.Finish();
+    }
+    // Error case: using the copy buffer is an error.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetIndexBuffer(copyBuffer, wgpu::IndexFormat::Uint32);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Check the alignment constraint on the index buffer offset.
+TEST_F(IndexBufferValidationTest, OffsetAlignment) {
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+
+    DummyRenderPass renderPass(device);
+    // Control cases: index buffer offset is a multiple of the index format size
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 0);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 4);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, 0);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, 2);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case: index buffer offset isn't a multiple of 4 for IndexFormat::Uint32
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32, 2);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+    // Error case: index buffer offset isn't a multiple of 2 for IndexFormat::Uint16
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, 1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/InternalUsageValidationTests.cpp b/src/dawn/tests/unittests/validation/InternalUsageValidationTests.cpp
new file mode 100644
index 0000000..623697d
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/InternalUsageValidationTests.cpp
@@ -0,0 +1,298 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class InternalUsageValidationDisabledTest : public ValidationTest {};
+
+// Test that using DawnTextureInternalUsageDescriptor is an error if DawnInternalUsages is not
+// enabled
+TEST_F(InternalUsageValidationDisabledTest, TextureDescriptorRequiresFeature) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {1, 1};
+    textureDesc.usage = wgpu::TextureUsage::CopySrc;
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Control case: Normal texture creation works
+    device.CreateTexture(&textureDesc);
+
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    textureDesc.nextInChain = &internalDesc;
+
+    // Error with chained feature struct.
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+
+    // Also does not work with various internal usages.
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+
+    internalDesc.internalUsage = wgpu::TextureUsage::CopyDst;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+}
+
+// Test that using DawnEncoderInternalUsageDescriptor is an error if DawnInternalUsages is not
+// enabled
+TEST_F(InternalUsageValidationDisabledTest, CommandEncoderDescriptorRequiresFeature) {
+    wgpu::CommandEncoderDescriptor encoderDesc = {};
+
+    // Control case: Normal encoder creation works
+    device.CreateCommandEncoder(&encoderDesc);
+
+    wgpu::DawnEncoderInternalUsageDescriptor internalDesc = {};
+    encoderDesc.nextInChain = &internalDesc;
+
+    // Error with chained DawnEncoderInternalUsageDescriptor.
+    ASSERT_DEVICE_ERROR(wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&encoderDesc));
+
+    // Check that the encoder records that it is invalid, and not any other errors.
+    encoder.InjectValidationError("injected error");
+    ASSERT_DEVICE_ERROR(encoder.Finish(),
+                        testing::HasSubstr("[Invalid CommandEncoder] is invalid"));
+}
+
+class TextureInternalUsageValidationTest : public ValidationTest {
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnInternalUsages};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that internal usages can be passed in a chained descriptor.
+TEST_F(TextureInternalUsageValidationTest, Basic) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {1, 1};
+    textureDesc.usage = wgpu::TextureUsage::CopySrc;
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    textureDesc.nextInChain = &internalDesc;
+
+    // Internal usage: none
+    device.CreateTexture(&textureDesc);
+
+    // Internal usage is the same as the base usage.
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    device.CreateTexture(&textureDesc);
+
+    // Internal usage adds to the base usage.
+    internalDesc.internalUsage = wgpu::TextureUsage::CopyDst;
+    device.CreateTexture(&textureDesc);
+}
+
+// Test that internal usages takes part in other validation that
+// depends on the usage.
+TEST_F(TextureInternalUsageValidationTest, UsageValidation) {
+    {
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.size = {1, 1};
+        textureDesc.usage = wgpu::TextureUsage::CopySrc;
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+        textureDesc.nextInChain = &internalDesc;
+
+        // Internal usage adds an invalid usage.
+        internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(-1);
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+    }
+
+    {
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.size = {1, 1};
+        textureDesc.usage = wgpu::TextureUsage::CopySrc;
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        textureDesc.sampleCount = 4;
+
+        // Control case: multisampled texture
+        device.CreateTexture(&textureDesc);
+
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+        textureDesc.nextInChain = &internalDesc;
+
+        // OK: internal usage adds nothing.
+        device.CreateTexture(&textureDesc);
+
+        // Internal usage adds storage usage which is invalid
+        // with multisampling.
+        internalDesc.internalUsage = wgpu::TextureUsage::StorageBinding;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+    }
+}
+
+// Test that internal usage does not add to the validated usage
+// for command encoding
+// This test also test the internal copy
+TEST_F(TextureInternalUsageValidationTest, DeprecatedCommandValidation) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {1, 1};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    textureDesc.usage = wgpu::TextureUsage::CopyDst;
+    wgpu::Texture dst = device.CreateTexture(&textureDesc);
+
+    textureDesc.usage = wgpu::TextureUsage::CopySrc;
+    wgpu::Texture src = device.CreateTexture(&textureDesc);
+
+    textureDesc.usage = wgpu::TextureUsage::None;
+
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    textureDesc.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+
+    wgpu::Texture srcInternal = device.CreateTexture(&textureDesc);
+
+    // Control: src -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture = utils::CreateImageCopyTexture(src, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+
+    // Invalid: src internal -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcInternal, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Control with internal copy: src -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture = utils::CreateImageCopyTexture(src, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTextureInternal(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+
+    // Valid with internal copy: src internal -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcInternal, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTextureInternal(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+}
+
+TEST_F(TextureInternalUsageValidationTest, CommandValidation) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {1, 1};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    textureDesc.usage = wgpu::TextureUsage::CopyDst;
+    wgpu::Texture dst = device.CreateTexture(&textureDesc);
+
+    textureDesc.usage = wgpu::TextureUsage::CopySrc;
+    wgpu::Texture src = device.CreateTexture(&textureDesc);
+
+    textureDesc.usage = wgpu::TextureUsage::None;
+
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    textureDesc.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+
+    wgpu::Texture srcInternal = device.CreateTexture(&textureDesc);
+
+    // Control: src -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture = utils::CreateImageCopyTexture(src, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+
+    // Invalid: src internal -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcInternal, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Invalid: src internal -> dst, with internal descriptor, but useInternalUsages set to false.
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcInternal, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoderDescriptor encoderDesc = {};
+        wgpu::DawnEncoderInternalUsageDescriptor internalDesc = {};
+        internalDesc.useInternalUsages = false;
+        encoderDesc.nextInChain = &internalDesc;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&encoderDesc);
+
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Control with internal copy: src -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture = utils::CreateImageCopyTexture(src, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoderDescriptor encoderDesc = {};
+        wgpu::DawnEncoderInternalUsageDescriptor internalDesc = {};
+        internalDesc.useInternalUsages = true;
+        encoderDesc.nextInChain = &internalDesc;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&encoderDesc);
+
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+
+    // Valid with internal copy: src internal -> dst
+    {
+        wgpu::ImageCopyTexture srcImageCopyTexture =
+            utils::CreateImageCopyTexture(srcInternal, 0, {0, 0});
+        wgpu::ImageCopyTexture dstImageCopyTexture = utils::CreateImageCopyTexture(dst, 0, {0, 0});
+        wgpu::Extent3D extent3D = {1, 1};
+
+        wgpu::CommandEncoderDescriptor encoderDesc = {};
+        wgpu::DawnEncoderInternalUsageDescriptor internalDesc = {};
+        internalDesc.useInternalUsages = true;
+        encoderDesc.nextInChain = &internalDesc;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&encoderDesc);
+
+        encoder.CopyTextureToTexture(&srcImageCopyTexture, &dstImageCopyTexture, &extent3D);
+        encoder.Finish();
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/LabelTests.cpp b/src/dawn/tests/unittests/validation/LabelTests.cpp
new file mode 100644
index 0000000..5fe7a90
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/LabelTests.cpp
@@ -0,0 +1,613 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class LabelTest : public ValidationTest {};
+
+TEST_F(LabelTest, BindGroup) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(device, {});
+
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = 0;
+    descriptor.entries = nullptr;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::BindGroup bindGroup = device.CreateBindGroup(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroup.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::BindGroup bindGroup = device.CreateBindGroup(&descriptor);
+        bindGroup.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroup.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::BindGroup bindGroup = device.CreateBindGroup(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroup.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, BindGroupLayout) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+
+    wgpu::BindGroupLayoutDescriptor descriptor = {};
+    descriptor.entryCount = 0;
+    descriptor.entries = nullptr;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroupLayout.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&descriptor);
+        bindGroupLayout.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroupLayout.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(bindGroupLayout.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, Buffer) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(buffer.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+        buffer.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(buffer.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(buffer.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, CommandBuffer) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::CommandBufferDescriptor descriptor;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(commandBuffer.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish(&descriptor);
+        commandBuffer.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(commandBuffer.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::CommandBuffer commandBuffer = encoder.Finish(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(commandBuffer.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, CommandEncoder) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::CommandEncoderDescriptor descriptor;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&descriptor);
+        encoder.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, ComputePassEncoder) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+    wgpu::ComputePassDescriptor descriptor;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::ComputePassEncoder encoder = commandEncoder.BeginComputePass(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+        encoder.End();
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::ComputePassEncoder encoder = commandEncoder.BeginComputePass(&descriptor);
+        encoder.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+        encoder.End();
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::ComputePassEncoder encoder = commandEncoder.BeginComputePass(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+        encoder.End();
+    }
+}
+
+TEST_F(LabelTest, ExternalTexture) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size.width = 1;
+    textureDescriptor.size.height = 1;
+    textureDescriptor.size.depthOrArrayLayers = 1;
+    textureDescriptor.mipLevelCount = 1;
+    textureDescriptor.sampleCount = 1;
+    textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.usage =
+        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor descriptor;
+    descriptor.plane0 = texture.CreateView();
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(externalTexture.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&descriptor);
+        externalTexture.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(externalTexture.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(externalTexture.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, PipelineLayout) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(device, {});
+
+    wgpu::PipelineLayoutDescriptor descriptor;
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &layout;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipelineLayout.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor);
+        pipelineLayout.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipelineLayout.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipelineLayout.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, QuerySet) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::QuerySetDescriptor descriptor;
+    descriptor.type = wgpu::QueryType::Occlusion;
+    descriptor.count = 1;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::QuerySet querySet = device.CreateQuerySet(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(querySet.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::QuerySet querySet = device.CreateQuerySet(&descriptor);
+        querySet.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(querySet.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::QuerySet querySet = device.CreateQuerySet(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(querySet.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, RenderBundleEncoder) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+
+    utils::ComboRenderBundleEncoderDescriptor descriptor = {};
+    descriptor.colorFormatsCount = 1;
+    descriptor.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&descriptor);
+        encoder.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, RenderPassEncoder) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+
+    wgpu::TextureDescriptor textureDescriptor;
+    textureDescriptor.size = {1, 1, 1};
+    textureDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDescriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::RenderAttachment;
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    utils::ComboRenderPassDescriptor descriptor({texture.CreateView()});
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::RenderPassEncoder encoder = commandEncoder.BeginRenderPass(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+        encoder.End();
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::RenderPassEncoder encoder = commandEncoder.BeginRenderPass(&descriptor);
+        encoder.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+        encoder.End();
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::RenderPassEncoder encoder = commandEncoder.BeginRenderPass(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(encoder.Get());
+        ASSERT_EQ(label, readbackLabel);
+        encoder.End();
+    }
+}
+
+TEST_F(LabelTest, Sampler) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::SamplerDescriptor descriptor;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::Sampler sampler = device.CreateSampler(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(sampler.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::Sampler sampler = device.CreateSampler(&descriptor);
+        sampler.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(sampler.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::Sampler sampler = device.CreateSampler(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(sampler.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, Texture) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size.width = 1;
+    descriptor.size.height = 1;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.mipLevelCount = 1;
+    descriptor.sampleCount = 1;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.format = wgpu::TextureFormat::RGBA8Uint;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(texture.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        texture.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(texture.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(texture.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, TextureView) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size.width = 1;
+    descriptor.size.height = 1;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.mipLevelCount = 1;
+    descriptor.sampleCount = 1;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.format = wgpu::TextureFormat::RGBA8Uint;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::TextureView textureView = texture.CreateView();
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(textureView.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::TextureView textureView = texture.CreateView();
+        textureView.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(textureView.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        wgpu::TextureViewDescriptor viewDescriptor;
+        viewDescriptor.label = label.c_str();
+        wgpu::TextureView textureView = texture.CreateView(&viewDescriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(textureView.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, RenderPipeline) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        pipeline.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, ComputePipeline) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+
+    wgpu::ShaderModule computeModule = utils::CreateShaderModule(device, R"(
+    @stage(compute) @workgroup_size(1) fn main() {
+    })");
+    wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, nullptr);
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.layout = pl;
+    descriptor.compute.module = computeModule;
+    descriptor.compute.entryPoint = "main";
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
+        pipeline.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(pipeline.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
+
+TEST_F(LabelTest, ShaderModule) {
+    DAWN_SKIP_TEST_IF(UsesWire());
+    std::string label = "test";
+
+    const char* source = R"(
+    @stage(compute) @workgroup_size(1) fn main() {
+    })";
+
+    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+    wgslDesc.source = source;
+    wgpu::ShaderModuleDescriptor descriptor;
+    descriptor.nextInChain = &wgslDesc;
+
+    // The label should be empty if one was not set.
+    {
+        wgpu::ShaderModule shaderModule = device.CreateShaderModule(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(shaderModule.Get());
+        ASSERT_TRUE(readbackLabel.empty());
+    }
+
+    // Test setting a label through API
+    {
+        wgpu::ShaderModule shaderModule = device.CreateShaderModule(&descriptor);
+        shaderModule.SetLabel(label.c_str());
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(shaderModule.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+
+    // Test setting a label through the descriptor.
+    {
+        descriptor.label = label.c_str();
+        wgpu::ShaderModule shaderModule = device.CreateShaderModule(&descriptor);
+        std::string readbackLabel = dawn::native::GetObjectLabelForTesting(shaderModule.Get());
+        ASSERT_EQ(label, readbackLabel);
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
new file mode 100644
index 0000000..cb7044e
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
@@ -0,0 +1,591 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    // Helper for describing bindings throughout the tests
+    struct BindingDescriptor {
+        uint32_t group;
+        uint32_t binding;
+        std::string decl;
+        std::string ref_type;
+        std::string ref_mem;
+        uint64_t size;
+        wgpu::BufferBindingType type = wgpu::BufferBindingType::Storage;
+        wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
+    };
+
+    // Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
+    // each element one at a time This is useful to verify some behavior happens if any element is
+    // offset from original
+    template <typename F>
+    void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
+        std::vector<uint64_t> modifiedSizes = originalSizes;
+        for (size_t i = 0; i < originalSizes.size(); ++i) {
+            if (offset < 0) {
+                ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
+            }
+            // Run the function with an element offset, and restore element afterwards
+            modifiedSizes[i] += offset;
+            func(modifiedSizes);
+            modifiedSizes[i] -= offset;
+        }
+    }
+
+    // Runs |func| with |correctSizes|, and an expectation of success and failure
+    template <typename F>
+    void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
+        // To validate size:
+        // Check invalid with bind group with one less
+        // Check valid with bind group with correct size
+
+        // Make sure (every size - 1) produces an error
+        WithEachSizeOffsetBy(-1, correctSizes,
+                             [&](const std::vector<uint64_t>& sizes) { func(sizes, false); });
+
+        // Make sure correct sizes work
+        func(correctSizes, true);
+
+        // Make sure (every size + 1) works
+        WithEachSizeOffsetBy(1, correctSizes,
+                             [&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
+    }
+
+    // Creates a bind group with given bindings for shader text
+    std::string GenerateBindingString(const std::vector<BindingDescriptor>& bindings) {
+        std::ostringstream ostream;
+        size_t index = 0;
+        for (const BindingDescriptor& b : bindings) {
+            ostream << "struct S" << index << " { " << b.decl << "}\n";
+            ostream << "@group(" << b.group << ") @binding(" << b.binding << ") ";
+            switch (b.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    ostream << "var<uniform> b" << index << " : S" << index << ";\n";
+                    break;
+                case wgpu::BufferBindingType::Storage:
+                    ostream << "var<storage, read_write> b" << index << " : S" << index << ";\n";
+                    break;
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    ostream << "var<storage, read> b" << index << " : S" << index << ";\n";
+                    break;
+                default:
+                    UNREACHABLE();
+            }
+            index++;
+        }
+        return ostream.str();
+    }
+
+    std::string GenerateReferenceString(const std::vector<BindingDescriptor>& bindings,
+                                        wgpu::ShaderStage stage) {
+        std::ostringstream ostream;
+        size_t index = 0;
+        for (const BindingDescriptor& b : bindings) {
+            if (b.visibility & stage) {
+                if (!b.ref_type.empty() && !b.ref_mem.empty()) {
+                    ostream << "var r" << index << " : " << b.ref_type << " = b" << index << "."
+                            << b.ref_mem << ";\n";
+                }
+            }
+            index++;
+        }
+        return ostream.str();
+    }
+
+    // Used for adding custom types available throughout the tests
+    static const std::string kStructs = "struct ThreeFloats {f1 : f32, f2 : f32, f3 : f32,}\n";
+
+    // Creates a compute shader with given bindings
+    std::string CreateComputeShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+        return kStructs + GenerateBindingString(bindings) +
+               "@stage(compute) @workgroup_size(1,1,1) fn main() {\n" +
+               GenerateReferenceString(bindings, wgpu::ShaderStage::Compute) + "}";
+    }
+
+    // Creates a vertex shader with given bindings
+    std::string CreateVertexShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+        return kStructs + GenerateBindingString(bindings) +
+               "@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" +
+               GenerateReferenceString(bindings, wgpu::ShaderStage::Vertex) +
+               "\n   return vec4<f32>(); " + "}";
+    }
+
+    // Creates a fragment shader with given bindings
+    std::string CreateFragmentShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+        return kStructs + GenerateBindingString(bindings) + "@stage(fragment) fn main() {\n" +
+               GenerateReferenceString(bindings, wgpu::ShaderStage::Fragment) + "}";
+    }
+
+    // Concatenates vectors containing BindingDescriptor
+    std::vector<BindingDescriptor> CombineBindings(
+        std::initializer_list<std::vector<BindingDescriptor>> bindings) {
+        std::vector<BindingDescriptor> result;
+        for (const std::vector<BindingDescriptor>& b : bindings) {
+            result.insert(result.end(), b.begin(), b.end());
+        }
+        return result;
+    }
+}  // namespace
+
+class MinBufferSizeTestsBase : public ValidationTest {
+  public:
+    void SetUp() override {
+        ValidationTest::SetUp();
+    }
+
+    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = bufferSize;
+        bufferDescriptor.usage = usage;
+
+        return device.CreateBuffer(&bufferDescriptor);
+    }
+
+    // Creates compute pipeline given a layout and shader
+    wgpu::ComputePipeline CreateComputePipeline(const std::vector<wgpu::BindGroupLayout>& layouts,
+                                                const std::string& shader) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, shader.c_str());
+
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.layout = nullptr;
+        if (!layouts.empty()) {
+            wgpu::PipelineLayoutDescriptor descriptor;
+            descriptor.bindGroupLayoutCount = layouts.size();
+            descriptor.bindGroupLayouts = layouts.data();
+            csDesc.layout = device.CreatePipelineLayout(&descriptor);
+        }
+        csDesc.compute.module = csModule;
+        csDesc.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&csDesc);
+    }
+
+    // Creates compute pipeline with default layout
+    wgpu::ComputePipeline CreateComputePipelineWithDefaultLayout(const std::string& shader) {
+        return CreateComputePipeline({}, shader);
+    }
+
+    // Creates render pipeline give na layout and shaders
+    wgpu::RenderPipeline CreateRenderPipeline(const std::vector<wgpu::BindGroupLayout>& layouts,
+                                              const std::string& vertexShader,
+                                              const std::string& fragShader) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexShader.c_str());
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragShader.c_str());
+
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = nullptr;
+        if (!layouts.empty()) {
+            wgpu::PipelineLayoutDescriptor descriptor;
+            descriptor.bindGroupLayoutCount = layouts.size();
+            descriptor.bindGroupLayouts = layouts.data();
+            pipelineDescriptor.layout = device.CreatePipelineLayout(&descriptor);
+        }
+
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    // Creates render pipeline with default layout
+    wgpu::RenderPipeline CreateRenderPipelineWithDefaultLayout(const std::string& vertexShader,
+                                                               const std::string& fragShader) {
+        return CreateRenderPipeline({}, vertexShader, fragShader);
+    }
+
+    // Creates bind group layout with given minimum sizes for each binding
+    wgpu::BindGroupLayout CreateBindGroupLayout(const std::vector<BindingDescriptor>& bindings,
+                                                const std::vector<uint64_t>& minimumSizes) {
+        ASSERT(bindings.size() == minimumSizes.size());
+        std::vector<wgpu::BindGroupLayoutEntry> entries;
+
+        for (size_t i = 0; i < bindings.size(); ++i) {
+            const BindingDescriptor& b = bindings[i];
+            wgpu::BindGroupLayoutEntry e = {};
+            e.binding = b.binding;
+            e.visibility = b.visibility;
+            e.buffer.type = b.type;
+            e.buffer.minBindingSize = minimumSizes[i];
+            entries.push_back(e);
+        }
+
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = static_cast<uint32_t>(entries.size());
+        descriptor.entries = entries.data();
+        return device.CreateBindGroupLayout(&descriptor);
+    }
+
+    // Extract the first bind group from a compute shader
+    wgpu::BindGroupLayout GetBGLFromComputeShader(const std::string& shader, uint32_t index) {
+        wgpu::ComputePipeline pipeline = CreateComputePipelineWithDefaultLayout(shader);
+        return pipeline.GetBindGroupLayout(index);
+    }
+
+    // Extract the first bind group from a render pass
+    wgpu::BindGroupLayout GetBGLFromRenderShaders(const std::string& vertexShader,
+                                                  const std::string& fragShader,
+                                                  uint32_t index) {
+        wgpu::RenderPipeline pipeline =
+            CreateRenderPipelineWithDefaultLayout(vertexShader, fragShader);
+        return pipeline.GetBindGroupLayout(index);
+    }
+
+    // Create a bind group with given binding sizes for each entry (backed by the same buffer)
+    wgpu::BindGroup CreateBindGroup(wgpu::BindGroupLayout layout,
+                                    const std::vector<BindingDescriptor>& bindings,
+                                    const std::vector<uint64_t>& bindingSizes) {
+        ASSERT(bindings.size() == bindingSizes.size());
+        wgpu::Buffer buffer =
+            CreateBuffer(1024, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
+
+        std::vector<wgpu::BindGroupEntry> entries;
+        entries.reserve(bindingSizes.size());
+
+        for (uint32_t i = 0; i < bindingSizes.size(); ++i) {
+            wgpu::BindGroupEntry entry = {};
+            entry.binding = bindings[i].binding;
+            entry.buffer = buffer;
+            ASSERT(bindingSizes[i] < 1024);
+            entry.size = bindingSizes[i];
+            entries.push_back(entry);
+        }
+
+        wgpu::BindGroupDescriptor descriptor;
+        descriptor.layout = layout;
+        descriptor.entryCount = entries.size();
+        descriptor.entries = entries.data();
+
+        return device.CreateBindGroup(&descriptor);
+    }
+
+    // Runs a single dispatch with given pipeline and bind group (to test lazy validation during
+    // dispatch)
+    void TestDispatch(const wgpu::ComputePipeline& computePipeline,
+                      const std::vector<wgpu::BindGroup>& bindGroups,
+                      bool expectation) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
+        computePassEncoder.SetPipeline(computePipeline);
+        for (size_t i = 0; i < bindGroups.size(); ++i) {
+            computePassEncoder.SetBindGroup(i, bindGroups[i]);
+        }
+        computePassEncoder.Dispatch(1);
+        computePassEncoder.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+        } else {
+            commandEncoder.Finish();
+        }
+    }
+
+    // Runs a single draw with given pipeline and bind group (to test lazy validation during draw)
+    void TestDraw(const wgpu::RenderPipeline& renderPipeline,
+                  const std::vector<wgpu::BindGroup>& bindGroups,
+                  bool expectation) {
+        DummyRenderPass renderPass(device);
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
+        renderPassEncoder.SetPipeline(renderPipeline);
+        for (size_t i = 0; i < bindGroups.size(); ++i) {
+            renderPassEncoder.SetBindGroup(i, bindGroups[i]);
+        }
+        renderPassEncoder.Draw(3);
+        renderPassEncoder.End();
+        if (!expectation) {
+            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+        } else {
+            commandEncoder.Finish();
+        }
+    }
+};
+
+// The check between BGL and pipeline at pipeline creation time
+class MinBufferSizePipelineCreationTests : public MinBufferSizeTestsBase {};
+
+// Pipeline can be created if minimum buffer size in layout is specified as 0
+TEST_F(MinBufferSizePipelineCreationTests, ZeroMinBufferSize) {
+    std::vector<BindingDescriptor> bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                               {0, 1, "c : f32,", "f32", "c", 4}};
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0});
+    CreateRenderPipeline({layout}, vertexShader, fragShader);
+    CreateComputePipeline({layout}, computeShader);
+}
+
+// Fail if layout given has non-zero minimum sizes smaller than shader requirements
+TEST_F(MinBufferSizePipelineCreationTests, LayoutSizesTooSmall) {
+    std::vector<BindingDescriptor> bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                               {0, 1, "c : f32,", "f32", "c", 4}};
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, sizes);
+        if (expectation) {
+            CreateRenderPipeline({layout}, vertexShader, fragShader);
+            CreateComputePipeline({layout}, computeShader);
+        } else {
+            ASSERT_DEVICE_ERROR(CreateRenderPipeline({layout}, vertexShader, fragShader));
+            ASSERT_DEVICE_ERROR(CreateComputePipeline({layout}, computeShader));
+        }
+    });
+}
+
+// Fail if layout given has non-zero minimum sizes smaller than shader requirements
+TEST_F(MinBufferSizePipelineCreationTests, LayoutSizesTooSmallMultipleGroups) {
+    std::vector<BindingDescriptor> bg0Bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                                  {0, 1, "c : f32,", "f32", "c", 4}};
+    std::vector<BindingDescriptor> bg1Bindings = {
+        {1, 0, "d : f32, e : f32, f : f32,", "f32", "e", 12},
+        {1, 1, "g : mat2x2<f32>,", "mat2x2<f32>", "g", 16}};
+    std::vector<BindingDescriptor> bindings = CombineBindings({bg0Bindings, bg1Bindings});
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    CheckSizeBounds({8, 4, 12, 16}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        wgpu::BindGroupLayout layout0 = CreateBindGroupLayout(bg0Bindings, {sizes[0], sizes[1]});
+        wgpu::BindGroupLayout layout1 = CreateBindGroupLayout(bg1Bindings, {sizes[2], sizes[3]});
+        if (expectation) {
+            CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader);
+            CreateComputePipeline({layout0, layout1}, computeShader);
+        } else {
+            ASSERT_DEVICE_ERROR(CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader));
+            ASSERT_DEVICE_ERROR(CreateComputePipeline({layout0, layout1}, computeShader));
+        }
+    });
+}
+
+// The check between the BGL and the bindings at bindgroup creation time
+class MinBufferSizeBindGroupCreationTests : public MinBufferSizeTestsBase {};
+
+// Fail if a binding is smaller than minimum buffer size
+TEST_F(MinBufferSizeBindGroupCreationTests, BindingTooSmall) {
+    std::vector<BindingDescriptor> bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                               {0, 1, "c : f32,", "f32", "c", 4}};
+    wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {8, 4});
+
+    CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        if (expectation) {
+            CreateBindGroup(layout, bindings, sizes);
+        } else {
+            ASSERT_DEVICE_ERROR(CreateBindGroup(layout, bindings, sizes));
+        }
+    });
+}
+
+// Check two layouts with different minimum size are unequal
+TEST_F(MinBufferSizeBindGroupCreationTests, LayoutEquality) {
+    // Returning the same pointer is an implementation detail of Dawn Native.
+    // It is not the same semantic with the Wire.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    auto MakeLayout = [&](uint64_t size) {
+        return utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, false, size}});
+    };
+
+    EXPECT_EQ(MakeLayout(0).Get(), MakeLayout(0).Get());
+    EXPECT_NE(MakeLayout(0).Get(), MakeLayout(4).Get());
+}
+
+// The check between the bindgroup binding sizes and the required pipeline sizes at draw time
+class MinBufferSizeDrawTimeValidationTests : public MinBufferSizeTestsBase {};
+
+// Fail if binding sizes are too small at draw time
+TEST_F(MinBufferSizeDrawTimeValidationTests, ZeroMinSizeAndTooSmallBinding) {
+    std::vector<BindingDescriptor> bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                               {0, 1, "c : f32,", "f32", "c", 4}};
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0});
+
+    wgpu::ComputePipeline computePipeline = CreateComputePipeline({layout}, computeShader);
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({layout}, vertexShader, fragShader);
+
+    CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        wgpu::BindGroup bindGroup = CreateBindGroup(layout, bindings, sizes);
+        TestDispatch(computePipeline, {bindGroup}, expectation);
+        TestDraw(renderPipeline, {bindGroup}, expectation);
+    });
+}
+
+// Draw time validation works for non-contiguous bindings
+TEST_F(MinBufferSizeDrawTimeValidationTests, UnorderedBindings) {
+    std::vector<BindingDescriptor> bindings = {
+        {0, 2, "a : f32, b : f32,", "f32", "a", 8},
+        {0, 0, "c : f32,", "f32", "c", 4},
+        {0, 4, "d : f32, e : f32, f : f32,", "f32", "e", 12}};
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0, 0});
+
+    wgpu::ComputePipeline computePipeline = CreateComputePipeline({layout}, computeShader);
+    wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({layout}, vertexShader, fragShader);
+
+    CheckSizeBounds({8, 4, 12}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        wgpu::BindGroup bindGroup = CreateBindGroup(layout, bindings, sizes);
+        TestDispatch(computePipeline, {bindGroup}, expectation);
+        TestDraw(renderPipeline, {bindGroup}, expectation);
+    });
+}
+
+// Draw time validation works for multiple bind groups
+TEST_F(MinBufferSizeDrawTimeValidationTests, MultipleGroups) {
+    std::vector<BindingDescriptor> bg0Bindings = {{0, 0, "a : f32, b : f32,", "f32", "a", 8},
+                                                  {0, 1, "c : f32,", "f32", "c", 4}};
+    std::vector<BindingDescriptor> bg1Bindings = {
+        {1, 0, "d : f32, e : f32, f : f32,", "f32", "e", 12},
+        {1, 1, "g : mat2x2<f32>,", "mat2x2<f32>", "g", 16}};
+    std::vector<BindingDescriptor> bindings = CombineBindings({bg0Bindings, bg1Bindings});
+
+    std::string computeShader = CreateComputeShaderWithBindings(bindings);
+    std::string vertexShader = CreateVertexShaderWithBindings({});
+    std::string fragShader = CreateFragmentShaderWithBindings(bindings);
+
+    wgpu::BindGroupLayout layout0 = CreateBindGroupLayout(bg0Bindings, {0, 0});
+    wgpu::BindGroupLayout layout1 = CreateBindGroupLayout(bg1Bindings, {0, 0});
+
+    wgpu::ComputePipeline computePipeline =
+        CreateComputePipeline({layout0, layout1}, computeShader);
+    wgpu::RenderPipeline renderPipeline =
+        CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader);
+
+    CheckSizeBounds({8, 4, 12, 16}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+        wgpu::BindGroup bindGroup0 = CreateBindGroup(layout0, bg0Bindings, {sizes[0], sizes[1]});
+        wgpu::BindGroup bindGroup1 = CreateBindGroup(layout0, bg0Bindings, {sizes[2], sizes[3]});
+        TestDispatch(computePipeline, {bindGroup0, bindGroup1}, expectation);
+        TestDraw(renderPipeline, {bindGroup0, bindGroup1}, expectation);
+    });
+}
+
+// The correctness of minimum buffer size for the defaulted layout for a pipeline
+class MinBufferSizeDefaultLayoutTests : public MinBufferSizeTestsBase {
+  public:
+    // Checks BGL |layout| has minimum buffer sizes equal to sizes in |bindings|
+    void CheckLayoutBindingSizeValidation(const wgpu::BindGroupLayout& layout,
+                                          const std::vector<BindingDescriptor>& bindings) {
+        std::vector<uint64_t> correctSizes;
+        correctSizes.reserve(bindings.size());
+        for (const BindingDescriptor& b : bindings) {
+            correctSizes.push_back(b.size);
+        }
+
+        CheckSizeBounds(correctSizes, [&](const std::vector<uint64_t>& sizes, bool expectation) {
+            if (expectation) {
+                CreateBindGroup(layout, bindings, sizes);
+            } else {
+                ASSERT_DEVICE_ERROR(CreateBindGroup(layout, bindings, sizes));
+            }
+        });
+    }
+
+    // Constructs shaders with given layout type and bindings, checking defaulted sizes match sizes
+    // in |bindings|
+    void CheckShaderBindingSizeReflection(
+        std::initializer_list<std::vector<BindingDescriptor>> bindings) {
+        std::vector<BindingDescriptor> combinedBindings = CombineBindings(bindings);
+        std::string computeShader = CreateComputeShaderWithBindings(combinedBindings);
+        std::string vertexShader = CreateVertexShaderWithBindings({});
+        std::string fragShader = CreateFragmentShaderWithBindings(combinedBindings);
+
+        size_t i = 0;
+        for (const std::vector<BindingDescriptor>& b : bindings) {
+            wgpu::BindGroupLayout computeLayout = GetBGLFromComputeShader(computeShader, i);
+            wgpu::BindGroupLayout renderLayout =
+                GetBGLFromRenderShaders(vertexShader, fragShader, i);
+
+            CheckLayoutBindingSizeValidation(computeLayout, b);
+            CheckLayoutBindingSizeValidation(renderLayout, b);
+            ++i;
+        }
+    }
+};
+
+// Test the minimum size computations for various WGSL types.
+TEST_F(MinBufferSizeDefaultLayoutTests, DefaultLayoutVariousWGSLTypes) {
+    CheckShaderBindingSizeReflection({{{0, 0, "a : f32,", "f32", "a", 4},
+                                       {0, 1, "b : array<f32>,", "f32", "b[0]", 4},
+                                       {0, 2, "c : mat2x2<f32>,", "mat2x2<f32>", "c", 16}}});
+    CheckShaderBindingSizeReflection({{{0, 3, "d : u32; e : array<f32>,", "u32", "d", 8},
+                                       {0, 4, "f : ThreeFloats,", "f32", "f.f1", 12},
+                                       {0, 5, "g : array<ThreeFloats>,", "f32", "g[0].f1", 12}}});
+}
+
+// Test the minimum size computations for various buffer binding types.
+TEST_F(MinBufferSizeDefaultLayoutTests, DefaultLayoutVariousBindingTypes) {
+    CheckShaderBindingSizeReflection(
+        {{{0, 0, "a : f32,", "f32", "a", 4, wgpu::BufferBindingType::Uniform},
+          {0, 1, "a : f32, b : f32,", "f32", "a", 8, wgpu::BufferBindingType::Storage},
+          {0, 2, "a : f32, b : f32, c: f32,", "f32", "a", 12,
+           wgpu::BufferBindingType::ReadOnlyStorage}}});
+}
+
+// Test the minimum size computations works with multiple bind groups.
+TEST_F(MinBufferSizeDefaultLayoutTests, MultipleBindGroups) {
+    CheckShaderBindingSizeReflection(
+        {{{0, 0, "a : f32,", "f32", "a", 4, wgpu::BufferBindingType::Uniform}},
+         {{1, 0, "a : f32, b : f32,", "f32", "a", 8, wgpu::BufferBindingType::Storage}},
+         {{2, 0, "a : f32, b : f32, c : f32,", "f32", "a", 12,
+           wgpu::BufferBindingType::ReadOnlyStorage}}});
+}
+
+// Test the minimum size computations with manual size/align attributes.
+TEST_F(MinBufferSizeDefaultLayoutTests, NonDefaultLayout) {
+    CheckShaderBindingSizeReflection(
+        {{{0, 0, "@size(256) a : u32, b : u32,", "u32", "a", 260},
+          {0, 1, "c : u32, @align(16) d : u32,", "u32", "c", 20},
+          {0, 2, "d : array<array<u32, 10>, 3>,", "u32", "d[0][0]", 120},
+          {0, 3, "e : array<array<u32, 10>>,", "u32", "e[0][0]", 40}}});
+}
+
+// Minimum size should be the max requirement of both vertex and fragment stages.
+TEST_F(MinBufferSizeDefaultLayoutTests, RenderPassConsidersBothStages) {
+    std::string vertexShader = CreateVertexShaderWithBindings(
+        {{0, 0, "a : f32, b : f32,", "f32", "a", 8, wgpu::BufferBindingType::Uniform,
+          wgpu::ShaderStage::Vertex},
+         {0, 1, "c : vec4<f32>,", "vec4<f32>", "c", 16, wgpu::BufferBindingType::Uniform,
+          wgpu::ShaderStage::Vertex}});
+    std::string fragShader = CreateFragmentShaderWithBindings(
+        {{0, 0, "a : f32,", "f32", "a", 4, wgpu::BufferBindingType::Uniform,
+          wgpu::ShaderStage::Fragment},
+         {0, 1, "b : f32, c : f32,", "f32", "b", 8, wgpu::BufferBindingType::Uniform,
+          wgpu::ShaderStage::Fragment}});
+
+    wgpu::BindGroupLayout renderLayout = GetBGLFromRenderShaders(vertexShader, fragShader, 0);
+
+    CheckLayoutBindingSizeValidation(renderLayout, {{0, 0, "", "", "", 8}, {0, 1, "", "", "", 16}});
+}
diff --git a/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp b/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp
new file mode 100644
index 0000000..f84748f
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp
@@ -0,0 +1,80 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/tests/MockCallback.h"
+
+using namespace testing;
+
+class MultipleDeviceTest : public ValidationTest {};
+
+// Test that it is invalid to submit a command buffer created on a different device.
+TEST_F(MultipleDeviceTest, ValidatesSameDevice) {
+    wgpu::Device device2 = RegisterDevice(CreateTestDevice());
+    wgpu::CommandBuffer commandBuffer = device2.CreateCommandEncoder().Finish();
+
+    ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commandBuffer));
+}
+
+// Test that CreatePipelineAsync fails creation with an Error status if it uses
+// objects from a different device.
+TEST_F(MultipleDeviceTest, ValidatesSameDeviceCreatePipelineAsync) {
+    wgpu::ShaderModuleWGSLDescriptor wgslDesc = {};
+    wgslDesc.source = R"(
+         @stage(compute) @workgroup_size(1, 1, 1) fn main() {
+        }
+    )";
+
+    wgpu::ShaderModuleDescriptor shaderModuleDesc = {};
+    shaderModuleDesc.nextInChain = &wgslDesc;
+
+    // Base case: CreateComputePipelineAsync succeeds.
+    {
+        wgpu::ShaderModule shaderModule = device.CreateShaderModule(&shaderModuleDesc);
+
+        wgpu::ComputePipelineDescriptor pipelineDesc = {};
+        pipelineDesc.compute.module = shaderModule;
+        pipelineDesc.compute.entryPoint = "main";
+
+        StrictMock<MockCallback<WGPUCreateComputePipelineAsyncCallback>> creationCallback;
+        EXPECT_CALL(creationCallback,
+                    Call(WGPUCreatePipelineAsyncStatus_Success, NotNull(), _, this))
+            .WillOnce(WithArg<1>(Invoke(
+                [](WGPUComputePipeline pipeline) { wgpu::ComputePipeline::Acquire(pipeline); })));
+        device.CreateComputePipelineAsync(&pipelineDesc, creationCallback.Callback(),
+                                          creationCallback.MakeUserdata(this));
+
+        WaitForAllOperations(device);
+    }
+
+    // CreateComputePipelineAsync errors if the shader module is created on a different device.
+    {
+        wgpu::Device device2 = RegisterDevice(CreateTestDevice());
+        wgpu::ShaderModule shaderModule = device2.CreateShaderModule(&shaderModuleDesc);
+
+        wgpu::ComputePipelineDescriptor pipelineDesc = {};
+        pipelineDesc.compute.module = shaderModule;
+        pipelineDesc.compute.entryPoint = "main";
+
+        StrictMock<MockCallback<WGPUCreateComputePipelineAsyncCallback>> creationCallback;
+        EXPECT_CALL(creationCallback,
+                    Call(WGPUCreatePipelineAsyncStatus_Error, nullptr, _, this + 1))
+            .Times(1);
+        device.CreateComputePipelineAsync(&pipelineDesc, creationCallback.Callback(),
+                                          creationCallback.MakeUserdata(this + 1));
+
+        WaitForAllOperations(device);
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/OverridableConstantsValidationTests.cpp b/src/dawn/tests/unittests/validation/OverridableConstantsValidationTests.cpp
new file mode 100644
index 0000000..2568f30
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/OverridableConstantsValidationTests.cpp
@@ -0,0 +1,212 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class ComputePipelineOverridableConstantsValidationTest : public ValidationTest {
+  protected:
+    void SetUpShadersWithDefaultValueConstants() {
+        computeModule = utils::CreateShaderModule(device, R"(
+override c0: bool = true;      // type: bool
+override c1: bool = false;      // default override
+override c2: f32 = 0.0;         // type: float32
+override c3: f32 = 0.0;         // default override
+override c4: f32 = 4.0;         // default
+override c5: i32 = 0;           // type: int32
+override c6: i32 = 0;           // default override
+override c7: i32 = 7;           // default
+override c8: u32 = 0u;          // type: uint32
+override c9: u32 = 0u;          // default override
+@id(1000) override c10: u32 = 10u;  // default
+
+@stage(compute) @workgroup_size(1) fn main() {
+    // make sure the overridable constants are not optimized out
+    _ = u32(c0);
+    _ = u32(c1);
+    _ = u32(c2);
+    _ = u32(c3);
+    _ = u32(c4);
+    _ = u32(c5);
+    _ = u32(c6);
+    _ = u32(c7);
+    _ = u32(c8);
+    _ = u32(c9);
+    _ = u32(c10);
+})");
+    }
+
+    void SetUpShadersWithUninitializedConstants() {
+        computeModule = utils::CreateShaderModule(device, R"(
+override c0: bool;              // type: bool
+override c1: bool = false;      // default override
+override c2: f32;               // type: float32
+override c3: f32 = 0.0;         // default override
+override c4: f32 = 4.0;         // default
+override c5: i32;               // type: int32
+override c6: i32 = 0;           // default override
+override c7: i32 = 7;           // default
+override c8: u32;               // type: uint32
+override c9: u32 = 0u;          // default override
+@id(1000) override c10: u32 = 10u;  // default
+
+@stage(compute) @workgroup_size(1) fn main() {
+    // make sure the overridable constants are not optimized out
+    _ = u32(c0);
+    _ = u32(c1);
+    _ = u32(c2);
+    _ = u32(c3);
+    _ = u32(c4);
+    _ = u32(c5);
+    _ = u32(c6);
+    _ = u32(c7);
+    _ = u32(c8);
+    _ = u32(c9);
+    _ = u32(c10);
+})");
+    }
+
+    void TestCreatePipeline(const std::vector<wgpu::ConstantEntry>& constants) {
+        wgpu::ComputePipelineDescriptor csDesc;
+        csDesc.compute.module = computeModule;
+        csDesc.compute.entryPoint = "main";
+        csDesc.compute.constants = constants.data();
+        csDesc.compute.constantCount = constants.size();
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
+    }
+
+    wgpu::ShaderModule computeModule;
+    wgpu::Buffer buffer;
+};
+
+// Basic constants lookup tests
+TEST_F(ComputePipelineOverridableConstantsValidationTest, ConstantsIdentifierLookUp) {
+    SetUpShadersWithDefaultValueConstants();
+    {
+        // Valid: no constants specified
+        std::vector<wgpu::ConstantEntry> constants;
+        TestCreatePipeline(constants);
+    }
+    {
+        // Valid: find by constant name
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "c0", 0}};
+        TestCreatePipeline(constants);
+    }
+    {
+        // Error: set the same constant twice
+        std::vector<wgpu::ConstantEntry> constants{
+            {nullptr, "c0", 0},
+            {nullptr, "c0", 1},
+        };
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Valid: find by constant numeric id
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "1000", 0}};
+        TestCreatePipeline(constants);
+    }
+    {
+        // Error: constant numeric id not specified
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "9999", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Error: constant name doesn't exit
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "c99", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+}
+
+// Test that it is invalid to leave any constants uninitialized
+TEST_F(ComputePipelineOverridableConstantsValidationTest, UninitializedConstants) {
+    SetUpShadersWithUninitializedConstants();
+    {
+        // Error: uninitialized constants exist
+        std::vector<wgpu::ConstantEntry> constants;
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Error: uninitialized constants exist
+        std::vector<wgpu::ConstantEntry> constants{
+            {nullptr, "c0", false},
+            {nullptr, "c2", 1},
+            // c5 is missing
+            {nullptr, "c8", 1},
+        };
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Valid: all constants initialized
+        std::vector<wgpu::ConstantEntry> constants{
+            {nullptr, "c0", false},
+            {nullptr, "c2", 1},
+            {nullptr, "c5", 1},
+            {nullptr, "c8", 1},
+        };
+        TestCreatePipeline(constants);
+    }
+    {
+        // Error: duplicate initializations
+        std::vector<wgpu::ConstantEntry> constants{
+            {nullptr, "c0", false}, {nullptr, "c2", 1}, {nullptr, "c5", 1},
+            {nullptr, "c8", 1},     {nullptr, "c2", 2},
+        };
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+}
+
+// Test that only explicitly specified numeric ID can be referenced
+TEST_F(ComputePipelineOverridableConstantsValidationTest, ConstantsIdentifierExplicitNumericID) {
+    SetUpShadersWithDefaultValueConstants();
+    {
+        // Error: constant numeric id not explicitly specified
+        // But could be impliciltly assigned to one of the constants
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "0", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Error: constant numeric id not explicitly specified
+        // But could be impliciltly assigned to one of the constants
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "1", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Error: constant numeric id not explicitly specified
+        // But could be impliciltly assigned to one of the constants
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "2", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+    {
+        // Error: constant numeric id not explicitly specified
+        // But could be impliciltly assigned to one of the constants
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "3", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+}
+
+// Test that identifiers are unique
+TEST_F(ComputePipelineOverridableConstantsValidationTest, ConstantsIdentifierUnique) {
+    SetUpShadersWithDefaultValueConstants();
+    {
+        // Valid: constant without numeric id can be referenced with variable name
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "c0", 0}};
+        TestCreatePipeline(constants);
+    }
+    {
+        // Error: constant with numeric id cannot be referenced with variable name
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "c10", 0}};
+        ASSERT_DEVICE_ERROR(TestCreatePipeline(constants));
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
new file mode 100644
index 0000000..bbb8589
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
@@ -0,0 +1,182 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+constexpr static uint32_t kSize = 4;
+// Note that format Depth24PlusStencil8 has both depth and stencil aspects, so parameters
+// depthReadOnly and stencilReadOnly should be the same in render pass and render bundle.
+wgpu::TextureFormat kFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+namespace {
+
+    class RenderPipelineAndPassCompatibilityTests : public ValidationTest {
+      public:
+        wgpu::RenderPipeline CreatePipeline(wgpu::TextureFormat format,
+                                            bool enableDepthWrite,
+                                            bool enableStencilWrite) {
+            // Create a NoOp pipeline
+            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+                @stage(fragment) fn main() {
+                })");
+            pipelineDescriptor.cFragment.targets = nullptr;
+            pipelineDescriptor.cFragment.targetCount = 0;
+
+            // Enable depth/stencil write if needed
+            wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
+            if (enableDepthWrite) {
+                depthStencil->depthWriteEnabled = true;
+            }
+            if (enableStencilWrite) {
+                depthStencil->stencilFront.failOp = wgpu::StencilOperation::Replace;
+            }
+            return device.CreateRenderPipeline(&pipelineDescriptor);
+        }
+
+        utils::ComboRenderPassDescriptor CreateRenderPassDescriptor(wgpu::TextureFormat format,
+                                                                    bool depthReadOnly,
+                                                                    bool stencilReadOnly) {
+            wgpu::TextureDescriptor textureDescriptor = {};
+            textureDescriptor.size = {kSize, kSize, 1};
+            textureDescriptor.format = format;
+            textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+            wgpu::Texture depthStencilTexture = device.CreateTexture(&textureDescriptor);
+
+            utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
+            if (depthReadOnly) {
+                passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+                passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+                passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            }
+
+            if (stencilReadOnly) {
+                passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+                passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+                passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp =
+                    wgpu::StoreOp::Undefined;
+            }
+
+            return passDescriptor;
+        }
+    };
+
+    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+    TEST_F(RenderPipelineAndPassCompatibilityTests, WriteAndReadOnlyConflictForDepthStencil) {
+        for (bool depthStencilReadOnlyInPass : {true, false}) {
+            for (bool depthWriteInPipeline : {true, false}) {
+                for (bool stencilWriteInPipeline : {true, false}) {
+                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                    wgpu::RenderPipeline pipeline =
+                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                    pass.SetPipeline(pipeline);
+                    pass.Draw(3);
+                    pass.End();
+                    if (depthStencilReadOnlyInPass &&
+                        (depthWriteInPipeline || stencilWriteInPipeline)) {
+                        ASSERT_DEVICE_ERROR(encoder.Finish());
+                    } else {
+                        encoder.Finish();
+                    }
+                }
+            }
+        }
+    }
+
+    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+    // depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle.
+    TEST_F(RenderPipelineAndPassCompatibilityTests,
+           WriteAndReadOnlyConflictForDepthStencilBetweenPipelineAndBundle) {
+        for (bool depthStencilReadOnlyInBundle : {true, false}) {
+            utils::ComboRenderBundleEncoderDescriptor desc = {};
+            desc.depthStencilFormat = kFormat;
+            desc.depthReadOnly = depthStencilReadOnlyInBundle;
+            desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+
+            for (bool depthWriteInPipeline : {true, false}) {
+                for (bool stencilWriteInPipeline : {true, false}) {
+                    wgpu::RenderBundleEncoder renderBundleEncoder =
+                        device.CreateRenderBundleEncoder(&desc);
+                    wgpu::RenderPipeline pipeline =
+                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                    renderBundleEncoder.SetPipeline(pipeline);
+                    renderBundleEncoder.Draw(3);
+                    if (depthStencilReadOnlyInBundle &&
+                        (depthWriteInPipeline || stencilWriteInPipeline)) {
+                        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+                    } else {
+                        renderBundleEncoder.Finish();
+                    }
+                }
+            }
+        }
+    }
+
+    // Test depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle vs
+    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+    TEST_F(RenderPipelineAndPassCompatibilityTests,
+           WriteAndReadOnlyConflictForDepthStencilBetweenBundleAndPass) {
+        for (bool depthStencilReadOnlyInPass : {true, false}) {
+            for (bool depthStencilReadOnlyInBundle : {true, false}) {
+                for (bool emptyBundle : {true, false}) {
+                    // Create render bundle, with or without a pipeline
+                    utils::ComboRenderBundleEncoderDescriptor desc = {};
+                    desc.depthStencilFormat = kFormat;
+                    desc.depthReadOnly = depthStencilReadOnlyInBundle;
+                    desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+                    wgpu::RenderBundleEncoder renderBundleEncoder =
+                        device.CreateRenderBundleEncoder(&desc);
+                    if (!emptyBundle) {
+                        wgpu::RenderPipeline pipeline = CreatePipeline(
+                            kFormat, !depthStencilReadOnlyInBundle, !depthStencilReadOnlyInBundle);
+                        renderBundleEncoder.SetPipeline(pipeline);
+                        renderBundleEncoder.Draw(3);
+                    }
+                    wgpu::RenderBundle bundle = renderBundleEncoder.Finish();
+
+                    // Create render pass and call ExecuteBundles()
+                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                    pass.ExecuteBundles(1, &bundle);
+                    pass.End();
+                    if (!depthStencilReadOnlyInPass || depthStencilReadOnlyInBundle) {
+                        encoder.Finish();
+                    } else {
+                        ASSERT_DEVICE_ERROR(encoder.Finish());
+                    }
+                }
+            }
+        }
+    }
+
+    // TODO(dawn:485): add more tests. For example:
+    //   - depth/stencil attachment should be designated if depth/stencil test is enabled.
+    //   - pipeline and pass compatibility tests for color attachment(s).
+    //   - pipeline and pass compatibility tests for compute.
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueryValidationTests.cpp b/src/dawn/tests/unittests/validation/QueryValidationTests.cpp
new file mode 100644
index 0000000..b1071a8
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/QueryValidationTests.cpp
@@ -0,0 +1,842 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class QuerySetValidationTest : public ValidationTest {
+  protected:
+    wgpu::QuerySet CreateQuerySet(
+        wgpu::Device cDevice,
+        wgpu::QueryType queryType,
+        uint32_t queryCount,
+        std::vector<wgpu::PipelineStatisticName> pipelineStatistics = {}) {
+        wgpu::QuerySetDescriptor descriptor;
+        descriptor.type = queryType;
+        descriptor.count = queryCount;
+
+        if (pipelineStatistics.size() > 0) {
+            descriptor.pipelineStatistics = pipelineStatistics.data();
+            descriptor.pipelineStatisticsCount = pipelineStatistics.size();
+        }
+
+        return cDevice.CreateQuerySet(&descriptor);
+    }
+};
+
+// Test creating query set without features
+TEST_F(QuerySetValidationTest, CreationWithoutFeatures) {
+    // Creating a query set for occlusion queries succeeds without any features enabled.
+    CreateQuerySet(device, wgpu::QueryType::Occlusion, 1);
+
+    // Creating a query set for other types of queries fails without features enabled.
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                                       {wgpu::PipelineStatisticName::VertexShaderInvocations}));
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Timestamp, 1));
+}
+
+// Test creating query set with invalid count
+TEST_F(QuerySetValidationTest, InvalidQueryCount) {
+    // Success create a query set with the maximum count
+    CreateQuerySet(device, wgpu::QueryType::Occlusion, kMaxQueryCount);
+
+    // Fail to create a query set with the count which exceeds the maximum
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Occlusion, kMaxQueryCount + 1));
+}
+
+// Test creating query set with invalid type
+TEST_F(QuerySetValidationTest, InvalidQueryType) {
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, static_cast<wgpu::QueryType>(0xFFFFFFFF), 1));
+}
+
+// Test creating query set with unnecessary pipeline statistics for occlusion queries
+TEST_F(QuerySetValidationTest, UnnecessaryPipelineStatistics) {
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Occlusion, 1,
+                                       {wgpu::PipelineStatisticName::VertexShaderInvocations}));
+}
+
+// Test destroying a destroyed query set
+TEST_F(QuerySetValidationTest, DestroyDestroyedQuerySet) {
+    wgpu::QuerySetDescriptor descriptor;
+    descriptor.type = wgpu::QueryType::Occlusion;
+    descriptor.count = 1;
+    wgpu::QuerySet querySet = device.CreateQuerySet(&descriptor);
+    querySet.Destroy();
+    querySet.Destroy();
+}
+
+class OcclusionQueryValidationTest : public QuerySetValidationTest {};
+
+// Test the occlusionQuerySet in RenderPassDescriptor
+TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+    DummyRenderPass renderPass(device);
+
+    // Success
+    {
+        renderPass.occlusionQuerySet = occlusionQuerySet;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(0);
+        pass.EndOcclusionQuery();
+        pass.BeginOcclusionQuery(1);
+        pass.EndOcclusionQuery();
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Fail to begin occlusion query if the occlusionQuerySet is not set in RenderPassDescriptor
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        DummyRenderPass renderPassWithoutOcclusion(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassWithoutOcclusion);
+        pass.BeginOcclusionQuery(0);
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to begin render pass if the occlusionQuerySet is created from other device
+    {
+        wgpu::Device otherDevice = RegisterDevice(adapter.CreateDevice());
+        wgpu::QuerySet occlusionQuerySetOnOther =
+            CreateQuerySet(otherDevice, wgpu::QueryType::Occlusion, 2);
+        renderPass.occlusionQuerySet = occlusionQuerySetOnOther;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+
+        // Clear this out so we don't hold a reference. The query set
+        // must be destroyed before the device local to this test case.
+        renderPass.occlusionQuerySet = wgpu::QuerySet();
+    }
+
+    // Fail to submit occlusion query with a destroyed query set
+    {
+        renderPass.occlusionQuerySet = occlusionQuerySet;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(0);
+        pass.EndOcclusionQuery();
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::Queue queue = device.GetQueue();
+        occlusionQuerySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test query index of occlusion query
+TEST_F(OcclusionQueryValidationTest, InvalidQueryIndex) {
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+    DummyRenderPass renderPass(device);
+    renderPass.occlusionQuerySet = occlusionQuerySet;
+
+    // Fail to begin occlusion query if the query index exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(2);
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Success to begin occlusion query with same query index twice on different render encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
+        pass0.BeginOcclusionQuery(0);
+        pass0.EndOcclusionQuery();
+        pass0.End();
+
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
+        pass1.BeginOcclusionQuery(0);
+        pass1.EndOcclusionQuery();
+        pass1.End();
+        encoder.Finish();
+    }
+
+    // Fail to begin occlusion query with same query index twice on a same render encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(0);
+        pass.EndOcclusionQuery();
+        pass.BeginOcclusionQuery(0);
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test the correspondence between BeginOcclusionQuery and EndOcclusionQuery
+TEST_F(OcclusionQueryValidationTest, InvalidBeginAndEnd) {
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+    DummyRenderPass renderPass(device);
+    renderPass.occlusionQuerySet = occlusionQuerySet;
+
+    // Fail to begin an occlusion query without corresponding end operation
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(0);
+        pass.BeginOcclusionQuery(1);
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to end occlusion query twice in a row even the begin occlusion query twice
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.BeginOcclusionQuery(0);
+        pass.BeginOcclusionQuery(1);
+        pass.EndOcclusionQuery();
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to end occlusion query without begin operation
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.EndOcclusionQuery();
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+class TimestampQueryValidationTest : public QuerySetValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::TimestampQuery};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        const char* forceDisabledToggles[1] = {"disallow_unsafe_apis"};
+        togglesDesc.forceDisabledToggles = forceDisabledToggles;
+        togglesDesc.forceDisabledTogglesCount = 1;
+
+        return adapter.CreateDevice(&descriptor);
+    }
+
+    void EncodeRenderPassWithTimestampWrites(
+        wgpu::CommandEncoder encoder,
+        const std::vector<wgpu::RenderPassTimestampWrite>& timestampWrites) {
+        DummyRenderPass renderPass(device);
+        renderPass.timestampWriteCount = timestampWrites.size();
+        renderPass.timestampWrites = timestampWrites.data();
+
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+    }
+
+    void EncodeComputePassWithTimestampWrites(
+        wgpu::CommandEncoder encoder,
+        const std::vector<wgpu::ComputePassTimestampWrite>& timestampWrites) {
+        wgpu::ComputePassDescriptor descriptor;
+        descriptor.timestampWriteCount = timestampWrites.size();
+        descriptor.timestampWrites = timestampWrites.data();
+
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&descriptor);
+        pass.End();
+    }
+};
+
+// Test creating query set with only the timestamp feature enabled.
+TEST_F(TimestampQueryValidationTest, Creation) {
+    // Creating a query set for occlusion queries succeeds.
+    CreateQuerySet(device, wgpu::QueryType::Occlusion, 1);
+
+    // Creating a query set for pipeline statistics queries fails.
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                                       {wgpu::PipelineStatisticName::VertexShaderInvocations}));
+
+    // Creating a query set for timestamp queries succeeds.
+    CreateQuerySet(device, wgpu::QueryType::Timestamp, 1);
+
+    // Fail to create with pipeline statistics for Timestamp query
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Timestamp, 1,
+                                       {wgpu::PipelineStatisticName::VertexShaderInvocations}));
+}
+
+// Test creating query set with unnecessary pipeline statistics for timestamp queries
+TEST_F(TimestampQueryValidationTest, UnnecessaryPipelineStatistics) {
+    // Fail to create with pipeline statistics for Occlusion query
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Timestamp, 1,
+                                       {wgpu::PipelineStatisticName::VertexShaderInvocations}));
+}
+
+// Test query set with type of timestamp is set to the occlusionQuerySet of RenderPassDescriptor.
+TEST_F(TimestampQueryValidationTest, SetOcclusionQueryWithTimestampQuerySet) {
+    // Fail to begin render pass if the type of occlusionQuerySet is not Occlusion
+    wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 1);
+    DummyRenderPass renderPass(device);
+    renderPass.occlusionQuerySet = querySet;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPass);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test timestampWrites in compute pass descriptor
+TEST_F(TimestampQueryValidationTest, TimestampWritesOnComputePass) {
+    wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
+
+    // Success
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::ComputePassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::ComputePassTimestampLocation::End}});
+        encoder.Finish();
+    }
+
+    // Fail to write timestamps to other type of query set
+    {
+        wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 1);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{occlusionQuerySet, 0, wgpu::ComputePassTimestampLocation::Beginning}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to a query set created from another device
+    {
+        wgpu::Device otherDevice = RegisterDevice(adapter.CreateDevice());
+        wgpu::QuerySet querySetFromOtherDevice =
+            CreateQuerySet(otherDevice, wgpu::QueryType::Timestamp, 2);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::ComputePassTimestampLocation::Beginning},
+                      {querySetFromOtherDevice, 1, wgpu::ComputePassTimestampLocation::End}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to the query index which exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 2, wgpu::ComputePassTimestampLocation::Beginning}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Success to write timestamps to the same query index twice on same compute pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::ComputePassTimestampLocation::Beginning},
+                      {querySet, 0, wgpu::ComputePassTimestampLocation::End}});
+        encoder.Finish();
+    }
+
+    // Success to write timestamps at same location of compute pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::ComputePassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::ComputePassTimestampLocation::Beginning}});
+        encoder.Finish();
+    }
+
+    // Fail to write timestamps at invalid location of compute pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, static_cast<wgpu::ComputePassTimestampLocation>(0xFFFFFFFF)}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeComputePassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::ComputePassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::ComputePassTimestampLocation::End}});
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::Queue queue = device.GetQueue();
+        querySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test timestampWrites in render pass descriptor
+TEST_F(TimestampQueryValidationTest, TimestampWritesOnRenderPass) {
+    wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
+
+    // Success
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::RenderPassTimestampLocation::End}});
+        encoder.Finish();
+    }
+
+    // Fail to write timestamps to other type of query set
+    {
+        wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 1);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{occlusionQuerySet, 0, wgpu::RenderPassTimestampLocation::Beginning}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to a query set created from another device
+    {
+        wgpu::Device otherDevice = RegisterDevice(adapter.CreateDevice());
+        wgpu::QuerySet querySetFromOtherDevice =
+            CreateQuerySet(otherDevice, wgpu::QueryType::Timestamp, 2);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySetFromOtherDevice, 1, wgpu::RenderPassTimestampLocation::End}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to the query index which exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 2, wgpu::RenderPassTimestampLocation::Beginning}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Success to write timestamps to the same query index twice on different render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::RenderPassTimestampLocation::End}});
+        // Encodee other render pass
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::RenderPassTimestampLocation::End}});
+        encoder.Finish();
+    }
+
+    // Fail to write timestamps to the same query index twice on same render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 0, wgpu::RenderPassTimestampLocation::End}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Success to write timestamps at same location of render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::RenderPassTimestampLocation::Beginning}});
+        encoder.Finish();
+    }
+
+    // Fail to write timestamps at invalid location of render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, static_cast<wgpu::RenderPassTimestampLocation>(0xFFFFFFFF)}});
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamps to a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeRenderPassWithTimestampWrites(
+            encoder, {{querySet, 0, wgpu::RenderPassTimestampLocation::Beginning},
+                      {querySet, 1, wgpu::RenderPassTimestampLocation::End}});
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::Queue queue = device.GetQueue();
+        querySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test write timestamp on command encoder
+TEST_F(TimestampQueryValidationTest, WriteTimestampOnCommandEncoder) {
+    wgpu::QuerySet timestampQuerySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+
+    // Success on command encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(timestampQuerySet, 0);
+        encoder.Finish();
+    }
+
+    // Fail to write timestamp to the index which exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(timestampQuerySet, 2);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to submit timestamp query with a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(timestampQuerySet, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        timestampQuerySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test write timestamp on compute pass encoder
+TEST_F(TimestampQueryValidationTest, WriteTimestampOnComputePassEncoder) {
+    wgpu::QuerySet timestampQuerySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+
+    // Success on compute pass encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Not allow to write timestamp to the query set with other query type
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(occlusionQuerySet, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamp to the index which exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(timestampQuerySet, 2);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to submit timestamp query with a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        timestampQuerySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test write timestamp on render pass encoder
+TEST_F(TimestampQueryValidationTest, WriteTimestampOnRenderPassEncoder) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::QuerySet timestampQuerySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
+    wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
+
+    // Success on render pass encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Not allow to write timestamp to the query set with other query type
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(occlusionQuerySet, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to write timestamp to the index which exceeds the number of queries in query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(timestampQuerySet, 2);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Success to write timestamp to the same query index twice on command encoder and render
+    // encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteTimestamp(timestampQuerySet, 0);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Success to write timestamp to the same query index twice on different render encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
+        pass0.WriteTimestamp(timestampQuerySet, 0);
+        pass0.End();
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
+        pass1.WriteTimestamp(timestampQuerySet, 0);
+        pass1.End();
+        encoder.Finish();
+    }
+
+    // Fail to write timestamp to the same query index twice on same render encoder
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to submit timestamp query with a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.WriteTimestamp(timestampQuerySet, 0);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        timestampQuerySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+class PipelineStatisticsQueryValidationTest : public QuerySetValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::PipelineStatisticsQuery};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+
+        // TODO(crbug.com/1177506): Pipeline statistic query is an unsafe API, disable disallowing
+        // unsafe APIs to test it.
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        const char* forceDisabledToggles[1] = {"disallow_unsafe_apis"};
+        togglesDesc.forceDisabledToggles = forceDisabledToggles;
+        togglesDesc.forceDisabledTogglesCount = 1;
+
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test creating query set with only the pipeline statistics feature enabled.
+TEST_F(PipelineStatisticsQueryValidationTest, Creation) {
+    // Creating a query set for occlusion queries succeeds.
+    CreateQuerySet(device, wgpu::QueryType::Occlusion, 1);
+
+    // Creating a query set for timestamp queries fails.
+    ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::Timestamp, 1));
+
+    // Creating a query set for pipeline statistics queries succeeds.
+    CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                   {wgpu::PipelineStatisticName::VertexShaderInvocations});
+}
+
+// Test creating query set with invalid pipeline statistics
+TEST_F(PipelineStatisticsQueryValidationTest, InvalidPipelineStatistics) {
+    // Success to create with all pipeline statistics names which are not in the same order as
+    // defined in webgpu header file
+    {
+        CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                       {wgpu::PipelineStatisticName::ClipperInvocations,
+                        wgpu::PipelineStatisticName::ClipperPrimitivesOut,
+                        wgpu::PipelineStatisticName::ComputeShaderInvocations,
+                        wgpu::PipelineStatisticName::FragmentShaderInvocations,
+                        wgpu::PipelineStatisticName::VertexShaderInvocations});
+    }
+
+    // Fail to create with empty pipeline statistics
+    { ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1, {})); }
+
+    // Fail to create with invalid pipeline statistics
+    {
+        ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                                           {static_cast<wgpu::PipelineStatisticName>(0xFFFFFFFF)}));
+    }
+
+    // Fail to create with duplicate pipeline statistics
+    {
+        ASSERT_DEVICE_ERROR(CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                                           {wgpu::PipelineStatisticName::VertexShaderInvocations,
+                                            wgpu::PipelineStatisticName::VertexShaderInvocations}));
+    }
+}
+
+// Test query set with type of pipeline statistics is set to the occlusionQuerySet of
+// RenderPassDescriptor.
+TEST_F(PipelineStatisticsQueryValidationTest, BeginRenderPassWithPipelineStatisticsQuerySet) {
+    // Fail to begin render pass if the type of occlusionQuerySet is not Occlusion
+    wgpu::QuerySet querySet =
+        CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
+                       {wgpu::PipelineStatisticName::VertexShaderInvocations});
+    DummyRenderPass renderPass(device);
+    renderPass.occlusionQuerySet = querySet;
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.BeginRenderPass(&renderPass);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+class ResolveQuerySetValidationTest : public QuerySetValidationTest {
+  protected:
+    wgpu::Buffer CreateBuffer(wgpu::Device cDevice, uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
+
+        return cDevice.CreateBuffer(&descriptor);
+    }
+};
+
+// Test resolve query set with invalid query set, first query and query count
+TEST_F(ResolveQuerySetValidationTest, ResolveInvalidQuerySetAndIndexCount) {
+    constexpr uint32_t kQueryCount = 4;
+
+    wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, kQueryCount);
+    wgpu::Buffer destination =
+        CreateBuffer(device, kQueryCount * sizeof(uint64_t), wgpu::BufferUsage::QueryResolve);
+
+    // Success
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        queue.Submit(1, &commands);
+    }
+
+    //  Fail to resolve query set if first query out of range
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, kQueryCount, 0, destination, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    //  Fail to resolve query set if the sum of first query and query count is larger than queries
+    //  number in the query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 1, kQueryCount, destination, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to resolve a destroyed query set
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        querySet.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test resolve query set with invalid query set, first query and query count
+TEST_F(ResolveQuerySetValidationTest, ResolveToInvalidBufferAndOffset) {
+    constexpr uint32_t kQueryCount = 4;
+    constexpr uint64_t kBufferSize =
+        (kQueryCount - 1) * sizeof(uint64_t) + 256 /*destinationOffset*/;
+
+    wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, kQueryCount);
+    wgpu::Buffer destination = CreateBuffer(device, kBufferSize, wgpu::BufferUsage::QueryResolve);
+
+    // Success
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 1, kQueryCount - 1, destination, 256);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        queue.Submit(1, &commands);
+    }
+
+    // Fail to resolve query set to a buffer created from another device
+    {
+        wgpu::Device otherDevice = RegisterDevice(adapter.CreateDevice());
+        wgpu::Buffer bufferOnOther =
+            CreateBuffer(otherDevice, kBufferSize, wgpu::BufferUsage::QueryResolve);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, bufferOnOther, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    //  Fail to resolve query set to a buffer if offset is not a multiple of 256 bytes
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 128);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    //  Fail to resolve query set to a buffer if the data size overflow the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 256);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    //  Fail to resolve query set to a buffer if the offset is past the end of the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, 1, destination, kBufferSize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    //  Fail to resolve query set to a buffer does not have the usage of QueryResolve
+    {
+        wgpu::Buffer dstBuffer = CreateBuffer(device, kBufferSize, wgpu::BufferUsage::CopyDst);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, dstBuffer, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Fail to resolve query set to a destroyed buffer.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = device.GetQueue();
+        destination.Destroy();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp
new file mode 100644
index 0000000..1adb1d2
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp
@@ -0,0 +1,58 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include <gmock/gmock.h>
+
+using namespace testing;
+
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class QueueOnSubmittedWorkDoneValidationTests : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
+    }
+
+    void TearDown() override {
+        mockQueueWorkDoneCallback = nullptr;
+        ValidationTest::TearDown();
+    }
+};
+
+// Test that OnSubmittedWorkDone can be called as soon as the queue is created.
+TEST_F(QueueOnSubmittedWorkDoneValidationTests, CallBeforeSubmits) {
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+    device.GetQueue().OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);
+
+    WaitForAllOperations(device);
+}
+
+// Test that OnSubmittedWorkDone is an error if signalValue isn't 0.
+TEST_F(QueueOnSubmittedWorkDoneValidationTests, SignaledValueNotZeroIsInvalid) {
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
+    ASSERT_DEVICE_ERROR(device.GetQueue().OnSubmittedWorkDone(1u, ToMockQueueWorkDone, this));
+
+    WaitForAllOperations(device);
+}
diff --git a/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
new file mode 100644
index 0000000..f30ff71
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
@@ -0,0 +1,365 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class QueueSubmitValidationTest : public ValidationTest {};
+
+    // Test submitting with a mapped buffer is disallowed
+    TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
+        // Create a map-write buffer.
+        const uint64_t kBufferSize = 4;
+        wgpu::BufferDescriptor descriptor;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+        descriptor.size = kBufferSize;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        // Create a fake copy destination buffer
+        descriptor.usage = wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+
+        // Create a command buffer that reads from the mappable buffer.
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+            commands = encoder.Finish();
+        }
+
+        wgpu::Queue queue = device.GetQueue();
+
+        // Submitting when the buffer has never been mapped should succeed
+        queue.Submit(1, &commands);
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+            commands = encoder.Finish();
+        }
+
+        // Map the buffer, submitting when the buffer is mapped should fail
+        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
+
+        // Try submitting before the callback is fired.
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+        WaitForAllOperations(device);
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+            commands = encoder.Finish();
+        }
+
+        // Try submitting after the callback is fired.
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+            commands = encoder.Finish();
+        }
+
+        // Unmap the buffer, queue submit should succeed
+        buffer.Unmap();
+        queue.Submit(1, &commands);
+    }
+
+    // Test it is invalid to submit a command buffer twice
+    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
+        wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
+        wgpu::Queue queue = device.GetQueue();
+
+        // Should succeed
+        queue.Submit(1, &commandBuffer);
+
+        // Should fail because command buffer was already submitted
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
+    }
+
+    // Test resubmitting failed command buffers
+    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
+        // Create a map-write buffer
+        const uint64_t kBufferSize = 4;
+        wgpu::BufferDescriptor descriptor;
+        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+        descriptor.size = kBufferSize;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        // Create a destination buffer for the b2b copy
+        descriptor.usage = wgpu::BufferUsage::CopyDst;
+        descriptor.size = kBufferSize;
+        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+
+        // Create a command buffer that reads from the mappable buffer
+        wgpu::CommandBuffer commands;
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+            commands = encoder.Finish();
+        }
+
+        wgpu::Queue queue = device.GetQueue();
+
+        // Map the source buffer to force a failure
+        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
+
+        // Submitting a command buffer with a mapped buffer should fail
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+        // Unmap buffer to fix the failure
+        buffer.Unmap();
+
+        // Resubmitting any command buffer, even if the problem was fixed, should fail
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+
+    // Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
+    TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
+        // Create a buffer for mapping, to run our callback.
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::MapWrite;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        struct CallbackData {
+            wgpu::Device device;
+            wgpu::Buffer buffer;
+        } callbackData = {device, buffer};
+
+        const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+            data->buffer.Unmap();
+
+            wgpu::Queue queue = data->device.GetQueue();
+            queue.Submit(0, nullptr);
+        };
+
+        buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);
+
+        WaitForAllOperations(device);
+    }
+
+    // Test that submitting in a render pipeline creation callback doesn't cause re-entrance
+    // problems.
+    TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
+        struct CallbackData {
+            wgpu::Device device;
+        } callbackData = {device};
+
+        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+                                 char const* message, void* userdata) {
+            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+            wgpuRenderPipelineRelease(pipeline);
+
+            wgpu::Queue queue = data->device.GetQueue();
+            queue.Submit(0, nullptr);
+        };
+
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);
+
+        WaitForAllOperations(device);
+    }
+
+    // Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
+    // problems.
+    TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
+        struct CallbackData {
+            wgpu::Device device;
+        } callbackData = {device};
+
+        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+                                 char const* message, void* userdata) {
+            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+            wgpuComputePipelineRelease(pipeline);
+
+            wgpu::Queue queue = data->device.GetQueue();
+            queue.Submit(0, nullptr);
+        };
+
+        wgpu::ComputePipelineDescriptor descriptor;
+        descriptor.compute.module = utils::CreateShaderModule(device, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })");
+        descriptor.compute.entryPoint = "main";
+        device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);
+
+        WaitForAllOperations(device);
+    }
+
+    // Test that buffers in unused compute pass bindgroups are still checked for in
+    // Queue::Submit validation.
+    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
+        wgpu::Queue queue = device.GetQueue();
+
+        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+        // In this test we check that BindGroup 1 is checked, the texture test will check
+        // BindGroup 2. This is to provide coverage of for loops in validation code.
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
+        cpDesc.compute.entryPoint = "main";
+        cpDesc.compute.module =
+            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::BufferDescriptor bufDesc;
+        bufDesc.size = 4;
+        bufDesc.usage = wgpu::BufferUsage::Storage;
+
+        // Test that completely unused bindgroups still have their buffers checked.
+        for (bool destroy : {true, false}) {
+            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(1, unusedBG);
+            pass.End();
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            if (destroy) {
+                unusedBuffer.Destroy();
+                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+            } else {
+                queue.Submit(1, &commands);
+            }
+        }
+
+        // Test that unused bindgroups because they were replaced still have their buffers checked.
+        for (bool destroy : {true, false}) {
+            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+            wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
+            wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, emptyBG);
+            pass.SetBindGroup(1, unusedBG);
+            pass.SetBindGroup(1, usedBG);
+            pass.SetPipeline(pipeline);
+            pass.Dispatch(1);
+            pass.End();
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            if (destroy) {
+                unusedBuffer.Destroy();
+                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+            } else {
+                queue.Submit(1, &commands);
+            }
+        }
+    }
+
+    // Test that textures in unused compute pass bindgroups are still checked for in
+    // Queue::Submit validation.
+    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
+        wgpu::Queue queue = device.GetQueue();
+
+        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+        wgpu::ComputePipelineDescriptor cpDesc;
+        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
+        cpDesc.compute.entryPoint = "main";
+        cpDesc.compute.module =
+            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+        wgpu::TextureDescriptor texDesc;
+        texDesc.size = {1, 1, 1};
+        texDesc.usage = wgpu::TextureUsage::TextureBinding;
+        texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+        // Test that completely unused bindgroups still have their buffers checked.
+        for (bool destroy : {true, false}) {
+            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+            wgpu::BindGroup unusedBG =
+                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(2, unusedBG);
+            pass.End();
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            if (destroy) {
+                unusedTexture.Destroy();
+                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+            } else {
+                queue.Submit(1, &commands);
+            }
+        }
+
+        // Test that unused bindgroups because they were replaced still have their buffers checked.
+        for (bool destroy : {true, false}) {
+            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+            wgpu::BindGroup unusedBG =
+                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+            wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
+            wgpu::BindGroup usedBG =
+                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, emptyBG);
+            pass.SetBindGroup(1, emptyBG);
+            pass.SetBindGroup(2, unusedBG);
+            pass.SetBindGroup(2, usedBG);
+            pass.SetPipeline(pipeline);
+            pass.Dispatch(1);
+            pass.End();
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            if (destroy) {
+                unusedTexture.Destroy();
+                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+            } else {
+                queue.Submit(1, &commands);
+            }
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueueWriteBufferValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueWriteBufferValidationTests.cpp
new file mode 100644
index 0000000..ec03dbc
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/QueueWriteBufferValidationTests.cpp
@@ -0,0 +1,127 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+class QueueWriteBufferValidationTest : public ValidationTest {
+  private:
+    void SetUp() override {
+        ValidationTest::SetUp();
+        queue = device.GetQueue();
+    }
+
+  protected:
+    wgpu::Buffer CreateBuffer(uint64_t size) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = wgpu::BufferUsage::CopyDst;
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::Queue queue;
+};
+
+// Test the success case for WriteBuffer
+TEST_F(QueueWriteBufferValidationTest, Success) {
+    wgpu::Buffer buf = CreateBuffer(4);
+
+    uint32_t foo = 0x01020304;
+    queue.WriteBuffer(buf, 0, &foo, sizeof(foo));
+}
+
+// Test error case for WriteBuffer out of bounds
+TEST_F(QueueWriteBufferValidationTest, OutOfBounds) {
+    wgpu::Buffer buf = CreateBuffer(4);
+
+    uint32_t foo[2] = {0, 0};
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, foo, 8));
+}
+
+// Test error case for WriteBuffer out of bounds with an overflow
+TEST_F(QueueWriteBufferValidationTest, OutOfBoundsOverflow) {
+    wgpu::Buffer buf = CreateBuffer(1024);
+
+    uint32_t foo[2] = {0, 0};
+
+    // An offset that when added to "4" would overflow to be zero and pass validation without
+    // overflow checks.
+    uint64_t offset = uint64_t(int64_t(0) - int64_t(4));
+
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, offset, foo, 4));
+}
+
+// Test error case for WriteBuffer with the wrong usage
+TEST_F(QueueWriteBufferValidationTest, WrongUsage) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    wgpu::Buffer buf = device.CreateBuffer(&descriptor);
+
+    uint32_t foo = 0;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &foo, sizeof(foo)));
+}
+
+// Test WriteBuffer with unaligned size
+TEST_F(QueueWriteBufferValidationTest, UnalignedSize) {
+    wgpu::Buffer buf = CreateBuffer(4);
+
+    uint16_t value = 123;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
+}
+
+// Test WriteBuffer with unaligned offset
+TEST_F(QueueWriteBufferValidationTest, UnalignedOffset) {
+    wgpu::Buffer buf = CreateBuffer(8);
+
+    uint32_t value = 0x01020304;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 2, &value, sizeof(value)));
+}
+
+// Test WriteBuffer with destroyed buffer
+TEST_F(QueueWriteBufferValidationTest, DestroyedBuffer) {
+    wgpu::Buffer buf = CreateBuffer(4);
+    buf.Destroy();
+
+    uint32_t value = 0;
+    ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
+}
+
+// Test WriteBuffer with mapped buffer
+TEST_F(QueueWriteBufferValidationTest, MappedBuffer) {
+    // mappedAtCreation
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::CopyDst;
+        descriptor.mappedAtCreation = true;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        uint32_t value = 0;
+        ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 0, &value, sizeof(value)));
+    }
+
+    // MapAsync
+    {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+        wgpu::Buffer buf = device.CreateBuffer(&descriptor);
+
+        buf.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
+        uint32_t value = 0;
+        ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
new file mode 100644
index 0000000..79f28af
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
@@ -0,0 +1,809 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/utils/TestUtils.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class QueueWriteTextureValidationTest : public ValidationTest {
+      private:
+        void SetUp() override {
+            ValidationTest::SetUp();
+            queue = device.GetQueue();
+        }
+
+      protected:
+        wgpu::Texture Create2DTexture(wgpu::Extent3D size,
+                                      uint32_t mipLevelCount,
+                                      wgpu::TextureFormat format,
+                                      wgpu::TextureUsage usage,
+                                      uint32_t sampleCount = 1) {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.size.width = size.width;
+            descriptor.size.height = size.height;
+            descriptor.size.depthOrArrayLayers = size.depthOrArrayLayers;
+            descriptor.sampleCount = sampleCount;
+            descriptor.format = format;
+            descriptor.mipLevelCount = mipLevelCount;
+            descriptor.usage = usage;
+            wgpu::Texture tex = device.CreateTexture(&descriptor);
+            return tex;
+        }
+
+        void TestWriteTexture(size_t dataSize,
+                              uint32_t dataOffset,
+                              uint32_t dataBytesPerRow,
+                              uint32_t dataRowsPerImage,
+                              wgpu::Texture texture,
+                              uint32_t texLevel,
+                              wgpu::Origin3D texOrigin,
+                              wgpu::Extent3D size,
+                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+            std::vector<uint8_t> data(dataSize);
+
+            wgpu::TextureDataLayout textureDataLayout;
+            textureDataLayout.offset = dataOffset;
+            textureDataLayout.bytesPerRow = dataBytesPerRow;
+            textureDataLayout.rowsPerImage = dataRowsPerImage;
+
+            wgpu::ImageCopyTexture imageCopyTexture =
+                utils::CreateImageCopyTexture(texture, texLevel, texOrigin, aspect);
+
+            queue.WriteTexture(&imageCopyTexture, data.data(), dataSize, &textureDataLayout, &size);
+        }
+
+        void TestWriteTextureExactDataSize(uint32_t bytesPerRow,
+                                           uint32_t rowsPerImage,
+                                           wgpu::Texture texture,
+                                           wgpu::TextureFormat textureFormat,
+                                           wgpu::Origin3D origin,
+                                           wgpu::Extent3D extent3D) {
+            // Check the minimal valid dataSize.
+            uint64_t dataSize =
+                utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent3D, textureFormat);
+            TestWriteTexture(dataSize, 0, bytesPerRow, rowsPerImage, texture, 0, origin, extent3D);
+
+            // Check dataSize was indeed minimal.
+            uint64_t invalidSize = dataSize - 1;
+            ASSERT_DEVICE_ERROR(TestWriteTexture(invalidSize, 0, bytesPerRow, rowsPerImage, texture,
+                                                 0, origin, extent3D));
+        }
+
+        wgpu::Queue queue;
+    };
+
+    // Test the success case for WriteTexture
+    TEST_F(QueueWriteTextureValidationTest, Success) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // Different copies, including some that touch the OOB condition
+        {
+            // Copy 4x4 block in corner of first mip.
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
+            // Copy 4x4 block in opposite corner of first mip.
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 12, 0}, {4, 4, 1});
+            // Copy 4x4 block in the 4x4 mip.
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {0, 0, 0}, {4, 4, 1});
+            // Copy with a data offset
+            TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+            TestWriteTexture(dataSize, dataSize - 4, 256, wgpu::kCopyStrideUndefined, destination,
+                             0, {0, 0, 0}, {1, 1, 1});
+        }
+
+        // Copies with a 256-byte aligned bytes per row but unaligned texture region
+        {
+            // Unaligned region
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {3, 4, 1});
+            // Unaligned region with texture offset
+            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {5, 7, 0}, {2, 3, 1});
+            // Unaligned region, with data offset
+            TestWriteTexture(dataSize, 31 * 4, 256, 3, destination, 0, {0, 0, 0}, {3, 3, 1});
+        }
+
+        // Empty copies are valid
+        {
+            // An empty copy
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                             {0, 0, 1});
+            // An empty copy with depth = 0
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 0});
+            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                             {0, 0, 0});
+            // An empty copy touching the end of the data
+            TestWriteTexture(dataSize, dataSize, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+            TestWriteTexture(dataSize, dataSize, 0, wgpu::kCopyStrideUndefined, destination, 0,
+                             {0, 0, 0}, {0, 0, 1});
+            // An empty copy touching the side of the texture
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {16, 16, 0}, {0, 0, 1});
+            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0,
+                             {16, 16, 0}, {0, 0, 1});
+            // An empty copy with depth = 1 and bytesPerRow > 0
+            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
+                             {0, 0, 0}, {0, 0, 1});
+            // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
+            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
+                             {0, 0, 0}, {0, 1, 0});
+            TestWriteTexture(dataSize, 0, 256, 1, destination, 0, {0, 0, 0}, {0, 1, 0});
+            TestWriteTexture(dataSize, 0, 256, 16, destination, 0, {0, 0, 0}, {0, 1, 0});
+        }
+    }
+
+    // Test OOB conditions on the data
+    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnData) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // OOB on the data because we copy too many pixels
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 5, 1}));
+
+        // OOB on the data because of the offset
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 4, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+
+        // OOB on the data because utils::RequiredBytesInCopy overflows
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 512, 3, destination, 0, {0, 0, 0}, {4, 3, 1}));
+
+        // Not OOB on the data although bytes per row * height overflows
+        // but utils::RequiredBytesInCopy * depth does not overflow
+        {
+            uint32_t sourceDataSize =
+                utils::RequiredBytesInCopy(256, 0, {7, 3, 1}, wgpu::TextureFormat::RGBA8Unorm);
+            ASSERT_TRUE(256 * 3 > sourceDataSize) << "bytes per row * height should overflow data";
+
+            TestWriteTexture(sourceDataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {7, 3, 1});
+        }
+    }
+
+    // Test OOB conditions on the texture
+    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnTexture) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // OOB on the texture because x + width overflows
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {13, 12, 0}, {4, 4, 1}));
+
+        // OOB on the texture because y + width overflows
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 13, 0}, {4, 4, 1}));
+
+        // OOB on the texture because we overflow a non-zero mip
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {1, 0, 0}, {4, 4, 1}));
+
+        // OOB on the texture even on an empty copy when we copy to a non-existent mip.
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1}));
+
+        // OOB on the texture because slice overflows
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1}));
+    }
+
+    // Test that we force Depth=1 on writes to 2D textures
+    TEST_F(QueueWriteTextureValidationTest, DepthConstraintFor2DTextures) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(0, 0, {0, 0, 2}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // Depth > 1 on an empty copy still errors
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2}));
+    }
+
+    // Test WriteTexture with incorrect texture usage
+    TEST_F(QueueWriteTextureValidationTest, IncorrectUsage) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture sampled = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::TextureBinding);
+
+        // Incorrect destination usage
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1}));
+    }
+
+    // Test incorrect values of bytesPerRow and that values not divisible by 256 are allowed.
+    TEST_F(QueueWriteTextureValidationTest, BytesPerRowConstraints) {
+        wgpu::Texture destination = Create2DTexture({3, 7, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // bytesPerRow = 0 or wgpu::kCopyStrideUndefined
+        {
+            // copyHeight > 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+            TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {0, 7, 1});
+            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 7, destination,
+                                                 0, {0, 0, 0}, {0, 7, 1}));
+
+            // copyDepth > 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+            TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {0, 1, 2});
+            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination,
+                                                 0, {0, 0, 0}, {0, 1, 2}));
+
+            // copyHeight = 1 and copyDepth = 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+            TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0, {0, 0, 0},
+                             {3, 1, 1});
+        }
+
+        // bytesPerRow = 11 is invalid since a row takes 12 bytes.
+        {
+            // copyHeight > 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 11, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+            // copyHeight == 0
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 11, 0, destination, 0, {0, 0, 0}, {3, 0, 1}));
+
+            // copyDepth > 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+            // copyDepth == 0
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 0}));
+
+            // copyHeight = 1 and copyDepth = 1
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+        }
+
+        // bytesPerRow = 12 is valid since a row takes 12 bytes.
+        TestWriteTexture(128, 0, 12, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+
+        // bytesPerRow = 13 is valid since a row takes 12 bytes.
+        TestWriteTexture(128, 0, 13, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+    }
+
+    // Test that if rowsPerImage is greater than 0, it must be at least copy height.
+    TEST_F(QueueWriteTextureValidationTest, RowsPerImageConstraints) {
+        uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 5, {4, 4, 2}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // rowsPerImage is wgpu::kCopyStrideUndefined
+        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {4, 4, 1});
+
+        // rowsPerImage is equal to copy height (Valid)
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
+
+        // rowsPerImage is larger than copy height (Valid)
+        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 1});
+        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 2});
+
+        // rowsPerImage is less than copy height (Invalid)
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {4, 4, 1}));
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {4, 4, 1}));
+    }
+
+    // Test WriteTexture with data offset
+    TEST_F(QueueWriteTextureValidationTest, DataOffset) {
+        uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // Offset aligned
+        TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+        // Offset not aligned
+        TestWriteTexture(dataSize, dataSize - 5, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+        // Offset+size too large
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, dataSize - 3, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1}));
+    }
+
+    // Test multisampled textures can be used in WriteTexture.
+    TEST_F(QueueWriteTextureValidationTest, WriteToMultisampledTexture) {
+        uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {2, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({2, 2, 1}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst, 4);
+
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 2, destination, 0, {0, 0, 0}, {2, 2, 1}));
+    }
+
+    // Test that WriteTexture cannot be run with a destroyed texture.
+    TEST_F(QueueWriteTextureValidationTest, DestroyedTexture) {
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 4, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+        destination.Destroy();
+
+        ASSERT_DEVICE_ERROR(
+            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+    }
+
+    // Test WriteTexture with texture in error state causes errors.
+    TEST_F(QueueWriteTextureValidationTest, TextureInErrorState) {
+        wgpu::TextureDescriptor errorTextureDescriptor;
+        errorTextureDescriptor.size.depthOrArrayLayers = 0;
+        ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture =
+                                device.CreateTexture(&errorTextureDescriptor));
+        wgpu::ImageCopyTexture errorImageCopyTexture =
+            utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+
+        wgpu::Extent3D extent3D = {0, 0, 0};
+
+        {
+            std::vector<uint8_t> data(4);
+            wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+
+            ASSERT_DEVICE_ERROR(queue.WriteTexture(&errorImageCopyTexture, data.data(), 4,
+                                                   &textureDataLayout, &extent3D));
+        }
+    }
+
+    // Test that WriteTexture throws an error when requiredBytesInCopy overflows uint64_t
+    TEST_F(QueueWriteTextureValidationTest, RequiredBytesInCopyOverflow) {
+        wgpu::Texture destination = Create2DTexture({1, 1, 16}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                    wgpu::TextureUsage::CopyDst);
+
+        // success because depth = 1.
+        TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 1});
+        // failure because bytesPerImage * (depth - 1) overflows.
+        ASSERT_DEVICE_ERROR(TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0,
+                                             {0, 0, 0}, {1, 1, 16}));
+    }
+
+    // Regression tests for a bug in the computation of texture data size in Dawn.
+    TEST_F(QueueWriteTextureValidationTest, TextureWriteDataSizeLastRowComputation) {
+        constexpr uint32_t kBytesPerRow = 256;
+        constexpr uint32_t kWidth = 4;
+        constexpr uint32_t kHeight = 4;
+
+        constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
+                                                                 wgpu::TextureFormat::RG8Unorm};
+
+        {
+            // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid data size
+            // in this test because the data sizes in WriteTexture are not in texels but in bytes.
+            constexpr uint32_t kInvalidDataSize = kBytesPerRow * (kHeight - 1) + kWidth;
+
+            for (wgpu::TextureFormat format : kFormats) {
+                wgpu::Texture destination =
+                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
+                ASSERT_DEVICE_ERROR(TestWriteTexture(kInvalidDataSize, 0, kBytesPerRow, kHeight,
+                                                     destination, 0, {0, 0, 0},
+                                                     {kWidth, kHeight, 1}));
+            }
+        }
+
+        {
+            for (wgpu::TextureFormat format : kFormats) {
+                uint32_t validDataSize =
+                    utils::RequiredBytesInCopy(kBytesPerRow, 0, {kWidth, kHeight, 1}, format);
+                wgpu::Texture destination =
+                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
+
+                // Verify the return value of RequiredBytesInCopy() is exactly the minimum valid
+                // data size in this test.
+                {
+                    uint32_t invalidDataSize = validDataSize - 1;
+                    ASSERT_DEVICE_ERROR(TestWriteTexture(invalidDataSize, 0, kBytesPerRow, kHeight,
+                                                         destination, 0, {0, 0, 0},
+                                                         {kWidth, kHeight, 1}));
+                }
+
+                {
+                    TestWriteTexture(validDataSize, 0, kBytesPerRow, kHeight, destination, 0,
+                                     {0, 0, 0}, {kWidth, kHeight, 1});
+                }
+            }
+        }
+    }
+
+    // Test write from data to mip map of non square texture
+    TEST_F(QueueWriteTextureValidationTest, WriteToMipmapOfNonSquareTexture) {
+        uint64_t dataSize =
+            utils::RequiredBytesInCopy(256, 0, {4, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        uint32_t maxMipmapLevel = 3;
+        wgpu::Texture destination =
+            Create2DTexture({4, 2, 1}, maxMipmapLevel, wgpu::TextureFormat::RGBA8Unorm,
+                            wgpu::TextureUsage::CopyDst);
+
+        // Copy to top level mip map
+        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 1, {0, 0, 0},
+                         {1, 1, 1});
+        // Copy to high level mip map
+        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2, {0, 0, 0},
+                         {2, 1, 1});
+        // Mip level out of range
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel,
+                                             {0, 0, 0}, {1, 1, 1}));
+        // Copy origin out of range
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2,
+                                             {1, 0, 0}, {2, 1, 1}));
+        // Copy size out of range
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 2, destination, maxMipmapLevel - 2,
+                                             {0, 0, 0}, {2, 2, 1}));
+    }
+
+    // Test writes to multiple array layers of an uncompressed texture
+    TEST_F(QueueWriteTextureValidationTest, WriteToMultipleArrayLayers) {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 2, 5}, 1, wgpu::TextureFormat::RGBA8Unorm,
+            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+        // Write to all array layers
+        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
+                                      {0, 0, 0}, {4, 2, 5});
+
+        // Write to the highest array layer
+        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
+                                      {0, 0, 4}, {4, 2, 1});
+
+        // Write to array layers in the middle
+        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
+                                      {0, 0, 1}, {4, 2, 3});
+
+        // Copy with a non-packed rowsPerImage
+        TestWriteTextureExactDataSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm,
+                                      {0, 0, 0}, {4, 2, 5});
+
+        // Copy with bytesPerRow = 500
+        TestWriteTextureExactDataSize(500, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
+                                      {0, 0, 1}, {4, 2, 3});
+    }
+
+    // Test it is invalid to write into a depth texture.
+    TEST_F(QueueWriteTextureValidationTest, WriteToDepthAspect) {
+        uint32_t bytesPerRow = sizeof(float) * 4;
+        const uint64_t dataSize = utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1},
+                                                             wgpu::TextureFormat::Depth32Float);
+
+        // Invalid to write into depth32float
+        {
+            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+                {4, 4, 1}, 1, wgpu::TextureFormat::Depth32Float, wgpu::TextureUsage::CopyDst);
+
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
+
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1},
+                                                 wgpu::TextureAspect::DepthOnly));
+        }
+
+        // Invalid to write into depth24plus
+        {
+            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
+
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1},
+                                                 wgpu::TextureAspect::DepthOnly));
+        }
+    }
+
+    // Test write texture to the stencil aspect
+    TEST_F(QueueWriteTextureValidationTest, WriteToStencilAspect) {
+        uint32_t bytesPerRow = 4;
+        const uint64_t dataSize =
+            utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::R8Uint);
+
+        // It is valid to write into the stencil aspect of depth24plus-stencil8
+        {
+            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24PlusStencil8,
+                wgpu::TextureUsage::CopyDst);
+
+            TestWriteTexture(dataSize, 0, bytesPerRow, wgpu::kCopyStrideUndefined, destination, 0,
+                             {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::StencilOnly);
+
+            // And that it fails if the buffer is one byte too small
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize - 1, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1},
+                                                 wgpu::TextureAspect::StencilOnly));
+
+            // It is invalid to write just part of the subresource size
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 3, destination, 0,
+                                                 {0, 0, 0}, {3, 3, 1},
+                                                 wgpu::TextureAspect::StencilOnly));
+        }
+
+        // It is invalid to write into the stencil aspect of depth24plus (no stencil)
+        {
+            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
+                                                 {0, 0, 0}, {4, 4, 1},
+                                                 wgpu::TextureAspect::StencilOnly));
+        }
+    }
+
+    class WriteTextureTest_CompressedTextureFormats : public QueueWriteTextureValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                     wgpu::FeatureName::TextureCompressionETC2,
+                                                     wgpu::FeatureName::TextureCompressionASTC};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 3;
+            return adapter.CreateDevice(&descriptor);
+        }
+
+        wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
+                                      uint32_t mipmapLevels = 1,
+                                      uint32_t width = kWidth,
+                                      uint32_t height = kHeight) {
+            constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst;
+            constexpr uint32_t kArrayLayers = 1;
+            return QueueWriteTextureValidationTest::Create2DTexture(
+                {width, height, kArrayLayers}, mipmapLevels, format, kUsage, 1);
+        }
+
+        void TestWriteTexture(size_t dataSize,
+                              uint32_t dataOffset,
+                              uint32_t dataBytesPerRow,
+                              uint32_t dataRowsPerImage,
+                              wgpu::Texture texture,
+                              uint32_t textLevel,
+                              wgpu::Origin3D textOrigin,
+                              wgpu::Extent3D size) {
+            QueueWriteTextureValidationTest::TestWriteTexture(dataSize, dataOffset, dataBytesPerRow,
+                                                              dataRowsPerImage, texture, textLevel,
+                                                              textOrigin, size);
+        }
+
+        static constexpr uint32_t kWidth = 120;
+        static constexpr uint32_t kHeight = 120;
+    };
+
+    // Tests to verify that data offset may not be a multiple of the compressed texture block size
+    TEST_F(WriteTextureTest_CompressedTextureFormats, DataOffset) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::Texture texture = Create2DTexture(format);
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+            // Valid if aligned.
+            {
+                uint32_t kAlignedOffset = utils::GetTexelBlockSizeInBytes(format);
+                TestWriteTexture(1024, kAlignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight, 1});
+            }
+
+            // Still valid if not aligned.
+            {
+                uint32_t kUnalignedOffset = utils::GetTexelBlockSizeInBytes(format) - 1;
+                TestWriteTexture(1024, kUnalignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight, 1});
+            }
+        }
+    }
+
+    // Tests to verify that bytesPerRow must not be less than (width / blockWidth) *
+    // blockSizeInBytes and that it doesn't have to be a multiple of the compressed
+    // texture block width.
+    TEST_F(WriteTextureTest_CompressedTextureFormats, BytesPerRow) {
+        // Used to compute test width and height.
+        constexpr uint32_t kTestBytesPerRow = 320;
+
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+            uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
+            uint32_t testWidth = kTestBytesPerRow * blockWidth / blockByteSize;
+            uint32_t testHeight = kTestBytesPerRow * blockHeight / blockByteSize;
+            wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
+
+            // Failures on the BytesPerRow that is not large enough.
+            {
+                uint32_t kSmallBytesPerRow = kTestBytesPerRow - blockByteSize;
+                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, kSmallBytesPerRow, 4, texture, 0,
+                                                     {0, 0, 0}, {testWidth, blockHeight, 1}));
+            }
+
+            // Test it is valid to use a BytesPerRow that is not a multiple of 256.
+            {
+                TestWriteTexture(1024, 0, kTestBytesPerRow, 4, texture, 0, {0, 0, 0},
+                                 {testWidth, blockHeight, 1});
+            }
+
+            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats.
+            {
+                TestWriteTexture(512, 0, blockByteSize, 4, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight, 1});
+            }
+
+            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats. Note that
+            // BytesPerRow is not a multiple of the blockByteSize (but is greater than it).
+            {
+                TestWriteTexture(512, 0, blockByteSize + 1, 4, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight, 1});
+            }
+        }
+    }
+
+    // rowsPerImage must be >= heightInBlocks.
+    TEST_F(WriteTextureTest_CompressedTextureFormats, RowsPerImage) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::Texture texture = Create2DTexture(format);
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+            // Valid usages of rowsPerImage in WriteTexture with compressed texture formats.
+            {
+                constexpr uint32_t kValidRowsPerImage = 5;
+                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight * 4, 1});
+            }
+            {
+                constexpr uint32_t kValidRowsPerImage = 4;
+                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                                 {blockWidth, blockHeight * 4, 1});
+            }
+
+            // rowsPerImage is smaller than height.
+            {
+                constexpr uint32_t kInvalidRowsPerImage = 3;
+                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, 256, kInvalidRowsPerImage, texture, 0,
+                                                     {0, 0, 0}, {blockWidth, blockWidth * 4, 1}));
+            }
+        }
+    }
+
+    // Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width
+    // and ImageOffset.y must be a multiple of the compressed texture block height
+    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageOffset) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::Texture texture = Create2DTexture(format);
+            wgpu::Texture texture2 = Create2DTexture(format);
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+            wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
+
+            // Valid usages of ImageOffset in WriteTexture with compressed texture formats.
+            {
+                TestWriteTexture(512, 0, 256, 4, texture, 0, smallestValidOrigin3D,
+                                 {blockWidth, blockHeight, 1});
+            }
+
+            // Failures on invalid ImageOffset.x.
+            {
+                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1,
+                                                  smallestValidOrigin3D.y, 0};
+                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                     {blockWidth, blockHeight, 1}));
+            }
+
+            // Failures on invalid ImageOffset.y.
+            {
+                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x,
+                                                  smallestValidOrigin3D.y - 1, 0};
+                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                     {blockWidth, blockHeight, 1}));
+            }
+        }
+    }
+
+    // Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width
+    // and ImageExtent.y must be a multiple of the compressed texture block height
+    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageExtent) {
+        constexpr uint32_t kMipmapLevels = 3;
+        // We choose a prime that is greater than the current max texel dimension size as a
+        // multiplier to compute the test texture size so that we can be certain that its level 2
+        // mipmap (x4) cannot be a multiple of the dimension. This is useful for testing padding at
+        // the edges of the mipmaps.
+        constexpr uint32_t kBlockPerDim = 13;
+
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+            uint32_t testWidth = blockWidth * kBlockPerDim;
+            uint32_t testHeight = blockHeight * kBlockPerDim;
+            wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+            wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+
+            wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
+
+            // Valid usages of ImageExtent in WriteTexture with compressed texture formats.
+            { TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, smallestValidExtent3D); }
+
+            // Valid usages of ImageExtent in WriteTexture with compressed texture formats
+            // and non-zero mipmap levels.
+            {
+                constexpr uint32_t kTestMipmapLevel = 2;
+                wgpu::Origin3D testOrigin = {
+                    ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
+                    ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
+
+                TestWriteTexture(512, 0, 256, 4, texture, kTestMipmapLevel, testOrigin,
+                                 smallestValidExtent3D);
+            }
+
+            // Failures on invalid ImageExtent.x.
+            {
+                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
+                                                  smallestValidExtent3D.height, 1};
+                ASSERT_DEVICE_ERROR(
+                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+            }
+
+            // Failures on invalid ImageExtent.y.
+            {
+                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
+                                                  smallestValidExtent3D.height - 1, 1};
+                ASSERT_DEVICE_ERROR(
+                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+            }
+        }
+    }
+
+    // Test writes to multiple array layers of a compressed texture
+    TEST_F(WriteTextureTest_CompressedTextureFormats, WriteToMultipleArrayLayers) {
+        constexpr uint32_t kWidthMultiplier = 3;
+        constexpr uint32_t kHeightMultiplier = 4;
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+            uint32_t testWidth = kWidthMultiplier * blockWidth;
+            uint32_t testHeight = kHeightMultiplier * blockHeight;
+            wgpu::Texture texture = QueueWriteTextureValidationTest::Create2DTexture(
+                {testWidth, testHeight, 20}, 1, format,
+                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+            // Write to all array layers
+            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 0},
+                                          {testWidth, testHeight, 20});
+
+            // Write to the highest array layer
+            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 19},
+                                          {testWidth, testHeight, 1});
+
+            // Write to array layers in the middle
+            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 1},
+                                          {testWidth, testHeight, 18});
+
+            // Write touching the texture corners with a non-packed rowsPerImage
+            TestWriteTextureExactDataSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
+                                          {testWidth - blockWidth, testHeight - blockHeight, 16});
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
new file mode 100644
index 0000000..208bcb0
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
@@ -0,0 +1,1141 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class RenderBundleValidationTest : public ValidationTest {
+      protected:
+        void SetUp() override {
+            ValidationTest::SetUp();
+
+            vsModule = utils::CreateShaderModule(device, R"(
+                struct S {
+                    transform : mat2x2<f32>
+                }
+                @group(0) @binding(0) var<uniform> uniforms : S;
+
+                @stage(vertex) fn main(@location(0) pos : vec2<f32>) -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+            fsModule = utils::CreateShaderModule(device, R"(
+                struct Uniforms {
+                    color : vec4<f32>
+                }
+                @group(1) @binding(0) var<uniform> uniforms : Uniforms;
+
+                struct Storage {
+                    dummy : array<f32>
+                }
+                @group(1) @binding(1) var<storage, read_write> ssbo : Storage;
+
+                @stage(fragment) fn main() {
+                })");
+
+            wgpu::BindGroupLayout bgls[] = {
+                utils::MakeBindGroupLayout(
+                    device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}),
+                utils::MakeBindGroupLayout(
+                    device, {
+                                {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
+                                {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
+                            })};
+
+            wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+            pipelineLayoutDesc.bindGroupLayoutCount = 2;
+            pipelineLayoutDesc.bindGroupLayouts = bgls;
+
+            pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+
+            utils::ComboRenderPipelineDescriptor descriptor;
+            InitializeRenderPipelineDescriptor(&descriptor);
+            pipeline = device.CreateRenderPipeline(&descriptor);
+
+            float data[8];
+            wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
+                                                              wgpu::BufferUsage::Uniform);
+
+            constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};
+
+            vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                                       wgpu::BufferUsage::Vertex);
+
+            // Dummy storage buffer.
+            wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
+                device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
+
+            // Vertex buffer with storage usage for testing read+write error usage.
+            vertexStorageBuffer =
+                utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                            wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);
+
+            bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
+            bg1 = utils::MakeBindGroup(
+                device, bgls[1],
+                {{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});
+
+            bg1Vertex = utils::MakeBindGroup(device, bgls[1],
+                                             {{0, buffer, 0, 8 * sizeof(float)},
+                                              {1, vertexStorageBuffer, 0, sizeof(kVertices)}});
+        }
+
+        void InitializeRenderPipelineDescriptor(utils::ComboRenderPipelineDescriptor* descriptor) {
+            descriptor->layout = pipelineLayout;
+            descriptor->vertex.module = vsModule;
+            descriptor->cFragment.module = fsModule;
+            descriptor->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+            descriptor->vertex.bufferCount = 1;
+            descriptor->cBuffers[0].arrayStride = 2 * sizeof(float);
+            descriptor->cBuffers[0].attributeCount = 1;
+            descriptor->cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+            descriptor->cAttributes[0].shaderLocation = 0;
+        }
+
+        wgpu::ShaderModule vsModule;
+        wgpu::ShaderModule fsModule;
+        wgpu::PipelineLayout pipelineLayout;
+        wgpu::RenderPipeline pipeline;
+        wgpu::Buffer vertexBuffer;
+        wgpu::Buffer vertexStorageBuffer;
+        wgpu::BindGroup bg0;
+        wgpu::BindGroup bg1;
+        wgpu::BindGroup bg1Vertex;
+    };
+
+}  // anonymous namespace
+
+// Test creating and encoding an empty render bundle.
+TEST_F(RenderBundleValidationTest, Empty) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+    commandEncoder.Finish();
+}
+
+// Test that an empty error bundle encoder produces an error bundle.
+// This is a regression test for error render bundle encoders containing no commands would
+// produce non-error render bundles.
+TEST_F(RenderBundleValidationTest, EmptyErrorEncoderProducesErrorBundle) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    // Having 0 attachments is invalid!
+    desc.colorFormatsCount = 0;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder;
+    ASSERT_DEVICE_ERROR(renderBundleEncoder = device.CreateRenderBundleEncoder(&desc));
+    wgpu::RenderBundle renderBundle;
+    ASSERT_DEVICE_ERROR(renderBundle = renderBundleEncoder.Finish());
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+    ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+}
+
+// Test executing zero render bundles.
+TEST_F(RenderBundleValidationTest, ZeroBundles) {
+    DummyRenderPass renderPass(device);
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(0, nullptr);
+    pass.End();
+    commandEncoder.Finish();
+}
+
+// Test successfully creating and encoding a render bundle into a command buffer.
+TEST_F(RenderBundleValidationTest, SimpleSuccess) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    renderBundleEncoder.SetPipeline(pipeline);
+    renderBundleEncoder.SetBindGroup(0, bg0);
+    renderBundleEncoder.SetBindGroup(1, bg1);
+    renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder.Draw(3);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+    commandEncoder.Finish();
+}
+
+// Test that render bundle debug groups must be well nested.
+TEST_F(RenderBundleValidationTest, DebugGroups) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    // Test a single debug group works.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PushDebugGroup("group");
+        renderBundleEncoder.PopDebugGroup();
+        renderBundleEncoder.Finish();
+    }
+
+    // Test nested debug groups work.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PushDebugGroup("group");
+        renderBundleEncoder.PushDebugGroup("group2");
+        renderBundleEncoder.PopDebugGroup();
+        renderBundleEncoder.PopDebugGroup();
+        renderBundleEncoder.Finish();
+    }
+
+    // Test popping when no group is pushed is invalid.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PopDebugGroup();
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // Test popping too many times is invalid.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PushDebugGroup("group");
+        renderBundleEncoder.PopDebugGroup();
+        renderBundleEncoder.PopDebugGroup();
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // Test that a single debug group must be popped.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PushDebugGroup("group");
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // Test that all debug groups must be popped.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.PushDebugGroup("group");
+        renderBundleEncoder.PushDebugGroup("group2");
+        renderBundleEncoder.PopDebugGroup();
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+}
+
+// Test render bundles do not inherit command buffer state
+TEST_F(RenderBundleValidationTest, StateInheritance) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    // Render bundle does not inherit pipeline so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        pass.SetPipeline(pipeline);
+
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.Draw(3);
+        ASSERT_DEVICE_ERROR(wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish());
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not inherit bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.Draw(3);
+        ASSERT_DEVICE_ERROR(wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish());
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not inherit pipeline and bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.Draw(3);
+        ASSERT_DEVICE_ERROR(wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish());
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not inherit buffers so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+
+        pass.SetVertexBuffer(0, vertexBuffer);
+
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1);
+        renderBundleEncoder.Draw(3);
+        ASSERT_DEVICE_ERROR(wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish());
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test render bundles do not persist command buffer state
+TEST_F(RenderBundleValidationTest, StatePersistence) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    // Render bundle does not persist pipeline so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not persist bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1);
+        wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not persist pipeline and bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1);
+        wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle does not persist buffers so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test executing render bundles clears command buffer state
+TEST_F(RenderBundleValidationTest, ClearsState) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    // Render bundle clears pipeline so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetPipeline(pipeline);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle clears bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetPipeline(pipeline);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle clears pipeline and bind groups so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Render bundle clears buffers so the draw is invalid.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.Draw(3);
+        pass.End();
+
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Test executing 0 bundles still clears command buffer state.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.ExecuteBundles(0, nullptr);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test creating and encoding multiple render bundles.
+TEST_F(RenderBundleValidationTest, MultipleBundles) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundle renderBundles[2] = {};
+
+    wgpu::RenderBundleEncoder renderBundleEncoder0 = device.CreateRenderBundleEncoder(&desc);
+    renderBundleEncoder0.SetPipeline(pipeline);
+    renderBundleEncoder0.SetBindGroup(0, bg0);
+    renderBundleEncoder0.SetBindGroup(1, bg1);
+    renderBundleEncoder0.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder0.Draw(3);
+    renderBundles[0] = renderBundleEncoder0.Finish();
+
+    wgpu::RenderBundleEncoder renderBundleEncoder1 = device.CreateRenderBundleEncoder(&desc);
+    renderBundleEncoder1.SetPipeline(pipeline);
+    renderBundleEncoder1.SetBindGroup(0, bg0);
+    renderBundleEncoder1.SetBindGroup(1, bg1);
+    renderBundleEncoder1.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder1.Draw(3);
+    renderBundles[1] = renderBundleEncoder1.Finish();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(2, renderBundles);
+    pass.End();
+    commandEncoder.Finish();
+}
+
+// Test that is is valid to execute a render bundle more than once.
+TEST_F(RenderBundleValidationTest, ExecuteMultipleTimes) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    renderBundleEncoder.SetPipeline(pipeline);
+    renderBundleEncoder.SetBindGroup(0, bg0);
+    renderBundleEncoder.SetBindGroup(1, bg1);
+    renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+    renderBundleEncoder.Draw(3);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.ExecuteBundles(1, &renderBundle);
+    pass.End();
+    commandEncoder.Finish();
+}
+
+// Test that it is an error to call Finish() on a render bundle encoder twice.
+TEST_F(RenderBundleValidationTest, FinishTwice) {
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Uint;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+    renderBundleEncoder.Finish();
+    ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+}
+
+// Test that it is invalid to create a render bundle with no texture formats
+TEST_F(RenderBundleValidationTest, RequiresAtLeastOneTextureFormat) {
+    // Test failure case.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+    }
+
+    // Test success with one color format.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Uint;
+        device.CreateRenderBundleEncoder(&desc);
+    }
+
+    // Test success with a depth stencil format.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.depthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+        device.CreateRenderBundleEncoder(&desc);
+    }
+}
+
+// Test that it is invalid to create a render bundle with no texture formats
+TEST_F(RenderBundleValidationTest, ColorFormatsCountOutOfBounds) {
+    std::array<wgpu::TextureFormat, kMaxColorAttachments + 1> colorFormats;
+    for (uint32_t i = 0; i < colorFormats.size(); ++i) {
+        colorFormats[i] = wgpu::TextureFormat::RGBA8Unorm;
+    }
+
+    // colorFormatsCount <= kMaxColorAttachments is valid.
+    {
+        wgpu::RenderBundleEncoderDescriptor desc;
+        desc.colorFormatsCount = kMaxColorAttachments;
+        desc.colorFormats = colorFormats.data();
+        device.CreateRenderBundleEncoder(&desc);
+    }
+
+    // colorFormatsCount > kMaxColorAttachments is invalid.
+    {
+        wgpu::RenderBundleEncoderDescriptor desc;
+        desc.colorFormatsCount = kMaxColorAttachments + 1;
+        desc.colorFormats = colorFormats.data();
+        ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+    }
+}
+
+// Test that render bundle sparse color formats.
+TEST_F(RenderBundleValidationTest, SparseColorFormats) {
+    // Sparse color formats is valid.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 2;
+        desc.cColorFormats[0] = wgpu::TextureFormat::Undefined;
+        desc.cColorFormats[1] = wgpu::TextureFormat::RGBA8Unorm;
+        device.CreateRenderBundleEncoder(&desc);
+    }
+
+    // When all color formats are undefined, depth stencil format must not be undefined.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::Undefined;
+        desc.depthStencilFormat = wgpu::TextureFormat::Undefined;
+        ASSERT_DEVICE_ERROR(
+            device.CreateRenderBundleEncoder(&desc),
+            testing::HasSubstr(
+                "No color or depthStencil attachments specified. At least one is required."));
+    }
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::Undefined;
+        desc.depthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+        device.CreateRenderBundleEncoder(&desc);
+    }
+}
+
+// Test that the render bundle depth stencil format cannot be set to undefined.
+TEST_F(RenderBundleValidationTest, DepthStencilFormatUndefined) {
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.depthStencilFormat = wgpu::TextureFormat::Undefined;
+    ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+}
+
+// Test that depthReadOnly must be equal to stencilReadOnly if depth stencil format contain
+// both depth and stencil formats.
+TEST_F(RenderBundleValidationTest, DepthStencilReadOnly) {
+    for (wgpu::TextureFormat format :
+         {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth32Float}) {
+        for (bool depthReadOnly : {true, false}) {
+            for (bool stencilReadOnly : {true, false}) {
+                utils::ComboRenderBundleEncoderDescriptor desc = {};
+                desc.depthStencilFormat = format;
+                desc.depthReadOnly = depthReadOnly;
+                desc.stencilReadOnly = stencilReadOnly;
+                if (format == wgpu::TextureFormat::Depth24PlusStencil8 &&
+                    depthReadOnly != stencilReadOnly) {
+                    ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+                } else {
+                    device.CreateRenderBundleEncoder(&desc);
+                }
+            }
+        }
+    }
+}
+// Test that resource usages are validated inside render bundles.
+TEST_F(RenderBundleValidationTest, UsageTracking) {
+    DummyRenderPass renderPass(device);
+
+    utils::ComboRenderBundleEncoderDescriptor desc = {};
+    desc.colorFormatsCount = 1;
+    desc.cColorFormats[0] = renderPass.attachmentFormat;
+
+    wgpu::RenderBundle renderBundle0;
+    wgpu::RenderBundle renderBundle1;
+
+    // First base case is successful. |bg1Vertex| does not reference |vertexBuffer|.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1Vertex);
+        renderBundleEncoder.SetVertexBuffer(0, vertexBuffer);
+        renderBundleEncoder.Draw(3);
+        renderBundle0 = renderBundleEncoder.Finish();
+    }
+
+    // Second base case is successful. |bg1| does not reference |vertexStorageBuffer|
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1);
+        renderBundleEncoder.SetVertexBuffer(0, vertexStorageBuffer);
+        renderBundleEncoder.Draw(3);
+        renderBundle1 = renderBundleEncoder.Finish();
+    }
+
+    // Test that a render bundle which sets a buffer as both vertex and storage is invalid.
+    // |bg1Vertex| references |vertexStorageBuffer|
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder = device.CreateRenderBundleEncoder(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.SetBindGroup(0, bg0);
+        renderBundleEncoder.SetBindGroup(1, bg1Vertex);
+        renderBundleEncoder.SetVertexBuffer(0, vertexStorageBuffer);
+        renderBundleEncoder.Draw(3);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // When both render bundles are in the same pass, |vertexStorageBuffer| is used
+    // as both read and write usage. This is invalid.
+    // renderBundle0 uses |vertexStorageBuffer| as a storage buffer.
+    // renderBundle1 uses |vertexStorageBuffer| as a vertex buffer.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle0);
+        pass.ExecuteBundles(1, &renderBundle1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // |vertexStorageBuffer| is used as both read and write usage. This is invalid.
+    // The render pass uses |vertexStorageBuffer| as a storage buffer.
+    // renderBundle1 uses |vertexStorageBuffer| as a vertex buffer.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1Vertex);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+
+        pass.ExecuteBundles(1, &renderBundle1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // |vertexStorageBuffer| is used as both read and write usage. This is invalid.
+    // renderBundle0 uses |vertexStorageBuffer| as a storage buffer.
+    // The render pass uses |vertexStorageBuffer| as a vertex buffer.
+    {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+
+        pass.ExecuteBundles(1, &renderBundle0);
+
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetVertexBuffer(0, vertexStorageBuffer);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test that encoding SetPipline with an incompatible color format produces an error.
+TEST_F(RenderBundleValidationTest, PipelineColorFormatMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 3;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    renderBundleDesc.cColorFormats[1] = wgpu::TextureFormat::RG16Float;
+    renderBundleDesc.cColorFormats[2] = wgpu::TextureFormat::R16Sint;
+
+    auto SetupRenderPipelineDescForTest = [this](utils::ComboRenderPipelineDescriptor* desc) {
+        InitializeRenderPipelineDescriptor(desc);
+        desc->cFragment.targetCount = 3;
+        desc->cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+        desc->cTargets[1].format = wgpu::TextureFormat::RG16Float;
+        desc->cTargets[2].format = wgpu::TextureFormat::R16Sint;
+        desc->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        desc->cTargets[1].writeMask = wgpu::ColorWriteMask::None;
+        desc->cTargets[2].writeMask = wgpu::ColorWriteMask::None;
+    };
+
+    // Test the success case.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.Finish();
+    }
+
+    // Test the failure case for mismatched format types.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+        desc.cTargets[1].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // Test the failure case for missing format
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+        desc.cFragment.targetCount = 2;
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+}
+
+// Test that encoding SetPipline with an incompatible depth stencil format produces an error.
+TEST_F(RenderBundleValidationTest, PipelineDepthStencilFormatMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    renderBundleDesc.depthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    auto SetupRenderPipelineDescForTest = [this](utils::ComboRenderPipelineDescriptor* desc) {
+        InitializeRenderPipelineDescriptor(desc);
+        desc->cFragment.targetCount = 1;
+        desc->cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    };
+
+    // Test the success case.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+        desc.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.Finish();
+    }
+
+    // Test the failure case for mismatched format.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+        desc.EnableDepthStencil(wgpu::TextureFormat::Depth24Plus);
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+
+    // Test the failure case for missing format.
+    {
+        utils::ComboRenderPipelineDescriptor desc;
+        SetupRenderPipelineDescForTest(&desc);
+        desc.depthStencil = nullptr;
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&desc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+}
+
+// Test that encoding SetPipline with an incompatible sample count produces an error.
+TEST_F(RenderBundleValidationTest, PipelineSampleCountMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    renderBundleDesc.sampleCount = 4;
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDesc;
+    InitializeRenderPipelineDescriptor(&renderPipelineDesc);
+    renderPipelineDesc.cFragment.targetCount = 1;
+    renderPipelineDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+    renderPipelineDesc.multisample.count = 4;
+
+    // Test the success case.
+    {
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&renderPipelineDesc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        renderBundleEncoder.Finish();
+    }
+
+    // Test the failure case.
+    {
+        renderPipelineDesc.multisample.count = 1;
+
+        wgpu::RenderBundleEncoder renderBundleEncoder =
+            device.CreateRenderBundleEncoder(&renderBundleDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&renderPipelineDesc);
+        renderBundleEncoder.SetPipeline(pipeline);
+        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+    }
+}
+
+// Test that encoding ExecuteBundles with an incompatible color format produces an error.
+TEST_F(RenderBundleValidationTest, RenderPassColorFormatMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 3;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    renderBundleDesc.cColorFormats[1] = wgpu::TextureFormat::RG16Float;
+    renderBundleDesc.cColorFormats[2] = wgpu::TextureFormat::R16Sint;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder =
+        device.CreateRenderBundleEncoder(&renderBundleDesc);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    textureDesc.size = wgpu::Extent3D({400, 400, 1});
+
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture tex0 = device.CreateTexture(&textureDesc);
+
+    textureDesc.format = wgpu::TextureFormat::RG16Float;
+    wgpu::Texture tex1 = device.CreateTexture(&textureDesc);
+
+    textureDesc.format = wgpu::TextureFormat::R16Sint;
+    wgpu::Texture tex2 = device.CreateTexture(&textureDesc);
+
+    // Test the success case
+    {
+        utils::ComboRenderPassDescriptor renderPass({
+            tex0.CreateView(),
+            tex1.CreateView(),
+            tex2.CreateView(),
+        });
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        commandEncoder.Finish();
+    }
+
+    // Test the failure case for mismatched format
+    {
+        utils::ComboRenderPassDescriptor renderPass({
+            tex0.CreateView(),
+            tex1.CreateView(),
+            tex0.CreateView(),
+        });
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Test the failure case for missing format
+    {
+        utils::ComboRenderPassDescriptor renderPass({
+            tex0.CreateView(),
+            tex1.CreateView(),
+        });
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test that encoding ExecuteBundles with an incompatible depth stencil format produces an
+// error.
+TEST_F(RenderBundleValidationTest, RenderPassDepthStencilFormatMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    renderBundleDesc.depthStencilFormat = wgpu::TextureFormat::Depth24Plus;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder =
+        device.CreateRenderBundleEncoder(&renderBundleDesc);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    textureDesc.size = wgpu::Extent3D({400, 400, 1});
+
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture tex0 = device.CreateTexture(&textureDesc);
+
+    textureDesc.format = wgpu::TextureFormat::Depth24Plus;
+    wgpu::Texture tex1 = device.CreateTexture(&textureDesc);
+
+    textureDesc.format = wgpu::TextureFormat::Depth32Float;
+    wgpu::Texture tex2 = device.CreateTexture(&textureDesc);
+
+    // Test the success case
+    {
+        utils::ComboRenderPassDescriptor renderPass({tex0.CreateView()}, tex1.CreateView());
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        commandEncoder.Finish();
+    }
+
+    // Test the failure case for mismatched format
+    {
+        utils::ComboRenderPassDescriptor renderPass({tex0.CreateView()}, tex2.CreateView());
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+
+    // Test the failure case for missing format
+    {
+        utils::ComboRenderPassDescriptor renderPass({tex0.CreateView()});
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test that encoding ExecuteBundles with an incompatible sample count produces an error.
+TEST_F(RenderBundleValidationTest, RenderPassSampleCountMismatch) {
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::RenderBundleEncoder renderBundleEncoder =
+        device.CreateRenderBundleEncoder(&renderBundleDesc);
+    wgpu::RenderBundle renderBundle = renderBundleEncoder.Finish();
+
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    textureDesc.size = wgpu::Extent3D({400, 400, 1});
+
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture tex0 = device.CreateTexture(&textureDesc);
+
+    textureDesc.sampleCount = 4;
+    wgpu::Texture tex1 = device.CreateTexture(&textureDesc);
+
+    // Test the success case
+    {
+        utils::ComboRenderPassDescriptor renderPass({tex0.CreateView()});
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        commandEncoder.Finish();
+    }
+
+    // Test the failure case
+    {
+        utils::ComboRenderPassDescriptor renderPass({tex1.CreateView()});
+
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
+        pass.ExecuteBundles(1, &renderBundle);
+        pass.End();
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+    }
+}
+
+// Test that color attachment texture formats must be color renderable and
+// depth stencil texture formats must be depth/stencil.
+TEST_F(RenderBundleValidationTest, TextureFormats) {
+    // Test that color formats are validated as color.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::Depth24PlusStencil8;
+        ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+    }
+
+    // Test that color formats are validated as renderable.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.colorFormatsCount = 1;
+        desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Snorm;
+        ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+    }
+
+    // Test that depth/stencil formats are validated as depth/stencil.
+    {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.depthStencilFormat = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(device.CreateRenderBundleEncoder(&desc));
+    }
+
+    // Don't test non-renerable depth/stencil formats because we don't have any.
+}
diff --git a/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
new file mode 100644
index 0000000..d311f38
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
@@ -0,0 +1,1124 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+namespace {
+
+    class RenderPassDescriptorValidationTest : public ValidationTest {
+      public:
+        void AssertBeginRenderPassSuccess(const wgpu::RenderPassDescriptor* descriptor) {
+            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+            commandEncoder.Finish();
+        }
+        void AssertBeginRenderPassError(const wgpu::RenderPassDescriptor* descriptor) {
+            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
+        }
+
+      private:
+        wgpu::CommandEncoder TestBeginRenderPass(const wgpu::RenderPassDescriptor* descriptor) {
+            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(descriptor);
+            renderPassEncoder.End();
+            return commandEncoder;
+        }
+    };
+
+    wgpu::Texture CreateTexture(wgpu::Device& device,
+                                wgpu::TextureDimension dimension,
+                                wgpu::TextureFormat format,
+                                uint32_t width,
+                                uint32_t height,
+                                uint32_t arrayLayerCount,
+                                uint32_t mipLevelCount,
+                                uint32_t sampleCount = 1,
+                                wgpu::TextureUsage usage = wgpu::TextureUsage::RenderAttachment) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = dimension;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::TextureView Create2DAttachment(wgpu::Device& device,
+                                         uint32_t width,
+                                         uint32_t height,
+                                         wgpu::TextureFormat format) {
+        wgpu::Texture texture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, format, width, height, 1, 1);
+        return texture.CreateView();
+    }
+
+    // Using BeginRenderPass with no attachments isn't valid
+    TEST_F(RenderPassDescriptorValidationTest, Empty) {
+        utils::ComboRenderPassDescriptor renderPass({}, nullptr);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // A render pass with only one color or one depth attachment is ok
+    TEST_F(RenderPassDescriptorValidationTest, OneAttachment) {
+        // One color attachment
+        {
+            wgpu::TextureView color =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+            utils::ComboRenderPassDescriptor renderPass({color});
+
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+        // One depth-stencil attachment
+        {
+            wgpu::TextureView depthStencil =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencil);
+
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // Test OOB color attachment indices are handled
+    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentOutOfBounds) {
+        std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments + 1> colorAttachments;
+        for (uint32_t i = 0; i < colorAttachments.size(); i++) {
+            colorAttachments[i].view =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+            colorAttachments[i].resolveTarget = nullptr;
+            colorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+            colorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+            colorAttachments[i].storeOp = wgpu::StoreOp::Store;
+        }
+
+        // Control case: kMaxColorAttachments is valid.
+        {
+            wgpu::RenderPassDescriptor renderPass;
+            renderPass.colorAttachmentCount = kMaxColorAttachments;
+            renderPass.colorAttachments = colorAttachments.data();
+            renderPass.depthStencilAttachment = nullptr;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Error case: kMaxColorAttachments + 1 is an error.
+        {
+            wgpu::RenderPassDescriptor renderPass;
+            renderPass.colorAttachmentCount = kMaxColorAttachments + 1;
+            renderPass.colorAttachments = colorAttachments.data();
+            renderPass.depthStencilAttachment = nullptr;
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Test sparse color attachment validations
+    TEST_F(RenderPassDescriptorValidationTest, SparseColorAttachment) {
+        // Having sparse color attachment is valid.
+        {
+            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+            colorAttachments[0].view = nullptr;
+
+            colorAttachments[1].view =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+            colorAttachments[1].loadOp = wgpu::LoadOp::Load;
+            colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+
+            wgpu::RenderPassDescriptor renderPass;
+            renderPass.colorAttachmentCount = colorAttachments.size();
+            renderPass.colorAttachments = colorAttachments.data();
+            renderPass.depthStencilAttachment = nullptr;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // When all color attachments are null
+        {
+            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+            colorAttachments[0].view = nullptr;
+            colorAttachments[1].view = nullptr;
+
+            // Control case: depth stencil attachment is not null is valid.
+            {
+                wgpu::TextureView depthStencilView =
+                    Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+                wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
+                depthStencilAttachment.view = depthStencilView;
+                depthStencilAttachment.depthClearValue = 1.0f;
+                depthStencilAttachment.stencilClearValue = 0;
+                depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
+                depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+                depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Clear;
+                depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
+
+                wgpu::RenderPassDescriptor renderPass;
+                renderPass.colorAttachmentCount = colorAttachments.size();
+                renderPass.colorAttachments = colorAttachments.data();
+                renderPass.depthStencilAttachment = &depthStencilAttachment;
+                AssertBeginRenderPassSuccess(&renderPass);
+            }
+
+            // Error case: depth stencil attachment being null is invalid.
+            {
+                wgpu::RenderPassDescriptor renderPass;
+                renderPass.colorAttachmentCount = colorAttachments.size();
+                renderPass.colorAttachments = colorAttachments.data();
+                renderPass.depthStencilAttachment = nullptr;
+                AssertBeginRenderPassError(&renderPass);
+            }
+        }
+    }
+
+    // Check that the render pass color attachment must have the RenderAttachment usage.
+    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentInvalidUsage) {
+        // Control case: using a texture with RenderAttachment is valid.
+        {
+            wgpu::TextureView renderView =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+            utils::ComboRenderPassDescriptor renderPass({renderView});
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Error case: using a texture with Sampled is invalid.
+        {
+            wgpu::TextureDescriptor texDesc;
+            texDesc.usage = wgpu::TextureUsage::TextureBinding;
+            texDesc.size = {1, 1, 1};
+            texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+
+            utils::ComboRenderPassDescriptor renderPass({sampledTex.CreateView()});
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Attachments must have the same size
+    TEST_F(RenderPassDescriptorValidationTest, SizeMustMatch) {
+        wgpu::TextureView color1x1A =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::TextureView color1x1B =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::TextureView color2x2 =
+            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::RGBA8Unorm);
+
+        wgpu::TextureView depthStencil1x1 =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+        wgpu::TextureView depthStencil2x2 =
+            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::Depth24PlusStencil8);
+
+        // Control case: all the same size (1x1)
+        {
+            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil1x1);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // One of the color attachments has a different size
+        {
+            utils::ComboRenderPassDescriptor renderPass({color1x1A, color2x2});
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // The depth stencil attachment has a different size
+        {
+            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil2x2);
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Attachments formats must match whether they are used for color or depth-stencil
+    TEST_F(RenderPassDescriptorValidationTest, FormatMismatch) {
+        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::TextureView depthStencil =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+
+        // Using depth-stencil for color
+        {
+            utils::ComboRenderPassDescriptor renderPass({depthStencil});
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using color for depth-stencil
+        {
+            utils::ComboRenderPassDescriptor renderPass({}, color);
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Depth and stencil storeOps can be different
+    TEST_F(RenderPassDescriptorValidationTest, DepthStencilStoreOpMismatch) {
+        constexpr uint32_t kArrayLayers = 1;
+        constexpr uint32_t kLevelCount = 1;
+        constexpr uint32_t kSize = 32;
+        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::TextureFormat kDepthStencilFormat =
+            wgpu::TextureFormat::Depth24PlusStencil8;
+
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+        wgpu::Texture depthStencilTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = kArrayLayers;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = kLevelCount;
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+
+        // Base case: StoreOps match so render pass is a success
+        {
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Base case: StoreOps match so render pass is a success
+        {
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // StoreOps mismatch still is a success
+        {
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // Currently only texture views with arrayLayerCount == 1 are allowed to be color and depth
+    // stencil attachments
+    TEST_F(RenderPassDescriptorValidationTest, TextureViewLayerCountForColorAndDepthStencil) {
+        constexpr uint32_t kLevelCount = 1;
+        constexpr uint32_t kSize = 32;
+        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::TextureFormat kDepthStencilFormat =
+            wgpu::TextureFormat::Depth24PlusStencil8;
+
+        constexpr uint32_t kArrayLayers = 10;
+
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+        wgpu::Texture depthStencilTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+
+        wgpu::TextureViewDescriptor baseDescriptor;
+        baseDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        baseDescriptor.baseArrayLayer = 0;
+        baseDescriptor.arrayLayerCount = kArrayLayers;
+        baseDescriptor.baseMipLevel = 0;
+        baseDescriptor.mipLevelCount = kLevelCount;
+
+        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.arrayLayerCount = 5;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.arrayLayerCount = 5;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using 2D array texture view that covers the first layer of the texture is OK for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.baseArrayLayer = 0;
+            descriptor.arrayLayerCount = 1;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D array texture view that covers the first layer is OK for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.baseArrayLayer = 0;
+            descriptor.arrayLayerCount = 1;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D array texture view that covers the last layer is OK for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.baseArrayLayer = kArrayLayers - 1;
+            descriptor.arrayLayerCount = 1;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D array texture view that covers the last layer is OK for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.baseArrayLayer = kArrayLayers - 1;
+            descriptor.arrayLayerCount = 1;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // Check that the render pass depth attachment must have the RenderAttachment usage.
+    TEST_F(RenderPassDescriptorValidationTest, DepthAttachmentInvalidUsage) {
+        // Control case: using a texture with RenderAttachment is valid.
+        {
+            wgpu::TextureView renderView =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth32Float);
+            utils::ComboRenderPassDescriptor renderPass({}, renderView);
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Error case: using a texture with Sampled is invalid.
+        {
+            wgpu::TextureDescriptor texDesc;
+            texDesc.usage = wgpu::TextureUsage::TextureBinding;
+            texDesc.size = {1, 1, 1};
+            texDesc.format = wgpu::TextureFormat::Depth32Float;
+            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+            wgpu::TextureView sampledView = sampledTex.CreateView();
+
+            utils::ComboRenderPassDescriptor renderPass({}, sampledView);
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Only 2D texture views with mipLevelCount == 1 are allowed to be color attachments
+    TEST_F(RenderPassDescriptorValidationTest, TextureViewLevelCountForColorAndDepthStencil) {
+        constexpr uint32_t kArrayLayers = 1;
+        constexpr uint32_t kSize = 32;
+        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::TextureFormat kDepthStencilFormat =
+            wgpu::TextureFormat::Depth24PlusStencil8;
+
+        constexpr uint32_t kLevelCount = 4;
+
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+        wgpu::Texture depthStencilTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+
+        wgpu::TextureViewDescriptor baseDescriptor;
+        baseDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+        baseDescriptor.baseArrayLayer = 0;
+        baseDescriptor.arrayLayerCount = kArrayLayers;
+        baseDescriptor.baseMipLevel = 0;
+        baseDescriptor.mipLevelCount = kLevelCount;
+
+        // Using 2D texture view with mipLevelCount > 1 is not allowed for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.mipLevelCount = 2;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using 2D texture view with mipLevelCount > 1 is not allowed for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.mipLevelCount = 2;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using 2D texture view that covers the first level of the texture is OK for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.baseMipLevel = 0;
+            descriptor.mipLevelCount = 1;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D texture view that covers the first level is OK for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.baseMipLevel = 0;
+            descriptor.mipLevelCount = 1;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D texture view that covers the last level is OK for color
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kColorFormat;
+            descriptor.baseMipLevel = kLevelCount - 1;
+            descriptor.mipLevelCount = 1;
+
+            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using 2D texture view that covers the last level is OK for depth stencil
+        {
+            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+            descriptor.format = kDepthStencilFormat;
+            descriptor.baseMipLevel = kLevelCount - 1;
+            descriptor.mipLevelCount = 1;
+
+            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // It is not allowed to set resolve target when the color attachment is non-multisampled.
+    TEST_F(RenderPassDescriptorValidationTest, NonMultisampledColorWithResolveTarget) {
+        static constexpr uint32_t kArrayLayers = 1;
+        static constexpr uint32_t kLevelCount = 1;
+        static constexpr uint32_t kSize = 32;
+        static constexpr uint32_t kSampleCount = 1;
+        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, kSampleCount);
+        wgpu::Texture resolveTargetTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, kSampleCount);
+        wgpu::TextureView colorTextureView = colorTexture.CreateView();
+        wgpu::TextureView resolveTargetTextureView = resolveTargetTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    class MultisampledRenderPassDescriptorValidationTest
+        : public RenderPassDescriptorValidationTest {
+      public:
+        utils::ComboRenderPassDescriptor CreateMultisampledRenderPass() {
+            return utils::ComboRenderPassDescriptor({CreateMultisampledColorTextureView()});
+        }
+
+        wgpu::TextureView CreateMultisampledColorTextureView() {
+            return CreateColorTextureView(kSampleCount);
+        }
+
+        wgpu::TextureView CreateNonMultisampledColorTextureView() {
+            return CreateColorTextureView(1);
+        }
+
+        static constexpr uint32_t kArrayLayers = 1;
+        static constexpr uint32_t kLevelCount = 1;
+        static constexpr uint32_t kSize = 32;
+        static constexpr uint32_t kSampleCount = 4;
+        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+      private:
+        wgpu::TextureView CreateColorTextureView(uint32_t sampleCount) {
+            wgpu::Texture colorTexture =
+                CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                              kArrayLayers, kLevelCount, sampleCount);
+
+            return colorTexture.CreateView();
+        }
+    };
+
+    // Tests on the use of multisampled textures as color attachments
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorAttachments) {
+        wgpu::TextureView colorTextureView = CreateNonMultisampledColorTextureView();
+        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+        wgpu::TextureView multisampledColorTextureView = CreateMultisampledColorTextureView();
+
+        // It is allowed to use a multisampled color attachment without setting resolve target.
+        {
+            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // It is not allowed to use multiple color attachments with different sample counts.
+        {
+            utils::ComboRenderPassDescriptor renderPass(
+                {multisampledColorTextureView, colorTextureView});
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // It is not allowed to use a multisampled resolve target.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledResolveTarget) {
+        wgpu::TextureView multisampledResolveTargetView = CreateMultisampledColorTextureView();
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = multisampledResolveTargetView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // It is not allowed to use a resolve target with array layer count > 1.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetArrayLayerMoreThanOne) {
+        constexpr uint32_t kArrayLayers2 = 2;
+        wgpu::Texture resolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers2, kLevelCount);
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2DArray;
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&viewDesc);
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // It is not allowed to use a resolve target with mipmap level count > 1.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetMipmapLevelMoreThanOne) {
+        constexpr uint32_t kLevelCount2 = 2;
+        wgpu::Texture resolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount2);
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // It is not allowed to use a resolve target which is created from a texture whose usage does
+    // not include wgpu::TextureUsage::RenderAttachment.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetUsageNoRenderAttachment) {
+        constexpr wgpu::TextureUsage kUsage =
+            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture nonColorUsageResolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, 1, kUsage);
+        wgpu::TextureView nonColorUsageResolveTextureView =
+            nonColorUsageResolveTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = nonColorUsageResolveTextureView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // It is not allowed to use a resolve target which is in error state.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetInErrorState) {
+        wgpu::Texture resolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+        wgpu::TextureViewDescriptor errorTextureView;
+        errorTextureView.dimension = wgpu::TextureViewDimension::e2D;
+        errorTextureView.format = kColorFormat;
+        errorTextureView.baseArrayLayer = kArrayLayers + 1;
+        ASSERT_DEVICE_ERROR(wgpu::TextureView errorResolveTarget =
+                                resolveTexture.CreateView(&errorTextureView));
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = errorResolveTarget;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // It is allowed to use a multisampled color attachment and a non-multisampled resolve target.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorWithResolveTarget) {
+        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // It is not allowed to use a resolve target in a format different from the color attachment.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetDifferentFormat) {
+        constexpr wgpu::TextureFormat kColorFormat2 = wgpu::TextureFormat::BGRA8Unorm;
+        wgpu::Texture resolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat2, kSize, kSize,
+                          kArrayLayers, kLevelCount);
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests on the size of the resolve target.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest,
+           ColorAttachmentResolveTargetDimensionMismatch) {
+        constexpr uint32_t kSize2 = kSize * 2;
+        wgpu::Texture resolveTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize2, kSize2,
+                          kArrayLayers, kLevelCount + 1);
+
+        wgpu::TextureViewDescriptor textureViewDescriptor;
+        textureViewDescriptor.nextInChain = nullptr;
+        textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+        textureViewDescriptor.format = kColorFormat;
+        textureViewDescriptor.mipLevelCount = 1;
+        textureViewDescriptor.baseArrayLayer = 0;
+        textureViewDescriptor.arrayLayerCount = 1;
+
+        {
+            wgpu::TextureViewDescriptor firstMipLevelDescriptor = textureViewDescriptor;
+            firstMipLevelDescriptor.baseMipLevel = 0;
+
+            wgpu::TextureView resolveTextureView =
+                resolveTexture.CreateView(&firstMipLevelDescriptor);
+
+            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        {
+            wgpu::TextureViewDescriptor secondMipLevelDescriptor = textureViewDescriptor;
+            secondMipLevelDescriptor.baseMipLevel = 1;
+
+            wgpu::TextureView resolveTextureView =
+                resolveTexture.CreateView(&secondMipLevelDescriptor);
+
+            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // Tests the texture format of the resolve target must support being used as resolve target.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetFormat) {
+        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+            if (!utils::TextureFormatSupportsMultisampling(format) ||
+                !utils::TextureFormatSupportsRendering(format)) {
+                continue;
+            }
+
+            wgpu::Texture colorTexture =
+                CreateTexture(device, wgpu::TextureDimension::e2D, format, kSize, kSize,
+                              kArrayLayers, kLevelCount, kSampleCount);
+            wgpu::Texture resolveTarget = CreateTexture(device, wgpu::TextureDimension::e2D, format,
+                                                        kSize, kSize, kArrayLayers, kLevelCount, 1);
+
+            utils::ComboRenderPassDescriptor renderPass({colorTexture.CreateView()});
+            renderPass.cColorAttachments[0].resolveTarget = resolveTarget.CreateView();
+            if (utils::TextureFormatSupportsResolveTarget(format)) {
+                AssertBeginRenderPassSuccess(&renderPass);
+            } else {
+                AssertBeginRenderPassError(&renderPass);
+            }
+        }
+    }
+
+    // Tests on the sample count of depth stencil attachment.
+    TEST_F(MultisampledRenderPassDescriptorValidationTest, DepthStencilAttachmentSampleCount) {
+        constexpr wgpu::TextureFormat kDepthStencilFormat =
+            wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture multisampledDepthStencilTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, kSampleCount);
+        wgpu::TextureView multisampledDepthStencilTextureView =
+            multisampledDepthStencilTexture.CreateView();
+
+        // It is not allowed to use a depth stencil attachment whose sample count is different from
+        // the one of the color attachment.
+        {
+            wgpu::Texture depthStencilTexture =
+                CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize,
+                              kSize, kArrayLayers, kLevelCount);
+            wgpu::TextureView depthStencilTextureView = depthStencilTexture.CreateView();
+
+            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                        depthStencilTextureView);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({CreateNonMultisampledColorTextureView()},
+                                                        multisampledDepthStencilTextureView);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // It is allowed to use a multisampled depth stencil attachment whose sample count is equal
+        // to the one of the color attachment.
+        {
+            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                        multisampledDepthStencilTextureView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // It is allowed to use a multisampled depth stencil attachment while there is no color
+        // attachment.
+        {
+            utils::ComboRenderPassDescriptor renderPass({}, multisampledDepthStencilTextureView);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+    }
+
+    // Tests that NaN cannot be accepted as a valid color or depth clear value and INFINITY is valid
+    // in both color and depth clear values.
+    TEST_F(RenderPassDescriptorValidationTest, UseNaNOrINFINITYAsColorOrDepthClearValue) {
+        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+
+        // Tests that NaN cannot be used in clearColor.
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.r = NAN;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.g = NAN;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.b = NAN;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.a = NAN;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that INFINITY can be used in clearColor.
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.r = INFINITY;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.g = INFINITY;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.b = INFINITY;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        {
+            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+            renderPass.cColorAttachments[0].clearValue.a = INFINITY;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Tests that NaN cannot be used in depthClearValue.
+        {
+            wgpu::TextureView depth =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+            utils::ComboRenderPassDescriptor renderPass({color}, depth);
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthClearValue = NAN;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that INFINITY can be used in depthClearValue.
+        {
+            wgpu::TextureView depth =
+                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+            utils::ComboRenderPassDescriptor renderPass({color}, depth);
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthClearValue = INFINITY;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // TODO(https://crbug.com/dawn/666): Add a test case for clearStencil for stencilOnly
+        // once stencil8 is supported.
+    }
+
+    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilReadOnly) {
+        wgpu::TextureView colorView =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        wgpu::TextureView depthStencilView =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+        wgpu::TextureView depthStencilViewNoStencil =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+
+        // Tests that a read-only pass with depthReadOnly set to true succeeds.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values passes when
+        // there is no stencil component in the format (deprecated).
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+            EXPECT_DEPRECATION_WARNING(AssertBeginRenderPassSuccess(&renderPass));
+        }
+
+        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+        // there there is no stencil component in the format and stencil loadOp/storeOp are passed.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+            AssertBeginRenderPassError(&renderPass);
+
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            AssertBeginRenderPassError(&renderPass);
+
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with depthReadOnly=true and stencilReadOnly=true can pass
+        // when there is only depth component in the format. We actually enable readonly
+        // depth/stencil attachment in this case.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Tests that a pass with depthReadOnly=false and stencilReadOnly=true can pass
+        // when there is only depth component in the format. We actually don't enable readonly
+        // depth/stencil attachment in this case.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = false;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only once stencil8 is
+        // supported (depthReadOnly and stencilReadOnly mismatch but no depth component).
+
+        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+        // both depth and stencil components exist.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with loadOp set to clear and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with storeOp set to discard and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with only depthLoadOp set to load and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with only depthStoreOp set to store and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with only stencilLoadOp set to load and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Tests that a pass with only stencilStoreOp set to store and readOnly set to true fails.
+        {
+            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            AssertBeginRenderPassError(&renderPass);
+        }
+    }
+
+    // Check that the depth stencil attachment must use all aspects.
+    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilAllAspects) {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.usage = wgpu::TextureUsage::RenderAttachment;
+        texDesc.size = {1, 1, 1};
+
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.baseMipLevel = 0;
+        viewDesc.mipLevelCount = 1;
+        viewDesc.baseArrayLayer = 0;
+        viewDesc.arrayLayerCount = 1;
+
+        // Using all aspects of a depth+stencil texture is allowed.
+        {
+            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.aspect = wgpu::TextureAspect::All;
+
+            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+            utils::ComboRenderPassDescriptor renderPass({}, view);
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Using only depth of a depth+stencil texture is an error.
+        {
+            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+            utils::ComboRenderPassDescriptor renderPass({}, view);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using only stencil of a depth+stencil texture is an error.
+        {
+            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+
+            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+            utils::ComboRenderPassDescriptor renderPass({}, view);
+            AssertBeginRenderPassError(&renderPass);
+        }
+
+        // Using DepthOnly of a depth only texture is allowed.
+        {
+            texDesc.format = wgpu::TextureFormat::Depth24Plus;
+            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+            utils::ComboRenderPassDescriptor renderPass({}, view);
+            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only on stencil8 once this
+        // format is supported.
+    }
+
+    // TODO(cwallez@chromium.org): Constraints on attachment aliasing?
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
new file mode 100644
index 0000000..8eddecc
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
@@ -0,0 +1,1554 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+#include <sstream>
+
+class RenderPipelineValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+        fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+
+        fsModuleUint = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<u32> {
+                return vec4<u32>(0u, 255u, 0u, 255u);
+            })");
+    }
+
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+    wgpu::ShaderModule fsModuleUint;
+};
+
+namespace {
+    bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+        return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+               blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+               blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+    }
+}  // namespace
+
+// Test cases where creation should succeed
+TEST_F(RenderPipelineValidationTest, CreationSuccess) {
+    {
+        // New format
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+// Tests that depth bias parameters must not be NaN.
+TEST_F(RenderPipelineValidationTest, DepthBiasParameterNotBeNaN) {
+    // Control case, depth bias parameters in ComboRenderPipeline default to 0 which is finite
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.EnableDepthStencil();
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // Infinite depth bias clamp is valid
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+        depthStencil->depthBiasClamp = INFINITY;
+        device.CreateRenderPipeline(&descriptor);
+    }
+    // NAN depth bias clamp is invalid
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+        depthStencil->depthBiasClamp = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Infinite depth bias slope is valid
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+        depthStencil->depthBiasSlopeScale = INFINITY;
+        device.CreateRenderPipeline(&descriptor);
+    }
+    // NAN depth bias slope is invalid
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil = descriptor.EnableDepthStencil();
+        depthStencil->depthBiasSlopeScale = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Tests that depth or stencil aspect is required if we enable depth or stencil test.
+TEST_F(RenderPipelineValidationTest, DepthStencilAspectRequirement) {
+    // Control case, stencil aspect is required if stencil test or stencil write is enabled
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil =
+            descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+        depthStencil->stencilFront.compare = wgpu::CompareFunction::LessEqual;
+        depthStencil->stencilBack.failOp = wgpu::StencilOperation::Replace;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // It is invalid if the texture format doesn't have stencil aspect while stencil test is
+    // enabled (depthStencilState.stencilFront are not default values).
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil =
+            descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24Plus);
+        depthStencil->stencilFront.compare = wgpu::CompareFunction::LessEqual;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // It is invalid if the texture format doesn't have stencil aspect while stencil write is
+    // enabled (depthStencilState.stencilBack are not default values).
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil =
+            descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24Plus);
+        depthStencil->stencilBack.failOp = wgpu::StencilOperation::Replace;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Control case, depth aspect is required if depth test or depth write is enabled
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::DepthStencilState* depthStencil =
+            descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+        depthStencil->depthCompare = wgpu::CompareFunction::LessEqual;
+        depthStencil->depthWriteEnabled = true;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // TODO(dawn:666): Add tests for stencil-only format (Stencil8) with depth test or depth write
+    // enabled when Stencil8 format is implemented
+}
+
+// Tests that at least one color target state is required.
+TEST_F(RenderPipelineValidationTest, ColorTargetStateRequired) {
+    {
+        // This one succeeds because attachment 0 is the color attachment
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cFragment.targetCount = 1;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {  // Fail because lack of color target states (and depth/stencil state)
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cFragment.targetCount = 0;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Tests that target blend and writeMasks must not be set if the format is undefined.
+TEST_F(RenderPipelineValidationTest, UndefinedColorStateFormatWithBlendOrWriteMask) {
+    {
+        // Control case: Valid undefined format target.
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::Undefined;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+    {
+        // Error case: undefined format target with blend state set.
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::Undefined;
+        descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+        ASSERT_DEVICE_ERROR(
+            device.CreateRenderPipeline(&descriptor),
+            testing::HasSubstr("Color target[0] blend state is set when the format is undefined."));
+    }
+    {
+        // Error case: undefined format target with write masking not being none.
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::Undefined;
+        descriptor.cTargets[0].blend = nullptr;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::All;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor),
+                            testing::HasSubstr("Color target[0] write mask is set to"));
+    }
+}
+
+// Tests that the color formats must be renderable.
+TEST_F(RenderPipelineValidationTest, NonRenderableFormat) {
+    {
+        // Succeeds because RGBA8Unorm is renderable
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {
+        // Fails because RG11B10Ufloat is non-renderable
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RG11B10Ufloat;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Tests that the color formats must be blendable when blending is enabled.
+// Those are renderable color formats with "float" capabilities in
+// https://gpuweb.github.io/gpuweb/#plain-color-formats
+TEST_F(RenderPipelineValidationTest, NonBlendableFormat) {
+    {
+        // Succeeds because RGBA8Unorm is blendable
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {
+        // Fails because RGBA32Float is not blendable
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Float;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    {
+        // Succeeds because RGBA32Float is not blendable but blending is disabled
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].blend = nullptr;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Float;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {
+        // Fails because RGBA8Uint is not blendable
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModuleUint;
+        descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Uint;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    {
+        // Succeeds because RGBA8Uint is not blendable but blending is disabled
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModuleUint;
+        descriptor.cTargets[0].blend = nullptr;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Uint;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+// Tests that the format of the color state descriptor must match the output of the fragment shader.
+TEST_F(RenderPipelineValidationTest, FragmentOutputFormatCompatibility) {
+    std::array<const char*, 3> kScalarTypes = {{"f32", "i32", "u32"}};
+    std::array<wgpu::TextureFormat, 3> kColorFormats = {{wgpu::TextureFormat::RGBA8Unorm,
+                                                         wgpu::TextureFormat::RGBA8Sint,
+                                                         wgpu::TextureFormat::RGBA8Uint}};
+
+    for (size_t i = 0; i < kScalarTypes.size(); ++i) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        std::ostringstream stream;
+        stream << R"(
+            @stage(fragment) fn main() -> @location(0) vec4<)"
+               << kScalarTypes[i] << R"(> {
+                var result : vec4<)"
+               << kScalarTypes[i] << R"(>;
+                return result;
+            })";
+        descriptor.cFragment.module = utils::CreateShaderModule(device, stream.str().c_str());
+
+        for (size_t j = 0; j < kColorFormats.size(); ++j) {
+            descriptor.cTargets[0].format = kColorFormats[j];
+            if (i == j) {
+                device.CreateRenderPipeline(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+            }
+        }
+    }
+}
+
+// Tests that the component count of the color state target format must be fewer than that of the
+// fragment shader output.
+TEST_F(RenderPipelineValidationTest, FragmentOutputComponentCountCompatibility) {
+    std::array<wgpu::TextureFormat, 3> kColorFormats = {wgpu::TextureFormat::R8Unorm,
+                                                        wgpu::TextureFormat::RG8Unorm,
+                                                        wgpu::TextureFormat::RGBA8Unorm};
+
+    std::array<wgpu::BlendFactor, 8> kBlendFactors = {wgpu::BlendFactor::Zero,
+                                                      wgpu::BlendFactor::One,
+                                                      wgpu::BlendFactor::SrcAlpha,
+                                                      wgpu::BlendFactor::OneMinusSrcAlpha,
+                                                      wgpu::BlendFactor::Src,
+                                                      wgpu::BlendFactor::DstAlpha,
+                                                      wgpu::BlendFactor::OneMinusDstAlpha,
+                                                      wgpu::BlendFactor::Dst};
+
+    for (size_t componentCount = 1; componentCount <= 4; ++componentCount) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        std::ostringstream stream;
+        stream << R"(
+            @stage(fragment) fn main() -> @location(0) )";
+        switch (componentCount) {
+            case 1:
+                stream << R"(f32 {
+                return 1.0;
+                })";
+                break;
+            case 2:
+                stream << R"(vec2<f32> {
+                return vec2<f32>(1.0, 1.0);
+                })";
+                break;
+            case 3:
+                stream << R"(vec3<f32> {
+                return vec3<f32>(1.0, 1.0, 1.0);
+                })";
+                break;
+            case 4:
+                stream << R"(vec4<f32> {
+                return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+                })";
+                break;
+            default:
+                UNREACHABLE();
+        }
+        descriptor.cFragment.module = utils::CreateShaderModule(device, stream.str().c_str());
+
+        for (auto colorFormat : kColorFormats) {
+            descriptor.cTargets[0].format = colorFormat;
+
+            descriptor.cTargets[0].blend = nullptr;
+            if (componentCount >= utils::GetWGSLRenderableColorTextureComponentCount(colorFormat)) {
+                device.CreateRenderPipeline(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+            }
+
+            descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+
+            for (auto colorSrcFactor : kBlendFactors) {
+                descriptor.cBlends[0].color.srcFactor = colorSrcFactor;
+                for (auto colorDstFactor : kBlendFactors) {
+                    descriptor.cBlends[0].color.dstFactor = colorDstFactor;
+                    for (auto alphaSrcFactor : kBlendFactors) {
+                        descriptor.cBlends[0].alpha.srcFactor = alphaSrcFactor;
+                        for (auto alphaDstFactor : kBlendFactors) {
+                            descriptor.cBlends[0].alpha.dstFactor = alphaDstFactor;
+
+                            bool valid = true;
+                            if (componentCount >=
+                                utils::GetWGSLRenderableColorTextureComponentCount(colorFormat)) {
+                                if (BlendFactorContainsSrcAlpha(
+                                        descriptor.cTargets[0].blend->color.srcFactor) ||
+                                    BlendFactorContainsSrcAlpha(
+                                        descriptor.cTargets[0].blend->color.dstFactor)) {
+                                    valid = componentCount == 4;
+                                }
+                            } else {
+                                valid = false;
+                            }
+
+                            if (valid) {
+                                device.CreateRenderPipeline(&descriptor);
+                            } else {
+                                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+// Tests that when blendOperationMinOrMax is "min" or "max", both srcBlendFactor and dstBlendFactor
+// must be "one".
+TEST_F(RenderPipelineValidationTest, BlendOperationAndBlendFactors) {
+    constexpr std::array<wgpu::BlendFactor, 8> kBlendFactors = {wgpu::BlendFactor::Zero,
+                                                                wgpu::BlendFactor::One,
+                                                                wgpu::BlendFactor::SrcAlpha,
+                                                                wgpu::BlendFactor::OneMinusSrcAlpha,
+                                                                wgpu::BlendFactor::Src,
+                                                                wgpu::BlendFactor::DstAlpha,
+                                                                wgpu::BlendFactor::OneMinusDstAlpha,
+                                                                wgpu::BlendFactor::Dst};
+
+    constexpr std::array<wgpu::BlendOperation, 2> kBlendOperationsForTest = {
+        wgpu::BlendOperation::Max, wgpu::BlendOperation::Min};
+
+    for (wgpu::BlendOperation blendOperationMinOrMax : kBlendOperationsForTest) {
+        for (wgpu::BlendFactor srcFactor : kBlendFactors) {
+            for (wgpu::BlendFactor dstFactor : kBlendFactors) {
+                utils::ComboRenderPipelineDescriptor descriptor;
+                descriptor.vertex.module = vsModule;
+                descriptor.cFragment.module = fsModule;
+                descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+                descriptor.cTargets[0].blend = &descriptor.cBlends[0];
+                descriptor.cBlends[0].color.srcFactor = srcFactor;
+                descriptor.cBlends[0].color.dstFactor = dstFactor;
+                descriptor.cBlends[0].alpha.srcFactor = srcFactor;
+                descriptor.cBlends[0].alpha.dstFactor = dstFactor;
+
+                descriptor.cBlends[0].color.operation = blendOperationMinOrMax;
+                descriptor.cBlends[0].alpha.operation = wgpu::BlendOperation::Add;
+                if (srcFactor == wgpu::BlendFactor::One && dstFactor == wgpu::BlendFactor::One) {
+                    device.CreateRenderPipeline(&descriptor);
+                } else {
+                    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+                }
+
+                descriptor.cBlends[0].color.operation = wgpu::BlendOperation::Add;
+                descriptor.cBlends[0].alpha.operation = blendOperationMinOrMax;
+                if (srcFactor == wgpu::BlendFactor::One && dstFactor == wgpu::BlendFactor::One) {
+                    device.CreateRenderPipeline(&descriptor);
+                } else {
+                    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+                }
+            }
+        }
+    }
+}
+
+/// Tests that the sample count of the render pipeline must be valid.
+TEST_F(RenderPipelineValidationTest, SampleCount) {
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.multisample.count = 4;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.multisample.count = 3;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Tests that the sample count of the render pipeline must be equal to the one of every attachments
+// in the render pass.
+TEST_F(RenderPipelineValidationTest, SampleCountCompatibilityWithRenderPass) {
+    constexpr uint32_t kMultisampledCount = 4;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    wgpu::TextureDescriptor baseTextureDescriptor;
+    baseTextureDescriptor.size.width = 4;
+    baseTextureDescriptor.size.height = 4;
+    baseTextureDescriptor.size.depthOrArrayLayers = 1;
+    baseTextureDescriptor.mipLevelCount = 1;
+    baseTextureDescriptor.dimension = wgpu::TextureDimension::e2D;
+    baseTextureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    utils::ComboRenderPipelineDescriptor nonMultisampledPipelineDescriptor;
+    nonMultisampledPipelineDescriptor.multisample.count = 1;
+    nonMultisampledPipelineDescriptor.vertex.module = vsModule;
+    nonMultisampledPipelineDescriptor.cFragment.module = fsModule;
+    wgpu::RenderPipeline nonMultisampledPipeline =
+        device.CreateRenderPipeline(&nonMultisampledPipelineDescriptor);
+
+    nonMultisampledPipelineDescriptor.cFragment.targetCount = 0;
+    nonMultisampledPipelineDescriptor.EnableDepthStencil();
+    wgpu::RenderPipeline nonMultisampledPipelineWithDepthStencilOnly =
+        device.CreateRenderPipeline(&nonMultisampledPipelineDescriptor);
+
+    utils::ComboRenderPipelineDescriptor multisampledPipelineDescriptor;
+    multisampledPipelineDescriptor.multisample.count = kMultisampledCount;
+    multisampledPipelineDescriptor.vertex.module = vsModule;
+    multisampledPipelineDescriptor.cFragment.module = fsModule;
+    wgpu::RenderPipeline multisampledPipeline =
+        device.CreateRenderPipeline(&multisampledPipelineDescriptor);
+
+    multisampledPipelineDescriptor.cFragment.targetCount = 0;
+    multisampledPipelineDescriptor.EnableDepthStencil();
+    wgpu::RenderPipeline multisampledPipelineWithDepthStencilOnly =
+        device.CreateRenderPipeline(&multisampledPipelineDescriptor);
+
+    // It is not allowed to use multisampled render pass and non-multisampled render pipeline.
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.format = kColorFormat;
+        textureDescriptor.sampleCount = kMultisampledCount;
+        wgpu::Texture multisampledColorTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {multisampledColorTexture.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(nonMultisampledPipeline);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.sampleCount = kMultisampledCount;
+        textureDescriptor.format = kDepthStencilFormat;
+        wgpu::Texture multisampledDepthStencilTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {}, multisampledDepthStencilTexture.CreateView());
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(nonMultisampledPipelineWithDepthStencilOnly);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // It is allowed to use multisampled render pass and multisampled render pipeline.
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.format = kColorFormat;
+        textureDescriptor.sampleCount = kMultisampledCount;
+        wgpu::Texture multisampledColorTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {multisampledColorTexture.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(multisampledPipeline);
+        renderPass.End();
+
+        encoder.Finish();
+    }
+
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.sampleCount = kMultisampledCount;
+        textureDescriptor.format = kDepthStencilFormat;
+        wgpu::Texture multisampledDepthStencilTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {}, multisampledDepthStencilTexture.CreateView());
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(multisampledPipelineWithDepthStencilOnly);
+        renderPass.End();
+
+        encoder.Finish();
+    }
+
+    // It is not allowed to use non-multisampled render pass and multisampled render pipeline.
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.format = kColorFormat;
+        textureDescriptor.sampleCount = 1;
+        wgpu::Texture nonMultisampledColorTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor nonMultisampledRenderPassDescriptor(
+            {nonMultisampledColorTexture.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass =
+            encoder.BeginRenderPass(&nonMultisampledRenderPassDescriptor);
+        renderPass.SetPipeline(multisampledPipeline);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        wgpu::TextureDescriptor textureDescriptor = baseTextureDescriptor;
+        textureDescriptor.sampleCount = 1;
+        textureDescriptor.format = kDepthStencilFormat;
+        wgpu::Texture nonMultisampledDepthStencilTexture = device.CreateTexture(&textureDescriptor);
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {}, nonMultisampledDepthStencilTexture.CreateView());
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(multisampledPipelineWithDepthStencilOnly);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Tests that the vertex only pipeline must be used with a depth-stencil attachment only render pass
+TEST_F(RenderPipelineValidationTest, VertexOnlyPipelineRequireDepthStencilAttachment) {
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    wgpu::TextureDescriptor baseTextureDescriptor;
+    baseTextureDescriptor.size = {4, 4};
+    baseTextureDescriptor.mipLevelCount = 1;
+    baseTextureDescriptor.dimension = wgpu::TextureDimension::e2D;
+    baseTextureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    wgpu::TextureDescriptor colorTextureDescriptor = baseTextureDescriptor;
+    colorTextureDescriptor.format = kColorFormat;
+    colorTextureDescriptor.sampleCount = 1;
+    wgpu::Texture colorTexture = device.CreateTexture(&colorTextureDescriptor);
+
+    wgpu::TextureDescriptor depthStencilTextureDescriptor = baseTextureDescriptor;
+    depthStencilTextureDescriptor.sampleCount = 1;
+    depthStencilTextureDescriptor.format = kDepthStencilFormat;
+    wgpu::Texture depthStencilTexture = device.CreateTexture(&depthStencilTextureDescriptor);
+    utils::ComboRenderPassDescriptor renderPassDescriptor({}, depthStencilTexture.CreateView());
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    renderPipelineDescriptor.multisample.count = 1;
+    renderPipelineDescriptor.vertex.module = vsModule;
+
+    renderPipelineDescriptor.fragment = nullptr;
+
+    renderPipelineDescriptor.EnableDepthStencil(kDepthStencilFormat);
+
+    wgpu::RenderPipeline vertexOnlyPipeline =
+        device.CreateRenderPipeline(&renderPipelineDescriptor);
+
+    // Vertex-only render pipeline can work with depth stencil attachment and no color target
+    {
+        utils::ComboRenderPassDescriptor renderPassDescriptor({}, depthStencilTexture.CreateView());
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(vertexOnlyPipeline);
+        renderPass.End();
+
+        encoder.Finish();
+    }
+
+    // Vertex-only render pipeline must have a depth stencil attachment
+    {
+        utils::ComboRenderPassDescriptor renderPassDescriptor({});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(vertexOnlyPipeline);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Vertex-only render pipeline can not work with color target
+    {
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()},
+                                                              depthStencilTexture.CreateView());
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(vertexOnlyPipeline);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Vertex-only render pipeline can not work with color target, and must have a depth stencil
+    // attachment
+    {
+        utils::ComboRenderPassDescriptor renderPassDescriptor({colorTexture.CreateView()});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPass.SetPipeline(vertexOnlyPipeline);
+        renderPass.End();
+
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Tests that the sample count of the render pipeline must be valid
+// when the alphaToCoverage mode is enabled.
+TEST_F(RenderPipelineValidationTest, AlphaToCoverageAndSampleCount) {
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.multisample.count = 4;
+        descriptor.multisample.alphaToCoverageEnabled = true;
+
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.multisample.count = 1;
+        descriptor.multisample.alphaToCoverageEnabled = true;
+
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Tests that the texture component type in shader must match the bind group layout.
+TEST_F(RenderPipelineValidationTest, TextureComponentTypeCompatibility) {
+    constexpr uint32_t kNumTextureComponentType = 3u;
+    std::array<const char*, kNumTextureComponentType> kScalarTypes = {{"f32", "i32", "u32"}};
+    std::array<wgpu::TextureSampleType, kNumTextureComponentType> kTextureComponentTypes = {{
+        wgpu::TextureSampleType::Float,
+        wgpu::TextureSampleType::Sint,
+        wgpu::TextureSampleType::Uint,
+    }};
+
+    for (size_t i = 0; i < kNumTextureComponentType; ++i) {
+        for (size_t j = 0; j < kNumTextureComponentType; ++j) {
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vsModule;
+
+            std::ostringstream stream;
+            stream << R"(
+                @group(0) @binding(0) var myTexture : texture_2d<)"
+                   << kScalarTypes[i] << R"(>;
+
+                @stage(fragment) fn main() {
+                    textureDimensions(myTexture);
+                })";
+            descriptor.cFragment.module = utils::CreateShaderModule(device, stream.str().c_str());
+            descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, kTextureComponentTypes[j]}});
+            descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+
+            if (i == j) {
+                device.CreateRenderPipeline(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+            }
+        }
+    }
+}
+
+// Tests that the texture view dimension in shader must match the bind group layout.
+TEST_F(RenderPipelineValidationTest, TextureViewDimensionCompatibility) {
+    constexpr uint32_t kNumTextureViewDimensions = 6u;
+    std::array<const char*, kNumTextureViewDimensions> kTextureKeywords = {{
+        "texture_1d",
+        "texture_2d",
+        "texture_2d_array",
+        "texture_cube",
+        "texture_cube_array",
+        "texture_3d",
+    }};
+
+    std::array<wgpu::TextureViewDimension, kNumTextureViewDimensions> kTextureViewDimensions = {{
+        wgpu::TextureViewDimension::e1D,
+        wgpu::TextureViewDimension::e2D,
+        wgpu::TextureViewDimension::e2DArray,
+        wgpu::TextureViewDimension::Cube,
+        wgpu::TextureViewDimension::CubeArray,
+        wgpu::TextureViewDimension::e3D,
+    }};
+
+    for (size_t i = 0; i < kNumTextureViewDimensions; ++i) {
+        for (size_t j = 0; j < kNumTextureViewDimensions; ++j) {
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vsModule;
+
+            std::ostringstream stream;
+            stream << R"(
+                @group(0) @binding(0) var myTexture : )"
+                   << kTextureKeywords[i] << R"(<f32>;
+                @stage(fragment) fn main() {
+                    textureDimensions(myTexture);
+                })";
+            descriptor.cFragment.module = utils::CreateShaderModule(device, stream.str().c_str());
+            descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float,
+                          kTextureViewDimensions[j]}});
+            descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
+
+            if (i == j) {
+                device.CreateRenderPipeline(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+            }
+        }
+    }
+}
+
+// Test that declaring a storage buffer in the vertex shader without setting pipeline layout won't
+// cause crash.
+TEST_F(RenderPipelineValidationTest, StorageBufferInVertexShaderNoLayout) {
+    wgpu::ShaderModule vsModuleWithStorageBuffer = utils::CreateShaderModule(device, R"(
+        struct Dst {
+            data : array<u32, 100>
+        }
+        @group(0) @binding(0) var<storage, read_write> dst : Dst;
+        @stage(vertex) fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+            dst.data[VertexIndex] = 0x1234u;
+            return vec4<f32>();
+        })");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.layout = nullptr;
+    descriptor.vertex.module = vsModuleWithStorageBuffer;
+    descriptor.cFragment.module = fsModule;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Tests that only strip primitive topologies allow an index format
+TEST_F(RenderPipelineValidationTest, StripIndexFormatAllowed) {
+    constexpr uint32_t kNumStripType = 2u;
+    constexpr uint32_t kNumListType = 3u;
+    constexpr uint32_t kNumIndexFormat = 3u;
+
+    std::array<wgpu::PrimitiveTopology, kNumStripType> kStripTopologyTypes = {
+        {wgpu::PrimitiveTopology::LineStrip, wgpu::PrimitiveTopology::TriangleStrip}};
+
+    std::array<wgpu::PrimitiveTopology, kNumListType> kListTopologyTypes = {
+        {wgpu::PrimitiveTopology::PointList, wgpu::PrimitiveTopology::LineList,
+         wgpu::PrimitiveTopology::TriangleList}};
+
+    std::array<wgpu::IndexFormat, kNumIndexFormat> kIndexFormatTypes = {
+        {wgpu::IndexFormat::Undefined, wgpu::IndexFormat::Uint16, wgpu::IndexFormat::Uint32}};
+
+    for (wgpu::PrimitiveTopology primitiveTopology : kStripTopologyTypes) {
+        for (wgpu::IndexFormat indexFormat : kIndexFormatTypes) {
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vsModule;
+            descriptor.cFragment.module = fsModule;
+            descriptor.primitive.topology = primitiveTopology;
+            descriptor.primitive.stripIndexFormat = indexFormat;
+
+            // Always succeeds, regardless of if an index format is given.
+            device.CreateRenderPipeline(&descriptor);
+        }
+    }
+
+    for (wgpu::PrimitiveTopology primitiveTopology : kListTopologyTypes) {
+        for (wgpu::IndexFormat indexFormat : kIndexFormatTypes) {
+            utils::ComboRenderPipelineDescriptor descriptor;
+            descriptor.vertex.module = vsModule;
+            descriptor.cFragment.module = fsModule;
+            descriptor.primitive.topology = primitiveTopology;
+            descriptor.primitive.stripIndexFormat = indexFormat;
+
+            if (indexFormat == wgpu::IndexFormat::Undefined) {
+                // Succeeds even when the index format is undefined because the
+                // primitive topology isn't a strip type.
+                device.CreateRenderPipeline(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+            }
+        }
+    }
+}
+
+// Test that specifying a clampDepth value results in an error if the feature is not enabled.
+TEST_F(RenderPipelineValidationTest, ClampDepthWithoutFeature) {
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::PrimitiveDepthClampingState clampingState;
+        clampingState.clampDepth = true;
+        descriptor.primitive.nextInChain = &clampingState;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::PrimitiveDepthClampingState clampingState;
+        clampingState.clampDepth = false;
+        descriptor.primitive.nextInChain = &clampingState;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Test that depthStencil.depthCompare must not be undefiend.
+TEST_F(RenderPipelineValidationTest, DepthCompareUndefinedIsError) {
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth32Float);
+
+    // Control case: Always is valid.
+    descriptor.cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+    device.CreateRenderPipeline(&descriptor);
+
+    // Error case: Undefined is invalid.
+    descriptor.cDepthStencil.depthCompare = wgpu::CompareFunction::Undefined;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test that the entryPoint names must be present for the correct stage in the shader module.
+TEST_F(RenderPipelineValidationTest, EntryPointNameValidation) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn vertex_main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+        }
+
+        @stage(fragment) fn fragment_main() -> @location(0) vec4<f32> {
+            return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = module;
+    descriptor.vertex.entryPoint = "vertex_main";
+    descriptor.cFragment.module = module;
+    descriptor.cFragment.entryPoint = "fragment_main";
+
+    // Success case.
+    device.CreateRenderPipeline(&descriptor);
+
+    // Test for the vertex stage entryPoint name.
+    {
+        // The entryPoint name doesn't exist in the module.
+        descriptor.vertex.entryPoint = "main";
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+
+        // The entryPoint name exists, but not for the correct stage.
+        descriptor.vertex.entryPoint = "fragment_main";
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    descriptor.vertex.entryPoint = "vertex_main";
+
+    // Test for the fragment stage entryPoint name.
+    {
+        // The entryPoint name doesn't exist in the module.
+        descriptor.cFragment.entryPoint = "main";
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+
+        // The entryPoint name exists, but not for the correct stage.
+        descriptor.cFragment.entryPoint = "vertex_main";
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+}
+
+// Test that vertex attrib validation is for the correct entryPoint
+TEST_F(RenderPipelineValidationTest, VertexAttribCorrectEntryPoint) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn vertex0(@location(0) attrib0 : vec4<f32>)
+                                    -> @builtin(position) vec4<f32> {
+            return attrib0;
+        }
+        @stage(vertex) fn vertex1(@location(1) attrib1 : vec4<f32>)
+                                    -> @builtin(position) vec4<f32> {
+            return attrib1;
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = module;
+    descriptor.cFragment.module = fsModule;
+
+    descriptor.vertex.bufferCount = 1;
+    descriptor.cBuffers[0].attributeCount = 1;
+    descriptor.cBuffers[0].arrayStride = 16;
+    descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    descriptor.cAttributes[0].offset = 0;
+
+    // Success cases, the attribute used by the entryPoint is declared in the pipeline.
+    descriptor.vertex.entryPoint = "vertex0";
+    descriptor.cAttributes[0].shaderLocation = 0;
+    device.CreateRenderPipeline(&descriptor);
+
+    descriptor.vertex.entryPoint = "vertex1";
+    descriptor.cAttributes[0].shaderLocation = 1;
+    device.CreateRenderPipeline(&descriptor);
+
+    // Error cases, the attribute used by the entryPoint isn't declared in the pipeline.
+    descriptor.vertex.entryPoint = "vertex1";
+    descriptor.cAttributes[0].shaderLocation = 0;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+
+    descriptor.vertex.entryPoint = "vertex0";
+    descriptor.cAttributes[0].shaderLocation = 1;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test that fragment output validation is for the correct entryPoint
+TEST_F(RenderPipelineValidationTest, FragmentOutputCorrectEntryPoint) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn fragmentFloat() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+        @stage(fragment) fn fragmentUint() -> @location(0) vec4<u32> {
+            return vec4<u32>(0u, 0u, 0u, 0u);
+        }
+    )");
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = module;
+
+    // Success case, the component type matches between the pipeline and the entryPoint
+    descriptor.cFragment.entryPoint = "fragmentFloat";
+    descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Float;
+    device.CreateRenderPipeline(&descriptor);
+
+    descriptor.cFragment.entryPoint = "fragmentUint";
+    descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Uint;
+    device.CreateRenderPipeline(&descriptor);
+
+    // Error case, the component type doesn't match between the pipeline and the entryPoint
+    descriptor.cFragment.entryPoint = "fragmentUint";
+    descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Float;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+
+    descriptor.cFragment.entryPoint = "fragmentFloat";
+    descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA32Uint;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+// Test that unwritten fragment outputs must have a write mask of 0.
+TEST_F(RenderPipelineValidationTest, UnwrittenFragmentOutputsMask0) {
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>();
+        }
+    )");
+
+    wgpu::ShaderModule fsModuleWriteNone = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() {}
+    )");
+
+    wgpu::ShaderModule fsModuleWrite0 = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>();
+        }
+    )");
+
+    wgpu::ShaderModule fsModuleWrite1 = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(1) vec4<f32> {
+            return vec4<f32>();
+        }
+    )");
+
+    wgpu::ShaderModule fsModuleWriteBoth = utils::CreateShaderModule(device, R"(
+        struct FragmentOut {
+            @location(0) target0 : vec4<f32>,
+            @location(1) target1 : vec4<f32>,
+        }
+        @stage(fragment) fn main() -> FragmentOut {
+            var out : FragmentOut;
+            return out;
+        }
+    )");
+
+    // Control case: write to target 0
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cFragment.module = fsModuleWrite0;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // Control case: write to target 0 and target 1
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 2;
+        descriptor.cFragment.module = fsModuleWriteBoth;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // Write only target 1 (not in pipeline fragment state).
+    // Errors because target 0 does not have a write mask of 0.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::All;
+        descriptor.cFragment.module = fsModuleWrite1;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Write only target 1 (not in pipeline fragment state).
+    // OK because target 0 has a write mask of 0.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 1;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        descriptor.cFragment.module = fsModuleWrite1;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // Write only target 0 with two color targets.
+    // Errors because target 1 does not have a write mask of 0.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 2;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::Red;
+        descriptor.cTargets[1].writeMask = wgpu::ColorWriteMask::Alpha;
+        descriptor.cFragment.module = fsModuleWrite0;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Write only target 0 with two color targets.
+    // OK because target 1 has a write mask of 0.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 2;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::All;
+        descriptor.cTargets[1].writeMask = wgpu::ColorWriteMask::None;
+        descriptor.cFragment.module = fsModuleWrite0;
+        device.CreateRenderPipeline(&descriptor);
+    }
+
+    // Write nothing with two color targets.
+    // Errors because both target 0 and 1 have nonzero write masks.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 2;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::Red;
+        descriptor.cTargets[1].writeMask = wgpu::ColorWriteMask::Green;
+        descriptor.cFragment.module = fsModuleWriteNone;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Write nothing with two color targets.
+    // OK because target 0 and 1 have write masks of 0.
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+
+        descriptor.cFragment.targetCount = 2;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        descriptor.cTargets[1].writeMask = wgpu::ColorWriteMask::None;
+        descriptor.cFragment.module = fsModuleWriteNone;
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+// Test that fragment output validation is for the correct entryPoint
+TEST_F(RenderPipelineValidationTest, BindingsFromCorrectEntryPoint) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+        struct Uniforms {
+            data : vec4<f32>
+        }
+        @group(0) @binding(0) var<uniform> var0 : Uniforms;
+        @group(0) @binding(1) var<uniform> var1 : Uniforms;
+
+        @stage(vertex) fn vertex0() -> @builtin(position) vec4<f32> {
+            return var0.data;
+        }
+        @stage(vertex) fn vertex1() -> @builtin(position) vec4<f32> {
+            return var1.data;
+        }
+    )");
+
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+    wgpu::PipelineLayout layout0 = utils::MakeBasicPipelineLayout(device, &bgl0);
+
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
+    wgpu::PipelineLayout layout1 = utils::MakeBasicPipelineLayout(device, &bgl1);
+
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = module;
+    descriptor.cFragment.module = fsModule;
+
+    // Success case, the BGL matches the bindings used by the entryPoint
+    descriptor.vertex.entryPoint = "vertex0";
+    descriptor.layout = layout0;
+    device.CreateRenderPipeline(&descriptor);
+
+    descriptor.vertex.entryPoint = "vertex1";
+    descriptor.layout = layout1;
+    device.CreateRenderPipeline(&descriptor);
+
+    // Error case, the BGL doesn't match the bindings used by the entryPoint
+    descriptor.vertex.entryPoint = "vertex1";
+    descriptor.layout = layout0;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+
+    descriptor.vertex.entryPoint = "vertex0";
+    descriptor.layout = layout1;
+    ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+}
+
+class DepthClampingValidationTest : public RenderPipelineValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DepthClamping};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Tests that specifying a clampDepth value succeeds if the feature is enabled.
+TEST_F(DepthClampingValidationTest, Success) {
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::PrimitiveDepthClampingState clampingState;
+        clampingState.clampDepth = true;
+        descriptor.primitive.nextInChain = &clampingState;
+        device.CreateRenderPipeline(&descriptor);
+    }
+    {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+        wgpu::PrimitiveDepthClampingState clampingState;
+        clampingState.clampDepth = false;
+        descriptor.primitive.nextInChain = &clampingState;
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+class InterStageVariableMatchingValidationTest : public RenderPipelineValidationTest {
+  protected:
+    void CheckCreatingRenderPipeline(wgpu::ShaderModule vertexModule,
+                                     wgpu::ShaderModule fragmentModule,
+                                     bool shouldSucceed) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vertexModule;
+        descriptor.cFragment.module = fragmentModule;
+        if (shouldSucceed) {
+            device.CreateRenderPipeline(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+        }
+    }
+};
+
+// Tests that creating render pipeline should fail when there is a vertex output that doesn't have
+// its corresponding fragment input at the same location, and there is a fragment input that
+// doesn't have its corresponding vertex output at the same location.
+TEST_F(InterStageVariableMatchingValidationTest, MissingDeclarationAtSameLocation) {
+    wgpu::ShaderModule vertexModuleOutputAtLocation0 = utils::CreateShaderModule(device, R"(
+            struct A {
+                @location(0) vout: f32,
+                @builtin(position) pos: vec4<f32>,
+            }
+            @stage(vertex) fn main() -> A {
+                var vertexOut: A;
+                vertexOut.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return vertexOut;
+            })");
+    wgpu::ShaderModule fragmentModuleAtLocation0 = utils::CreateShaderModule(device, R"(
+            struct B {
+                @location(0) fin: f32
+            }
+            @stage(fragment) fn main(fragmentIn: B) -> @location(0) vec4<f32>  {
+                return vec4<f32>(fragmentIn.fin, 0.0, 0.0, 1.0);
+            })");
+    wgpu::ShaderModule fragmentModuleInputAtLocation1 = utils::CreateShaderModule(device, R"(
+            struct A {
+                @location(1) vout: f32
+            }
+            @stage(fragment) fn main(vertexOut: A) -> @location(0) vec4<f32>  {
+                return vec4<f32>(vertexOut.vout, 0.0, 0.0, 1.0);
+            })");
+    wgpu::ShaderModule vertexModuleOutputAtLocation1 = utils::CreateShaderModule(device, R"(
+            struct B {
+                @location(1) fin: f32,
+                @builtin(position) pos: vec4<f32>,
+            }
+            @stage(vertex) fn main() -> B {
+                var fragmentIn: B;
+                fragmentIn.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                return fragmentIn;
+            })");
+
+    {
+        CheckCreatingRenderPipeline(vertexModuleOutputAtLocation0, fsModule, false);
+        CheckCreatingRenderPipeline(vsModule, fragmentModuleAtLocation0, false);
+        CheckCreatingRenderPipeline(vertexModuleOutputAtLocation0, fragmentModuleInputAtLocation1,
+                                    false);
+        CheckCreatingRenderPipeline(vertexModuleOutputAtLocation1, fragmentModuleAtLocation0,
+                                    false);
+    }
+
+    {
+        CheckCreatingRenderPipeline(vertexModuleOutputAtLocation0, fragmentModuleAtLocation0, true);
+        CheckCreatingRenderPipeline(vertexModuleOutputAtLocation1, fragmentModuleInputAtLocation1,
+                                    true);
+    }
+}
+
+// Tests that creating render pipeline should fail when the type of a vertex stage output variable
+// doesn't match the type of the fragment stage input variable at the same location.
+TEST_F(InterStageVariableMatchingValidationTest, DifferentTypeAtSameLocation) {
+    constexpr std::array<const char*, 12> kTypes = {{"f32", "vec2<f32>", "vec3<f32>", "vec4<f32>",
+                                                     "i32", "vec2<i32>", "vec3<i32>", "vec4<i32>",
+                                                     "u32", "vec2<u32>", "vec3<u32>", "vec4<u32>"}};
+
+    std::array<wgpu::ShaderModule, 12> vertexModules;
+    std::array<wgpu::ShaderModule, 12> fragmentModules;
+    for (uint32_t i = 0; i < kTypes.size(); ++i) {
+        std::string interfaceDeclaration;
+        {
+            std::ostringstream sstream;
+            sstream << "struct A { @location(0) @interpolate(flat) a: " << kTypes[i] << ","
+                    << std::endl;
+            interfaceDeclaration = sstream.str();
+        }
+        {
+            std::ostringstream vertexStream;
+            vertexStream << interfaceDeclaration << R"(
+                    @builtin(position) pos: vec4<f32>,
+                }
+                @stage(vertex) fn main() -> A {
+                    var vertexOut: A;
+                    vertexOut.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                    return vertexOut;
+                })";
+            vertexModules[i] = utils::CreateShaderModule(device, vertexStream.str().c_str());
+        }
+        {
+            std::ostringstream fragmentStream;
+            fragmentStream << interfaceDeclaration << R"(
+                }
+                @stage(fragment) fn main(fragmentIn: A) -> @location(0) vec4<f32> {
+                    return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                })";
+            fragmentModules[i] = utils::CreateShaderModule(device, fragmentStream.str().c_str());
+        }
+    }
+
+    for (uint32_t vertexModuleIndex = 0; vertexModuleIndex < kTypes.size(); ++vertexModuleIndex) {
+        wgpu::ShaderModule vertexModule = vertexModules[vertexModuleIndex];
+        for (uint32_t fragmentModuleIndex = 0; fragmentModuleIndex < kTypes.size();
+             ++fragmentModuleIndex) {
+            wgpu::ShaderModule fragmentModule = fragmentModules[fragmentModuleIndex];
+            bool shouldSuccess = vertexModuleIndex == fragmentModuleIndex;
+            CheckCreatingRenderPipeline(vertexModule, fragmentModule, shouldSuccess);
+        }
+    }
+}
+
+// Tests that creating render pipeline should fail when the interpolation attribute of a vertex
+// stage output variable doesn't match the type of the fragment stage input variable at the same
+// location.
+TEST_F(InterStageVariableMatchingValidationTest, DifferentInterpolationAttributeAtSameLocation) {
+    enum class InterpolationType : uint8_t {
+        None = 0,
+        Perspective,
+        Linear,
+        Flat,
+        Count,
+    };
+    enum class InterpolationSampling : uint8_t {
+        None = 0,
+        Center,
+        Centroid,
+        Sample,
+        Count,
+    };
+    constexpr std::array<const char*, static_cast<size_t>(InterpolationType::Count)>
+        kInterpolationTypeString = {{"", "perspective", "linear", "flat"}};
+    constexpr std::array<const char*, static_cast<size_t>(InterpolationSampling::Count)>
+        kInterpolationSamplingString = {{"", "center", "centroid", "sample"}};
+
+    struct InterpolationAttribute {
+        InterpolationType interpolationType;
+        InterpolationSampling interpolationSampling;
+    };
+
+    // Interpolation sampling is not used with flat interpolation.
+    constexpr std::array<InterpolationAttribute, 10> validInterpolationAttributes = {{
+        {InterpolationType::None, InterpolationSampling::None},
+        {InterpolationType::Flat, InterpolationSampling::None},
+        {InterpolationType::Linear, InterpolationSampling::None},
+        {InterpolationType::Linear, InterpolationSampling::Center},
+        {InterpolationType::Linear, InterpolationSampling::Centroid},
+        {InterpolationType::Linear, InterpolationSampling::Sample},
+        {InterpolationType::Perspective, InterpolationSampling::None},
+        {InterpolationType::Perspective, InterpolationSampling::Center},
+        {InterpolationType::Perspective, InterpolationSampling::Centroid},
+        {InterpolationType::Perspective, InterpolationSampling::Sample},
+    }};
+
+    std::vector<wgpu::ShaderModule> vertexModules(validInterpolationAttributes.size());
+    std::vector<wgpu::ShaderModule> fragmentModules(validInterpolationAttributes.size());
+    for (uint32_t i = 0; i < validInterpolationAttributes.size(); ++i) {
+        std::string interfaceDeclaration;
+        {
+            const auto& interpolationAttribute = validInterpolationAttributes[i];
+            std::ostringstream sstream;
+            sstream << "struct A { @location(0)";
+            if (interpolationAttribute.interpolationType != InterpolationType::None) {
+                sstream << " @interpolate("
+                        << kInterpolationTypeString[static_cast<uint8_t>(
+                               interpolationAttribute.interpolationType)];
+                if (interpolationAttribute.interpolationSampling != InterpolationSampling::None) {
+                    sstream << ", "
+                            << kInterpolationSamplingString[static_cast<uint8_t>(
+                                   interpolationAttribute.interpolationSampling)];
+                }
+                sstream << ")";
+            }
+            sstream << " a : vec4<f32>," << std::endl;
+            interfaceDeclaration = sstream.str();
+        }
+        {
+            std::ostringstream vertexStream;
+            vertexStream << interfaceDeclaration << R"(
+                    @builtin(position) pos: vec4<f32>,
+                }
+                @stage(vertex) fn main() -> A {
+                    var vertexOut: A;
+                    vertexOut.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                    return vertexOut;
+                })";
+            vertexModules[i] = utils::CreateShaderModule(device, vertexStream.str().c_str());
+        }
+        {
+            std::ostringstream fragmentStream;
+            fragmentStream << interfaceDeclaration << R"(
+                }
+                @stage(fragment) fn main(fragmentIn: A) -> @location(0) vec4<f32> {
+                    return fragmentIn.a;
+                })";
+            fragmentModules[i] = utils::CreateShaderModule(device, fragmentStream.str().c_str());
+        }
+    }
+
+    auto GetAppliedInterpolationAttribute = [](const InterpolationAttribute& attribute) {
+        InterpolationAttribute appliedAttribute = {attribute.interpolationType,
+                                                   attribute.interpolationSampling};
+        switch (attribute.interpolationType) {
+            // If the interpolation attribute is not specified, then
+            // @interpolate(perspective, center) or @interpolate(perspective) is assumed.
+            case InterpolationType::None:
+                appliedAttribute.interpolationType = InterpolationType::Perspective;
+                appliedAttribute.interpolationSampling = InterpolationSampling::Center;
+                break;
+
+            // If the interpolation type is perspective or linear, and the interpolation
+            // sampling is not specified, then 'center' is assumed.
+            case InterpolationType::Perspective:
+            case InterpolationType::Linear:
+                if (appliedAttribute.interpolationSampling == InterpolationSampling::None) {
+                    appliedAttribute.interpolationSampling = InterpolationSampling::Center;
+                }
+                break;
+
+            case InterpolationType::Flat:
+                break;
+            default:
+                UNREACHABLE();
+        }
+        return appliedAttribute;
+    };
+
+    auto InterpolationAttributeMatch = [GetAppliedInterpolationAttribute](
+                                           const InterpolationAttribute& attribute1,
+                                           const InterpolationAttribute& attribute2) {
+        InterpolationAttribute appliedAttribute1 = GetAppliedInterpolationAttribute(attribute1);
+        InterpolationAttribute appliedAttribute2 = GetAppliedInterpolationAttribute(attribute2);
+
+        return appliedAttribute1.interpolationType == appliedAttribute2.interpolationType &&
+               appliedAttribute1.interpolationSampling == appliedAttribute2.interpolationSampling;
+    };
+
+    for (uint32_t vertexModuleIndex = 0; vertexModuleIndex < validInterpolationAttributes.size();
+         ++vertexModuleIndex) {
+        wgpu::ShaderModule vertexModule = vertexModules[vertexModuleIndex];
+        for (uint32_t fragmentModuleIndex = 0;
+             fragmentModuleIndex < validInterpolationAttributes.size(); ++fragmentModuleIndex) {
+            wgpu::ShaderModule fragmentModule = fragmentModules[fragmentModuleIndex];
+            bool shouldSuccess =
+                InterpolationAttributeMatch(validInterpolationAttributes[vertexModuleIndex],
+                                            validInterpolationAttributes[fragmentModuleIndex]);
+            CheckCreatingRenderPipeline(vertexModule, fragmentModule, shouldSuccess);
+        }
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
new file mode 100644
index 0000000..b19e423
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
@@ -0,0 +1,1698 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class ResourceUsageTrackingTest : public ValidationTest {
+      protected:
+        wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
+            wgpu::BufferDescriptor descriptor;
+            descriptor.size = size;
+            descriptor.usage = usage;
+
+            return device.CreateBuffer(&descriptor);
+        }
+
+        wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
+                                    wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.size = {1, 1, 1};
+            descriptor.sampleCount = 1;
+            descriptor.mipLevelCount = 1;
+            descriptor.usage = usage;
+            descriptor.format = format;
+
+            return device.CreateTexture(&descriptor);
+        }
+
+        // Note that it is valid to bind any bind groups for indices that the pipeline doesn't use.
+        // We create a no-op render or compute pipeline without any bindings, and set bind groups
+        // in the caller, so it is always correct for binding validation between bind groups and
+        // pipeline. But those bind groups in caller can be used for validation for other purposes.
+        wgpu::RenderPipeline CreateNoOpRenderPipeline() {
+            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                @stage(fragment) fn main() {
+                })");
+            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+            pipelineDescriptor.vertex.module = vsModule;
+            pipelineDescriptor.cFragment.module = fsModule;
+            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
+            return device.CreateRenderPipeline(&pipelineDescriptor);
+        }
+
+        wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
+            wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+                @stage(compute) @workgroup_size(1) fn main() {
+                })");
+            wgpu::ComputePipelineDescriptor pipelineDescriptor;
+            pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
+            pipelineDescriptor.compute.module = csModule;
+            pipelineDescriptor.compute.entryPoint = "main";
+            return device.CreateComputePipeline(&pipelineDescriptor);
+        }
+
+        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    };
+
+    // Test that using a single buffer in multiple read usages in the same pass is allowed.
+    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleReadUsage) {
+        // Test render pass
+        {
+            // Create a buffer, and use the buffer as both vertex and index buffer.
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+            pass.SetVertexBuffer(0, buffer);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Test compute pass
+        {
+            // Create buffer and bind group
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
+
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+            // Use the buffer as both uniform and readonly storage buffer in compute pass.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same buffer as both readable and writable in the same
+    // render pass. It is invalid in the same dispatch in compute pass.
+    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsage) {
+        // test render pass
+        {
+            // Create buffer and bind group
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+            // It is invalid to use the buffer as both index and storage in render pass
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // test compute pass
+        {
+            // Create buffer and bind group
+            wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
+
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroup bg =
+                utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
+
+            // Create a no-op compute pipeline
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // It is valid to use the buffer as both storage and readonly storage in a single
+            // compute pass if dispatch command is not called.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, bg);
+                pass.End();
+                encoder.Finish();
+            }
+
+            // It is invalid to use the buffer as both storage and readonly storage in a single
+            // dispatch.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetPipeline(cp);
+                pass.SetBindGroup(0, bg);
+                pass.Dispatch(1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+    }
+
+    // Test the use of a buffer as a storage buffer multiple times in the same synchronization
+    // scope.
+    TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
+        // Create buffer and bind group
+        wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                      wgpu::BufferBindingType::Storage},
+                     {1, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                      wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg =
+            utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
+
+        // test render pass
+        {
+            // It is valid to use multiple storage usages on the same buffer in render pass
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // test compute pass
+        {
+            // It is valid to use multiple storage usages on the same buffer in a dispatch
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that using the same buffer as both readable and writable in different passes is allowed
+    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentPasses) {
+        // Test render pass
+        {
+            // Create buffers that will be used as index and storage buffers
+            wgpu::Buffer buffer0 =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+            wgpu::Buffer buffer1 =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+            // Create bind groups to use the buffer as storage
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
+
+            // Use these two buffers as both index and storage in different render passes
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+
+            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&dummyRenderPass);
+            pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+            pass0.SetBindGroup(0, bg1);
+            pass0.End();
+
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+            pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+            pass1.SetBindGroup(0, bg0);
+            pass1.End();
+
+            encoder.Finish();
+        }
+
+        // Test compute pass
+        {
+            // Create buffer and bind groups that will be used as storage and uniform bindings
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
+
+            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+            // Use the buffer as both storage and uniform in different compute passes
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+            pass0.SetBindGroup(0, bg0);
+            pass0.End();
+
+            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+            pass1.SetBindGroup(1, bg1);
+            pass1.End();
+
+            encoder.Finish();
+        }
+
+        // Test render pass and compute pass mixed together with resource dependency.
+        {
+            // Create buffer and bind groups that will be used as storage and uniform bindings
+            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+            // Use the buffer as storage and uniform in render pass and compute pass respectively
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+            pass0.SetBindGroup(0, bg0);
+            pass0.End();
+
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+            pass1.SetBindGroup(1, bg1);
+            pass1.End();
+
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same buffer as both readable and writable in different
+    // draws in a single render pass. But it is valid in different dispatches in a single compute
+    // pass.
+    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentDrawsOrDispatches) {
+        // Test render pass
+        {
+            // Create a buffer and a bind group
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+            // Create a no-op render pipeline.
+            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+            // It is not allowed to use the same buffer as both readable and writable in different
+            // draws within the same render pass.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+
+            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+            pass.Draw(3);
+
+            pass.SetBindGroup(0, bg);
+            pass.Draw(3);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // test compute pass
+        {
+            // Create a buffer and bind groups
+            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
+            wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});
+
+            // It is valid to use the same buffer as both readable and writable in different
+            // dispatches within the same compute pass.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+            pass.SetPipeline(cp0);
+            pass.SetBindGroup(0, bg0);
+            pass.Dispatch(1);
+
+            pass.SetPipeline(cp1);
+            pass.SetBindGroup(0, bg1);
+            pass.Dispatch(1);
+
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same buffer as both readable and writable in a single
+    // draw or dispatch.
+    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInSingleDrawOrDispatch) {
+        // Test render pass
+        {
+            // Create a buffer and a bind group
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+            // Create a no-op render pipeline.
+            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+            // It is invalid to use the same buffer as both readable and writable usages in a single
+            // draw
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+
+            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, writeBG);
+            pass.Draw(3);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // test compute pass
+        {
+            // Create a buffer and bind groups
+            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+            // It is invalid to use the same buffer as both readable and writable usages in a single
+            // dispatch
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+
+            pass.SetBindGroup(0, readBG);
+            pass.SetBindGroup(1, writeBG);
+            pass.Dispatch(1);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that using the same buffer as copy src/dst and writable/readable usage is allowed.
+    TEST_F(ResourceUsageTrackingTest, BufferCopyAndBufferUsageInPass) {
+        // Create buffers that will be used as both a copy src/dst buffer and a storage buffer
+        wgpu::Buffer bufferSrc =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+        wgpu::Buffer bufferDst =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+
+        // Create the bind group to use the buffer as storage
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, bufferSrc}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, bufferDst}});
+
+        // Use the buffer as both copy src and storage in render pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, bg0);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Use the buffer as both copy dst and readonly storage in compute pass
+        {
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, bg1);
+            pass.SetPipeline(cp);
+            pass.Dispatch(1);
+            pass.End();
+
+            encoder.Finish();
+        }
+    }
+
+    // Test that all index buffers and vertex buffers take effect even though some buffers are
+    // not used because they are overwritten by another consecutive call.
+    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetIndexOrVertexBuffer) {
+        // Create buffers that will be used as both vertex and index buffer.
+        wgpu::Buffer buffer0 = CreateBuffer(
+            4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage);
+        wgpu::Buffer buffer1 =
+            CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+
+        DummyRenderPass dummyRenderPass(device);
+
+        // Set index buffer twice. The second one overwrites the first one. No buffer is used as
+        // both read and write in the same pass. But the overwritten index buffer (buffer0) still
+        // take effect during resource tracking.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Set index buffer twice. The second one overwrites the first one. buffer0 is used as both
+        // read and write in the same pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Set vertex buffer on the same index twice. The second one overwrites the first one. No
+        // buffer is used as both read and write in the same pass. But the overwritten vertex buffer
+        // (buffer0) still take effect during resource tracking.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetVertexBuffer(0, buffer0);
+            pass.SetVertexBuffer(0, buffer1);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Set vertex buffer on the same index twice. The second one overwrites the first one.
+        // buffer0 is used as both read and write in the same pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetVertexBuffer(0, buffer1);
+            pass.SetVertexBuffer(0, buffer0);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+    // used because they are overwritten by a consecutive call.
+    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetBindGroupsOnSameIndex) {
+        // test render pass
+        {
+            // Create buffers that will be used as index and storage buffers
+            wgpu::Buffer buffer0 =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+            wgpu::Buffer buffer1 =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+            // Create the bind group to use the buffer as storage
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
+
+            DummyRenderPass dummyRenderPass(device);
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // No buffer is used as both read and write in the same pass. But the overwritten
+            // bind group still take effect during resource tracking.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+                pass.SetBindGroup(0, bg0);
+                pass.SetBindGroup(0, bg1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // buffer0 is used as both read and write in the same pass
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+                pass.SetBindGroup(0, bg1);
+                pass.SetBindGroup(0, bg0);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+
+        // test compute pass
+        {
+            // Create buffers that will be used as readonly and writable storage buffers
+            wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
+            wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+            // Create the bind group to use the buffer as storage
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, buffer0, 0, 4}});
+            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, buffer0, 256, 4}});
+            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+            // Set bind group against the same index twice. The second one overwrites the first one.
+            // Then no buffer is used as both read and write in the same dispatch. But the
+            // overwritten bind group still take effect.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, writeBG0);
+                pass.SetBindGroup(1, readBG0);
+                pass.SetBindGroup(1, readBG1);
+                pass.SetPipeline(cp);
+                pass.Dispatch(1);
+                pass.End();
+                encoder.Finish();
+            }
+
+            // Set bind group against the same index twice. The second one overwrites the first one.
+            // Then buffer0 is used as both read and write in the same dispatch
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, writeBG0);
+                pass.SetBindGroup(1, readBG1);
+                pass.SetBindGroup(1, readBG0);
+                pass.SetPipeline(cp);
+                pass.Dispatch(1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when all bindings are not
+    // visible to the programmable pass where it is used.
+    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictBetweenInvisibleStagesInBindGroup) {
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        // Test render pass for bind group. The conflict of readonly storage and storage usage
+        // doesn't reside in render related stages at all
+        {
+            // Create a bind group whose bindings are not visible in render pass
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+            // These two bindings are invisible in render pass. But we still track these bindings.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass for bind group. The conflict of readonly storage and storage usage
+        // doesn't reside in compute related stage at all
+        {
+            // Create a bind group whose bindings are not visible in compute pass
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // These two bindings are invisible in the dispatch. But we still track these bindings.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+    // visible to the programmable pass where it is used.
+    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithInvisibleStageInBindGroup) {
+        // Test render pass for bind group and index buffer. The conflict of storage and index
+        // buffer usage resides between fragment stage and compute stage. But the compute stage
+        // binding is not visible in render pass.
+        {
+            wgpu::Buffer buffer =
+                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+            // Buffer usage in compute stage in bind group conflicts with index buffer. And binding
+            // for compute stage is not visible in render pass. But we still track this binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass for bind group. The conflict of readonly storage and storage buffer
+        // usage resides between compute stage and fragment stage. But the fragment stage binding is
+        // not visible in the dispatch.
+        {
+            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                         {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
+            // binding for fragment stage is not visible in the dispatch. But we still track this
+            // invisible binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+    // used in the pipeline.
+    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithUnusedPipelineBindings) {
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        // Test render pass for bind groups with unused bindings. The conflict of readonly storage
+        // and storage usages resides in different bind groups, although some bindings may not be
+        // used because its bind group layout is not designated in pipeline layout.
+        {
+            // Create bind groups. The bindings are visible for render pass.
+            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+            // Create a passthrough render pipeline with a readonly buffer
+            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                struct RBuffer {
+                    value : f32
+                }
+                @group(0) @binding(0) var<storage, read> rBuffer : RBuffer;
+                @stage(fragment) fn main() {
+                })");
+            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+            pipelineDescriptor.vertex.module = vsModule;
+            pipelineDescriptor.cFragment.module = fsModule;
+            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
+            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+
+            // Resource in bg1 conflicts with resources used in bg0. However, bindings in bg1 is
+            // not used in pipeline. But we still track this binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(1, bg1);
+            pass.SetPipeline(rp);
+            pass.Draw(3);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test that an unused bind group is not used to detect conflicts between bindings in
+        // compute passes.
+        {
+            // Create bind groups. The bindings are visible for compute pass.
+            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+            // Create a compute pipeline with only one of the two BGLs.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});
+
+            // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
+            // not used in pipeline so no error is produced in the dispatch.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(1, bg1);
+            pass.SetPipeline(cp);
+            pass.Dispatch(1);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same texture as both readable and writable in the same
+    // render pass. It is invalid in the same dispatch in compute pass.
+    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsage) {
+        // Test render pass
+        {
+            // Create a texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create a bind group to use the texture as sampled binding
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+            // Create a render pass to use the texture as a render target
+            utils::ComboRenderPassDescriptor renderPass({view});
+
+            // It is invalid to use the texture as both sampled and render target in the same pass
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass
+        {
+            // Create a texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::StorageBinding);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create a bind group to use the texture as sampled and writeonly bindings
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+            // Create a no-op compute pipeline
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // It is valid to use the texture as both sampled and writeonly storage in a single
+            // compute pass if dispatch command is not called.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, bg);
+                pass.End();
+                encoder.Finish();
+            }
+
+            // It is invalid to use the texture as both sampled and writeonly storage in a single
+            // dispatch
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetPipeline(cp);
+                pass.SetBindGroup(0, bg);
+                pass.Dispatch(1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+    }
+
+    // Test that it is invalid to use the same texture as both readable and writable depth/stencil
+    // attachment in the same render pass. But it is valid to use it as both readable and readonly
+    // depth/stencil attachment in the same render pass.
+    // Note that depth/stencil attachment is a special render attachment, it can be readonly.
+    TEST_F(ResourceUsageTrackingTest, TextureWithSamplingAndDepthStencilAttachment) {
+        // Create a texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
+                          wgpu::TextureFormat::Depth32Float);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as sampled binding
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+        // Create a render pass to use the texture as a render target
+        utils::ComboRenderPassDescriptor passDescriptor({}, view);
+        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        // It is invalid to use the texture as both sampled and writeable depth/stencil attachment
+        // in the same pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // It is valid to use the texture as both sampled and readonly depth/stencil attachment in
+        // the same pass
+        {
+            passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test using multiple writable usages on the same texture in a single pass/dispatch
+    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleWriteUsage) {
+        // Test render pass
+        {
+            // Create a texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding |
+                                                  wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create a bind group to use the texture as writeonly storage binding
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+            // It is invalid to use the texture as both writeonly storage and render target in
+            // the same pass
+            {
+                utils::ComboRenderPassDescriptor renderPass({view});
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+                pass.SetBindGroup(0, bg);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+
+            // It is valid to use multiple writeonly storage usages on the same texture in render
+            // pass
+            {
+                wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                DummyRenderPass dummyRenderPass(device);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+                pass.SetBindGroup(0, bg);
+                pass.SetBindGroup(1, bg1);
+                pass.End();
+                encoder.Finish();
+            }
+        }
+
+        // Test compute pass
+        {
+            // Create a texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create a bind group to use the texture as sampled and writeonly bindings
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat},
+                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+            // Create a no-op compute pipeline
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // It is valid to use the texture as multiple writeonly storage usages in a single
+            // dispatch
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that a single subresource of a texture cannot be used as a render attachment more than
+    // once in the same pass.
+    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleRenderAttachmentUsage) {
+        // Create a texture with two array layers
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size = {1, 1, 2};
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        descriptor.format = kFormat;
+
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.arrayLayerCount = 1;
+
+        wgpu::TextureView viewLayer0 = texture.CreateView(&viewDesc);
+
+        viewDesc.baseArrayLayer = 1;
+        wgpu::TextureView viewLayer1 = texture.CreateView(&viewDesc);
+
+        // Control: It is valid to use layer0 as a render target for one attachment, and
+        // layer1 as the second attachment in the same pass
+        {
+            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer1});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Control: It is valid to use layer0 as a render target in separate passes.
+        {
+            utils::ComboRenderPassDescriptor renderPass({viewLayer0});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
+            pass0.End();
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
+            pass1.End();
+            encoder.Finish();
+        }
+
+        // It is invalid to use layer0 as a render target for both attachments in the same pass
+        {
+            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer0});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // It is invalid to use layer1 as a render target for both attachments in the same pass
+        {
+            utils::ComboRenderPassDescriptor renderPass({viewLayer1, viewLayer1});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that using the same texture as both readable and writable in different passes is
+    // allowed
+    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInDifferentPasses) {
+        // Test render pass
+        {
+            // Create textures that will be used as both a sampled texture and a render target
+            wgpu::Texture t0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                             wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView v0 = t0.CreateView();
+            wgpu::Texture t1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                             wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView v1 = t1.CreateView();
+
+            // Create bind groups to use the texture as sampled
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, v0}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, v1}});
+
+            // Create render passes that will use the textures as render attachments
+            utils::ComboRenderPassDescriptor renderPass0({v1});
+            utils::ComboRenderPassDescriptor renderPass1({v0});
+
+            // Use the textures as both sampled and render attachments in different passes
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass0);
+            pass0.SetBindGroup(0, bg0);
+            pass0.End();
+
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass1);
+            pass1.SetBindGroup(0, bg1);
+            pass1.End();
+
+            encoder.Finish();
+        }
+
+        // Test compute pass
+        {
+            // Create a texture that will be used storage texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::StorageBinding);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create bind groups to use the texture as sampled and writeonly bindings
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+            // Use the textures as both sampled and writeonly storages in different passes
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+            pass0.SetBindGroup(0, readBG);
+            pass0.End();
+
+            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+            pass1.SetBindGroup(0, writeBG);
+            pass1.End();
+
+            encoder.Finish();
+        }
+
+        // Test compute pass and render pass mixed together with resource dependency
+        {
+            // Create a texture that will be used a storage texture
+            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::StorageBinding);
+            wgpu::TextureView view = texture.CreateView();
+
+            // Create bind groups to use the texture as sampled and writeonly bindings
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+
+            // Use the texture as writeonly and sampled storage in compute pass and render
+            // pass respectively
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+            pass0.SetBindGroup(0, writeBG);
+            pass0.End();
+
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+            pass1.SetBindGroup(0, readBG);
+            pass1.End();
+
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same texture as both readable and writable in different
+    // draws in a single render pass. But it is valid in different dispatches in a single compute
+    // pass.
+    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageOnDifferentDrawsOrDispatches) {
+        // Create a texture that will be used both as a sampled texture and a storage texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Test render pass
+        {
+            // Create bind groups to use the texture as sampled and writeonly storage bindings
+            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+            // Create a no-op render pipeline.
+            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+            // It is not allowed to use the same texture as both readable and writable in different
+            // draws within the same render pass.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+
+            pass.SetBindGroup(0, sampledBG);
+            pass.Draw(3);
+
+            pass.SetBindGroup(0, writeBG);
+            pass.Draw(3);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass
+        {
+            // Create bind groups to use the texture as sampled and writeonly storage bindings
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+            wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+            // It is valid to use the same texture as both readable and writable in different
+            // dispatches within the same compute pass.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+            pass.SetPipeline(readCp);
+            pass.SetBindGroup(0, readBG);
+            pass.Dispatch(1);
+
+            pass.SetPipeline(writeCp);
+            pass.SetBindGroup(0, writeBG);
+            pass.Dispatch(1);
+
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that it is invalid to use the same texture as both readable and writable in a single
+    // draw or dispatch.
+    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInSingleDrawOrDispatch) {
+        // Create a texture that will be used both as a sampled texture and a storage texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Test render pass
+        {
+            // Create the bind group to use the texture as sampled and writeonly storage bindings
+            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+            // Create a no-op render pipeline.
+            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+            // It is invalid to use the same texture as both readable and writable usages in a
+            // single draw
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+
+            pass.SetBindGroup(0, sampledBG);
+            pass.SetBindGroup(1, writeBG);
+            pass.Draw(3);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass
+        {
+            // Create the bind group to use the texture as sampled and writeonly storage bindings
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+            // It is invalid to use the same texture as both readable and writable usages in a
+            // single dispatch
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+
+            pass.SetBindGroup(0, readBG);
+            pass.SetBindGroup(1, writeBG);
+            pass.Dispatch(1);
+
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that using a single texture as copy src/dst and writable/readable usage in pass is
+    // allowed.
+    TEST_F(ResourceUsageTrackingTest, TextureCopyAndTextureUsageInPass) {
+        // Create textures that will be used as both a sampled texture and a render target
+        wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::CopySrc);
+        wgpu::Texture texture1 =
+            CreateTexture(wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                          wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view0 = texture0.CreateView();
+        wgpu::TextureView view1 = texture1.CreateView();
+
+        wgpu::ImageCopyTexture srcView = utils::CreateImageCopyTexture(texture0, 0, {0, 0, 0});
+        wgpu::ImageCopyTexture dstView = utils::CreateImageCopyTexture(texture1, 0, {0, 0, 0});
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        // Use the texture as both copy dst and render attachment in render pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+            utils::ComboRenderPassDescriptor renderPass({view1});
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Use the texture as both copy dst and readable usage in compute pass
+        {
+            // Create the bind group to use the texture as sampled
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, bg);
+            pass.SetPipeline(cp);
+            pass.Dispatch(1);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+    // used because they are overwritten by a consecutive call.
+    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleSetBindGroupsOnSameIndex) {
+        // Test render pass
+        {
+            // Create textures that will be used as both a sampled texture and a render target
+            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                   wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView view0 = texture0.CreateView();
+            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                   wgpu::TextureUsage::RenderAttachment);
+            wgpu::TextureView view1 = texture1.CreateView();
+
+            // Create the bind group to use the texture as sampled
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, view0}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+            // Create the render pass that will use the texture as an render attachment
+            utils::ComboRenderPassDescriptor renderPass({view0});
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // No texture is used as both sampled and render attachment in the same pass. But the
+            // overwritten texture still take effect during resource tracking.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+                pass.SetBindGroup(0, bg0);
+                pass.SetBindGroup(0, bg1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // texture0 is used as both sampled and render attachment in the same pass
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+                pass.SetBindGroup(0, bg1);
+                pass.SetBindGroup(0, bg0);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+
+        // Test compute pass
+        {
+            // Create a texture that will be used both as storage texture
+            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                                   wgpu::TextureUsage::StorageBinding);
+            wgpu::TextureView view0 = texture0.CreateView();
+            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding);
+            wgpu::TextureView view1 = texture1.CreateView();
+
+            // Create the bind group to use the texture as sampled and writeonly bindings
+            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+
+            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, view0}});
+            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, view0}});
+            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // No texture is used as both sampled and writeonly storage in the same dispatch so
+            // there are no errors.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, writeBG0);
+                pass.SetBindGroup(1, readBG0);
+                pass.SetBindGroup(1, readBG1);
+                pass.SetPipeline(cp);
+                pass.Dispatch(1);
+                pass.End();
+                encoder.Finish();
+            }
+
+            // Set bind group on the same index twice. The second one overwrites the first one.
+            // texture0 is used as both writeonly and sampled storage in the same dispatch, which
+            // is an error.
+            {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+                pass.SetBindGroup(0, writeBG0);
+                pass.SetBindGroup(1, readBG1);
+                pass.SetBindGroup(1, readBG0);
+                pass.SetPipeline(cp);
+                pass.Dispatch(1);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when all bindings are not
+    // visible to the programmable pass where it is used.
+    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictBetweenInvisibleStagesInBindGroup) {
+        // Create texture and texture view
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Test render pass for bind group. The conflict of sampled storage and writeonly storage
+        // usage doesn't reside in render related stages at all
+        {
+            // Create a bind group whose bindings are not visible in render pass
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+            // These two bindings are invisible in render pass. But we still track these bindings.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass for bind group. The conflict of sampled storage and writeonly storage
+        // usage doesn't reside in compute related stage at all
+        {
+            // Create a bind group whose bindings are not visible in compute pass
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // These two bindings are invisible in compute pass. But we still track these bindings.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+    // visible to the programmable pass where it is used.
+    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithInvisibleStageInBindGroup) {
+        // Create texture and texture view
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
+                          wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Test render pass
+        {
+            // Create the render pass that will use the texture as an render attachment
+            utils::ComboRenderPassDescriptor renderPass({view});
+
+            // Create a bind group which use the texture as sampled storage in compute stage
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+            // Texture usage in compute stage in bind group conflicts with render target. And
+            // binding for compute stage is not visible in render pass. But we still track this
+            // binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass
+        {
+            // Create a bind group which contains both fragment and compute stages
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+            // Create a no-op compute pipeline.
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+            // Texture usage in compute stage conflicts with texture usage in fragment stage. And
+            // binding for fragment stage is not visible in compute pass. But we still track this
+            // invisible binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.Dispatch(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+    // used in the pipeline.
+    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithUnusedPipelineBindings) {
+        // Create texture and texture view
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create bind groups.
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                      wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                      wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Test render pass
+        {
+            // Create a passthrough render pipeline with a sampled storage texture
+            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+                @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                    return vec4<f32>();
+                })");
+
+            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+                @group(0) @binding(0) var tex : texture_2d<f32>;
+                @stage(fragment) fn main() {
+                })");
+            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+            pipelineDescriptor.vertex.module = vsModule;
+            pipelineDescriptor.cFragment.module = fsModule;
+            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
+            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+
+            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+            // in writeBG is not used in pipeline. But we still track this binding.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetBindGroup(0, readBG);
+            pass.SetBindGroup(1, writeBG);
+            pass.SetPipeline(rp);
+            pass.Draw(3);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test compute pass
+        {
+            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});
+
+            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+            // in writeBG is not used in pipeline's layout so it isn't an error.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, readBG);
+            pass.SetBindGroup(1, writeBG);
+            pass.SetPipeline(cp);
+            pass.Dispatch(1);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    // Test that using an indirect buffer is disallowed with a writable usage (like storage) but
+    // allowed with a readable usage (like readonly storage).
+    TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
+        wgpu::Buffer buffer =
+            CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+        // Test pipelines
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+        wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+        wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+        // Test that indirect + readonly is allowed in the same render pass.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+            pass.SetBindGroup(0, readBG);
+            pass.DrawIndirect(buffer, 0);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Test that indirect + writable is disallowed in the same render pass.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            DummyRenderPass dummyRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+            pass.SetPipeline(rp);
+            pass.SetBindGroup(0, writeBG);
+            pass.DrawIndirect(buffer, 0);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Test that indirect + readonly is allowed in the same dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(readCp);
+            pass.SetBindGroup(0, readBG);
+            pass.DispatchIndirect(buffer, 0);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Test that indirect + writable is disallowed in the same dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(writeCp);
+            pass.SetBindGroup(0, writeBG);
+            pass.DispatchIndirect(buffer, 0);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // TODO (yunchao.he@intel.com):
+    //
+    //	* Add tests for multiple encoders upon the same resource simultaneously. This situation fits
+    //	some cases like VR, multi-threading, etc.
+    //
+    //	* Add tests for bundle
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
new file mode 100644
index 0000000..dbf3ebc
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
@@ -0,0 +1,124 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+namespace {
+
+    class SamplerValidationTest : public ValidationTest {};
+
+    // Test NaN and INFINITY values are not allowed
+    TEST_F(SamplerValidationTest, InvalidLOD) {
+        { device.CreateSampler(); }
+        {
+            wgpu::SamplerDescriptor samplerDesc;
+            samplerDesc.lodMinClamp = NAN;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc;
+            samplerDesc.lodMaxClamp = NAN;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc;
+            samplerDesc.lodMaxClamp = INFINITY;
+            device.CreateSampler(&samplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc;
+            samplerDesc.lodMaxClamp = INFINITY;
+            samplerDesc.lodMinClamp = INFINITY;
+            device.CreateSampler(&samplerDesc);
+        }
+    }
+
+    TEST_F(SamplerValidationTest, InvalidFilterAnisotropic) {
+        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+        kValidAnisoSamplerDesc.maxAnisotropy = 2;
+        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+        {
+            // when maxAnisotropy > 1, min, mag, mipmap filter should be linear
+            device.CreateSampler(&kValidAnisoSamplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 0;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+        }
+    }
+
+    TEST_F(SamplerValidationTest, ValidFilterAnisotropic) {
+        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+        kValidAnisoSamplerDesc.maxAnisotropy = 2;
+        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+        { device.CreateSampler(); }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 16;
+            device.CreateSampler(&samplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 32;
+            device.CreateSampler(&samplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 0x7FFF;
+            device.CreateSampler(&samplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 0x8000;
+            device.CreateSampler(&samplerDesc);
+        }
+        {
+            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+            samplerDesc.maxAnisotropy = 0xFFFF;
+            device.CreateSampler(&samplerDesc);
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
new file mode 100644
index 0000000..2cf4497
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
@@ -0,0 +1,661 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <sstream>
+
+class ShaderModuleValidationTest : public ValidationTest {};
+
+// Test case with a simpler shader that should successfully be created
+TEST_F(ShaderModuleValidationTest, CreationSuccess) {
+    const char* shader = R"(
+                   OpCapability Shader
+              %1 = OpExtInstImport "GLSL.std.450"
+                   OpMemoryModel Logical GLSL450
+                   OpEntryPoint Fragment %main "main" %fragColor
+                   OpExecutionMode %main OriginUpperLeft
+                   OpSource GLSL 450
+                   OpSourceExtension "GL_GOOGLE_cpp_style_line_directive"
+                   OpSourceExtension "GL_GOOGLE_include_directive"
+                   OpName %main "main"
+                   OpName %fragColor "fragColor"
+                   OpDecorate %fragColor Location 0
+           %void = OpTypeVoid
+              %3 = OpTypeFunction %void
+          %float = OpTypeFloat 32
+        %v4float = OpTypeVector %float 4
+    %_ptr_Output_v4float = OpTypePointer Output %v4float
+      %fragColor = OpVariable %_ptr_Output_v4float Output
+        %float_1 = OpConstant %float 1
+        %float_0 = OpConstant %float 0
+             %12 = OpConstantComposite %v4float %float_1 %float_0 %float_0 %float_1
+           %main = OpFunction %void None %3
+              %5 = OpLabel
+                   OpStore %fragColor %12
+                   OpReturn
+                   OpFunctionEnd)";
+
+    utils::CreateShaderModuleFromASM(device, shader);
+}
+
+// Tests that if the output location exceeds kMaxColorAttachments the fragment shader will fail to
+// be compiled.
+TEST_F(ShaderModuleValidationTest, FragmentOutputLocationExceedsMaxColorAttachments) {
+    std::ostringstream stream;
+    stream << "@stage(fragment) fn main() -> @location(" << kMaxColorAttachments << R"() vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })";
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, stream.str().c_str()));
+}
+
+// Test that it is invalid to create a shader module with no chained descriptor. (It must be
+// WGSL or SPIRV, not empty)
+TEST_F(ShaderModuleValidationTest, NoChainedDescriptor) {
+    wgpu::ShaderModuleDescriptor desc = {};
+    ASSERT_DEVICE_ERROR(device.CreateShaderModule(&desc));
+}
+
+// Test that it is not allowed to use combined texture and sampler.
+TEST_F(ShaderModuleValidationTest, CombinedTextureAndSampler) {
+    // SPIR-V ASM produced by glslang for the following fragment shader:
+    //
+    //   #version 450
+    //   layout(set = 0, binding = 0) uniform sampler2D tex;
+    //   void main () {}
+    //
+    // Note that the following defines an interface combined texture/sampler which is not allowed
+    // in Dawn / WebGPU.
+    //
+    //   %8 = OpTypeSampledImage %7
+    //   %_ptr_UniformConstant_8 = OpTypePointer UniformConstant %8
+    //   %tex = OpVariable %_ptr_UniformConstant_8 UniformConstant
+    const char* shader = R"(
+               OpCapability Shader
+          %1 = OpExtInstImport "GLSL.std.450"
+               OpMemoryModel Logical GLSL450
+               OpEntryPoint Fragment %main "main"
+               OpExecutionMode %main OriginUpperLeft
+               OpSource GLSL 450
+               OpName %main "main"
+               OpName %tex "tex"
+               OpDecorate %tex DescriptorSet 0
+               OpDecorate %tex Binding 0
+       %void = OpTypeVoid
+          %3 = OpTypeFunction %void
+      %float = OpTypeFloat 32
+          %7 = OpTypeImage %float 2D 0 0 0 1 Unknown
+          %8 = OpTypeSampledImage %7
+%_ptr_UniformConstant_8 = OpTypePointer UniformConstant %8
+        %tex = OpVariable %_ptr_UniformConstant_8 UniformConstant
+       %main = OpFunction %void None %3
+          %5 = OpLabel
+               OpReturn
+               OpFunctionEnd
+        )";
+
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModuleFromASM(device, shader));
+}
+
+// Test that it is not allowed to declare a multisampled-array interface texture.
+// TODO(enga): Also test multisampled cube, cube array, and 3D. These have no GLSL keywords.
+TEST_F(ShaderModuleValidationTest, MultisampledArrayTexture) {
+    // SPIR-V ASM produced by glslang for the following fragment shader:
+    //
+    //  #version 450
+    //  layout(set=0, binding=0) uniform texture2DMSArray tex;
+    //  void main () {}}
+    //
+    // Note that the following defines an interface array multisampled texture which is not allowed
+    // in Dawn / WebGPU.
+    //
+    //  %7 = OpTypeImage %float 2D 0 1 1 1 Unknown
+    //  %_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
+    //  %tex = OpVariable %_ptr_UniformConstant_7 UniformConstant
+    const char* shader = R"(
+               OpCapability Shader
+          %1 = OpExtInstImport "GLSL.std.450"
+               OpMemoryModel Logical GLSL450
+               OpEntryPoint Fragment %main "main"
+               OpExecutionMode %main OriginUpperLeft
+               OpSource GLSL 450
+               OpName %main "main"
+               OpName %tex "tex"
+               OpDecorate %tex DescriptorSet 0
+               OpDecorate %tex Binding 0
+       %void = OpTypeVoid
+          %3 = OpTypeFunction %void
+      %float = OpTypeFloat 32
+          %7 = OpTypeImage %float 2D 0 1 1 1 Unknown
+%_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
+        %tex = OpVariable %_ptr_UniformConstant_7 UniformConstant
+       %main = OpFunction %void None %3
+          %5 = OpLabel
+               OpReturn
+               OpFunctionEnd
+        )";
+
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModuleFromASM(device, shader));
+}
+
+// Tests that shader module compilation messages can be queried.
+TEST_F(ShaderModuleValidationTest, GetCompilationMessages) {
+    // This test works assuming ShaderModule is backed by a dawn::native::ShaderModuleBase, which
+    // is not the case on the wire.
+    DAWN_SKIP_TEST_IF(UsesWire());
+
+    wgpu::ShaderModule shaderModule = utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+        })");
+
+    dawn::native::ShaderModuleBase* shaderModuleBase = dawn::native::FromAPI(shaderModule.Get());
+    dawn::native::OwnedCompilationMessages* messages = shaderModuleBase->GetCompilationMessages();
+    messages->ClearMessages();
+    messages->AddMessageForTesting("Info Message");
+    messages->AddMessageForTesting("Warning Message", wgpu::CompilationMessageType::Warning);
+    messages->AddMessageForTesting("Error Message", wgpu::CompilationMessageType::Error, 3, 4);
+    messages->AddMessageForTesting("Complete Message", wgpu::CompilationMessageType::Info, 3, 4, 5,
+                                   6);
+
+    auto callback = [](WGPUCompilationInfoRequestStatus status, const WGPUCompilationInfo* info,
+                       void* userdata) {
+        ASSERT_EQ(WGPUCompilationInfoRequestStatus_Success, status);
+        ASSERT_NE(nullptr, info);
+        ASSERT_EQ(4u, info->messageCount);
+
+        const WGPUCompilationMessage* message = &info->messages[0];
+        ASSERT_STREQ("Info Message", message->message);
+        ASSERT_EQ(WGPUCompilationMessageType_Info, message->type);
+        ASSERT_EQ(0u, message->lineNum);
+        ASSERT_EQ(0u, message->linePos);
+
+        message = &info->messages[1];
+        ASSERT_STREQ("Warning Message", message->message);
+        ASSERT_EQ(WGPUCompilationMessageType_Warning, message->type);
+        ASSERT_EQ(0u, message->lineNum);
+        ASSERT_EQ(0u, message->linePos);
+
+        message = &info->messages[2];
+        ASSERT_STREQ("Error Message", message->message);
+        ASSERT_EQ(WGPUCompilationMessageType_Error, message->type);
+        ASSERT_EQ(3u, message->lineNum);
+        ASSERT_EQ(4u, message->linePos);
+
+        message = &info->messages[3];
+        ASSERT_STREQ("Complete Message", message->message);
+        ASSERT_EQ(WGPUCompilationMessageType_Info, message->type);
+        ASSERT_EQ(3u, message->lineNum);
+        ASSERT_EQ(4u, message->linePos);
+        ASSERT_EQ(5u, message->offset);
+        ASSERT_EQ(6u, message->length);
+    };
+
+    shaderModule.GetCompilationInfo(callback, nullptr);
+}
+
+// Validate the maximum location of effective inter-stage variables cannot be greater than 14
+// (kMaxInterStageShaderComponents / 4 - 1).
+TEST_F(ShaderModuleValidationTest, MaximumShaderIOLocations) {
+    auto CheckTestPipeline = [&](bool success, uint32_t maximumOutputLocation,
+                                 wgpu::ShaderStage failingShaderStage) {
+        // Build the ShaderIO struct containing variables up to maximumOutputLocation.
+        std::ostringstream stream;
+        stream << "struct ShaderIO {" << std::endl;
+        for (uint32_t location = 1; location <= maximumOutputLocation; ++location) {
+            stream << "@location(" << location << ") var" << location << ": f32," << std::endl;
+        }
+
+        if (failingShaderStage == wgpu::ShaderStage::Vertex) {
+            stream << " @builtin(position) pos: vec4<f32>,";
+        }
+        stream << "}\n";
+
+        std::string ioStruct = stream.str();
+
+        // Build the test pipeline. Note that it's not possible with just ASSERT_DEVICE_ERROR
+        // whether it is the vertex or fragment shader that fails. So instead we will look for the
+        // string "failingVertex" or "failingFragment" in the error message.
+        utils::ComboRenderPipelineDescriptor pDesc;
+        pDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        const char* errorMatcher = nullptr;
+        switch (failingShaderStage) {
+            case wgpu::ShaderStage::Vertex: {
+                errorMatcher = "failingVertex";
+                pDesc.vertex.entryPoint = "failingVertex";
+                pDesc.vertex.module = utils::CreateShaderModule(device, (ioStruct + R"(
+                    @stage(vertex) fn failingVertex() -> ShaderIO {
+                        var shaderIO : ShaderIO;
+                        shaderIO.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                        return shaderIO;
+                     }
+                )")
+                                                                            .c_str());
+                pDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+                    @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                        return vec4<f32>(0.0);
+                    }
+                )");
+                break;
+            }
+
+            case wgpu::ShaderStage::Fragment: {
+                errorMatcher = "failingFragment";
+                pDesc.cFragment.entryPoint = "failingFragment";
+                pDesc.cFragment.module = utils::CreateShaderModule(device, (ioStruct + R"(
+                    @stage(fragment) fn failingFragment(io : ShaderIO) -> @location(0) vec4<f32> {
+                        return vec4<f32>(0.0);
+                     }
+                )")
+                                                                               .c_str());
+                pDesc.vertex.module = utils::CreateShaderModule(device, R"(
+                    @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                        return vec4<f32>(0.0);
+                    }
+                )");
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
+
+        if (success) {
+            ASSERT_DEVICE_ERROR(
+                device.CreateRenderPipeline(&pDesc),
+                testing::HasSubstr(
+                    "One or more fragment inputs and vertex outputs are not one-to-one matching"));
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&pDesc),
+                                testing::HasSubstr(errorMatcher));
+        }
+    };
+
+    constexpr uint32_t kMaxInterShaderIOLocation = kMaxInterStageShaderComponents / 4 - 1;
+
+    // It is allowed to create a shader module with the maximum active vertex output location == 14;
+    CheckTestPipeline(true, kMaxInterShaderIOLocation, wgpu::ShaderStage::Vertex);
+
+    // It isn't allowed to create a shader module with the maximum active vertex output location >
+    // 14;
+    CheckTestPipeline(false, kMaxInterShaderIOLocation + 1, wgpu::ShaderStage::Vertex);
+
+    // It is allowed to create a shader module with the maximum active fragment input location ==
+    // 14;
+    CheckTestPipeline(true, kMaxInterShaderIOLocation, wgpu::ShaderStage::Fragment);
+
+    // It is allowed to create a shader module with the maximum active vertex output location > 14;
+    CheckTestPipeline(false, kMaxInterShaderIOLocation + 1, wgpu::ShaderStage::Fragment);
+}
+
+// Validate the maximum number of total inter-stage user-defined variable component count and
+// built-in variables cannot exceed kMaxInterStageShaderComponents.
+TEST_F(ShaderModuleValidationTest, MaximumInterStageShaderComponents) {
+    auto CheckTestPipeline = [&](bool success,
+                                 uint32_t totalUserDefinedInterStageShaderComponentCount,
+                                 wgpu::ShaderStage failingShaderStage,
+                                 const char* extraBuiltInDeclarations = "") {
+        // Build the ShaderIO struct containing totalUserDefinedInterStageShaderComponentCount
+        // components. Components are added in two parts, a bunch of vec4s, then one additional
+        // variable for the remaining components.
+        std::ostringstream stream;
+        stream << "struct ShaderIO {" << std::endl << extraBuiltInDeclarations << std::endl;
+        uint32_t vec4InputLocations = totalUserDefinedInterStageShaderComponentCount / 4;
+
+        for (uint32_t location = 0; location < vec4InputLocations; ++location) {
+            stream << "@location(" << location << ") var" << location << ": vec4<f32>,"
+                   << std::endl;
+        }
+
+        uint32_t lastComponentCount = totalUserDefinedInterStageShaderComponentCount % 4;
+        if (lastComponentCount > 0) {
+            stream << "@location(" << vec4InputLocations << ") var" << vec4InputLocations << ": ";
+            if (lastComponentCount == 1) {
+                stream << "f32,";
+            } else {
+                stream << " vec" << lastComponentCount << "<f32>,";
+            }
+            stream << std::endl;
+        }
+
+        if (failingShaderStage == wgpu::ShaderStage::Vertex) {
+            stream << " @builtin(position) pos: vec4<f32>,";
+        }
+        stream << "}\n";
+
+        std::string ioStruct = stream.str();
+
+        // Build the test pipeline. Note that it's not possible with just ASSERT_DEVICE_ERROR
+        // whether it is the vertex or fragment shader that fails. So instead we will look for the
+        // string "failingVertex" or "failingFragment" in the error message.
+        utils::ComboRenderPipelineDescriptor pDesc;
+        pDesc.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        const char* errorMatcher = nullptr;
+        switch (failingShaderStage) {
+            case wgpu::ShaderStage::Vertex: {
+                errorMatcher = "failingVertex";
+                pDesc.vertex.entryPoint = "failingVertex";
+                pDesc.vertex.module = utils::CreateShaderModule(device, (ioStruct + R"(
+                    @stage(vertex) fn failingVertex() -> ShaderIO {
+                        var shaderIO : ShaderIO;
+                        shaderIO.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+                        return shaderIO;
+                     }
+                )")
+                                                                            .c_str());
+                pDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+                    @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                        return vec4<f32>(0.0);
+                    }
+                )");
+                break;
+            }
+
+            case wgpu::ShaderStage::Fragment: {
+                errorMatcher = "failingFragment";
+                pDesc.cFragment.entryPoint = "failingFragment";
+                pDesc.cFragment.module = utils::CreateShaderModule(device, (ioStruct + R"(
+                    @stage(fragment) fn failingFragment(io : ShaderIO) -> @location(0) vec4<f32> {
+                        return vec4<f32>(0.0);
+                     }
+                )")
+                                                                               .c_str());
+                pDesc.vertex.module = utils::CreateShaderModule(device, R"(
+                    @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                        return vec4<f32>(0.0);
+                    }
+                )");
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
+
+        if (success) {
+            ASSERT_DEVICE_ERROR(
+                device.CreateRenderPipeline(&pDesc),
+                testing::HasSubstr(
+                    "One or more fragment inputs and vertex outputs are not one-to-one matching"));
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&pDesc),
+                                testing::HasSubstr(errorMatcher));
+        }
+    };
+
+    // Verify when there is no input builtin variable in a fragment shader, the total user-defined
+    // input component count must be less than kMaxInterStageShaderComponents.
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents, wgpu::ShaderStage::Fragment);
+        CheckTestPipeline(false, kMaxInterStageShaderComponents + 1, wgpu::ShaderStage::Fragment);
+    }
+
+    // @builtin(position) should be counted into the maximum inter-stage component count.
+    // Note that in vertex shader we always have @position so we don't need to specify it
+    // again in the parameter "builtInDeclarations" of generateShaderForTest().
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents - 4, wgpu::ShaderStage::Vertex);
+        CheckTestPipeline(false, kMaxInterStageShaderComponents - 3, wgpu::ShaderStage::Vertex);
+    }
+
+    // @builtin(position) in fragment shaders should be counted into the maximum inter-stage
+    // component count.
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents - 4, wgpu::ShaderStage::Fragment,
+                          "@builtin(position) fragCoord : vec4<f32>,");
+        CheckTestPipeline(false, kMaxInterStageShaderComponents - 3, wgpu::ShaderStage::Fragment,
+                          "@builtin(position) fragCoord : vec4<f32>,");
+    }
+
+    // @builtin(front_facing) should be counted into the maximum inter-stage component count.
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents - 1, wgpu::ShaderStage::Fragment,
+                          "@builtin(front_facing) frontFacing : bool,");
+        CheckTestPipeline(false, kMaxInterStageShaderComponents, wgpu::ShaderStage::Fragment,
+                          "@builtin(front_facing) frontFacing : bool,");
+    }
+
+    // @builtin(sample_index) should be counted into the maximum inter-stage component count.
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents - 1, wgpu::ShaderStage::Fragment,
+                          "@builtin(sample_index) sampleIndex : u32,");
+        CheckTestPipeline(false, kMaxInterStageShaderComponents, wgpu::ShaderStage::Fragment,
+                          "@builtin(sample_index) sampleIndex : u32,");
+    }
+
+    // @builtin(sample_mask) should be counted into the maximum inter-stage component count.
+    {
+        CheckTestPipeline(true, kMaxInterStageShaderComponents - 1, wgpu::ShaderStage::Fragment,
+                          "@builtin(sample_mask) sampleMask : u32,");
+        CheckTestPipeline(false, kMaxInterStageShaderComponents, wgpu::ShaderStage::Fragment,
+                          "@builtin(sample_mask) sampleMask : u32,");
+    }
+}
+
+// Tests that we validate workgroup size limits.
+TEST_F(ShaderModuleValidationTest, ComputeWorkgroupSizeLimits) {
+    auto CheckShaderWithWorkgroupSize = [this](bool success, uint32_t x, uint32_t y, uint32_t z) {
+        std::ostringstream ss;
+        ss << "@stage(compute) @workgroup_size(" << x << "," << y << "," << z << ") fn main() {}";
+
+        wgpu::ComputePipelineDescriptor desc;
+        desc.compute.entryPoint = "main";
+        desc.compute.module = utils::CreateShaderModule(device, ss.str().c_str());
+
+        if (success) {
+            device.CreateComputePipeline(&desc);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&desc));
+        }
+    };
+
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    CheckShaderWithWorkgroupSize(true, 1, 1, 1);
+    CheckShaderWithWorkgroupSize(true, supportedLimits.maxComputeWorkgroupSizeX, 1, 1);
+    CheckShaderWithWorkgroupSize(true, 1, supportedLimits.maxComputeWorkgroupSizeY, 1);
+    CheckShaderWithWorkgroupSize(true, 1, 1, supportedLimits.maxComputeWorkgroupSizeZ);
+
+    CheckShaderWithWorkgroupSize(false, supportedLimits.maxComputeWorkgroupSizeX + 1, 1, 1);
+    CheckShaderWithWorkgroupSize(false, 1, supportedLimits.maxComputeWorkgroupSizeY + 1, 1);
+    CheckShaderWithWorkgroupSize(false, 1, 1, supportedLimits.maxComputeWorkgroupSizeZ + 1);
+
+    // No individual dimension exceeds its limit, but the combined size should definitely exceed the
+    // total invocation limit.
+    CheckShaderWithWorkgroupSize(false, supportedLimits.maxComputeWorkgroupSizeX,
+                                 supportedLimits.maxComputeWorkgroupSizeY,
+                                 supportedLimits.maxComputeWorkgroupSizeZ);
+}
+
+// Tests that we validate workgroup storage size limits.
+TEST_F(ShaderModuleValidationTest, ComputeWorkgroupStorageSizeLimits) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    constexpr uint32_t kVec4Size = 16;
+    const uint32_t maxVec4Count = supportedLimits.maxComputeWorkgroupStorageSize / kVec4Size;
+    constexpr uint32_t kMat4Size = 64;
+    const uint32_t maxMat4Count = supportedLimits.maxComputeWorkgroupStorageSize / kMat4Size;
+
+    auto CheckPipelineWithWorkgroupStorage = [this](bool success, uint32_t vec4_count,
+                                                    uint32_t mat4_count) {
+        std::ostringstream ss;
+        std::ostringstream body;
+        if (vec4_count > 0) {
+            ss << "var<workgroup> vec4_data: array<vec4<f32>, " << vec4_count << ">;";
+            body << "_ = vec4_data;";
+        }
+        if (mat4_count > 0) {
+            ss << "var<workgroup> mat4_data: array<mat4x4<f32>, " << mat4_count << ">;";
+            body << "_ = mat4_data;";
+        }
+        ss << "@stage(compute) @workgroup_size(1) fn main() { " << body.str() << " }";
+
+        wgpu::ComputePipelineDescriptor desc;
+        desc.compute.entryPoint = "main";
+        desc.compute.module = utils::CreateShaderModule(device, ss.str().c_str());
+
+        if (success) {
+            device.CreateComputePipeline(&desc);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&desc));
+        }
+    };
+
+    CheckPipelineWithWorkgroupStorage(true, 1, 1);
+    CheckPipelineWithWorkgroupStorage(true, maxVec4Count, 0);
+    CheckPipelineWithWorkgroupStorage(true, 0, maxMat4Count);
+    CheckPipelineWithWorkgroupStorage(true, maxVec4Count - 4, 1);
+    CheckPipelineWithWorkgroupStorage(true, 4, maxMat4Count - 1);
+
+    CheckPipelineWithWorkgroupStorage(false, maxVec4Count + 1, 0);
+    CheckPipelineWithWorkgroupStorage(false, maxVec4Count - 3, 1);
+    CheckPipelineWithWorkgroupStorage(false, 0, maxMat4Count + 1);
+    CheckPipelineWithWorkgroupStorage(false, 4, maxMat4Count);
+}
+
+// Test that numeric ID must be unique
+TEST_F(ShaderModuleValidationTest, OverridableConstantsNumericIDConflicts) {
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+@id(1234) override c0: u32;
+@id(1234) override c1: u32;
+
+struct Buf {
+    data : array<u32, 2>
+}
+
+@group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    // make sure the overridable constants are not optimized out
+    buf.data[0] = c0;
+    buf.data[1] = c1;
+})"));
+}
+
+// Test that @binding must be less then kMaxBindingNumber
+TEST_F(ShaderModuleValidationTest, MaxBindingNumber) {
+    static_assert(kMaxBindingNumber == 65535);
+
+    wgpu::ComputePipelineDescriptor desc;
+    desc.compute.entryPoint = "main";
+
+    // kMaxBindingNumber is valid.
+    desc.compute.module = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(65535) var s : sampler;
+        @stage(compute) @workgroup_size(1) fn main() {
+            _ = s;
+        }
+    )");
+    device.CreateComputePipeline(&desc);
+
+    // kMaxBindingNumber + 1 is an error
+    desc.compute.module = utils::CreateShaderModule(device, R"(
+        @group(0) @binding(65536) var s : sampler;
+        @stage(compute) @workgroup_size(1) fn main() {
+            _ = s;
+        }
+    )");
+    ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&desc));
+}
+
+// Test that missing decorations on shader IO or bindings causes a validation error.
+TEST_F(ShaderModuleValidationTest, MissingDecorations) {
+    // Vertex input.
+    utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(@location(0) a : vec4<f32>) -> @builtin(position) vec4<f32> {
+            return vec4(1.0);
+        }
+    )");
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @stage(vertex) fn main(a : vec4<f32>) -> @builtin(position) vec4<f32> {
+            return vec4(1.0);
+        }
+    )"));
+
+    // Vertex output
+    utils::CreateShaderModule(device, R"(
+        struct Output {
+            @builtin(position) pos : vec4<f32>,
+            @location(0) a : f32,
+        }
+        @stage(vertex) fn main() -> Output {
+            var output : Output;
+            return output;
+        }
+    )");
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        struct Output {
+            @builtin(position) pos : vec4<f32>,
+            a : f32,
+        }
+        @stage(vertex) fn main() -> Output {
+            var output : Output;
+            return output;
+        }
+    )"));
+
+    // Fragment input
+    utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main(@location(0) a : vec4<f32>) -> @location(0) f32 {
+            return 1.0;
+        }
+    )");
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main(a : vec4<f32>) -> @location(0) f32 {
+            return 1.0;
+        }
+    )"));
+
+    // Fragment input
+    utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> @location(0) f32 {
+            return 1.0;
+        }
+    )");
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @stage(fragment) fn main() -> f32 {
+            return 1.0;
+        }
+    )"));
+
+    // Binding decorations
+    utils::CreateShaderModule(device, R"(
+        @group(0) @binding(0) var s : sampler;
+        @stage(fragment) fn main() -> @location(0) f32 {
+            _ = s;
+            return 1.0;
+        }
+    )");
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @binding(0) var s : sampler;
+        @stage(fragment) fn main() -> @location(0) f32 {
+            _ = s;
+            return 1.0;
+        }
+    )"));
+    ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+        @group(0) var s : sampler;
+        @stage(fragment) fn main() -> @location(0) f32 {
+            _ = s;
+            return 1.0;
+        }
+    )"));
+}
diff --git a/src/dawn/tests/unittests/validation/StorageTextureValidationTests.cpp b/src/dawn/tests/unittests/validation/StorageTextureValidationTests.cpp
new file mode 100644
index 0000000..cfe5973
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/StorageTextureValidationTests.cpp
@@ -0,0 +1,810 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class StorageTextureValidationTests : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        mDefaultVSModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+        mDefaultFSModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            })");
+    }
+
+    static const char* GetFloatImageTypeDeclaration(wgpu::TextureViewDimension dimension) {
+        // TODO(bclayton): Support and test texture_storage_1d_array
+        switch (dimension) {
+            case wgpu::TextureViewDimension::e1D:
+                return "texture_storage_1d";
+            case wgpu::TextureViewDimension::e2D:
+                return "texture_storage_2d";
+            case wgpu::TextureViewDimension::e2DArray:
+                return "texture_storage_2d_array";
+            case wgpu::TextureViewDimension::e3D:
+                return "texture_storage_3d";
+            case wgpu::TextureViewDimension::Cube:
+                return "texture_storage_cube";  // Note: Doesn't exist in WGSL (yet)
+            case wgpu::TextureViewDimension::CubeArray:
+                return "texture_storage_cube_array";  // Note: Doesn't exist in WGSL (yet)
+            case wgpu::TextureViewDimension::Undefined:
+            default:
+                UNREACHABLE();
+                return "";
+        }
+    }
+
+    static std::string CreateComputeShaderWithStorageTexture(
+        wgpu::StorageTextureAccess storageTextureBindingType,
+        wgpu::TextureFormat textureFormat,
+        wgpu::TextureViewDimension textureViewDimension = wgpu::TextureViewDimension::e2D) {
+        return CreateComputeShaderWithStorageTexture(
+            storageTextureBindingType, utils::GetWGSLImageFormatQualifier(textureFormat),
+            GetFloatImageTypeDeclaration(textureViewDimension));
+    }
+
+    static std::string CreateComputeShaderWithStorageTexture(
+        wgpu::StorageTextureAccess storageTextureBindingType,
+        const char* imageFormatQualifier,
+        const char* imageTypeDeclaration = "texture_storage_2d") {
+        const char* access = "";
+        switch (storageTextureBindingType) {
+            case wgpu::StorageTextureAccess::WriteOnly:
+                access = "write";
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        std::ostringstream ostream;
+        ostream << "@group(0) @binding(0) var image0 : " << imageTypeDeclaration << "<"
+                << imageFormatQualifier << ", " << access
+                << ">;\n"
+                   "@stage(compute) @workgroup_size(1) fn main() {\n"
+                   "    textureDimensions(image0);\n"
+                   "}\n";
+
+        return ostream.str();
+    }
+
+    wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
+                                wgpu::TextureFormat format,
+                                uint32_t sampleCount = 1,
+                                uint32_t arrayLayerCount = 1,
+                                wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = dimension;
+        descriptor.size = {16, 16, arrayLayerCount};
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::ShaderModule mDefaultVSModule;
+    wgpu::ShaderModule mDefaultFSModule;
+
+    const std::array<wgpu::StorageTextureAccess, 1> kSupportedStorageTextureAccess = {
+        wgpu::StorageTextureAccess::WriteOnly};
+};
+
+// Validate read-only storage textures can be declared in vertex and fragment shaders, while
+// writeonly storage textures cannot be used in vertex shaders.
+TEST_F(StorageTextureValidationTests, RenderPipeline) {
+    // Write-only storage textures cannot be declared in a vertex shader.
+    {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, write>;
+            @stage(vertex)
+            fn main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4<f32> {
+                textureStore(image0, vec2<i32>(i32(vertex_index), 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+                return vec4<f32>(0.0);
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.layout = nullptr;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = mDefaultFSModule;
+        ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+    }
+
+    // Write-only storage textures can be declared in a fragment shader.
+    {
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, write>;
+            @stage(fragment) fn main(@builtin(position) position : vec4<f32>) {
+                textureStore(image0, vec2<i32>(position.xy), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+            })");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.layout = nullptr;
+        descriptor.vertex.module = mDefaultVSModule;
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        device.CreateRenderPipeline(&descriptor);
+    }
+}
+
+// Validate both read-only and write-only storage textures can be declared in
+// compute shaders.
+TEST_F(StorageTextureValidationTests, ComputePipeline) {
+    // Write-only storage textures can be declared in a compute shader.
+    {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, write>;
+
+            @stage(compute) @workgroup_size(1) fn main(@builtin(local_invocation_id) LocalInvocationID : vec3<u32>) {
+                textureStore(image0, vec2<i32>(LocalInvocationID.xy), vec4<f32>(0.0, 0.0, 0.0, 0.0));
+            })");
+
+        wgpu::ComputePipelineDescriptor descriptor;
+        descriptor.layout = nullptr;
+        descriptor.compute.module = csModule;
+        descriptor.compute.entryPoint = "main";
+
+        device.CreateComputePipeline(&descriptor);
+    }
+}
+
+// Validate read-write storage textures are not currently supported.
+TEST_F(StorageTextureValidationTests, ReadWriteStorageTexture) {
+    // Read-write storage textures cannot be declared in a vertex shader by default.
+    {
+        ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, read_write>;
+            @stage(vertex) fn main() {
+                textureDimensions(image0);
+            })"));
+    }
+
+    // Read-write storage textures cannot be declared in a fragment shader by default.
+    {
+        ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, read_write>;
+            @stage(fragment) fn main() {
+                textureDimensions(image0);
+            })"));
+    }
+
+    // Read-write storage textures cannot be declared in a compute shader by default.
+    {
+        ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, read_write>;
+            @stage(compute) @workgroup_size(1) fn main() {
+                textureDimensions(image0);
+            })"));
+    }
+}
+
+// Test that using read-only storage texture and write-only storage texture in
+// BindGroupLayout is valid, while using read-write storage texture is not allowed now.
+TEST_F(StorageTextureValidationTests, BindGroupLayoutWithStorageTextureBindingType) {
+    struct TestSpec {
+        wgpu::ShaderStage stage;
+        wgpu::StorageTextureAccess type;
+        bool valid;
+    };
+    constexpr std::array<TestSpec, 6> kTestSpecs = {
+        {{wgpu::ShaderStage::Vertex, wgpu::StorageTextureAccess::WriteOnly, false},
+         {wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, true},
+         {wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, true}}};
+
+    for (const auto& testSpec : kTestSpecs) {
+        wgpu::BindGroupLayoutEntry entry = utils::BindingLayoutEntryInitializationHelper(
+            0, testSpec.stage, testSpec.type, wgpu::TextureFormat::R32Uint);
+
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = 1;
+        descriptor.entries = &entry;
+
+        if (testSpec.valid) {
+            device.CreateBindGroupLayout(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor));
+        }
+    }
+}
+
+// Validate it is an error to declare a read-only or write-only storage texture in shaders with any
+// format that doesn't support TextureUsage::StorageBinding texture usages.
+TEST_F(StorageTextureValidationTests, StorageTextureFormatInShaders) {
+    // Not include RGBA8UnormSrgb, BGRA8Unorm, BGRA8UnormSrgb because they are not related to any
+    // SPIR-V Image Formats.
+    constexpr std::array<wgpu::TextureFormat, 32> kWGPUTextureFormatSupportedAsSPIRVImageFormats = {
+        wgpu::TextureFormat::R32Uint,      wgpu::TextureFormat::R32Sint,
+        wgpu::TextureFormat::R32Float,     wgpu::TextureFormat::RGBA8Unorm,
+        wgpu::TextureFormat::RGBA8Snorm,   wgpu::TextureFormat::RGBA8Uint,
+        wgpu::TextureFormat::RGBA8Sint,    wgpu::TextureFormat::RG32Uint,
+        wgpu::TextureFormat::RG32Sint,     wgpu::TextureFormat::RG32Float,
+        wgpu::TextureFormat::RGBA16Uint,   wgpu::TextureFormat::RGBA16Sint,
+        wgpu::TextureFormat::RGBA16Float,  wgpu::TextureFormat::RGBA32Uint,
+        wgpu::TextureFormat::RGBA32Sint,   wgpu::TextureFormat::RGBA32Float,
+        wgpu::TextureFormat::R8Unorm,      wgpu::TextureFormat::R8Snorm,
+        wgpu::TextureFormat::R8Uint,       wgpu::TextureFormat::R8Sint,
+        wgpu::TextureFormat::R16Uint,      wgpu::TextureFormat::R16Sint,
+        wgpu::TextureFormat::R16Float,     wgpu::TextureFormat::RG8Unorm,
+        wgpu::TextureFormat::RG8Snorm,     wgpu::TextureFormat::RG8Uint,
+        wgpu::TextureFormat::RG8Sint,      wgpu::TextureFormat::RG16Uint,
+        wgpu::TextureFormat::RG16Sint,     wgpu::TextureFormat::RG16Float,
+        wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Ufloat};
+
+    for (wgpu::StorageTextureAccess storageTextureBindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureFormat format : kWGPUTextureFormatSupportedAsSPIRVImageFormats) {
+            std::string computeShader =
+                CreateComputeShaderWithStorageTexture(storageTextureBindingType, format);
+            if (utils::TextureFormatSupportsStorageTexture(format)) {
+                utils::CreateShaderModule(device, computeShader.c_str());
+            } else {
+                ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, computeShader.c_str()));
+            }
+        }
+    }
+}
+
+// Verify that declaring a storage texture format that is not supported in WebGPU causes validation
+// error.
+TEST_F(StorageTextureValidationTests, UnsupportedWGSLStorageTextureFormat) {
+    constexpr std::array<wgpu::TextureFormat, 16> kUnsupportedTextureFormats = {
+        wgpu::TextureFormat::R8Unorm,      wgpu::TextureFormat::R8Snorm,
+        wgpu::TextureFormat::R8Uint,       wgpu::TextureFormat::R8Sint,
+        wgpu::TextureFormat::R16Uint,      wgpu::TextureFormat::R16Sint,
+        wgpu::TextureFormat::R16Float,     wgpu::TextureFormat::RG8Unorm,
+        wgpu::TextureFormat::RG8Snorm,     wgpu::TextureFormat::RG8Uint,
+        wgpu::TextureFormat::RG8Sint,      wgpu::TextureFormat::RG16Uint,
+        wgpu::TextureFormat::RG16Sint,     wgpu::TextureFormat::RG16Float,
+        wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Ufloat,
+    };
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureFormat format : kUnsupportedTextureFormats) {
+            std::string computeShader = CreateComputeShaderWithStorageTexture(bindingType, format);
+            ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, computeShader.c_str()));
+        }
+    }
+}
+
+// Verify that declaring a storage texture dimension that isn't supported by
+// WebGPU causes a compile failure. WebGPU doesn't support using cube map
+// texture views and cube map array texture views as storage textures.
+TEST_F(StorageTextureValidationTests, UnsupportedTextureViewDimensionInShader) {
+    constexpr std::array<wgpu::TextureViewDimension, 2> kUnsupportedTextureViewDimensions = {
+        wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray};
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::R32Float;
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureViewDimension dimension : kUnsupportedTextureViewDimensions) {
+            std::string computeShader =
+                CreateComputeShaderWithStorageTexture(bindingType, kFormat, dimension);
+            ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, computeShader.c_str()));
+        }
+    }
+}
+
+// Verify that declaring a texture view dimension that is not supported to be used as storage
+// textures in WebGPU in bind group layout causes validation error. WebGPU doesn't support using
+// cube map texture views and cube map array texture views as storage textures.
+TEST_F(StorageTextureValidationTests, UnsupportedTextureViewDimensionInBindGroupLayout) {
+    constexpr std::array<wgpu::TextureViewDimension, 2> kUnsupportedTextureViewDimensions = {
+        wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray};
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::R32Float;
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureViewDimension dimension : kUnsupportedTextureViewDimensions) {
+            ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Compute, bindingType, kFormat, dimension}}));
+        }
+    }
+}
+
+// Verify when we create and use a bind group layout with storage textures in the creation of
+// render and compute pipeline, the binding type in the bind group layout must match the
+// declaration in the shader.
+TEST_F(StorageTextureValidationTests, BindGroupLayoutEntryTypeMatchesShaderDeclaration) {
+    constexpr wgpu::TextureFormat kStorageTextureFormat = wgpu::TextureFormat::R32Float;
+
+    std::initializer_list<utils::BindingLayoutEntryInitializationHelper> kSupportedBindingTypes = {
+        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
+        {0, wgpu::ShaderStage::Compute, wgpu::SamplerBindingType::Filtering},
+        {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+        {0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly,
+         kStorageTextureFormat}};
+
+    for (wgpu::StorageTextureAccess bindingTypeInShader : kSupportedStorageTextureAccess) {
+        // Create the compute shader with the given binding type.
+        std::string computeShader =
+            CreateComputeShaderWithStorageTexture(bindingTypeInShader, kStorageTextureFormat);
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, computeShader.c_str());
+
+        // Set common fields of compute pipeline descriptor.
+        wgpu::ComputePipelineDescriptor defaultComputePipelineDescriptor;
+        defaultComputePipelineDescriptor.compute.module = csModule;
+        defaultComputePipelineDescriptor.compute.entryPoint = "main";
+
+        for (utils::BindingLayoutEntryInitializationHelper bindingLayoutEntry :
+             kSupportedBindingTypes) {
+            wgpu::ComputePipelineDescriptor computePipelineDescriptor =
+                defaultComputePipelineDescriptor;
+
+            // Create bind group layout with different binding types.
+            wgpu::BindGroupLayout bindGroupLayout =
+                utils::MakeBindGroupLayout(device, {bindingLayoutEntry});
+            computePipelineDescriptor.layout =
+                utils::MakeBasicPipelineLayout(device, &bindGroupLayout);
+
+            // The binding type in the bind group layout must the same as the related image object
+            // declared in shader.
+            if (bindingLayoutEntry.storageTexture.access == bindingTypeInShader) {
+                device.CreateComputePipeline(&computePipelineDescriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&computePipelineDescriptor));
+            }
+        }
+    }
+}
+
+// Verify it is invalid not to set a valid texture format in a bind group layout when the binding
+// type is read-only or write-only storage texture.
+TEST_F(StorageTextureValidationTests, UndefinedStorageTextureFormatInBindGroupLayout) {
+    wgpu::BindGroupLayoutEntry errorBindGroupLayoutEntry;
+    errorBindGroupLayoutEntry.binding = 0;
+    errorBindGroupLayoutEntry.visibility = wgpu::ShaderStage::Compute;
+    errorBindGroupLayoutEntry.storageTexture.format = wgpu::TextureFormat::Undefined;
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        errorBindGroupLayoutEntry.storageTexture.access = bindingType;
+        ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(device, {errorBindGroupLayoutEntry}));
+    }
+}
+
+// Verify it is invalid to create a bind group layout with storage textures and an unsupported
+// storage texture format.
+TEST_F(StorageTextureValidationTests, StorageTextureFormatInBindGroupLayout) {
+    wgpu::BindGroupLayoutEntry defaultBindGroupLayoutEntry;
+    defaultBindGroupLayoutEntry.binding = 0;
+    defaultBindGroupLayoutEntry.visibility = wgpu::ShaderStage::Compute;
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureFormat textureFormat : utils::kAllTextureFormats) {
+            wgpu::BindGroupLayoutEntry bindGroupLayoutBinding = defaultBindGroupLayoutEntry;
+            bindGroupLayoutBinding.storageTexture.access = bindingType;
+            bindGroupLayoutBinding.storageTexture.format = textureFormat;
+            if (utils::TextureFormatSupportsStorageTexture(textureFormat)) {
+                utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+            } else {
+                ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding}));
+            }
+        }
+    }
+}
+
+// Verify the storage texture format in the bind group layout must match the declaration in shader.
+TEST_F(StorageTextureValidationTests, BindGroupLayoutStorageTextureFormatMatchesShaderDeclaration) {
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureFormat storageTextureFormatInShader : utils::kAllTextureFormats) {
+            if (!utils::TextureFormatSupportsStorageTexture(storageTextureFormatInShader)) {
+                continue;
+            }
+
+            // Create the compute shader module with the given binding type and storage texture
+            // format.
+            std::string computeShader =
+                CreateComputeShaderWithStorageTexture(bindingType, storageTextureFormatInShader);
+            wgpu::ShaderModule csModule = utils::CreateShaderModule(device, computeShader.c_str());
+
+            // Set common fields of compute pipeline descriptor.
+            wgpu::ComputePipelineDescriptor defaultComputePipelineDescriptor;
+            defaultComputePipelineDescriptor.compute.module = csModule;
+            defaultComputePipelineDescriptor.compute.entryPoint = "main";
+
+            // Set common fileds of bind group layout binding.
+            utils::BindingLayoutEntryInitializationHelper defaultBindGroupLayoutEntry = {
+                0, wgpu::ShaderStage::Compute, bindingType, utils::kAllTextureFormats[0]};
+
+            for (wgpu::TextureFormat storageTextureFormatInBindGroupLayout :
+                 utils::kAllTextureFormats) {
+                if (!utils::TextureFormatSupportsStorageTexture(
+                        storageTextureFormatInBindGroupLayout)) {
+                    continue;
+                }
+
+                // Create the bind group layout with the given storage texture format.
+                wgpu::BindGroupLayoutEntry bindGroupLayoutBinding = defaultBindGroupLayoutEntry;
+                bindGroupLayoutBinding.storageTexture.format =
+                    storageTextureFormatInBindGroupLayout;
+                wgpu::BindGroupLayout bindGroupLayout =
+                    utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+                // Create the compute pipeline with the bind group layout.
+                wgpu::ComputePipelineDescriptor computePipelineDescriptor =
+                    defaultComputePipelineDescriptor;
+                computePipelineDescriptor.layout =
+                    utils::MakeBasicPipelineLayout(device, &bindGroupLayout);
+
+                // The storage texture format in the bind group layout must be the same as the one
+                // declared in the shader.
+                if (storageTextureFormatInShader == storageTextureFormatInBindGroupLayout) {
+                    device.CreateComputePipeline(&computePipelineDescriptor);
+                } else {
+                    ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&computePipelineDescriptor));
+                }
+            }
+        }
+    }
+}
+
+// Verify the dimension of the bind group layout with storage textures must match the one declared
+// in shader.
+TEST_F(StorageTextureValidationTests, BindGroupLayoutViewDimensionMatchesShaderDeclaration) {
+    constexpr std::array<wgpu::TextureViewDimension, 4> kSupportedDimensions = {
+        wgpu::TextureViewDimension::e1D, wgpu::TextureViewDimension::e2D,
+        wgpu::TextureViewDimension::e2DArray, wgpu::TextureViewDimension::e3D};
+    constexpr wgpu::TextureFormat kStorageTextureFormat = wgpu::TextureFormat::R32Float;
+
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        for (wgpu::TextureViewDimension dimensionInShader : kSupportedDimensions) {
+            // Create the compute shader with the given texture view dimension.
+            std::string computeShader = CreateComputeShaderWithStorageTexture(
+                bindingType, kStorageTextureFormat, dimensionInShader);
+            wgpu::ShaderModule csModule = utils::CreateShaderModule(device, computeShader.c_str());
+
+            // Set common fields of compute pipeline descriptor.
+            wgpu::ComputePipelineDescriptor defaultComputePipelineDescriptor;
+            defaultComputePipelineDescriptor.compute.module = csModule;
+            defaultComputePipelineDescriptor.compute.entryPoint = "main";
+
+            // Set common fields of bind group layout binding.
+            utils::BindingLayoutEntryInitializationHelper defaultBindGroupLayoutEntry = {
+                0, wgpu::ShaderStage::Compute, bindingType, kStorageTextureFormat};
+
+            for (wgpu::TextureViewDimension dimensionInBindGroupLayout : kSupportedDimensions) {
+                // Create the bind group layout with the given texture view dimension.
+                wgpu::BindGroupLayoutEntry bindGroupLayoutBinding = defaultBindGroupLayoutEntry;
+                bindGroupLayoutBinding.storageTexture.viewDimension = dimensionInBindGroupLayout;
+                wgpu::BindGroupLayout bindGroupLayout =
+                    utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+                // Create the compute pipeline with the bind group layout.
+                wgpu::ComputePipelineDescriptor computePipelineDescriptor =
+                    defaultComputePipelineDescriptor;
+                computePipelineDescriptor.layout =
+                    utils::MakeBasicPipelineLayout(device, &bindGroupLayout);
+
+                // The texture dimension in the bind group layout must be the same as the one
+                // declared in the shader.
+                if (dimensionInShader == dimensionInBindGroupLayout) {
+                    device.CreateComputePipeline(&computePipelineDescriptor);
+                } else {
+                    ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&computePipelineDescriptor));
+                }
+            }
+        }
+    }
+}
+
+// Verify that only a texture view can be used as a read-only or write-only storage texture in a
+// bind group.
+TEST_F(StorageTextureValidationTests, StorageTextureBindingTypeInBindGroup) {
+    constexpr wgpu::TextureFormat kStorageTextureFormat = wgpu::TextureFormat::R32Float;
+    for (wgpu::StorageTextureAccess storageBindingType : kSupportedStorageTextureAccess) {
+        // Create a bind group layout.
+        wgpu::BindGroupLayoutEntry bindGroupLayoutBinding;
+        bindGroupLayoutBinding.binding = 0;
+        bindGroupLayoutBinding.visibility = wgpu::ShaderStage::Compute;
+        bindGroupLayoutBinding.storageTexture.access = storageBindingType;
+        bindGroupLayoutBinding.storageTexture.format = kStorageTextureFormat;
+        wgpu::BindGroupLayout bindGroupLayout =
+            utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+        // Buffers are not allowed to be used as storage textures in a bind group.
+        {
+            wgpu::BufferDescriptor descriptor;
+            descriptor.size = 1024;
+            descriptor.usage = wgpu::BufferUsage::Uniform;
+            wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bindGroupLayout, {{0, buffer}}));
+        }
+
+        // Samplers are not allowed to be used as storage textures in a bind group.
+        {
+            wgpu::Sampler sampler = device.CreateSampler();
+            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bindGroupLayout, {{0, sampler}}));
+        }
+
+        // Texture views are allowed to be used as storage textures in a bind group.
+        {
+            wgpu::TextureView textureView =
+                CreateTexture(wgpu::TextureUsage::StorageBinding, kStorageTextureFormat)
+                    .CreateView();
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, textureView}});
+        }
+    }
+}
+
+// Verify that a texture used as read-only or write-only storage texture in a bind group must be
+// created with the texture usage wgpu::TextureUsage::StorageBinding.
+TEST_F(StorageTextureValidationTests, StorageTextureUsageInBindGroup) {
+    constexpr wgpu::TextureFormat kStorageTextureFormat = wgpu::TextureFormat::R32Float;
+    constexpr std::array<wgpu::TextureUsage, 6> kTextureUsages = {
+        wgpu::TextureUsage::CopySrc,          wgpu::TextureUsage::CopyDst,
+        wgpu::TextureUsage::TextureBinding,   wgpu::TextureUsage::StorageBinding,
+        wgpu::TextureUsage::RenderAttachment, wgpu::TextureUsage::Present};
+
+    for (wgpu::StorageTextureAccess storageBindingType : kSupportedStorageTextureAccess) {
+        // Create a bind group layout.
+        wgpu::BindGroupLayoutEntry bindGroupLayoutBinding;
+        bindGroupLayoutBinding.binding = 0;
+        bindGroupLayoutBinding.visibility = wgpu::ShaderStage::Compute;
+        bindGroupLayoutBinding.storageTexture.access = storageBindingType;
+        bindGroupLayoutBinding.storageTexture.format = wgpu::TextureFormat::R32Float;
+        wgpu::BindGroupLayout bindGroupLayout =
+            utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+        for (wgpu::TextureUsage usage : kTextureUsages) {
+            // Create texture views with different texture usages
+            wgpu::TextureView textureView =
+                CreateTexture(usage, kStorageTextureFormat).CreateView();
+
+            // Verify that the texture used as storage texture must be created with the texture
+            // usage wgpu::TextureUsage::StorageBinding.
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                utils::MakeBindGroup(device, bindGroupLayout, {{0, textureView}});
+            } else {
+                ASSERT_DEVICE_ERROR(
+                    utils::MakeBindGroup(device, bindGroupLayout, {{0, textureView}}));
+            }
+        }
+    }
+}
+
+// Verify that the format of a texture used as read-only or write-only storage texture in a bind
+// group must match the corresponding bind group binding.
+TEST_F(StorageTextureValidationTests, StorageTextureFormatInBindGroup) {
+    for (wgpu::StorageTextureAccess storageBindingType : kSupportedStorageTextureAccess) {
+        wgpu::BindGroupLayoutEntry defaultBindGroupLayoutEntry;
+        defaultBindGroupLayoutEntry.binding = 0;
+        defaultBindGroupLayoutEntry.visibility = wgpu::ShaderStage::Compute;
+        defaultBindGroupLayoutEntry.storageTexture.access = storageBindingType;
+
+        for (wgpu::TextureFormat formatInBindGroupLayout : utils::kAllTextureFormats) {
+            if (!utils::TextureFormatSupportsStorageTexture(formatInBindGroupLayout)) {
+                continue;
+            }
+
+            // Create a bind group layout with given storage texture format.
+            wgpu::BindGroupLayoutEntry bindGroupLayoutBinding = defaultBindGroupLayoutEntry;
+            bindGroupLayoutBinding.storageTexture.format = formatInBindGroupLayout;
+            wgpu::BindGroupLayout bindGroupLayout =
+                utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+            for (wgpu::TextureFormat textureViewFormat : utils::kAllTextureFormats) {
+                if (!utils::TextureFormatSupportsStorageTexture(textureViewFormat)) {
+                    continue;
+                }
+
+                // Create texture views with different texture formats.
+                wgpu::TextureView storageTextureView =
+                    CreateTexture(wgpu::TextureUsage::StorageBinding, textureViewFormat)
+                        .CreateView();
+
+                // Verify that the format of the texture view used as storage texture in a bind
+                // group must match the storage texture format declaration in the bind group layout.
+                if (textureViewFormat == formatInBindGroupLayout) {
+                    utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTextureView}});
+                } else {
+                    ASSERT_DEVICE_ERROR(
+                        utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTextureView}}));
+                }
+            }
+        }
+    }
+}
+
+// Verify that the dimension of a texture view used as read-only or write-only storage texture in a
+// bind group must match the corresponding bind group binding.
+TEST_F(StorageTextureValidationTests, StorageTextureViewDimensionInBindGroup) {
+    constexpr wgpu::TextureFormat kStorageTextureFormat = wgpu::TextureFormat::R32Float;
+    constexpr uint32_t kDepthOrArrayLayers = 6u;
+
+    // TODO(crbug.com/dawn/814): test the use of 1D texture view dimensions when they are
+    // supported in Dawn.
+    constexpr std::array<wgpu::TextureViewDimension, 3> kSupportedDimensions = {
+        wgpu::TextureViewDimension::e2D, wgpu::TextureViewDimension::e2DArray,
+        wgpu::TextureViewDimension::e3D};
+
+    wgpu::TextureViewDescriptor kDefaultTextureViewDescriptor;
+    kDefaultTextureViewDescriptor.format = kStorageTextureFormat;
+    kDefaultTextureViewDescriptor.baseMipLevel = 0;
+    kDefaultTextureViewDescriptor.mipLevelCount = 1;
+    kDefaultTextureViewDescriptor.baseArrayLayer = 0;
+    kDefaultTextureViewDescriptor.arrayLayerCount = 1u;
+
+    for (wgpu::StorageTextureAccess storageBindingType : kSupportedStorageTextureAccess) {
+        wgpu::BindGroupLayoutEntry defaultBindGroupLayoutEntry;
+        defaultBindGroupLayoutEntry.binding = 0;
+        defaultBindGroupLayoutEntry.visibility = wgpu::ShaderStage::Compute;
+        defaultBindGroupLayoutEntry.storageTexture.access = storageBindingType;
+        defaultBindGroupLayoutEntry.storageTexture.format = kStorageTextureFormat;
+
+        for (wgpu::TextureViewDimension dimensionInBindGroupLayout : kSupportedDimensions) {
+            // Create a bind group layout with given texture view dimension.
+            wgpu::BindGroupLayoutEntry bindGroupLayoutBinding = defaultBindGroupLayoutEntry;
+            bindGroupLayoutBinding.storageTexture.viewDimension = dimensionInBindGroupLayout;
+            wgpu::BindGroupLayout bindGroupLayout =
+                utils::MakeBindGroupLayout(device, {bindGroupLayoutBinding});
+
+            for (wgpu::TextureViewDimension dimensionOfTextureView : kSupportedDimensions) {
+                // Create a texture view with given texture view dimension.
+                wgpu::Texture texture =
+                    CreateTexture(wgpu::TextureUsage::StorageBinding, kStorageTextureFormat, 1,
+                                  kDepthOrArrayLayers,
+                                  utils::ViewDimensionToTextureDimension(dimensionOfTextureView));
+
+                wgpu::TextureViewDescriptor textureViewDescriptor = kDefaultTextureViewDescriptor;
+                textureViewDescriptor.dimension = dimensionOfTextureView;
+                wgpu::TextureView storageTextureView = texture.CreateView(&textureViewDescriptor);
+
+                // Verify that the dimension of the texture view used as storage texture in a bind
+                // group must match the texture view dimension declaration in the bind group layout.
+                if (dimensionInBindGroupLayout == dimensionOfTextureView) {
+                    utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTextureView}});
+                } else {
+                    ASSERT_DEVICE_ERROR(
+                        utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTextureView}}));
+                }
+            }
+        }
+    }
+}
+
+// Verify multisampled storage textures cannot be supported now.
+TEST_F(StorageTextureValidationTests, MultisampledStorageTexture) {
+    for (wgpu::StorageTextureAccess bindingType : kSupportedStorageTextureAccess) {
+        std::string computeShader =
+            CreateComputeShaderWithStorageTexture(bindingType, "", "image2DMS");
+        ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, computeShader.c_str()));
+    }
+}
+
+// Verify it is valid to use a texture as either read-only storage texture or write-only storage
+// texture in a render pass.
+TEST_F(StorageTextureValidationTests, StorageTextureInRenderPass) {
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture storageTexture = CreateTexture(wgpu::TextureUsage::StorageBinding, kFormat);
+
+    wgpu::Texture outputAttachment = CreateTexture(wgpu::TextureUsage::RenderAttachment, kFormat);
+    utils::ComboRenderPassDescriptor renderPassDescriptor({outputAttachment.CreateView()});
+
+    for (wgpu::StorageTextureAccess storageTextureType : kSupportedStorageTextureAccess) {
+        // Create a bind group that contains a storage texture.
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, storageTextureType, kFormat}});
+
+        wgpu::BindGroup bindGroupWithStorageTexture =
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTexture.CreateView()}});
+
+        // It is valid to use a texture as read-only or write-only storage texture in the render
+        // pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPassEncoder.SetBindGroup(0, bindGroupWithStorageTexture);
+        renderPassEncoder.End();
+        encoder.Finish();
+    }
+}
+
+// Verify it is valid to use a a texture as both read-only storage texture and sampled texture in
+// one render pass, while it is invalid to use a texture as both write-only storage texture and
+// sampled texture in one render pass.
+TEST_F(StorageTextureValidationTests, StorageTextureAndSampledTextureInOneRenderPass) {
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture storageTexture = CreateTexture(
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding, kFormat);
+
+    wgpu::Texture outputAttachment = CreateTexture(wgpu::TextureUsage::RenderAttachment, kFormat);
+    utils::ComboRenderPassDescriptor renderPassDescriptor({outputAttachment.CreateView()});
+
+    // Create a bind group that contains a storage texture and a sampled texture.
+    for (wgpu::StorageTextureAccess storageTextureType : kSupportedStorageTextureAccess) {
+        // Create a bind group that binds the same texture as both storage texture and sampled
+        // texture.
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, storageTextureType, kFormat},
+                     {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, bindGroupLayout,
+            {{0, storageTexture.CreateView()}, {1, storageTexture.CreateView()}});
+
+        // It is valid to use a a texture as both read-only storage texture and sampled texture in
+        // one render pass, while it is invalid to use a texture as both write-only storage
+        // texture an sampled texture in one render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPassEncoder.SetBindGroup(0, bindGroup);
+        renderPassEncoder.End();
+        switch (storageTextureType) {
+            case wgpu::StorageTextureAccess::WriteOnly:
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+    }
+}
+
+// Verify it is invalid to use a a texture as both storage texture (either read-only or write-only)
+// and render attachment in one render pass.
+TEST_F(StorageTextureValidationTests, StorageTextureAndRenderAttachmentInOneRenderPass) {
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture storageTexture = CreateTexture(
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::RenderAttachment, kFormat);
+    utils::ComboRenderPassDescriptor renderPassDescriptor({storageTexture.CreateView()});
+
+    for (wgpu::StorageTextureAccess storageTextureType : kSupportedStorageTextureAccess) {
+        // Create a bind group that contains a storage texture.
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, storageTextureType, kFormat}});
+        wgpu::BindGroup bindGroupWithStorageTexture =
+            utils::MakeBindGroup(device, bindGroupLayout, {{0, storageTexture.CreateView()}});
+
+        // It is invalid to use a texture as both storage texture and render attachment in one
+        // render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
+        renderPassEncoder.SetBindGroup(0, bindGroupWithStorageTexture);
+        renderPassEncoder.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Verify it is valid to use a texture as both storage texture (read-only or write-only) and
+// sampled texture in one compute pass.
+TEST_F(StorageTextureValidationTests, StorageTextureAndSampledTextureInOneComputePass) {
+    constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    wgpu::Texture storageTexture = CreateTexture(
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding, kFormat);
+
+    for (wgpu::StorageTextureAccess storageTextureType : kSupportedStorageTextureAccess) {
+        // Create a bind group that binds the same texture as both storage texture and sampled
+        // texture.
+        wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, storageTextureType, kFormat},
+                     {1, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, bindGroupLayout,
+            {{0, storageTexture.CreateView()}, {1, storageTexture.CreateView()}});
+
+        // It is valid to use a a texture as both storage texture (read-only or write-only) and
+        // sampled texture in one compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder computePassEncoder = encoder.BeginComputePass();
+        computePassEncoder.SetBindGroup(0, bindGroup);
+        computePassEncoder.End();
+        encoder.Finish();
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
new file mode 100644
index 0000000..428777c
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
@@ -0,0 +1,144 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+namespace {
+
+    class TextureSubresourceTest : public ValidationTest {
+      public:
+        static constexpr uint32_t kSize = 32u;
+        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        wgpu::Texture CreateTexture(uint32_t mipLevelCount,
+                                    uint32_t arrayLayerCount,
+                                    wgpu::TextureUsage usage) {
+            wgpu::TextureDescriptor texDesc;
+            texDesc.dimension = wgpu::TextureDimension::e2D;
+            texDesc.size = {kSize, kSize, arrayLayerCount};
+            texDesc.sampleCount = 1;
+            texDesc.mipLevelCount = mipLevelCount;
+            texDesc.usage = usage;
+            texDesc.format = kFormat;
+            return device.CreateTexture(&texDesc);
+        }
+
+        wgpu::TextureView CreateTextureView(wgpu::Texture texture,
+                                            uint32_t baseMipLevel,
+                                            uint32_t baseArrayLayer) {
+            wgpu::TextureViewDescriptor viewDesc;
+            viewDesc.format = kFormat;
+            viewDesc.baseArrayLayer = baseArrayLayer;
+            viewDesc.arrayLayerCount = 1;
+            viewDesc.baseMipLevel = baseMipLevel;
+            viewDesc.mipLevelCount = 1;
+            viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+            return texture.CreateView(&viewDesc);
+        }
+
+        void TestRenderPass(const wgpu::TextureView& renderView,
+                            const wgpu::TextureView& samplerView) {
+            // Create bind group
+            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+
+            utils::ComboRenderPassDescriptor renderPassDesc({renderView});
+
+            // It is valid to read from and write into different subresources of the same texture
+            {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+                encoder.Finish();
+            }
+
+            // It is not currently possible to test that it is valid to have multiple reads from a
+            // subresource while there is a single write in another subresource.
+
+            // It is invalid to read and write into the same subresources
+            {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, renderView}});
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+                ASSERT_DEVICE_ERROR(encoder.Finish());
+            }
+
+            // It is valid to write into and then read from the same level of a texture in different
+            // render passes
+            {
+                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+
+                wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                    device, {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly,
+                              kFormat}});
+                wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, bgl1, {{0, samplerView}});
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPassDesc);
+                pass1.SetBindGroup(0, bindGroup1);
+                pass1.End();
+
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+                pass.SetBindGroup(0, bindGroup);
+                pass.End();
+
+                encoder.Finish();
+            }
+        }
+    };
+
+    // Test different mipmap levels
+    TEST_F(TextureSubresourceTest, MipmapLevelsTest) {
+        // Create texture with 2 mipmap levels and 1 layer
+        wgpu::Texture texture = CreateTexture(2, 1,
+                                              wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::RenderAttachment |
+                                                  wgpu::TextureUsage::StorageBinding);
+
+        // Create two views on different mipmap levels.
+        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+        wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
+        TestRenderPass(samplerView, renderView);
+    }
+
+    // Test different array layers
+    TEST_F(TextureSubresourceTest, ArrayLayersTest) {
+        // Create texture with 1 mipmap level and 2 layers
+        wgpu::Texture texture = CreateTexture(1, 2,
+                                              wgpu::TextureUsage::TextureBinding |
+                                                  wgpu::TextureUsage::RenderAttachment |
+                                                  wgpu::TextureUsage::StorageBinding);
+
+        // Create two views on different layers.
+        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+        wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
+
+        TestRenderPass(samplerView, renderView);
+    }
+
+    // TODO (yunchao.he@intel.com):
+    //	* Add tests for compute, in which texture subresource is traced per dispatch.
+    //
+    //	* Add tests for multiple encoders upon the same resource simultaneously. This situation fits
+    //	some cases like VR, multi-threading, etc.
+    //
+    //	* Add tests for conflicts between usages in two render bundles used in the same pass.
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/TextureValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
new file mode 100644
index 0000000..3136c76
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
@@ -0,0 +1,891 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    constexpr wgpu::TextureFormat kNonRenderableColorFormats[] = {
+        wgpu::TextureFormat::RG11B10Ufloat, wgpu::TextureFormat::RGB9E5Ufloat,
+        wgpu::TextureFormat::R8Snorm,       wgpu::TextureFormat::RG8Snorm,
+        wgpu::TextureFormat::RGBA8Snorm,
+    };
+
+    wgpu::TextureDimension kDimensions[] = {
+        wgpu::TextureDimension::e1D,
+        wgpu::TextureDimension::e3D,
+    };
+
+    class TextureValidationTest : public ValidationTest {
+      protected:
+        void SetUp() override {
+            ValidationTest::SetUp();
+
+            queue = device.GetQueue();
+        }
+
+        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.size.width = kWidth;
+            descriptor.size.height = kHeight;
+            descriptor.size.depthOrArrayLayers = kDefaultDepth;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            descriptor.sampleCount = kDefaultSampleCount;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.format = kDefaultTextureFormat;
+            descriptor.usage =
+                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+            return descriptor;
+        }
+
+        wgpu::Queue queue;
+
+      private:
+        // Choose the LCM of all current compressed texture format texel dimensions as the
+        // dimensions of the default texture.
+        static constexpr uint32_t kWidth = 120;
+        static constexpr uint32_t kHeight = 120;
+        static constexpr uint32_t kDefaultDepth = 1;
+        static constexpr uint32_t kDefaultMipLevels = 1;
+        static constexpr uint32_t kDefaultSampleCount = 1;
+
+        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
+            wgpu::TextureFormat::RGBA8Unorm;
+    };
+
+    // Test the validation of non-zero texture usage
+    TEST_F(TextureValidationTest, UsageNonZero) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+        // Descriptor with proper usage is allowed
+        {
+            descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // It is an error to create a texture with zero usage
+        {
+            descriptor.usage = wgpu::TextureUsage::None;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test the validation of sample count
+    TEST_F(TextureValidationTest, SampleCount) {
+        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+
+        // sampleCount == 1 is allowed.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 1;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // sampleCount == 4 is allowed.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // It is an error to create a texture with an invalid sampleCount.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 3;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // It is an error to create a multisampled texture with mipLevelCount > 1.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+            descriptor.mipLevelCount = 2;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // It is an error to create a multisampled 1D or 3D texture.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+
+            descriptor.size.height = 1;
+            descriptor.dimension = wgpu::TextureDimension::e1D;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // It is an error to create a multisample texture when the format cannot support
+        // multisample.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+            descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+            for (wgpu::TextureFormat format : utils::kFormatsInCoreSpec) {
+                descriptor.format = format;
+                if (utils::TextureFormatSupportsMultisampling(format)) {
+                    device.CreateTexture(&descriptor);
+                } else {
+                    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+                }
+            }
+        }
+
+        // Currently we do not support multisampled 2D textures with depth > 1.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+            descriptor.size.depthOrArrayLayers = 2;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // It is an error to set TextureUsage::StorageBinding when sampleCount > 1.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.sampleCount = 4;
+            descriptor.usage |= wgpu::TextureUsage::StorageBinding;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test the validation of the mip level count
+    TEST_F(TextureValidationTest, MipLevelCount) {
+        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+
+        // mipLevelCount == 1 is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 32;
+            descriptor.mipLevelCount = 1;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // mipLevelCount == 0 is an error
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 32;
+            descriptor.mipLevelCount = 0;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Full mip chains are allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 32;
+            // Mip level sizes: 32, 16, 8, 4, 2, 1
+            descriptor.mipLevelCount = 6;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // Test non-power-of-two width
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            // Mip level width: 31, 15, 7, 3, 1
+            descriptor.size.width = 31;
+            descriptor.size.height = 4;
+
+            // Full mip chains on non-power-of-two width are allowed
+            descriptor.mipLevelCount = 5;
+            device.CreateTexture(&descriptor);
+
+            // Too big mip chains on non-power-of-two width are disallowed
+            descriptor.mipLevelCount = 6;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Test non-power-of-two height
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 4;
+            // Mip level height: 31, 15, 7, 3, 1
+            descriptor.size.height = 31;
+
+            // Full mip chains on non-power-of-two height are allowed
+            descriptor.mipLevelCount = 5;
+            device.CreateTexture(&descriptor);
+
+            // Too big mip chains on non-power-of-two height are disallowed
+            descriptor.mipLevelCount = 6;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Undefined shift check if miplevel is bigger than the integer bit width.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 32;
+            descriptor.mipLevelCount = 100;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Non square mip map halves the resolution until a 1x1 dimension
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 8;
+            // Mip maps: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 * 1, 1 * 1
+            descriptor.mipLevelCount = 6;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // Non square mip map for a 3D textures
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 8;
+            descriptor.size.depthOrArrayLayers = 64;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+            // Non square mip map halves width, height and depth until a 1x1x1 dimension for a 3D
+            // texture. So there are 7 mipmaps at most: 32 * 8 * 64, 16 * 4 * 32, 8 * 2 * 16,
+            // 4 * 1 * 8, 2 * 1 * 4, 1 * 1 * 2, 1 * 1 * 1.
+            descriptor.mipLevelCount = 7;
+            device.CreateTexture(&descriptor);
+        }
+
+        // Non square mip map for 2D textures with depth > 1
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = 32;
+            descriptor.size.height = 8;
+            descriptor.size.depthOrArrayLayers = 64;
+            // Non square mip map halves width and height until a 1x1 dimension for a 2D texture,
+            // even its depth > 1. So there are 6 mipmaps at most: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 *
+            // 1, 1 * 1.
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.mipLevelCount = 7;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            descriptor.mipLevelCount = 6;
+            device.CreateTexture(&descriptor);
+        }
+
+        // Mip level equal to the maximum for a 2D texture is allowed
+        {
+            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = maxTextureDimension2D;
+            descriptor.size.height = maxTextureDimension2D;
+            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;
+
+            device.CreateTexture(&descriptor);
+        }
+
+        // Mip level exceeding the maximum for a 2D texture not allowed
+        {
+            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = maxTextureDimension2D;
+            descriptor.size.height = maxTextureDimension2D;
+            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;
+
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // 1D textures can only have a single mip level.
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.dimension = wgpu::TextureDimension::e1D;
+            descriptor.size.width = 32;
+            descriptor.size.height = 1;
+
+            // Having a single mip level is allowed.
+            descriptor.mipLevelCount = 1;
+            device.CreateTexture(&descriptor);
+
+            // Having more than 1 is an error.
+            descriptor.mipLevelCount = 2;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test the validation of array layer count
+    TEST_F(TextureValidationTest, ArrayLayerCount) {
+        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+        // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Array layer count less than maxTextureArrayLayers is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
+            device.CreateTexture(&descriptor);
+        }
+
+        // Array layer count equal to maxTextureArrayLayers is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
+            device.CreateTexture(&descriptor);
+        }
+    }
+
+    // Test the validation of 1D texture size
+    TEST_F(TextureValidationTest, 1DTextureSize) {
+        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+        wgpu::TextureDescriptor defaultDescriptor;
+        defaultDescriptor.size = {4, 1, 1};
+        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+        defaultDescriptor.usage = wgpu::TextureUsage::CopySrc;
+        defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+
+        // Width must be in [1, kMaxTextureDimension1D]
+        {
+            wgpu::TextureDescriptor desc = defaultDescriptor;
+            desc.size.width = 0;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+            desc.size.width = 1;
+            device.CreateTexture(&desc);
+
+            desc.size.width = supportedLimits.maxTextureDimension1D;
+            device.CreateTexture(&desc);
+            desc.size.width = supportedLimits.maxTextureDimension1D + 1u;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+        }
+
+        // Height must be 1
+        {
+            wgpu::TextureDescriptor desc = defaultDescriptor;
+            desc.size.height = 2;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+            desc.size.height = 0;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+        }
+
+        // DepthOrArrayLayers must be 1
+        {
+            wgpu::TextureDescriptor desc = defaultDescriptor;
+            desc.size.depthOrArrayLayers = 2;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+            desc.size.depthOrArrayLayers = 0;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+        }
+    }
+
+    // Test the validation of 2D texture size
+    TEST_F(TextureValidationTest, 2DTextureSize) {
+        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+        // Out-of-bound texture dimension is not allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size.width = 1;
+            descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Zero-sized texture is not allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size = {0, 1, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, 0, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, 1, 0};
+            // 2D texture with depth=0 is not allowed
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Texture size less than max dimension is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
+            descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
+            device.CreateTexture(&descriptor);
+        }
+
+        // Texture size equal to max dimension is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.size.width = supportedLimits.maxTextureDimension2D;
+            descriptor.size.height = supportedLimits.maxTextureDimension2D;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            device.CreateTexture(&descriptor);
+        }
+    }
+
+    // Test the validation of 3D texture size
+    TEST_F(TextureValidationTest, 3DTextureSize) {
+        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+        // Out-of-bound texture dimension is not allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+
+            descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Zero-sized texture is not allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+
+            descriptor.size = {0, 1, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, 0, 1};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+            descriptor.size = {1, 1, 0};
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+
+        // Texture size less than max dimension is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+
+            descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
+                               supportedLimits.maxTextureDimension3D >> 1,
+                               supportedLimits.maxTextureDimension3D >> 1};
+            device.CreateTexture(&descriptor);
+        }
+
+        // Texture size equal to max dimension is allowed
+        {
+            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+
+            descriptor.size = {supportedLimits.maxTextureDimension3D,
+                               supportedLimits.maxTextureDimension3D,
+                               supportedLimits.maxTextureDimension3D};
+            device.CreateTexture(&descriptor);
+        }
+    }
+
+    // Test that depth/stencil formats are invalid for 1D and 3D texture
+    TEST_F(TextureValidationTest, DepthStencilFormatsFor1DAnd3D) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+        wgpu::TextureFormat depthStencilFormats[] = {
+            wgpu::TextureFormat::Stencil8,     wgpu::TextureFormat::Depth16Unorm,
+            wgpu::TextureFormat::Depth24Plus,  wgpu::TextureFormat::Depth24PlusStencil8,
+            wgpu::TextureFormat::Depth32Float,
+        };
+
+        for (wgpu::TextureDimension dimension : kDimensions) {
+            for (wgpu::TextureFormat format : depthStencilFormats) {
+                descriptor.format = format;
+                descriptor.dimension = dimension;
+                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            }
+        }
+    }
+
+    // Test that it is valid to destroy a texture
+    TEST_F(TextureValidationTest, DestroyTexture) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        texture.Destroy();
+    }
+
+    // Test that it's valid to destroy a destroyed texture
+    TEST_F(TextureValidationTest, DestroyDestroyedTexture) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        texture.Destroy();
+        texture.Destroy();
+    }
+
+    // Test that it's invalid to submit a destroyed texture in a queue
+    // in the case of destroy, encode, submit
+    TEST_F(TextureValidationTest, DestroyEncodeSubmit) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        wgpu::TextureView textureView = texture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({textureView});
+
+        // Destroy the texture
+        texture.Destroy();
+
+        wgpu::CommandEncoder encoder_post_destroy = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder_post_destroy.BeginRenderPass(&renderPass);
+            pass.End();
+        }
+        wgpu::CommandBuffer commands = encoder_post_destroy.Finish();
+
+        // Submit should fail due to destroyed texture
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+
+    // Test that it's invalid to submit a destroyed texture in a queue
+    // in the case of encode, destroy, submit
+    TEST_F(TextureValidationTest, EncodeDestroySubmit) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+        wgpu::TextureView textureView = texture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({textureView});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+        }
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        // Destroy the texture
+        texture.Destroy();
+
+        // Submit should fail due to destroyed texture
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+
+    // Test it is an error to create an RenderAttachment texture with a non-renderable format.
+    TEST_F(TextureValidationTest, NonRenderableAndRenderAttachment) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {1, 1, 1};
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+        // Succeeds because RGBA8Unorm is renderable
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        device.CreateTexture(&descriptor);
+
+        for (wgpu::TextureFormat format : kNonRenderableColorFormats) {
+            // Fails because `format` is non-renderable
+            descriptor.format = format;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test it is an error to create a Storage texture with any format that doesn't support
+    // TextureUsage::StorageBinding texture usages.
+    TEST_F(TextureValidationTest, TextureFormatNotSupportTextureUsageStorage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {1, 1, 1};
+        descriptor.usage = wgpu::TextureUsage::StorageBinding;
+
+        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+            descriptor.format = format;
+            if (utils::TextureFormatSupportsStorageTexture(format)) {
+                device.CreateTexture(&descriptor);
+            } else {
+                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            }
+        }
+    }
+
+    // Test it is an error to create a texture with format "Undefined".
+    TEST_F(TextureValidationTest, TextureFormatUndefined) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Test that the creation of a texture with depth24unorm-stencil8 will fail when the feature
+    // Depth24UnormStencil8 is not enabled.
+    TEST_F(TextureValidationTest, UseD24S8FormatWithoutEnablingFeature) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Test that the creation of a texture with depth32float-stencil8 will fail when the feature
+    // Depth32FloatStencil8 is not enabled.
+    TEST_F(TextureValidationTest, UseD32S8FormatWithoutEnablingFeature) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Test that the creation of a texture with BC format will fail when the feature
+    // textureCompressionBC is not enabled.
+    TEST_F(TextureValidationTest, UseBCFormatWithoutEnablingFeature) {
+        for (wgpu::TextureFormat format : utils::kBCFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test that the creation of a texture with ETC2 format will fail when the feature
+    // textureCompressionETC2 is not enabled.
+    TEST_F(TextureValidationTest, UseETC2FormatWithoutEnablingFeature) {
+        for (wgpu::TextureFormat format : utils::kETC2Formats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test that the creation of a texture with ASTC format will fail when the feature
+    // textureCompressionASTC is not enabled.
+    TEST_F(TextureValidationTest, UseASTCFormatWithoutEnablingFeature) {
+        for (wgpu::TextureFormat format : utils::kASTCFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    class D24S8TextureFormatsValidationTests : public TextureValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 1;
+            return adapter.CreateDevice(&descriptor);
+        }
+    };
+
+    // Test that depth24unorm-stencil8 format is invalid for 3D texture
+    TEST_F(D24S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+        for (wgpu::TextureDimension dimension : kDimensions) {
+            descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+            descriptor.dimension = dimension;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    class D32S8TextureFormatsValidationTests : public TextureValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 1;
+            return adapter.CreateDevice(&descriptor);
+        }
+    };
+
+    // Test that depth32float-stencil8 format is invalid for 3D texture
+    TEST_F(D32S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+        for (wgpu::TextureDimension dimension : kDimensions) {
+            descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+            descriptor.dimension = dimension;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    class CompressedTextureFormatsValidationTests : public TextureValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                     wgpu::FeatureName::TextureCompressionETC2,
+                                                     wgpu::FeatureName::TextureCompressionASTC};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 3;
+
+            // TODO(dawn:814): Remove when 1D texture support is complete.
+            const char* kDisallowUnsafeApis = "disallow_unsafe_apis";
+            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+            togglesDesc.forceDisabledToggles = &kDisallowUnsafeApis;
+            togglesDesc.forceDisabledTogglesCount = 1;
+
+            descriptor.nextInChain = &togglesDesc;
+
+            return adapter.CreateDevice(&descriptor);
+        }
+
+        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+            wgpu::TextureDescriptor descriptor =
+                TextureValidationTest::CreateDefaultTextureDescriptor();
+            descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                               wgpu::TextureUsage::TextureBinding;
+            descriptor.size.width = kWidth;
+            descriptor.size.height = kHeight;
+            return descriptor;
+        }
+
+      private:
+        // Choose the LCM of all current compressed texture format texel dimensions as the
+        // dimensions of the default texture.
+        static constexpr uint32_t kWidth = 120;
+        static constexpr uint32_t kHeight = 120;
+    };
+
+    // Test that only CopySrc, CopyDst and Sampled are accepted as usage in compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, TextureUsage) {
+        wgpu::TextureUsage invalidUsages[] = {
+            wgpu::TextureUsage::RenderAttachment,
+            wgpu::TextureUsage::StorageBinding,
+            wgpu::TextureUsage::Present,
+        };
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            for (wgpu::TextureUsage usage : invalidUsages) {
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                descriptor.usage = usage;
+                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            }
+        }
+    }
+
+    // Test that using various MipLevelCount is allowed for compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, MipLevelCount) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            for (uint32_t mipLevels : {1, 3, 6}) {
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                descriptor.mipLevelCount = mipLevels;
+                device.CreateTexture(&descriptor);
+            }
+        }
+    }
+
+    // Test that it is invalid to specify SampleCount>1 in compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, SampleCount) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.sampleCount = 4;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test that it is allowed to create a 2D texture with depth>1 in compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, 2DArrayTexture) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.depthOrArrayLayers = 6;
+            device.CreateTexture(&descriptor);
+        }
+    }
+
+    // Test that it is not allowed to create a 1D texture in compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, 1DTexture) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            // Unfortunately we can't use the block height here otherwise validation for the max
+            // texture 1D size will trigger. We check the error message below to make sure the
+            // correct code path is covered.
+            descriptor.size.height = 1;
+            descriptor.size.depthOrArrayLayers = 1;
+            descriptor.dimension = wgpu::TextureDimension::e1D;
+            ASSERT_DEVICE_ERROR(
+                device.CreateTexture(&descriptor),
+                testing::HasSubstr(
+                    "The dimension (TextureDimension::e1D) of a texture with a compressed format"));
+        }
+    }
+
+    // Test that it is not allowed to create a 3D texture in compressed formats.
+    TEST_F(CompressedTextureFormatsValidationTests, 3DTexture) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.depthOrArrayLayers = 4;
+            descriptor.dimension = wgpu::TextureDimension::e3D;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+
+    // Test that it is invalid to use numbers for a texture's width/height that are not multiples
+    // of the compressed block sizes.
+    TEST_F(CompressedTextureFormatsValidationTests, TextureSize) {
+        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+            // Test that the default size (120 x 120) is valid for all formats.
+            {
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                ASSERT_TRUE(descriptor.size.width % blockWidth == 0 &&
+                            descriptor.size.height % blockHeight == 0);
+                device.CreateTexture(&descriptor);
+            }
+
+            // Test that invalid width should cause an error. Note that if the block width of the
+            // compression type is even, we test that alignment to half the width is not sufficient.
+            {
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                descriptor.size.width =
+                    blockWidth % 2 == 0 ? blockWidth - (blockWidth / 2) : blockWidth - 1;
+                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            }
+
+            // Test that invalid width should cause an error. Note that if the block height of the
+            // compression type is even, we test that alignment to half the height is not
+            // sufficient.
+            {
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                descriptor.size.height =
+                    blockHeight % 2 == 0 ? blockHeight - (blockHeight / 2) : blockHeight - 1;
+                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+            }
+
+            // Test a working dimension based on some constant multipliers to the dimensions.
+            {
+                constexpr uint32_t kWidthMultiplier = 3;
+                constexpr uint32_t kHeightMultiplier = 8;
+                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+                descriptor.format = format;
+                descriptor.size.width = kWidthMultiplier * blockWidth;
+                descriptor.size.height = kHeightMultiplier * blockHeight;
+                device.CreateTexture(&descriptor);
+            }
+        }
+    }
+
+}  // namespace
diff --git a/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
new file mode 100644
index 0000000..3691726
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
@@ -0,0 +1,1002 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include <array>
+
+namespace {
+
+    class TextureViewValidationTest : public ValidationTest {};
+
+    constexpr uint32_t kWidth = 32u;
+    constexpr uint32_t kHeight = 32u;
+    constexpr uint32_t kDepth = 6u;
+    constexpr uint32_t kDefaultMipLevels = 6u;
+
+    constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture Create2DArrayTexture(wgpu::Device& device,
+                                       uint32_t arrayLayerCount,
+                                       uint32_t width = kWidth,
+                                       uint32_t height = kHeight,
+                                       uint32_t mipLevelCount = kDefaultMipLevels,
+                                       uint32_t sampleCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = arrayLayerCount;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::Texture Create3DTexture(wgpu::Device& device) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        descriptor.size = {kWidth, kHeight, kDepth};
+        descriptor.sampleCount = 1;
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::Texture Create1DTexture(wgpu::Device& device) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        descriptor.size = {kWidth, 1, 1};
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::Texture CreateDepthStencilTexture(wgpu::Device& device, wgpu::TextureFormat format) {
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.size = {kWidth, kHeight, kDepth};
+        descriptor.usage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.format = format;
+        return device.CreateTexture(&descriptor);
+    }
+
+    wgpu::TextureViewDescriptor CreateDefaultViewDescriptor(wgpu::TextureViewDimension dimension) {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.dimension = dimension;
+        descriptor.baseMipLevel = 0;
+        if (dimension != wgpu::TextureViewDimension::e1D) {
+            descriptor.mipLevelCount = kDefaultMipLevels;
+        }
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+        return descriptor;
+    }
+
+    // Test creating texture view on a 2D non-array texture
+    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2D) {
+        wgpu::Texture texture = Create2DArrayTexture(device, 1);
+
+        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+        // It is an error to create a view with zero 'arrayLayerCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.arrayLayerCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a view with zero 'mipLevelCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.mipLevelCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a 2D texture view on a 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to view a layer past the end of the texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a 1-layer 2D array texture view on a 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a 3D texture view on a 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e3D;
+            descriptor.arrayLayerCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+        // k..end.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
+
+            descriptor.baseMipLevel = 0;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = kDefaultMipLevels - 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = kDefaultMipLevels;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to make the mip level out of range.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.baseMipLevel = 0;
+            descriptor.mipLevelCount = kDefaultMipLevels + 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = 1;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = kDefaultMipLevels - 1;
+            descriptor.mipLevelCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = kDefaultMipLevels;
+            descriptor.mipLevelCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    // Test creating texture view on a 2D array texture
+    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2DArray) {
+        constexpr uint32_t kDefaultArrayLayers = 6;
+
+        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+        // It is an error to create a view with zero 'arrayLayerCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            descriptor.arrayLayerCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a view with zero 'mipLevelCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            descriptor.mipLevelCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a 2D texture view on a 2D array texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is OK to create a 2D array texture view on a 2D array texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.arrayLayerCount = kDefaultArrayLayers;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a 3D texture view on a 2D array texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e3D;
+            descriptor.arrayLayerCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a 1D texture view on a 2D array texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::e1D;
+            descriptor.arrayLayerCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+        // layers k..end.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+
+            descriptor.baseArrayLayer = 0;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = kDefaultArrayLayers;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error for the array layer range of the view to exceed that of the texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.baseArrayLayer = 0;
+            descriptor.arrayLayerCount = kDefaultArrayLayers + 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseArrayLayer = 1;
+            descriptor.arrayLayerCount = kDefaultArrayLayers;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+            descriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseArrayLayer = kDefaultArrayLayers;
+            descriptor.arrayLayerCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    // Test creating texture view on a 3D texture
+    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture3D) {
+        wgpu::Texture texture = Create3DTexture(device);
+
+        wgpu::TextureViewDescriptor base3DTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e3D);
+
+        // It is an error to create a view with zero 'arrayLayerCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.arrayLayerCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a view with zero 'mipLevelCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.mipLevelCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a 3D texture view on a 3D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a 1D/2D/2DArray/Cube/CubeArray texture view on a 3D texture.
+        {
+            wgpu::TextureViewDimension invalidDimensions[] = {
+                wgpu::TextureViewDimension::e1D,       wgpu::TextureViewDimension::e2D,
+                wgpu::TextureViewDimension::e2DArray,  wgpu::TextureViewDimension::Cube,
+                wgpu::TextureViewDimension::CubeArray,
+            };
+            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+                wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+                descriptor.dimension = dimension;
+                if (dimension == wgpu::TextureViewDimension::Cube ||
+                    dimension == wgpu::TextureViewDimension::CubeArray) {
+                    descriptor.arrayLayerCount = 6;
+                }
+                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            }
+        }
+
+        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+        // k..end.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
+
+            descriptor.baseMipLevel = 0;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = kDefaultMipLevels - 1;
+            texture.CreateView(&descriptor);
+            descriptor.baseMipLevel = kDefaultMipLevels;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to make the mip level out of range.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.baseMipLevel = 0;
+            descriptor.mipLevelCount = kDefaultMipLevels + 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = 1;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = kDefaultMipLevels - 1;
+            descriptor.mipLevelCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseMipLevel = kDefaultMipLevels;
+            descriptor.mipLevelCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+        // layers k..end. But baseArrayLayer must be 0, and arrayLayerCount must be 1 at most for 3D
+        // texture view.
+        {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+            descriptor.baseArrayLayer = 0;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+
+            descriptor.baseArrayLayer = 0;
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.arrayLayerCount = kDepth;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    // Test creating texture view on a 1D texture
+    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture1D) {
+        wgpu::Texture texture = Create1DTexture(device);
+
+        wgpu::TextureViewDescriptor base1DTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e1D);
+
+        // It is an error to create a view with zero 'arrayLayerCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+            descriptor.arrayLayerCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a view with zero 'mipLevelCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+            descriptor.mipLevelCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a 1D texture view on a 1D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a 2D/2DArray/Cube/CubeArray/3D texture view on a 1D texture.
+        {
+            wgpu::TextureViewDimension invalidDimensions[] = {
+                wgpu::TextureViewDimension::e2D,  wgpu::TextureViewDimension::e2DArray,
+                wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray,
+                wgpu::TextureViewDimension::e3D,
+            };
+            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+                wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+                descriptor.dimension = dimension;
+                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            }
+        }
+
+        // No tests for setting mip levels / array layer ranges because 1D textures can only have
+        // a single mip and layer.
+    }
+
+    // Test creating texture view on a multisampled 2D texture
+    TEST_F(TextureViewValidationTest, CreateTextureViewOnMultisampledTexture2D) {
+        wgpu::Texture texture =
+            Create2DArrayTexture(device, /* arrayLayerCount */ 1, kWidth, kHeight,
+                                 /* mipLevelCount */ 1, /* sampleCount */ 4);
+
+        // It is OK to create a 2D texture view on a multisampled 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = {};
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a 1-layer 2D array texture view on a multisampled 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = {};
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            descriptor.arrayLayerCount = 1;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a 1D texture view on a multisampled 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = {};
+            descriptor.dimension = wgpu::TextureViewDimension::e1D;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a 3D texture view on a multisampled 2D texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = {};
+            descriptor.dimension = wgpu::TextureViewDimension::e3D;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    // Using the "none" ("default") values validates the same as explicitly
+    // specifying the values they're supposed to default to.
+    // Variant for a 2D texture with more than 1 array layer.
+    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DArray) {
+        constexpr uint32_t kDefaultArrayLayers = 8;
+        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+        { texture.CreateView(); }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.format = wgpu::TextureFormat::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::R8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            texture.CreateView(&descriptor);
+            // Setting view dimension to 2D, its arrayLayer will default to 1. And view creation
+            // will success.
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            texture.CreateView(&descriptor);
+            // Setting view dimension to Cube, its arrayLayer will default to 6.
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = 2;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = 3;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            // Setting view dimension to CubeArray, its arrayLayer will default to
+            // size.depthOrArrayLayers (kDefaultArrayLayers) - baseArrayLayer.
+            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+            descriptor.baseArrayLayer = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.baseArrayLayer = 2;
+            texture.CreateView(&descriptor);
+            descriptor.baseArrayLayer = 3;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+
+            // Setting array layers to non-0 means the dimensionality will
+            // default to 2D so by itself it causes an error.
+            descriptor.arrayLayerCount = kDefaultArrayLayers;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            texture.CreateView(&descriptor);
+
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            texture.CreateView(&descriptor);
+        }
+    }
+
+    // Using the "none" ("default") values validates the same as explicitly
+    // specifying the values they're supposed to default to.
+    // Variant for a 2D texture with only 1 array layer.
+    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DNonArray) {
+        constexpr uint32_t kDefaultArrayLayers = 1;
+        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+        { texture.CreateView(); }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.format = wgpu::TextureFormat::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::R8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            texture.CreateView(&descriptor);
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            texture.CreateView(&descriptor);
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = kDefaultArrayLayers;
+            texture.CreateView(&descriptor);
+        }
+    }
+
+    // Using the "none" ("default") values validates the same as explicitly
+    // specifying the values they're supposed to default to.
+    // Variant for a 3D texture.
+    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults3D) {
+        wgpu::Texture texture = Create3DTexture(device);
+
+        { texture.CreateView(); }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.format = wgpu::TextureFormat::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&descriptor);
+            descriptor.format = wgpu::TextureFormat::R8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+            texture.CreateView(&descriptor);
+            descriptor.dimension = wgpu::TextureViewDimension::e3D;
+            texture.CreateView(&descriptor);
+            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+            descriptor.dimension = wgpu::TextureViewDimension::e2D;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = 1;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = 2;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+        {
+            wgpu::TextureViewDescriptor descriptor;
+            descriptor.mipLevelCount = kDefaultMipLevels;
+            texture.CreateView(&descriptor);
+            descriptor.arrayLayerCount = kDepth;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    // Test creating cube map texture view
+    TEST_F(TextureViewValidationTest, CreateCubeMapTextureView) {
+        constexpr uint32_t kDefaultArrayLayers = 16;
+
+        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+        // It is an error to create a view with zero 'arrayLayerCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            descriptor.arrayLayerCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a view with zero 'mipLevelCount'.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            descriptor.mipLevelCount = 0;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a cube map texture view with arrayLayerCount == 6.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            descriptor.arrayLayerCount = 6;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a cube map texture view with arrayLayerCount != 6.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            descriptor.arrayLayerCount = 3;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is OK to create a cube map array texture view with arrayLayerCount % 6 == 0.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+            descriptor.arrayLayerCount = 12;
+            texture.CreateView(&descriptor);
+        }
+
+        // It is an error to create a cube map array texture view with arrayLayerCount % 6 != 0.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+            descriptor.arrayLayerCount = 11;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a cube map texture view with width != height.
+        {
+            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::Cube;
+            descriptor.arrayLayerCount = 6;
+            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+        }
+
+        // It is an error to create a cube map array texture view with width != height.
+        {
+            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+            descriptor.arrayLayerCount = 12;
+            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+        }
+    }
+
+    // Test the format compatibility rules when creating a texture view.
+    TEST_F(TextureViewValidationTest, TextureViewFormatCompatibility) {
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.size.width = 4;
+        textureDesc.size.height = 4;
+        textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+        wgpu::TextureViewDescriptor viewDesc = {};
+
+        // It is an error to create an sRGB texture view from an RGB texture, without viewFormats.
+        {
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // It is an error to create an RGB texture view from an sRGB texture, without viewFormats.
+        {
+            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // It is an error to create a texture view with a depth-stencil format of an RGBA texture.
+        {
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // It is an error to create a texture view with a depth format of a depth-stencil texture.
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // It is invalid to create a texture view with a combined depth-stencil format if only
+        // the depth aspect is selected.
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // It is invalid to create a texture view with a combined depth-stencil format if only
+        // the stencil aspect is selected.
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // Regression test for crbug.com/1312780.
+        // viewFormat is not supported (Null backend does not support any optional features).
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.format = wgpu::TextureFormat::Depth24UnormStencil8;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc), testing::HasSubstr("Unsupported"));
+        }
+
+        // It is valid to create a texture view with a depth format of a depth-stencil texture
+        // if the depth only aspect is selected.
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+            texture.CreateView(&viewDesc);
+
+            viewDesc = {};
+        }
+
+        // Prep for testing a single view format in viewFormats.
+        wgpu::TextureFormat viewFormat;
+        textureDesc.viewFormats = &viewFormat;
+        textureDesc.viewFormatCount = 1;
+
+        // An aspect format is not a valid view format of a depth-stencil texture.
+        {
+            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            viewFormat = wgpu::TextureFormat::Depth24Plus;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+        }
+
+        // Test that a RGBA texture can be viewed as both RGBA and RGBASrgb, but not BGRA or
+        // BGRASrgb
+        {
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            viewFormat = wgpu::TextureFormat::RGBA8UnormSrgb;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // Test that a BGRASrgb texture can be viewed as both BGRA and BGRASrgb, but not RGBA or
+        // RGBASrgb
+        {
+            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+            viewFormat = wgpu::TextureFormat::BGRA8Unorm;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // Test an RGBA format may be viewed as RGBA (same)
+        {
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            viewFormat = wgpu::TextureFormat::RGBA8Unorm;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+
+        // Test that duplicate, and multiple view formats are allowed.
+        {
+            std::array<wgpu::TextureFormat, 5> viewFormats = {
+                wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Unorm,
+                wgpu::TextureFormat::RGBA8Unorm,     wgpu::TextureFormat::RGBA8UnormSrgb,
+                wgpu::TextureFormat::RGBA8Unorm,
+            };
+            textureDesc.viewFormats = viewFormats.data();
+            textureDesc.viewFormatCount = viewFormats.size();
+
+            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+            texture.CreateView(&viewDesc);
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+        }
+    }
+
+    // Test that it's valid to create a texture view from a destroyed texture
+    TEST_F(TextureViewValidationTest, DestroyCreateTextureView) {
+        wgpu::Texture texture = Create2DArrayTexture(device, 1);
+        wgpu::TextureViewDescriptor descriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+        texture.Destroy();
+        texture.CreateView(&descriptor);
+    }
+
+    // Test that the selected TextureAspects must exist in the texture format
+    TEST_F(TextureViewValidationTest, AspectMustExist) {
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.size = {1, 1, 1};
+        descriptor.usage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+        // Can select: All and DepthOnly from Depth32Float, but not StencilOnly
+        {
+            descriptor.format = wgpu::TextureFormat::Depth32Float;
+            wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+            wgpu::TextureViewDescriptor viewDescriptor = {};
+            viewDescriptor.aspect = wgpu::TextureAspect::All;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+        }
+
+        // Can select: All, DepthOnly, and StencilOnly from Depth24PlusStencil8
+        {
+            descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+            wgpu::TextureViewDescriptor viewDescriptor = {};
+            viewDescriptor.aspect = wgpu::TextureAspect::All;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+            texture.CreateView(&viewDescriptor);
+        }
+
+        // Can select: All from RGBA8Unorm
+        {
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+            wgpu::TextureViewDescriptor viewDescriptor = {};
+            viewDescriptor.aspect = wgpu::TextureAspect::All;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+
+            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+        }
+    }
+
+    class D24S8TextureViewValidationTests : public ValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 1;
+            return adapter.CreateDevice(&descriptor);
+        }
+    };
+
+    // Test that the selected TextureAspects must exist in the Depth24UnormStencil8 texture format
+    TEST_F(D24S8TextureViewValidationTests, AspectMustExist) {
+        wgpu::Texture texture =
+            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+        // Can select: All, DepthOnly, and StencilOnly from Depth24UnormStencil8
+        {
+            wgpu::TextureViewDescriptor viewDescriptor = {};
+            viewDescriptor.aspect = wgpu::TextureAspect::All;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+            texture.CreateView(&viewDescriptor);
+        }
+    }
+
+    // Test the format compatibility rules when creating a texture view.
+    TEST_F(D24S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+        wgpu::Texture texture =
+            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+        // It is an error to create a texture view in color format on a depth-stencil texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+    class D32S8TextureViewValidationTests : public ValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 1;
+            return adapter.CreateDevice(&descriptor);
+        }
+    };
+
+    // Test that the selected TextureAspects must exist in the Depth32FloatStencil8 texture format
+    TEST_F(D32S8TextureViewValidationTests, AspectMustExist) {
+        wgpu::Texture texture =
+            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+        // Can select: All, DepthOnly, and StencilOnly from Depth32FloatStencil8
+        {
+            wgpu::TextureViewDescriptor viewDescriptor = {};
+            viewDescriptor.aspect = wgpu::TextureAspect::All;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+            texture.CreateView(&viewDescriptor);
+
+            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+            texture.CreateView(&viewDescriptor);
+        }
+    }
+
+    // Test the format compatibility rules when creating a texture view.
+    TEST_F(D32S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+        wgpu::Texture texture =
+            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+        // It is an error to create a texture view in color format on a depth-stencil texture.
+        {
+            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
new file mode 100644
index 0000000..8189215
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
@@ -0,0 +1,103 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+namespace {
+
+    class ToggleValidationTest : public ValidationTest {};
+
+    // Tests querying the detail of a toggle from dawn::native::InstanceBase works correctly.
+    TEST_F(ToggleValidationTest, QueryToggleInfo) {
+        // Query with a valid toggle name
+        {
+            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
+            const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kValidToggleName);
+            ASSERT_NE(nullptr, toggleInfo);
+            ASSERT_NE(nullptr, toggleInfo->name);
+            ASSERT_NE(nullptr, toggleInfo->description);
+            ASSERT_NE(nullptr, toggleInfo->url);
+        }
+
+        // Query with an invalid toggle name
+        {
+            const char* kInvalidToggleName = "!@#$%^&*";
+            const dawn::native::ToggleInfo* toggleInfo =
+                instance->GetToggleInfo(kInvalidToggleName);
+            ASSERT_EQ(nullptr, toggleInfo);
+        }
+    }
+
+    // Tests overriding toggles when creating a device works correctly.
+    TEST_F(ToggleValidationTest, OverrideToggleUsage) {
+        // Create device with a valid name of a toggle
+        {
+            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+            descriptor.nextInChain = &togglesDesc;
+            togglesDesc.forceEnabledToggles = &kValidToggleName;
+            togglesDesc.forceEnabledTogglesCount = 1;
+
+            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+            bool validToggleExists = false;
+            for (const char* toggle : toggleNames) {
+                if (strcmp(toggle, kValidToggleName) == 0) {
+                    validToggleExists = true;
+                }
+            }
+            ASSERT_EQ(validToggleExists, true);
+        }
+
+        // Create device with an invalid toggle name
+        {
+            const char* kInvalidToggleName = "!@#$%^&*";
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+            descriptor.nextInChain = &togglesDesc;
+            togglesDesc.forceEnabledToggles = &kInvalidToggleName;
+            togglesDesc.forceEnabledTogglesCount = 1;
+
+            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+            bool InvalidToggleExists = false;
+            for (const char* toggle : toggleNames) {
+                if (strcmp(toggle, kInvalidToggleName) == 0) {
+                    InvalidToggleExists = true;
+                }
+            }
+            ASSERT_EQ(InvalidToggleExists, false);
+        }
+    }
+
+    TEST_F(ToggleValidationTest, TurnOffVsyncWithToggle) {
+        const char* kValidToggleName = "turn_off_vsync";
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledToggles = &kValidToggleName;
+        togglesDesc.forceEnabledTogglesCount = 1;
+
+        WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+        std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+        bool validToggleExists = false;
+        for (const char* toggle : toggleNames) {
+            if (strcmp(toggle, kValidToggleName) == 0) {
+                validToggleExists = true;
+            }
+        }
+        ASSERT_EQ(validToggleExists, true);
+    }
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
new file mode 100644
index 0000000..371f06c3
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
@@ -0,0 +1,136 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    using testing::HasSubstr;
+}  // anonymous namespace
+
+class UnsafeAPIValidationTest : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        const char* toggle = "disallow_unsafe_apis";
+        togglesDesc.forceEnabledToggles = &toggle;
+        togglesDesc.forceEnabledTogglesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Check that pipeline overridable constants are disallowed as part of unsafe APIs.
+// TODO(dawn:1041) Remove when implementation for all backend is added
+TEST_F(UnsafeAPIValidationTest, PipelineOverridableConstants) {
+    // Create the dummy compute pipeline.
+    wgpu::ComputePipelineDescriptor pipelineDescBase;
+    pipelineDescBase.compute.entryPoint = "main";
+
+    // Control case: shader without overridable constant is allowed.
+    {
+        wgpu::ComputePipelineDescriptor pipelineDesc = pipelineDescBase;
+        pipelineDesc.compute.module =
+            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+
+        device.CreateComputePipeline(&pipelineDesc);
+    }
+
+    // Error case: shader with overridable constant with default value
+    {
+        ASSERT_DEVICE_ERROR(utils::CreateShaderModule(device, R"(
+@id(1000) override c0: u32 = 1u;
+@id(1000) override c1: u32;
+
+@stage(compute) @workgroup_size(1) fn main() {
+    _ = c0;
+    _ = c1;
+})"));
+    }
+
+    // Error case: pipeline stage with constant entry is disallowed
+    {
+        wgpu::ComputePipelineDescriptor pipelineDesc = pipelineDescBase;
+        pipelineDesc.compute.module =
+            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+        std::vector<wgpu::ConstantEntry> constants{{nullptr, "c", 1u}};
+        pipelineDesc.compute.constants = constants.data();
+        pipelineDesc.compute.constantCount = constants.size();
+        ASSERT_DEVICE_ERROR(device.CreateComputePipeline(&pipelineDesc));
+    }
+}
+
+class UnsafeQueryAPIValidationTest : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[2] = {wgpu::FeatureName::PipelineStatisticsQuery,
+                                                 wgpu::FeatureName::TimestampQuery};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 2;
+
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        const char* toggle = "disallow_unsafe_apis";
+        togglesDesc.forceEnabledToggles = &toggle;
+        togglesDesc.forceEnabledTogglesCount = 1;
+
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Check that pipeline statistics query are disallowed.
+TEST_F(UnsafeQueryAPIValidationTest, PipelineStatisticsDisallowed) {
+    wgpu::QuerySetDescriptor descriptor;
+    descriptor.count = 1;
+
+    // Control case: occlusion query creation is allowed.
+    {
+        descriptor.type = wgpu::QueryType::Occlusion;
+        device.CreateQuerySet(&descriptor);
+    }
+
+    // Error case: pipeline statistics query creation is disallowed.
+    {
+        descriptor.type = wgpu::QueryType::PipelineStatistics;
+        std::vector<wgpu::PipelineStatisticName> pipelineStatistics = {
+            wgpu::PipelineStatisticName::VertexShaderInvocations};
+        descriptor.pipelineStatistics = pipelineStatistics.data();
+        descriptor.pipelineStatisticsCount = pipelineStatistics.size();
+        ASSERT_DEVICE_ERROR(device.CreateQuerySet(&descriptor));
+    }
+}
+
+// Check timestamp queries are disallowed.
+TEST_F(UnsafeQueryAPIValidationTest, TimestampQueryDisallowed) {
+    wgpu::QuerySetDescriptor descriptor;
+    descriptor.count = 1;
+
+    // Control case: occlusion query creation is allowed.
+    {
+        descriptor.type = wgpu::QueryType::Occlusion;
+        device.CreateQuerySet(&descriptor);
+    }
+
+    // Error case: timestamp query creation is disallowed.
+    {
+        descriptor.type = wgpu::QueryType::Timestamp;
+        ASSERT_DEVICE_ERROR(device.CreateQuerySet(&descriptor));
+    }
+}
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.cpp b/src/dawn/tests/unittests/validation/ValidationTest.cpp
new file mode 100644
index 0000000..716b8d1
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ValidationTest.cpp
@@ -0,0 +1,278 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/NullBackend.h"
+#include "dawn/tests/ToggleParser.h"
+#include "dawn/utils/WireHelper.h"
+#include "dawn/webgpu.h"
+
+#include <algorithm>
+
+namespace {
+
+    bool gUseWire = false;
+    std::string gWireTraceDir = "";
+    std::unique_ptr<ToggleParser> gToggleParser = nullptr;
+
+}  // namespace
+
+void InitDawnValidationTestEnvironment(int argc, char** argv) {
+    gToggleParser = std::make_unique<ToggleParser>();
+
+    for (int i = 1; i < argc; ++i) {
+        if (strcmp("-w", argv[i]) == 0 || strcmp("--use-wire", argv[i]) == 0) {
+            gUseWire = true;
+            continue;
+        }
+
+        constexpr const char kWireTraceDirArg[] = "--wire-trace-dir=";
+        size_t argLen = sizeof(kWireTraceDirArg) - 1;
+        if (strncmp(argv[i], kWireTraceDirArg, argLen) == 0) {
+            gWireTraceDir = argv[i] + argLen;
+            continue;
+        }
+
+        if (gToggleParser->ParseEnabledToggles(argv[i])) {
+            continue;
+        }
+
+        if (gToggleParser->ParseDisabledToggles(argv[i])) {
+            continue;
+        }
+
+        if (strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+            dawn::InfoLog()
+                << "\n\nUsage: " << argv[0]
+                << " [GTEST_FLAGS...] [-w]\n"
+                   "    [--enable-toggles=toggles] [--disable-toggles=toggles]\n"
+                   "  -w, --use-wire: Run the tests through the wire (defaults to no wire)\n"
+                   "  --enable-toggles: Comma-delimited list of Dawn toggles to enable.\n"
+                   "    ex.) skip_validation,disable_robustness,turn_off_vsync\n"
+                   "  --disable-toggles: Comma-delimited list of Dawn toggles to disable\n";
+            continue;
+        }
+
+        // Skip over args that look like they're for Googletest.
+        constexpr const char kGtestArgPrefix[] = "--gtest_";
+        if (strncmp(kGtestArgPrefix, argv[i], sizeof(kGtestArgPrefix) - 1) == 0) {
+            continue;
+        }
+
+        dawn::WarningLog() << " Unused argument: " << argv[i];
+    }
+}
+
+ValidationTest::ValidationTest()
+    : mWireHelper(utils::CreateWireHelper(gUseWire, gWireTraceDir.c_str())) {
+}
+
+void ValidationTest::SetUp() {
+    instance = std::make_unique<dawn::native::Instance>();
+    instance->DiscoverDefaultAdapters();
+
+    std::vector<dawn::native::Adapter> adapters = instance->GetAdapters();
+
+    // Validation tests run against the null backend, find the corresponding adapter
+    bool foundNullAdapter = false;
+    for (auto& currentAdapter : adapters) {
+        wgpu::AdapterProperties adapterProperties;
+        currentAdapter.GetProperties(&adapterProperties);
+
+        if (adapterProperties.backendType == wgpu::BackendType::Null) {
+            adapter = currentAdapter;
+            foundNullAdapter = true;
+            break;
+        }
+    }
+
+    ASSERT(foundNullAdapter);
+
+    std::tie(device, backendDevice) = mWireHelper->RegisterDevice(CreateTestDevice());
+    device.SetUncapturedErrorCallback(ValidationTest::OnDeviceError, this);
+    device.SetDeviceLostCallback(ValidationTest::OnDeviceLost, this);
+
+    std::string traceName =
+        std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name();
+    mWireHelper->BeginWireTrace(traceName.c_str());
+}
+
+ValidationTest::~ValidationTest() {
+    // We need to destroy Dawn objects before setting the procs to null otherwise the dawn*Release
+    // will call a nullptr
+    device = wgpu::Device();
+    mWireHelper.reset();
+}
+
+void ValidationTest::TearDown() {
+    FlushWire();
+    ASSERT_FALSE(mExpectError);
+
+    if (device) {
+        EXPECT_EQ(mLastWarningCount,
+                  dawn::native::GetDeprecationWarningCountForTesting(backendDevice));
+    }
+
+    // The device will be destroyed soon after, so we want to set the expectation.
+    ExpectDeviceDestruction();
+}
+
+void ValidationTest::StartExpectDeviceError(testing::Matcher<std::string> errorMatcher) {
+    mExpectError = true;
+    mError = false;
+    mErrorMatcher = errorMatcher;
+}
+
+void ValidationTest::StartExpectDeviceError() {
+    StartExpectDeviceError(testing::_);
+}
+
+bool ValidationTest::EndExpectDeviceError() {
+    mExpectError = false;
+    mErrorMatcher = testing::_;
+    return mError;
+}
+std::string ValidationTest::GetLastDeviceErrorMessage() const {
+    return mDeviceErrorMessage;
+}
+
+void ValidationTest::ExpectDeviceDestruction() {
+    mExpectDestruction = true;
+}
+
+wgpu::Device ValidationTest::RegisterDevice(WGPUDevice backendDevice) {
+    return mWireHelper->RegisterDevice(backendDevice).first;
+}
+
+bool ValidationTest::UsesWire() const {
+    return gUseWire;
+}
+
+void ValidationTest::FlushWire() {
+    EXPECT_TRUE(mWireHelper->FlushClient());
+    EXPECT_TRUE(mWireHelper->FlushServer());
+}
+
+void ValidationTest::WaitForAllOperations(const wgpu::Device& device) {
+    bool done = false;
+    device.GetQueue().OnSubmittedWorkDone(
+        0u, [](WGPUQueueWorkDoneStatus, void* userdata) { *static_cast<bool*>(userdata) = true; },
+        &done);
+
+    // Force the currently submitted operations to completed.
+    while (!done) {
+        device.Tick();
+        FlushWire();
+    }
+
+    // TODO(cwallez@chromium.org): It's not clear why we need this additional tick. Investigate it
+    // once WebGPU has defined the ordering of callbacks firing.
+    device.Tick();
+    FlushWire();
+}
+
+bool ValidationTest::HasToggleEnabled(const char* toggle) const {
+    auto toggles = dawn::native::GetTogglesUsed(backendDevice);
+    return std::find_if(toggles.begin(), toggles.end(), [toggle](const char* name) {
+               return strcmp(toggle, name) == 0;
+           }) != toggles.end();
+}
+
+wgpu::SupportedLimits ValidationTest::GetSupportedLimits() {
+    WGPUSupportedLimits supportedLimits;
+    supportedLimits.nextInChain = nullptr;
+    dawn::native::GetProcs().deviceGetLimits(backendDevice, &supportedLimits);
+    return *reinterpret_cast<wgpu::SupportedLimits*>(&supportedLimits);
+}
+
+WGPUDevice ValidationTest::CreateTestDevice() {
+    // Disabled disallowing unsafe APIs so we can test them.
+    std::vector<const char*> forceEnabledToggles;
+    std::vector<const char*> forceDisabledToggles = {"disallow_unsafe_apis"};
+
+    for (const std::string& toggle : gToggleParser->GetEnabledToggles()) {
+        forceEnabledToggles.push_back(toggle.c_str());
+    }
+
+    for (const std::string& toggle : gToggleParser->GetDisabledToggles()) {
+        forceDisabledToggles.push_back(toggle.c_str());
+    }
+
+    wgpu::DeviceDescriptor deviceDescriptor;
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+    deviceDescriptor.nextInChain = &togglesDesc;
+
+    togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+    togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+    togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+    togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+
+    return adapter.CreateDevice(&deviceDescriptor);
+}
+
+// static
+void ValidationTest::OnDeviceError(WGPUErrorType type, const char* message, void* userdata) {
+    ASSERT(type != WGPUErrorType_NoError);
+    auto self = static_cast<ValidationTest*>(userdata);
+    self->mDeviceErrorMessage = message;
+
+    ASSERT_TRUE(self->mExpectError) << "Got unexpected device error: " << message;
+    ASSERT_FALSE(self->mError) << "Got two errors in expect block";
+    if (self->mExpectError) {
+        ASSERT_THAT(message, self->mErrorMatcher);
+    }
+    self->mError = true;
+}
+
+void ValidationTest::OnDeviceLost(WGPUDeviceLostReason reason,
+                                  const char* message,
+                                  void* userdata) {
+    auto self = static_cast<ValidationTest*>(userdata);
+    if (self->mExpectDestruction) {
+        EXPECT_EQ(reason, WGPUDeviceLostReason_Destroyed);
+        return;
+    }
+    ADD_FAILURE() << "Device lost during test: " << message;
+    ASSERT(false);
+}
+
+ValidationTest::DummyRenderPass::DummyRenderPass(const wgpu::Device& device)
+    : attachmentFormat(wgpu::TextureFormat::RGBA8Unorm), width(400), height(400) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = attachmentFormat;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    attachment = device.CreateTexture(&descriptor);
+
+    wgpu::TextureView view = attachment.CreateView();
+    mColorAttachment.view = view;
+    mColorAttachment.resolveTarget = nullptr;
+    mColorAttachment.clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+    mColorAttachment.loadOp = wgpu::LoadOp::Clear;
+    mColorAttachment.storeOp = wgpu::StoreOp::Store;
+
+    colorAttachmentCount = 1;
+    colorAttachments = &mColorAttachment;
+    depthStencilAttachment = nullptr;
+}
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.h b/src/dawn/tests/unittests/validation/ValidationTest.h
new file mode 100644
index 0000000..989de41
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/ValidationTest.h
@@ -0,0 +1,159 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_UNITTESTS_VALIDATIONTEST_H_
+#define TESTS_UNITTESTS_VALIDATIONTEST_H_
+
+#include "dawn/common/Log.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+// Argument helpers to allow macro overriding.
+#define UNIMPLEMENTED_MACRO(...) UNREACHABLE()
+#define GET_3RD_ARG_HELPER_(_1, _2, NAME, ...) NAME
+#define GET_3RD_ARG_(args) GET_3RD_ARG_HELPER_ args
+
+// Overloaded to allow further validation of the error messages given an error is expected.
+// Especially useful to verify that the expected errors are occuring, not just any error.
+//
+// Example usages:
+//   1 Argument Case:
+//     ASSERT_DEVICE_ERROR(FunctionThatExpectsError());
+//
+//   2 Argument Case:
+//     ASSERT_DEVICE_ERROR(FunctionThatHasLongError(), HasSubstr("partial match"))
+//     ASSERT_DEVICE_ERROR(FunctionThatHasShortError(), Eq("exact match"));
+#define ASSERT_DEVICE_ERROR(...)                                                         \
+    GET_3RD_ARG_((__VA_ARGS__, ASSERT_DEVICE_ERROR_IMPL_2_, ASSERT_DEVICE_ERROR_IMPL_1_, \
+                  UNIMPLEMENTED_MACRO))                                                  \
+    (__VA_ARGS__)
+
+#define ASSERT_DEVICE_ERROR_IMPL_1_(statement)                  \
+    StartExpectDeviceError();                                   \
+    statement;                                                  \
+    FlushWire();                                                \
+    if (!EndExpectDeviceError()) {                              \
+        FAIL() << "Expected device error in:\n " << #statement; \
+    }                                                           \
+    do {                                                        \
+    } while (0)
+
+#define ASSERT_DEVICE_ERROR_IMPL_2_(statement, matcher)         \
+    StartExpectDeviceError(matcher);                            \
+    statement;                                                  \
+    FlushWire();                                                \
+    if (!EndExpectDeviceError()) {                              \
+        FAIL() << "Expected device error in:\n " << #statement; \
+    }                                                           \
+    do {                                                        \
+    } while (0)
+
+// Skip a test when the given condition is satisfied.
+#define DAWN_SKIP_TEST_IF(condition)                            \
+    do {                                                        \
+        if (condition) {                                        \
+            dawn::InfoLog() << "Test skipped: " #condition "."; \
+            GTEST_SKIP();                                       \
+            return;                                             \
+        }                                                       \
+    } while (0)
+
+#define EXPECT_DEPRECATION_WARNINGS(statement, n)                                                  \
+    do {                                                                                           \
+        FlushWire();                                                                               \
+        size_t warningsBefore = dawn::native::GetDeprecationWarningCountForTesting(backendDevice); \
+        EXPECT_EQ(mLastWarningCount, warningsBefore);                                              \
+        statement;                                                                                 \
+        FlushWire();                                                                               \
+        size_t warningsAfter = dawn::native::GetDeprecationWarningCountForTesting(backendDevice);  \
+        EXPECT_EQ(warningsAfter, warningsBefore + n);                                              \
+        mLastWarningCount = warningsAfter;                                                         \
+    } while (0)
+#define EXPECT_DEPRECATION_WARNING(statement) EXPECT_DEPRECATION_WARNINGS(statement, 1)
+
+namespace utils {
+    class WireHelper;
+}  // namespace utils
+
+void InitDawnValidationTestEnvironment(int argc, char** argv);
+
+class ValidationTest : public testing::Test {
+  public:
+    ValidationTest();
+    ~ValidationTest() override;
+
+    void SetUp() override;
+    void TearDown() override;
+
+    void StartExpectDeviceError(testing::Matcher<std::string> errorMatcher);
+    void StartExpectDeviceError();
+    bool EndExpectDeviceError();
+    std::string GetLastDeviceErrorMessage() const;
+
+    void ExpectDeviceDestruction();
+
+    wgpu::Device RegisterDevice(WGPUDevice backendDevice);
+
+    bool UsesWire() const;
+
+    void FlushWire();
+    void WaitForAllOperations(const wgpu::Device& device);
+
+    // Helper functions to create objects to test validation.
+
+    struct DummyRenderPass : public wgpu::RenderPassDescriptor {
+      public:
+        DummyRenderPass(const wgpu::Device& device);
+        wgpu::Texture attachment;
+        wgpu::TextureFormat attachmentFormat;
+        uint32_t width;
+        uint32_t height;
+
+      private:
+        wgpu::RenderPassColorAttachment mColorAttachment;
+    };
+
+    bool HasToggleEnabled(const char* toggle) const;
+
+    // TODO(crbug.com/dawn/689): Use limits returned from the wire
+    // This is implemented here because tests need to always query
+    // the |backendDevice| since limits are not implemented in the wire.
+    wgpu::SupportedLimits GetSupportedLimits();
+
+  protected:
+    virtual WGPUDevice CreateTestDevice();
+
+    std::unique_ptr<dawn::native::Instance> instance;
+    dawn::native::Adapter adapter;
+    wgpu::Device device;
+    WGPUDevice backendDevice;
+
+    size_t mLastWarningCount = 0;
+
+  private:
+    std::unique_ptr<utils::WireHelper> mWireHelper;
+
+    static void OnDeviceError(WGPUErrorType type, const char* message, void* userdata);
+    static void OnDeviceLost(WGPUDeviceLostReason reason, const char* message, void* userdata);
+    std::string mDeviceErrorMessage;
+    bool mExpectError = false;
+    bool mError = false;
+    testing::Matcher<std::string> mErrorMatcher;
+    bool mExpectDestruction = false;
+};
+
+#endif  // TESTS_UNITTESTS_VALIDATIONTEST_H_
diff --git a/src/dawn/tests/unittests/validation/VertexBufferValidationTests.cpp b/src/dawn/tests/unittests/validation/VertexBufferValidationTests.cpp
new file mode 100644
index 0000000..d26b5d5
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/VertexBufferValidationTests.cpp
@@ -0,0 +1,846 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class VertexBufferValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
+
+        // dummy vertex shader module
+        vsModule = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+            })");
+        fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+            })");
+    }
+
+    wgpu::Buffer MakeVertexBuffer() {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 256;
+        descriptor.usage = wgpu::BufferUsage::Vertex;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    wgpu::ShaderModule MakeVertexShader(unsigned int bufferCount) {
+        std::ostringstream vs;
+        vs << "@stage(vertex) fn main(\n";
+        for (unsigned int i = 0; i < bufferCount; ++i) {
+            // TODO(cwallez@chromium.org): remove this special handling of 0 once Tint supports
+            // trailing commas in argument lists.
+            if (i != 0) {
+                vs << ", ";
+            }
+            vs << "@location(" << i << ") a_position" << i << " : vec3<f32>\n";
+        }
+        vs << ") -> @builtin(position) vec4<f32> {";
+
+        vs << "return vec4<f32>(";
+        for (unsigned int i = 0; i < bufferCount; ++i) {
+            vs << "a_position" << i;
+            if (i != bufferCount - 1) {
+                vs << " + ";
+            }
+        }
+        vs << ", 1.0);";
+
+        vs << "}\n";
+
+        return utils::CreateShaderModule(device, vs.str().c_str());
+    }
+
+    wgpu::RenderPipeline MakeRenderPipeline(const wgpu::ShaderModule& vsModule,
+                                            const utils::ComboVertexState& state) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        descriptor.vertex.bufferCount = state.vertexBufferCount;
+        descriptor.vertex.buffers = &state.cVertexBuffers[0];
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    wgpu::RenderPipeline MakeRenderPipeline(const wgpu::ShaderModule& vsModule,
+                                            unsigned int bufferCount) {
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.cFragment.module = fsModule;
+
+        for (unsigned int i = 0; i < bufferCount; ++i) {
+            descriptor.cBuffers[i].attributeCount = 1;
+            descriptor.cBuffers[i].attributes = &descriptor.cAttributes[i];
+            descriptor.cAttributes[i].shaderLocation = i;
+            descriptor.cAttributes[i].format = wgpu::VertexFormat::Float32x3;
+        }
+        descriptor.vertex.bufferCount = bufferCount;
+
+        return device.CreateRenderPipeline(&descriptor);
+    }
+
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+};
+
+// Check that vertex buffers still count as bound if we switch the pipeline.
+TEST_F(VertexBufferValidationTest, VertexBuffersInheritedBetweenPipelines) {
+    DummyRenderPass renderPass(device);
+    wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
+    wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
+
+    wgpu::RenderPipeline pipeline2 = MakeRenderPipeline(vsModule2, 2);
+    wgpu::RenderPipeline pipeline1 = MakeRenderPipeline(vsModule1, 1);
+
+    wgpu::Buffer vertexBuffer1 = MakeVertexBuffer();
+    wgpu::Buffer vertexBuffer2 = MakeVertexBuffer();
+
+    // Check failure when vertex buffer is not set
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.Draw(3);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Check success when vertex buffer is inherited from previous pipeline
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer1);
+        pass.SetVertexBuffer(1, vertexBuffer2);
+        pass.Draw(3);
+        pass.SetPipeline(pipeline1);
+        pass.Draw(3);
+        pass.End();
+    }
+    encoder.Finish();
+}
+
+// Check that vertex buffers that are set are reset between render passes.
+TEST_F(VertexBufferValidationTest, VertexBuffersNotInheritedBetweenRenderPasses) {
+    DummyRenderPass renderPass(device);
+    wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
+    wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
+
+    wgpu::RenderPipeline pipeline2 = MakeRenderPipeline(vsModule2, 2);
+    wgpu::RenderPipeline pipeline1 = MakeRenderPipeline(vsModule1, 1);
+
+    wgpu::Buffer vertexBuffer1 = MakeVertexBuffer();
+    wgpu::Buffer vertexBuffer2 = MakeVertexBuffer();
+
+    // Check success when vertex buffer is set for each render pass
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer1);
+        pass.SetVertexBuffer(1, vertexBuffer2);
+        pass.Draw(3);
+        pass.End();
+    }
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer1);
+        pass.Draw(3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Check failure because vertex buffer is not inherited in second subpass
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer1);
+        pass.SetVertexBuffer(1, vertexBuffer2);
+        pass.Draw(3);
+        pass.End();
+    }
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.Draw(3);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Check validation of the vertex buffer slot for OOB.
+TEST_F(VertexBufferValidationTest, VertexBufferSlotValidation) {
+    wgpu::Buffer buffer = MakeVertexBuffer();
+
+    DummyRenderPass renderPass(device);
+
+    // Control case: using the last vertex buffer slot in render passes is ok.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(kMaxVertexBuffers - 1, buffer, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case: using past the last vertex buffer slot in render pass fails.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(kMaxVertexBuffers, buffer, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Control case: using the last vertex buffer slot in render bundles is ok.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(kMaxVertexBuffers - 1, buffer, 0);
+        encoder.Finish();
+    }
+
+    // Error case: using past the last vertex buffer slot in render bundle fails.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(kMaxVertexBuffers, buffer, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that for OOB validation of vertex buffer offset and size.
+TEST_F(VertexBufferValidationTest, VertexBufferOffsetOOBValidation) {
+    wgpu::Buffer buffer = MakeVertexBuffer();
+
+    DummyRenderPass renderPass(device);
+    // Control case, using the full buffer, with or without an explicit size is valid.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        // Explicit size
+        pass.SetVertexBuffer(0, buffer, 0, 256);
+        // Implicit size
+        pass.SetVertexBuffer(0, buffer, 0, wgpu::kWholeSize);
+        pass.SetVertexBuffer(0, buffer, 256 - 4, wgpu::kWholeSize);
+        pass.SetVertexBuffer(0, buffer, 4, wgpu::kWholeSize);
+        // Implicit size of zero
+        pass.SetVertexBuffer(0, buffer, 256, wgpu::kWholeSize);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Bad case, offset + size is larger than the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, buffer, 4, 256);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Bad case, size is 0 but the offset is larger than the buffer
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, buffer, 256 + 4, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Control case, using the full buffer, with or without an explicit size is valid.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        // Explicit size
+        encoder.SetVertexBuffer(0, buffer, 0, 256);
+        // Implicit size
+        encoder.SetVertexBuffer(0, buffer, 0, wgpu::kWholeSize);
+        encoder.SetVertexBuffer(0, buffer, 256 - 4, wgpu::kWholeSize);
+        encoder.SetVertexBuffer(0, buffer, 4, wgpu::kWholeSize);
+        // Implicit size of zero
+        encoder.SetVertexBuffer(0, buffer, 256, wgpu::kWholeSize);
+        encoder.Finish();
+    }
+
+    // Bad case, offset + size is larger than the buffer
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(0, buffer, 4, 256);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Bad case, size is 0 but the offset is larger than the buffer
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(0, buffer, 256 + 4, 0);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Check that the vertex buffer must have the Vertex usage.
+TEST_F(VertexBufferValidationTest, InvalidUsage) {
+    wgpu::Buffer vertexBuffer = MakeVertexBuffer();
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 0, 0});
+
+    DummyRenderPass renderPass(device);
+    // Control case: using the vertex buffer is valid.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.End();
+        encoder.Finish();
+    }
+    // Error case: using the index buffer is an error.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, indexBuffer);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    utils::ComboRenderBundleEncoderDescriptor renderBundleDesc = {};
+    renderBundleDesc.colorFormatsCount = 1;
+    renderBundleDesc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
+    // Control case: using the vertex buffer is valid.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(0, vertexBuffer);
+        encoder.Finish();
+    }
+    // Error case: using the index buffer is an error.
+    {
+        wgpu::RenderBundleEncoder encoder = device.CreateRenderBundleEncoder(&renderBundleDesc);
+        encoder.SetVertexBuffer(0, indexBuffer);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Check the alignment constraint on the index buffer offset.
+TEST_F(VertexBufferValidationTest, OffsetAlignment) {
+    wgpu::Buffer vertexBuffer = MakeVertexBuffer();
+
+    DummyRenderPass renderPass(device);
+    // Control cases: vertex buffer offset is a multiple of 4
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, vertexBuffer, 0);
+        pass.SetVertexBuffer(0, vertexBuffer, 4);
+        pass.SetVertexBuffer(0, vertexBuffer, 12);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Error case: vertex buffer offset isn't a multiple of 4
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetVertexBuffer(0, vertexBuffer, 2);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Check vertex buffer stride requirements for draw command.
+TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertex) {
+    DummyRenderPass renderPass(device);
+
+    // Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
+    // The last element doesn't have the full stride size
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 28;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    wgpu::Buffer vertexBuffer = device.CreateBuffer(&descriptor);
+
+    // Vertex attribute offset is 0
+    wgpu::RenderPipeline pipeline1;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Vertex;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 0;
+
+        pipeline1 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Vertex attribute offset is 4
+    wgpu::RenderPipeline pipeline2;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Vertex;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 4;
+
+        pipeline2 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Control case: draw 3 elements, 3 * 8 = 24 <= 28, is valid anyway
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 elements with firstVertex == 1, (2 + 1) * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3, 0, 1, 0);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 elements with offset == 4, 4 + 3 * 8 = 24 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 4 elements, 4 * 8 = 32 > 28
+    // But the last element does not require to have the full stride size
+    // So 3 * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(4);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid: draw 4 elements with firstVertex == 1
+    // It requires a buffer with size of (3 + 1) * 8 + 4 = 36 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(4, 0, 1, 0);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Invalid: draw 4 elements with offset == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Valid: stride count == 0
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(0);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid: stride count == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(0, 0, 4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Check instance buffer stride requirements with instanced attributes for draw command.
+TEST_F(VertexBufferValidationTest, DrawStrideLimitsInstance) {
+    DummyRenderPass renderPass(device);
+
+    // Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
+    // The last element doesn't have the full stride size
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 28;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    wgpu::Buffer vertexBuffer = device.CreateBuffer(&descriptor);
+
+    // Vertex attribute offset is 0
+    wgpu::RenderPipeline pipeline1;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Instance;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 0;
+
+        pipeline1 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Vertex attribute offset is 4
+    wgpu::RenderPipeline pipeline2;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Instance;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 4;
+
+        pipeline2 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Control case: draw 3 instances, 3 * 8 = 24 <= 28, is valid anyway
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 instances with firstInstance == 1, (2 + 1) * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 3, 0, 1);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 instances with offset == 4, 4 + 3 * 8 = 24 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 4 instances, 4 * 8 = 32 > 28
+    // But the last element does not require to have the full stride size
+    // So 3 * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 4);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid: draw 4 instances with firstInstance == 1
+    // It requires a buffer with size of (3 + 1) * 8 + 4 = 36 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 4, 0, 1);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Invalid: draw 4 instances with offset == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Valid: stride count == 0
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 0);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid, stride count == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(1, 0, 0, 4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Check vertex buffer stride requirements with instanced attributes for draw indexed command.
+TEST_F(VertexBufferValidationTest, DrawIndexedStrideLimitsInstance) {
+    DummyRenderPass renderPass(device);
+
+    // Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
+    // The last element doesn't have the full stride size
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 28;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    wgpu::Buffer vertexBuffer = device.CreateBuffer(&descriptor);
+
+    wgpu::Buffer indexBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
+
+    // Vertex attribute offset is 0
+    wgpu::RenderPipeline pipeline1;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Instance;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 0;
+
+        pipeline1 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Vertex attribute offset is 4
+    wgpu::RenderPipeline pipeline2;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 8;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Instance;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].offset = 4;
+
+        pipeline2 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Control case: draw 3 instances, 3 * 8 = 24 <= 28, is valid anyway
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.DrawIndexed(3, 3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 instances with firstInstance == 1, (2 + 1) * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 3, 0, 1);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 3 instances with offset == 4, 4 + 3 * 8 = 24 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 3);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Valid: draw 4 instances, 4 * 8 = 32 > 28
+    // But the last element does not require to have the full stride size
+    // So 3 * 8 + 4 = 28 <= 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 4);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid: draw 4 instances with firstInstance == 1
+    // It requires a buffer with size of (3 + 1) * 8 + 4 = 36 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 4, 0, 1);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Invalid: draw 4 instances with offset == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+
+    // Valid: stride count == 0
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(3, 0);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid, stride count == 4
+    // It requires a buffer with size of 4 + 3 * 8 + 4 = 32 > 28
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3, 0, 0, 4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Check last stride is computed correctly for vertex buffer with multiple attributes.
+TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertexMultipleAttributes) {
+    DummyRenderPass renderPass(device);
+
+    // Create a buffer of size 44, array stride size = 12
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 44;
+    descriptor.usage = wgpu::BufferUsage::Vertex;
+    wgpu::Buffer vertexBuffer = device.CreateBuffer(&descriptor);
+
+    // lastStride = attribute[1].offset + sizeof(attribute[1].format) = 8
+    wgpu::RenderPipeline pipeline1;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 12;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Vertex;
+        state.cVertexBuffers[0].attributeCount = 2;
+        state.cAttributes[0].format = wgpu::VertexFormat::Float32;
+        state.cAttributes[0].offset = 0;
+        state.cAttributes[0].shaderLocation = 0;
+        state.cAttributes[1].format = wgpu::VertexFormat::Float32;
+        state.cAttributes[1].offset = 4;
+        state.cAttributes[1].shaderLocation = 1;
+
+        pipeline1 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // lastStride = attribute[1].offset + sizeof(attribute[1].format) = 12
+    wgpu::RenderPipeline pipeline2;
+    {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 12;
+        state.cVertexBuffers[0].stepMode = wgpu::VertexStepMode::Vertex;
+        state.cVertexBuffers[0].attributeCount = 2;
+        state.cAttributes[0].format = wgpu::VertexFormat::Float32;
+        state.cAttributes[0].offset = 0;
+        state.cAttributes[0].shaderLocation = 0;
+        state.cAttributes[1].format = wgpu::VertexFormat::Float32x2;
+        state.cAttributes[1].offset = 4;
+        state.cAttributes[1].shaderLocation = 1;
+
+        pipeline2 = MakeRenderPipeline(vsModule, state);
+    }
+
+    // Valid: draw 4 elements, last stride is 8, 3 * 12 + 8 = 44 <= 44
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline1);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(4);
+        pass.End();
+    }
+    encoder.Finish();
+
+    // Invalid: draw 4 elements, last stride is 12, 3 * 12 + 12 = 48 > 44
+    encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetPipeline(pipeline2);
+        pass.SetVertexBuffer(0, vertexBuffer);
+        pass.Draw(4);
+        pass.End();
+    }
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
diff --git a/src/dawn/tests/unittests/validation/VertexStateValidationTests.cpp b/src/dawn/tests/unittests/validation/VertexStateValidationTests.cpp
new file mode 100644
index 0000000..e522cb3
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/VertexStateValidationTests.cpp
@@ -0,0 +1,427 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class VertexStateTest : public ValidationTest {
+  protected:
+    void CreatePipeline(bool success,
+                        const utils::ComboVertexState& state,
+                        const char* vertexSource) {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, vertexSource);
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+            }
+        )");
+
+        utils::ComboRenderPipelineDescriptor descriptor;
+        descriptor.vertex.module = vsModule;
+        descriptor.vertex.bufferCount = state.vertexBufferCount;
+        descriptor.vertex.buffers = &state.cVertexBuffers[0];
+        descriptor.cFragment.module = fsModule;
+        descriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;
+
+        if (!success) {
+            ASSERT_DEVICE_ERROR(device.CreateRenderPipeline(&descriptor));
+        } else {
+            device.CreateRenderPipeline(&descriptor);
+        }
+    }
+
+    const char* kDummyVertexShader = R"(
+        @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+    )";
+};
+
+// Check an empty vertex input is valid
+TEST_F(VertexStateTest, EmptyIsOk) {
+    utils::ComboVertexState state;
+    CreatePipeline(true, state, kDummyVertexShader);
+}
+
+// Check null buffer is valid
+TEST_F(VertexStateTest, NullBufferIsOk) {
+    utils::ComboVertexState state;
+    // One null buffer (buffer[0]) is OK
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 0;
+    state.cVertexBuffers[0].attributeCount = 0;
+    state.cVertexBuffers[0].attributes = nullptr;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // One null buffer (buffer[0]) followed by a buffer (buffer[1]) is OK
+    state.vertexBufferCount = 2;
+    state.cVertexBuffers[1].arrayStride = 0;
+    state.cVertexBuffers[1].attributeCount = 1;
+    state.cVertexBuffers[1].attributes = &state.cAttributes[0];
+    state.cAttributes[0].shaderLocation = 0;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Null buffer (buffer[2]) sitting between buffers (buffer[1] and buffer[3]) is OK
+    state.vertexBufferCount = 4;
+    state.cVertexBuffers[2].attributeCount = 0;
+    state.cVertexBuffers[2].attributes = nullptr;
+    state.cVertexBuffers[3].attributeCount = 1;
+    state.cVertexBuffers[3].attributes = &state.cAttributes[1];
+    state.cAttributes[1].shaderLocation = 1;
+    CreatePipeline(true, state, kDummyVertexShader);
+}
+
+// Check validation that pipeline vertex buffers are backed by attributes in the vertex input
+TEST_F(VertexStateTest, PipelineCompatibility) {
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 2 * sizeof(float);
+    state.cVertexBuffers[0].attributeCount = 2;
+    state.cAttributes[0].shaderLocation = 0;
+    state.cAttributes[1].shaderLocation = 1;
+    state.cAttributes[1].offset = sizeof(float);
+
+    // Control case: pipeline with one input per attribute
+    CreatePipeline(true, state, R"(
+        @stage(vertex) fn main(
+            @location(0) a : vec4<f32>,
+            @location(1) b : vec4<f32>
+        ) -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+    )");
+
+    // Check it is valid for the pipeline to use a subset of the VertexState
+    CreatePipeline(true, state, R"(
+        @stage(vertex) fn main(
+            @location(0) a : vec4<f32>
+        ) -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+    )");
+
+    // Check for an error when the pipeline uses an attribute not in the vertex input
+    CreatePipeline(false, state, R"(
+        @stage(vertex) fn main(
+            @location(2) a : vec4<f32>
+        ) -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        }
+    )");
+}
+
+// Test that a arrayStride of 0 is valid
+TEST_F(VertexStateTest, StrideZero) {
+    // Works ok without attributes
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 0;
+    state.cVertexBuffers[0].attributeCount = 1;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Works ok with attributes at a large-ish offset
+    state.cAttributes[0].offset = 128;
+    CreatePipeline(true, state, kDummyVertexShader);
+}
+
+// Check validation that vertex attribute offset should be within vertex buffer arrayStride,
+// if vertex buffer arrayStride is not zero.
+TEST_F(VertexStateTest, SetOffsetOutOfBounds) {
+    // Control case, setting correct arrayStride and offset
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 2 * sizeof(float);
+    state.cVertexBuffers[0].attributeCount = 2;
+    state.cAttributes[0].shaderLocation = 0;
+    state.cAttributes[1].shaderLocation = 1;
+    state.cAttributes[1].offset = sizeof(float);
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test vertex attribute offset exceed vertex buffer arrayStride range
+    state.cVertexBuffers[0].arrayStride = sizeof(float);
+    CreatePipeline(false, state, kDummyVertexShader);
+
+    // It's OK if arrayStride is zero
+    state.cVertexBuffers[0].arrayStride = 0;
+    CreatePipeline(true, state, kDummyVertexShader);
+}
+
+// Check out of bounds condition on total number of vertex buffers
+TEST_F(VertexStateTest, SetVertexBuffersNumLimit) {
+    // Control case, setting max vertex buffer number
+    utils::ComboVertexState state;
+    state.vertexBufferCount = kMaxVertexBuffers;
+    for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+        state.cVertexBuffers[i].attributeCount = 1;
+        state.cVertexBuffers[i].attributes = &state.cAttributes[i];
+        state.cAttributes[i].shaderLocation = i;
+    }
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test vertex buffer number exceed the limit
+    state.vertexBufferCount = kMaxVertexBuffers + 1;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check out of bounds condition on total number of vertex attributes
+TEST_F(VertexStateTest, SetVertexAttributesNumLimit) {
+    // Control case, setting max vertex attribute number
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 2;
+    state.cVertexBuffers[0].attributeCount = kMaxVertexAttributes;
+    for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+        state.cAttributes[i].shaderLocation = i;
+    }
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test vertex attribute number exceed the limit
+    state.cVertexBuffers[1].attributeCount = 1;
+    state.cVertexBuffers[1].attributes = &state.cAttributes[kMaxVertexAttributes - 1];
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check out of bounds condition on input arrayStride
+TEST_F(VertexStateTest, SetInputStrideOutOfBounds) {
+    // Control case, setting max input arrayStride
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride;
+    state.cVertexBuffers[0].attributeCount = 1;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test input arrayStride OOB
+    state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride + 1;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check multiple of 4 bytes constraint on input arrayStride
+TEST_F(VertexStateTest, SetInputStrideNotAligned) {
+    // Control case, setting input arrayStride 4 bytes.
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 4;
+    state.cVertexBuffers[0].attributeCount = 1;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test input arrayStride not multiple of 4 bytes
+    state.cVertexBuffers[0].arrayStride = 2;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Test that we cannot set an already set attribute
+TEST_F(VertexStateTest, AlreadySetAttribute) {
+    // Control case, setting attribute 0
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].shaderLocation = 0;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Oh no, attribute 0 is set twice
+    state.cVertexBuffers[0].attributeCount = 2;
+    state.cAttributes[0].shaderLocation = 0;
+    state.cAttributes[1].shaderLocation = 0;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Test that a arrayStride of 0 is valid
+TEST_F(VertexStateTest, SetSameShaderLocation) {
+    // Control case, setting different shader locations in two attributes
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 2;
+    state.cAttributes[0].shaderLocation = 0;
+    state.cAttributes[1].shaderLocation = 1;
+    state.cAttributes[1].offset = sizeof(float);
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test same shader location in two attributes in the same buffer
+    state.cAttributes[1].shaderLocation = 0;
+    CreatePipeline(false, state, kDummyVertexShader);
+
+    // Test same shader location in two attributes in different buffers
+    state.vertexBufferCount = 2;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].shaderLocation = 0;
+    state.cVertexBuffers[1].attributeCount = 1;
+    state.cVertexBuffers[1].attributes = &state.cAttributes[1];
+    state.cAttributes[1].shaderLocation = 0;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check out of bounds condition on attribute shader location
+TEST_F(VertexStateTest, SetAttributeLocationOutOfBounds) {
+    // Control case, setting last attribute shader location
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].shaderLocation = kMaxVertexAttributes - 1;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test attribute location OOB
+    state.cAttributes[0].shaderLocation = kMaxVertexAttributes;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check attribute offset out of bounds
+TEST_F(VertexStateTest, SetAttributeOffsetOutOfBounds) {
+    // Control case, setting max attribute offset for FloatR32 vertex format
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].offset = kMaxVertexBufferArrayStride - sizeof(wgpu::VertexFormat::Float32);
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    // Test attribute offset out of bounds
+    state.cAttributes[0].offset = kMaxVertexBufferArrayStride - 1;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check the min(4, formatSize) alignment constraint for the offset.
+TEST_F(VertexStateTest, SetOffsetNotAligned) {
+    // Control case, setting the offset at the correct alignments.
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 1;
+
+    // Test that for small formats, the offset must be aligned to the format size.
+    state.cAttributes[0].format = wgpu::VertexFormat::Float32;
+    state.cAttributes[0].offset = 4;
+    CreatePipeline(true, state, kDummyVertexShader);
+    state.cAttributes[0].offset = 2;
+    CreatePipeline(false, state, kDummyVertexShader);
+
+    state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x2;
+    state.cAttributes[0].offset = 4;
+    CreatePipeline(true, state, kDummyVertexShader);
+    state.cAttributes[0].offset = 2;
+    CreatePipeline(false, state, kDummyVertexShader);
+
+    state.cAttributes[0].format = wgpu::VertexFormat::Unorm8x2;
+    state.cAttributes[0].offset = 2;
+    CreatePipeline(true, state, kDummyVertexShader);
+    state.cAttributes[0].offset = 1;
+    CreatePipeline(false, state, kDummyVertexShader);
+
+    // Test that for large formts the offset only needs to be aligned to 4.
+    state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x4;
+    state.cAttributes[0].offset = 4;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    state.cAttributes[0].format = wgpu::VertexFormat::Uint32x3;
+    state.cAttributes[0].offset = 4;
+    CreatePipeline(true, state, kDummyVertexShader);
+
+    state.cAttributes[0].format = wgpu::VertexFormat::Sint32x4;
+    state.cAttributes[0].offset = 4;
+    CreatePipeline(true, state, kDummyVertexShader);
+}
+
+// Check attribute offset overflow
+TEST_F(VertexStateTest, SetAttributeOffsetOverflow) {
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].offset = std::numeric_limits<uint32_t>::max();
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check for some potential underflow in the vertex input validation
+TEST_F(VertexStateTest, VertexFormatLargerThanNonZeroStride) {
+    utils::ComboVertexState state;
+    state.vertexBufferCount = 1;
+    state.cVertexBuffers[0].arrayStride = 4;
+    state.cVertexBuffers[0].attributeCount = 1;
+    state.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+    CreatePipeline(false, state, kDummyVertexShader);
+}
+
+// Check that the vertex format base type must match the shader's variable base type.
+TEST_F(VertexStateTest, BaseTypeMatching) {
+    auto DoTest = [&](wgpu::VertexFormat format, std::string shaderType, bool success) {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 16;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].format = format;
+
+        std::string shader = "@stage(vertex) fn main(@location(0) attrib : " + shaderType +
+                             R"() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })";
+
+        CreatePipeline(success, state, shader.c_str());
+    };
+
+    // Test that a float format is compatible only with f32 base type.
+    DoTest(wgpu::VertexFormat::Float32, "f32", true);
+    DoTest(wgpu::VertexFormat::Float32, "i32", false);
+    DoTest(wgpu::VertexFormat::Float32, "u32", false);
+
+    // Test that an unorm format is compatible only with f32.
+    DoTest(wgpu::VertexFormat::Unorm16x2, "f32", true);
+    DoTest(wgpu::VertexFormat::Unorm16x2, "i32", false);
+    DoTest(wgpu::VertexFormat::Unorm16x2, "u32", false);
+
+    // Test that an snorm format is compatible only with f32.
+    DoTest(wgpu::VertexFormat::Snorm16x4, "f32", true);
+    DoTest(wgpu::VertexFormat::Snorm16x4, "i32", false);
+    DoTest(wgpu::VertexFormat::Snorm16x4, "u32", false);
+
+    // Test that an uint format is compatible only with u32.
+    DoTest(wgpu::VertexFormat::Uint32x3, "f32", false);
+    DoTest(wgpu::VertexFormat::Uint32x3, "i32", false);
+    DoTest(wgpu::VertexFormat::Uint32x3, "u32", true);
+
+    // Test that an sint format is compatible only with u32.
+    DoTest(wgpu::VertexFormat::Sint8x4, "f32", false);
+    DoTest(wgpu::VertexFormat::Sint8x4, "i32", true);
+    DoTest(wgpu::VertexFormat::Sint8x4, "u32", false);
+
+    // Test that formats are compatible with any width of vectors.
+    DoTest(wgpu::VertexFormat::Float32, "f32", true);
+    DoTest(wgpu::VertexFormat::Float32, "vec2<f32>", true);
+    DoTest(wgpu::VertexFormat::Float32, "vec3<f32>", true);
+    DoTest(wgpu::VertexFormat::Float32, "vec4<f32>", true);
+
+    DoTest(wgpu::VertexFormat::Float32x4, "f32", true);
+    DoTest(wgpu::VertexFormat::Float32x4, "vec2<f32>", true);
+    DoTest(wgpu::VertexFormat::Float32x4, "vec3<f32>", true);
+    DoTest(wgpu::VertexFormat::Float32x4, "vec4<f32>", true);
+}
+
+// Check that we only check base type compatibility for vertex inputs the shader uses.
+TEST_F(VertexStateTest, BaseTypeMatchingForInexistentInput) {
+    auto DoTest = [&](wgpu::VertexFormat format) {
+        utils::ComboVertexState state;
+        state.vertexBufferCount = 1;
+        state.cVertexBuffers[0].arrayStride = 16;
+        state.cVertexBuffers[0].attributeCount = 1;
+        state.cAttributes[0].format = format;
+
+        std::string shader = R"(@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+            return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+        })";
+
+        CreatePipeline(true, state, shader.c_str());
+    };
+
+    DoTest(wgpu::VertexFormat::Float32);
+    DoTest(wgpu::VertexFormat::Unorm16x2);
+    DoTest(wgpu::VertexFormat::Snorm16x4);
+    DoTest(wgpu::VertexFormat::Uint8x4);
+    DoTest(wgpu::VertexFormat::Sint32x2);
+}
diff --git a/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
new file mode 100644
index 0000000..7c8677b
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
@@ -0,0 +1,341 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class VideoViewsValidation : public ValidationTest {
+      protected:
+        WGPUDevice CreateTestDevice() override {
+            wgpu::DeviceDescriptor descriptor;
+            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnMultiPlanarFormats};
+            descriptor.requiredFeatures = requiredFeatures;
+            descriptor.requiredFeaturesCount = 1;
+            return adapter.CreateDevice(&descriptor);
+        }
+
+        wgpu::Texture CreateVideoTextureForTest(wgpu::TextureFormat format,
+                                                wgpu::TextureUsage usage) {
+            wgpu::TextureDescriptor descriptor;
+            descriptor.dimension = wgpu::TextureDimension::e2D;
+            descriptor.size.width = 1;
+            descriptor.size.height = 1;
+            descriptor.format = format;
+            descriptor.usage = usage;
+            return device.CreateTexture(&descriptor);
+        }
+    };
+
+    // Test texture views compatibility rules.
+    TEST_F(VideoViewsValidation, CreateViewFails) {
+        wgpu::Texture videoTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::TextureViewDescriptor viewDesc = {};
+
+        // Correct plane index but incompatible view format.
+        viewDesc.format = wgpu::TextureFormat::R8Uint;
+        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
+
+        // Compatible view format but wrong plane index.
+        viewDesc.format = wgpu::TextureFormat::R8Unorm;
+        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
+
+        // Compatible view format but wrong aspect.
+        viewDesc.format = wgpu::TextureFormat::R8Unorm;
+        viewDesc.aspect = wgpu::TextureAspect::All;
+        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
+
+        // Create a single plane texture.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::RGBA8Unorm;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::TextureBinding;
+        desc.size = {1, 1, 1};
+
+        wgpu::Texture texture = device.CreateTexture(&desc);
+
+        // Plane aspect specified with non-planar texture.
+        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        // Planar views with non-planar texture.
+        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+        viewDesc.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+        viewDesc.format = wgpu::TextureFormat::RG8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test texture views compatibility rules.
+    TEST_F(VideoViewsValidation, CreateViewSucceeds) {
+        wgpu::Texture yuvTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        // Per plane view formats unspecified.
+        wgpu::TextureViewDescriptor planeViewDesc = {};
+        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+        wgpu::TextureView plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+        wgpu::TextureView plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+        ASSERT_NE(plane0View.Get(), nullptr);
+        ASSERT_NE(plane1View.Get(), nullptr);
+
+        // Per plane view formats specified.
+        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+        planeViewDesc.format = wgpu::TextureFormat::R8Unorm;
+        plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+        planeViewDesc.format = wgpu::TextureFormat::RG8Unorm;
+        plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+        ASSERT_NE(plane0View.Get(), nullptr);
+        ASSERT_NE(plane1View.Get(), nullptr);
+    }
+
+    // Test copying from one multi-planar format into another fails.
+    TEST_F(VideoViewsValidation, T2TCopyAllAspectsFails) {
+        wgpu::Texture srcTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::Texture dstTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test copying from one multi-planar format into another per plane fails.
+    TEST_F(VideoViewsValidation, T2TCopyPlaneAspectFails) {
+        wgpu::Texture srcTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::Texture dstTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
+            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
+            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
+                                                wgpu::TextureAspect::Plane1Only);
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test copying from a multi-planar format to a buffer fails.
+    TEST_F(VideoViewsValidation, T2BCopyAllAspectsFails) {
+        wgpu::Texture srcTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = 1;
+        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test copying from multi-planar format per plane to a buffer fails.
+    TEST_F(VideoViewsValidation, T2BCopyPlaneAspectsFails) {
+        wgpu::Texture srcTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::BufferDescriptor bufferDescriptor;
+        bufferDescriptor.size = 1;
+        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
+            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
+                                                wgpu::TextureAspect::Plane1Only);
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test copying from a buffer to a multi-planar format fails.
+    TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
+        std::vector<uint8_t> dummyData(4, 0);
+
+        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+            device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc);
+
+        wgpu::Texture dstTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test copying from a buffer to a multi-planar format per plane fails.
+    TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
+        std::vector<uint8_t> dummyData(4, 0);
+
+        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+            device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc);
+
+        wgpu::Texture dstTexture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
+            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0},
+                                                wgpu::TextureAspect::Plane1Only);
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Tests which multi-planar formats are allowed to be sampled.
+    TEST_F(VideoViewsValidation, SamplingMultiPlanarTexture) {
+        wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+
+        // R8BG8Biplanar420Unorm is allowed to be sampled, if plane 0 or plane 1 is selected.
+        wgpu::Texture texture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::TextureViewDescriptor desc = {};
+
+        desc.aspect = wgpu::TextureAspect::Plane0Only;
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+
+        desc.aspect = wgpu::TextureAspect::Plane1Only;
+        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+    }
+
+    // Tests creating a texture with a multi-plane format.
+    TEST_F(VideoViewsValidation, CreateTextureFails) {
+        // multi-planar formats are NOT allowed to be renderable.
+        ASSERT_DEVICE_ERROR(CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::RenderAttachment));
+    }
+
+    // Tests writing into a multi-planar format fails.
+    TEST_F(VideoViewsValidation, WriteTextureAllAspectsFails) {
+        wgpu::Texture texture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 4, 4);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+
+        std::vector<uint8_t> dummyData(4, 0);
+        wgpu::Extent3D writeSize = {1, 1, 1};
+
+        wgpu::Queue queue = device.GetQueue();
+
+        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummyData.data(),
+                                               dummyData.size(), &textureDataLayout, &writeSize));
+    }
+
+    // Tests writing into a multi-planar format per plane fails.
+    TEST_F(VideoViewsValidation, WriteTexturePlaneAspectsFails) {
+        wgpu::Texture texture = CreateVideoTextureForTest(
+            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 12, 4);
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+        std::vector<uint8_t> dummmyData(4, 0);
+        wgpu::Extent3D writeSize = {1, 1, 1};
+
+        wgpu::Queue queue = device.GetQueue();
+
+        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummmyData.data(),
+                                               dummmyData.size(), &textureDataLayout, &writeSize));
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/WriteBufferTests.cpp b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
new file mode 100644
index 0000000..0f5c957
--- /dev/null
+++ b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
@@ -0,0 +1,104 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/validation/ValidationTest.h"
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    class WriteBufferTest : public ValidationTest {
+      public:
+        wgpu::Buffer CreateWritableBuffer(uint64_t size) {
+            wgpu::BufferDescriptor desc;
+            desc.usage = wgpu::BufferUsage::CopyDst;
+            desc.size = size;
+            return device.CreateBuffer(&desc);
+        }
+
+        wgpu::CommandBuffer EncodeWriteBuffer(wgpu::Buffer buffer,
+                                              uint64_t bufferOffset,
+                                              uint64_t size) {
+            std::vector<uint8_t> data(size);
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.WriteBuffer(buffer, bufferOffset, data.data(), size);
+            return encoder.Finish();
+        }
+    };
+
+    // Tests that the buffer offset is validated to be a multiple of 4 bytes.
+    TEST_F(WriteBufferTest, OffsetAlignment) {
+        wgpu::Buffer buffer = CreateWritableBuffer(64);
+        EncodeWriteBuffer(buffer, 0, 4);
+        EncodeWriteBuffer(buffer, 4, 4);
+        EncodeWriteBuffer(buffer, 60, 4);
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 1, 4));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 2, 4));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 3, 4));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 5, 4));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 11, 4));
+    }
+
+    // Tests that the buffer size is validated to be a multiple of 4 bytes.
+    TEST_F(WriteBufferTest, SizeAlignment) {
+        wgpu::Buffer buffer = CreateWritableBuffer(64);
+        EncodeWriteBuffer(buffer, 0, 64);
+        EncodeWriteBuffer(buffer, 4, 60);
+        EncodeWriteBuffer(buffer, 40, 24);
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 63));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 1));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 2));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 40, 23));
+    }
+
+    // Tests that the buffer size and offset are validated to fit within the bounds of the buffer.
+    TEST_F(WriteBufferTest, BufferBounds) {
+        wgpu::Buffer buffer = CreateWritableBuffer(64);
+        EncodeWriteBuffer(buffer, 0, 64);
+        EncodeWriteBuffer(buffer, 4, 60);
+        EncodeWriteBuffer(buffer, 40, 24);
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 68));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 64));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 60, 8));
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 64, 4));
+    }
+
+    // Tests that the destination buffer's usage is validated to contain CopyDst.
+    TEST_F(WriteBufferTest, RequireCopyDstUsage) {
+        wgpu::BufferDescriptor desc;
+        desc.usage = wgpu::BufferUsage::CopySrc;
+        desc.size = 64;
+        wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 64));
+    }
+
+    // Tests that the destination buffer's state is validated at submission.
+    TEST_F(WriteBufferTest, ValidBufferState) {
+        wgpu::BufferDescriptor desc;
+        desc.usage = wgpu::BufferUsage::CopyDst;
+        desc.size = 64;
+        desc.mappedAtCreation = true;
+        wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+        wgpu::CommandBuffer commands = EncodeWriteBuffer(buffer, 0, 64);
+        ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commands));
+
+        commands = EncodeWriteBuffer(buffer, 0, 64);
+        buffer.Unmap();
+        device.GetQueue().Submit(1, &commands);
+    }
+
+}  // namespace
diff --git a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
new file mode 100644
index 0000000..c1d2d45
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
@@ -0,0 +1,330 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <webgpu/webgpu_cpp.h>
+#include <unordered_set>
+#include <vector>
+
+namespace {
+
+    using namespace testing;
+    using namespace dawn::wire;
+
+    class WireAdapterTests : public WireTest {
+      protected:
+        // Bootstrap the tests and create a fake adapter.
+        void SetUp() override {
+            WireTest::SetUp();
+
+            auto reservation = GetWireClient()->ReserveInstance();
+            instance = wgpu::Instance::Acquire(reservation.instance);
+
+            WGPUInstance apiInstance = api.GetNewInstance();
+            EXPECT_CALL(api, InstanceReference(apiInstance));
+            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
+                                                        reservation.generation));
+
+            wgpu::RequestAdapterOptions options = {};
+            MockCallback<WGPURequestAdapterCallback> cb;
+            auto* userdata = cb.MakeUserdata(this);
+            instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+            // Expect the server to receive the message. Then, mock a fake reply.
+            apiAdapter = api.GetNewAdapter();
+            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+                .WillOnce(InvokeWithoutArgs([&]() {
+                    EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                            *properties = {};
+                            properties->name = "";
+                            properties->driverDescription = "";
+                        })));
+
+                    EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                        .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                            *limits = {};
+                            return true;
+                        })));
+
+                    EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                        .WillOnce(Return(0))
+                        .WillOnce(Return(0));
+                    api.CallInstanceRequestAdapterCallback(
+                        apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
+                }));
+            FlushClient();
+
+            // Expect the callback in the client.
+            WGPUAdapter cAdapter;
+            EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+                .WillOnce(SaveArg<1>(&cAdapter));
+            FlushServer();
+
+            EXPECT_NE(cAdapter, nullptr);
+            adapter = wgpu::Adapter::Acquire(cAdapter);
+        }
+
+        void TearDown() override {
+            adapter = nullptr;
+            instance = nullptr;
+            WireTest::TearDown();
+        }
+
+        WGPUAdapter apiAdapter;
+        wgpu::Instance instance;
+        wgpu::Adapter adapter;
+    };
+
+    // Test that the DeviceDescriptor is passed from the client to the server.
+    TEST_F(WireAdapterTests, RequestDevicePassesDescriptor) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        // Test an empty descriptor
+        {
+            wgpu::DeviceDescriptor desc = {};
+            adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+                .WillOnce(WithArg<1>(Invoke([](const WGPUDeviceDescriptor* apiDesc) {
+                    EXPECT_EQ(apiDesc->label, nullptr);
+                    EXPECT_EQ(apiDesc->requiredFeaturesCount, 0u);
+                    EXPECT_EQ(apiDesc->requiredLimits, nullptr);
+                })));
+            FlushClient();
+        }
+
+        // Test a non-empty descriptor
+        {
+            wgpu::RequiredLimits limits = {};
+            limits.limits.maxStorageTexturesPerShaderStage = 5;
+
+            std::vector<wgpu::FeatureName> features = {wgpu::FeatureName::TextureCompressionETC2,
+                                                       wgpu::FeatureName::TextureCompressionASTC};
+
+            wgpu::DeviceDescriptor desc = {};
+            desc.label = "hello device";
+            desc.requiredLimits = &limits;
+            desc.requiredFeaturesCount = features.size();
+            desc.requiredFeatures = features.data();
+
+            adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](const WGPUDeviceDescriptor* apiDesc) {
+                    EXPECT_STREQ(apiDesc->label, desc.label);
+
+                    ASSERT_EQ(apiDesc->requiredFeaturesCount, features.size());
+                    for (uint32_t i = 0; i < features.size(); ++i) {
+                        EXPECT_EQ(apiDesc->requiredFeatures[i],
+                                  static_cast<WGPUFeatureName>(features[i]));
+                    }
+
+                    ASSERT_NE(apiDesc->requiredLimits, nullptr);
+                    EXPECT_EQ(apiDesc->requiredLimits->nextInChain, nullptr);
+                    EXPECT_EQ(apiDesc->requiredLimits->limits.maxStorageTexturesPerShaderStage,
+                              limits.limits.maxStorageTexturesPerShaderStage);
+                })));
+            FlushClient();
+        }
+
+        // Delete the adapter now, or it'll call the mock callback after it's deleted.
+        adapter = nullptr;
+    }
+
+    // Test that RequestDevice forwards the device information to the client.
+    TEST_F(WireAdapterTests, RequestDeviceSuccess) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        wgpu::SupportedLimits fakeLimits = {};
+        fakeLimits.limits.maxTextureDimension1D = 433;
+        fakeLimits.limits.maxVertexAttributes = 1243;
+
+        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+            wgpu::FeatureName::Depth32FloatStencil8,
+            wgpu::FeatureName::TextureCompressionBC,
+        };
+
+        wgpu::DeviceDescriptor desc = {};
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        // Expect the server to receive the message. Then, mock a fake reply.
+        WGPUDevice apiDevice = api.GetNewDevice();
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                // Set on device creation to forward callbacks to the client.
+                EXPECT_CALL(api,
+                            OnDeviceSetUncapturedErrorCallback(apiDevice, NotNull(), NotNull()))
+                    .Times(1);
+                EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, NotNull(), NotNull()))
+                    .Times(1);
+                EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, NotNull(), NotNull()))
+                    .Times(1);
+
+                EXPECT_CALL(api, DeviceGetLimits(apiDevice, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                        return true;
+                    })));
+
+                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                    .WillOnce(Return(fakeFeatures.size()));
+
+                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                        for (wgpu::FeatureName feature : fakeFeatures) {
+                            *(features++) = static_cast<WGPUFeatureName>(feature);
+                        }
+                        return fakeFeatures.size();
+                    })));
+
+                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                     apiDevice, nullptr);
+            }));
+        FlushClient();
+
+        // Expect the callback in the client and all the device information to match.
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(WithArg<1>(Invoke([&](WGPUDevice cDevice) {
+                wgpu::Device device = wgpu::Device::Acquire(cDevice);
+
+                wgpu::SupportedLimits limits;
+                EXPECT_TRUE(device.GetLimits(&limits));
+                EXPECT_EQ(limits.limits.maxTextureDimension1D,
+                          fakeLimits.limits.maxTextureDimension1D);
+                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+
+                std::vector<wgpu::FeatureName> features;
+                features.resize(device.EnumerateFeatures(nullptr));
+                ASSERT_EQ(features.size(), fakeFeatures.size());
+                EXPECT_EQ(device.EnumerateFeatures(&features[0]), features.size());
+
+                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+                for (wgpu::FeatureName feature : features) {
+                    EXPECT_EQ(featureSet.erase(feature), 1u);
+                }
+            })));
+        FlushServer();
+
+        // Cleared when the device is destroyed.
+        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)).Times(1);
+        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(1);
+        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)).Times(1);
+    }
+
+    // Test that features requested that the implementation supports, but not the
+    // wire reject the callback.
+    TEST_F(WireAdapterTests, RequestFeatureUnsupportedByWire) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+            // Some value that is not a valid feature
+            static_cast<wgpu::FeatureName>(-2),
+            wgpu::FeatureName::TextureCompressionASTC,
+        };
+
+        wgpu::DeviceDescriptor desc = {};
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        // Expect the server to receive the message. Then, mock a fake reply.
+        // The reply contains features that the device implementation supports, but the
+        // wire does not.
+        WGPUDevice apiDevice = api.GetNewDevice();
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                    .WillOnce(Return(fakeFeatures.size()));
+
+                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                        for (wgpu::FeatureName feature : fakeFeatures) {
+                            *(features++) = static_cast<WGPUFeatureName>(feature);
+                        }
+                        return fakeFeatures.size();
+                    })));
+
+                // The device was actually created, but the wire didn't support its features.
+                // Expect it to be released.
+                EXPECT_CALL(api, DeviceRelease(apiDevice));
+
+                // Fake successful creation. The client still receives a failure due to
+                // unsupported features.
+                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                     apiDevice, nullptr);
+            }));
+        FlushClient();
+
+        // Expect an error callback since the feature is not supported.
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+        FlushServer();
+    }
+
+    // Test that RequestDevice errors forward to the client.
+    TEST_F(WireAdapterTests, RequestDeviceError) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        wgpu::DeviceDescriptor desc = {};
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        // Expect the server to receive the message. Then, mock an error.
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Error,
+                                                     nullptr, "Request device failed");
+            }));
+        FlushClient();
+
+        // Expect the callback in the client.
+        EXPECT_CALL(
+            cb, Call(WGPURequestDeviceStatus_Error, nullptr, StrEq("Request device failed"), this))
+            .Times(1);
+        FlushServer();
+    }
+
+    // Test that RequestDevice receives unknown status if the adapter is deleted
+    // before the callback happens.
+    TEST_F(WireAdapterTests, RequestDeviceAdapterDestroyedBeforeCallback) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        wgpu::DeviceDescriptor desc = {};
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+        adapter = nullptr;
+    }
+
+    // Test that RequestDevice receives unknown status if the wire is disconnected
+    // before the callback happens.
+    TEST_F(WireAdapterTests, RequestDeviceWireDisconnectedBeforeCallback) {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        wgpu::DeviceDescriptor desc = {};
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+        GetWireClient()->Disconnect();
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
new file mode 100644
index 0000000..7277301
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
@@ -0,0 +1,368 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/common/Constants.h"
+
+#include <array>
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireArgumentTests : public WireTest {
+  public:
+    WireArgumentTests() {
+    }
+    ~WireArgumentTests() override = default;
+};
+
+// Test that the wire is able to send numerical values
+TEST_F(WireArgumentTests, ValueArgument) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
+
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
+
+    EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the wire is able to send arrays of numerical values
+TEST_F(WireArgumentTests, ValueArrayArgument) {
+    // Create a bindgroup.
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
+
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+
+    WGPUBindGroupDescriptor bindGroupDescriptor = {};
+    bindGroupDescriptor.layout = bgl;
+    bindGroupDescriptor.entryCount = 0;
+    bindGroupDescriptor.entries = nullptr;
+
+    WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
+    WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
+
+    // Use the bindgroup in SetBindGroup that takes an array of value offsets.
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+
+    std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
+    wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data());
+
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
+
+    EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
+                         apiPass, 0, apiBindGroup, testOffsets.size(),
+                         MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
+                             for (size_t i = 0; i < testOffsets.size(); i++) {
+                                 if (offsets[i] != testOffsets[i]) {
+                                     return false;
+                                 }
+                             }
+                             return true;
+                         })));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send C strings
+TEST_F(WireArgumentTests, CStringArgument) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
+
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
+
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
+
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
+
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
+
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+    pipelineDescriptor.depthStencil = &depthStencilState;
+
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+
+    WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->vertex.entryPoint == std::string("main");
+                    })))
+        .WillOnce(Return(apiDummyPipeline));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send objects as value arguments
+TEST_F(WireArgumentTests, ObjectAsValueArgument) {
+    WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 8;
+    descriptor.usage =
+        static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+        .WillOnce(Return(apiBuffer))
+        .RetiresOnSaturation();
+
+    wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
+    EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send array of objects
+TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
+    WGPUCommandBuffer cmdBufs[2];
+    WGPUCommandBuffer apiCmdBufs[2];
+
+    // Create two command buffers we need to use a GMock sequence otherwise the order of the
+    // CreateCommandEncoder might be swapped since they are equivalent in term of matchers
+    Sequence s;
+    for (int i = 0; i < 2; ++i) {
+        WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+        cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
+
+        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+            .InSequence(s)
+            .WillOnce(Return(apiCmdBufEncoder));
+
+        apiCmdBufs[i] = api.GetNewCommandBuffer();
+        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
+            .WillOnce(Return(apiCmdBufs[i]));
+    }
+
+    // Submit command buffer and check we got a call with both API-side command buffers
+    wgpuQueueSubmit(queue, 2, cmdBufs);
+
+    EXPECT_CALL(
+        api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
+                             return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
+                         })));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send structures that contain pure values (non-objects)
+TEST_F(WireArgumentTests, StructureOfValuesArgument) {
+    WGPUSamplerDescriptor descriptor = {};
+    descriptor.magFilter = WGPUFilterMode_Linear;
+    descriptor.minFilter = WGPUFilterMode_Nearest;
+    descriptor.mipmapFilter = WGPUFilterMode_Linear;
+    descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
+    descriptor.addressModeV = WGPUAddressMode_Repeat;
+    descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
+    descriptor.lodMinClamp = kLodMin;
+    descriptor.lodMaxClamp = kLodMax;
+    descriptor.compare = WGPUCompareFunction_Never;
+
+    wgpuDeviceCreateSampler(device, &descriptor);
+
+    WGPUSampler apiDummySampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(
+                         apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr &&
+                                    desc->magFilter == WGPUFilterMode_Linear &&
+                                    desc->minFilter == WGPUFilterMode_Nearest &&
+                                    desc->mipmapFilter == WGPUFilterMode_Linear &&
+                                    desc->addressModeU == WGPUAddressMode_ClampToEdge &&
+                                    desc->addressModeV == WGPUAddressMode_Repeat &&
+                                    desc->addressModeW == WGPUAddressMode_MirrorRepeat &&
+                                    desc->compare == WGPUCompareFunction_Never &&
+                                    desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
+                         })))
+        .WillOnce(Return(apiDummySampler));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
+
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &bgl;
+
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
+
+    WGPUPipelineLayout apiDummyLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(
+                         apiDevice,
+                         MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr &&
+                                    desc->bindGroupLayoutCount == 1 &&
+                                    desc->bindGroupLayouts[0] == apiBgl;
+                         })))
+        .WillOnce(Return(apiDummyLayout));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
+    static constexpr int NUM_BINDINGS = 3;
+    WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
+        {nullptr,
+         0,
+         WGPUShaderStage_Vertex,
+         {},
+         {nullptr, WGPUSamplerBindingType_Filtering},
+         {},
+         {}},
+        {nullptr,
+         1,
+         WGPUShaderStage_Vertex,
+         {},
+         {},
+         {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
+         {}},
+        {nullptr,
+         2,
+         static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
+         {nullptr, WGPUBufferBindingType_Uniform, false, 0},
+         {},
+         {},
+         {}},
+    };
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = NUM_BINDINGS;
+    bglDescriptor.entries = entries;
+
+    wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(
+        api,
+        DeviceCreateBindGroupLayout(
+            apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
+                for (int i = 0; i < NUM_BINDINGS; ++i) {
+                    const auto& a = desc->entries[i];
+                    const auto& b = entries[i];
+                    if (a.binding != b.binding || a.visibility != b.visibility ||
+                        a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type ||
+                        a.texture.sampleType != b.texture.sampleType) {
+                        return false;
+                    }
+                }
+                return desc->nextInChain == nullptr && desc->entryCount == 3;
+            })))
+        .WillOnce(Return(apiBgl));
+
+    FlushClient();
+}
+
+// Test passing nullptr instead of objects - array of objects version
+TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
+    WGPUBindGroupLayout nullBGL = nullptr;
+
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &nullBGL;
+
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
+    EXPECT_CALL(api,
+                DeviceCreatePipelineLayout(
+                    apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                        return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
+                               desc->bindGroupLayouts[0] == nullptr;
+                    })))
+        .WillOnce(Return(nullptr));
+
+    FlushClient();
+}
diff --git a/src/dawn/tests/unittests/wire/WireBasicTests.cpp b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
new file mode 100644
index 0000000..abf5d9d
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
@@ -0,0 +1,80 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireBasicTests : public WireTest {
+  public:
+    WireBasicTests() {
+    }
+    ~WireBasicTests() override = default;
+};
+
+// One call gets forwarded correctly.
+TEST_F(WireBasicTests, CallForwarded) {
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+
+    FlushClient();
+}
+
+// Test that calling methods on a new object works as expected.
+TEST_F(WireBasicTests, CreateThenCall) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuCommandEncoderFinish(encoder, nullptr);
+
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+
+    WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
+    EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)).WillOnce(Return(apiCmdBuf));
+
+    FlushClient();
+}
+
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, RefCountKeptInClient) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    wgpuCommandEncoderReference(encoder);
+    wgpuCommandEncoderRelease(encoder);
+
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+
+    FlushClient();
+}
+
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    wgpuCommandEncoderRelease(encoder);
+
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+
+    EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));
+
+    FlushClient();
+}
diff --git a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
new file mode 100644
index 0000000..eb831d8
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
@@ -0,0 +1,812 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    // Mock class to add expectations on the wire calling callbacks
+    class MockBufferMapCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+    void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+        mockBufferMapCallback->Call(status, userdata);
+    }
+
+}  // anonymous namespace
+
+class WireBufferMappingTests : public WireTest {
+  public:
+    WireBufferMappingTests() {
+    }
+    ~WireBufferMappingTests() override = default;
+
+    void SetUp() override {
+        WireTest::SetUp();
+
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
+        apiBuffer = api.GetNewBuffer();
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
+    }
+
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
+    }
+
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
+    }
+
+    void SetupBuffer(WGPUBufferUsageFlags usage) {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.size = kBufferSize;
+        descriptor.usage = usage;
+
+        buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+        FlushClient();
+    }
+
+  protected:
+    static constexpr uint64_t kBufferSize = sizeof(uint32_t);
+    // A successfully created buffer
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+};
+
+// Tests specific to mapping for reading
+class WireBufferMappingReadTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingReadTests() {
+    }
+    ~WireBufferMappingReadTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapRead);
+    }
+};
+
+// Check mapping for reading a succesfully created buffer
+TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// reading
+TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map read callback is called with UNKNOWN when the buffer is destroyed before the
+// request is finished
+TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 0;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map read callback is called with "UnmappedBeforeCallback" when the map request would
+// have worked, but Unmap was called
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Unmap() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server that the mapAsync call was an error.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the UnmappedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check the map read callback is called with "DestroyedBeforeCallback" when the map request would
+// have worked, but Destroy was called
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Destroy() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Destroy too early! However the callback gets fired only after we get
+    // an answer from the server that the mapAsync call was an error.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the DestroyedBeforCallback..
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check that an error map read while a buffer is already mapped won't changed the result of get
+// mapped range
+TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the
+// callback
+TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Tests specific to mapping for writing
+class WireBufferMappingWriteTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingWriteTests() {
+    }
+    ~WireBufferMappingWriteTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapWrite);
+    }
+};
+
+// Check mapping for writing a succesfully created buffer
+TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t serverBufferContent = 31337;
+    uint32_t updatedContent = 4242;
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&serverBufferContent));
+
+    FlushClient();
+
+    // The map write callback always gets a buffer full of zeroes.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    uint32_t* lastMapWritePointer =
+        static_cast<uint32_t*>(wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+    ASSERT_EQ(0u, *lastMapWritePointer);
+
+    // Write something to the mapped pointer
+    *lastMapWritePointer = updatedContent;
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // After the buffer is unmapped, the content of the buffer is updated on the server
+    ASSERT_EQ(serverBufferContent, updatedContent);
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// writing
+TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
+// destroyed before the request is finished
+TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map write callback is called with "UnmappedBeforeCallback" when the map request would
+// have worked, but Unmap was called
+TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    // Oh no! We are calling Unmap too early!
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferUnmap(buffer);
+
+    // The callback shouldn't get called, even when the request succeeded on the server side
+    FlushServer();
+}
+
+// Check that an error map write while a buffer is already mapped
+TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in the
+// callback
+TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Test successful buffer creation with mappedAtCreation=true
+TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that releasing a buffer mapped at creation does not call Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that it is valid to map a buffer after it is mapped at creation and unmapped
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.usage = WGPUMapMode_Write;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+}
+
+// Test that it is invalid to map a buffer after mappedAtCreation but before Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client and
+// never gets to the server-side.
+TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
+    size_t kOOMSize = std::numeric_limits<size_t>::max();
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    // Check for CreateBufferMapped.
+    {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.usage = WGPUBufferUsage_CopySrc;
+        descriptor.size = kOOMSize;
+        descriptor.mappedAtCreation = true;
+
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
+        FlushClient();
+    }
+
+    // Check for MapRead usage.
+    {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.usage = WGPUBufferUsage_MapRead;
+        descriptor.size = kOOMSize;
+
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
+        FlushClient();
+    }
+
+    // Check for MapWrite usage.
+    {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.usage = WGPUBufferUsage_MapWrite;
+        descriptor.size = kOOMSize;
+
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
+        FlushClient();
+    }
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapThenDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, this);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
+    SetupBuffer(WGPUMapMode_Read);
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireBufferMappingTests* pTest;
+    WGPUBuffer* pTestBuffer;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestBuffer, nullptr);
+
+    mockBufferMapCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t),
+                           ToMockBufferMapCallback, testData->pTest);
+    }
+}
+
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
+
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback,
+                Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this))
+        .Times(1 + testData.numRequests);
+    wgpuBufferRelease(buffer);
+}
diff --git a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
new file mode 100644
index 0000000..41db091
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
@@ -0,0 +1,376 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    // Mock class to add expectations on the wire calling callbacks
+    class MockCreateComputePipelineAsyncCallback {
+      public:
+        MOCK_METHOD(void,
+                    Call,
+                    (WGPUCreatePipelineAsyncStatus status,
+                     WGPUComputePipeline pipeline,
+                     const char* message,
+                     void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
+        mockCreateComputePipelineAsyncCallback;
+    void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                                  WGPUComputePipeline pipeline,
+                                                  const char* message,
+                                                  void* userdata) {
+        mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
+    }
+
+    class MockCreateRenderPipelineAsyncCallback {
+      public:
+        MOCK_METHOD(void,
+                    Call,
+                    (WGPUCreatePipelineAsyncStatus status,
+                     WGPURenderPipeline pipeline,
+                     const char* message,
+                     void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
+        mockCreateRenderPipelineAsyncCallback;
+    void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                                 WGPURenderPipeline pipeline,
+                                                 const char* message,
+                                                 void* userdata) {
+        mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
+    }
+
+}  // anonymous namespace
+
+class WireCreatePipelineAsyncTest : public WireTest {
+  public:
+    void SetUp() override {
+        WireTest::SetUp();
+
+        mockCreateComputePipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
+        mockCreateRenderPipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+
+        // Delete mock so that expectations are checked
+        mockCreateComputePipelineAsyncCallback = nullptr;
+        mockCreateRenderPipelineAsyncCallback = nullptr;
+    }
+
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
+    }
+
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
+    }
+};
+
+// Test when creating a compute pipeline with CreateComputePipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test when creating a render pipeline with CreateRenderPipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+}
+
+TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = module;
+    pipelineDescriptor.vertex.entryPoint = "main";
+
+    WGPUFragmentState fragment = {};
+    fragment.module = module;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceRelease(device);
+
+    // Expect release on all objects created by the client.
+    Sequence s1, s2;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);
+
+    FlushClient();
+    DefaultApiDeviceWasReleased();
+}
diff --git a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
new file mode 100644
index 0000000..658325c
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
@@ -0,0 +1,58 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireDestroyObjectTests : public WireTest {};
+
+// Test that destroying the device also destroys child objects.
+TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    FlushClient();
+
+    // Release the device. It should cause the command encoder to be destroyed.
+    wgpuDeviceRelease(device);
+
+    Sequence s1, s2;
+    // The device and child objects should be released.
+    EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
+    EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);
+
+    FlushClient();
+
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
+
+    // Using the command encoder should be an error.
+    wgpuCommandEncoderFinish(encoder, nullptr);
+    FlushClient(false);
+}
diff --git a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
new file mode 100644
index 0000000..5ba589c
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
@@ -0,0 +1,174 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/tests/MockCallback.h"
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    class WireDisconnectTests : public WireTest {};
+
+}  // anonymous namespace
+
+// Test that commands are not received if the client disconnects.
+TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
+    // Sanity check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+
+    // Disconnect.
+    GetWireClient()->Disconnect();
+
+    // Command is not received because client disconnected.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
+    FlushClient();
+}
+
+// Test that commands that are serialized before a disconnect but flushed
+// after are received.
+TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
+    // Sanity check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    // Disconnect.
+    GetWireClient()->Disconnect();
+
+    // Already-serialized commmands are still received.
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+}
+
+// Check that disconnecting the wire client calls the device lost callback exacty once.
+TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
+
+    // Disconnect the wire client. We should receive device lost only once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
+    GetWireClient()->Disconnect();
+}
+
+// Check that disconnecting the wire client after a device loss does not trigger the callback again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
+
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "some reason");
+
+    // Flush the device lost return command.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
+        .Times(Exactly(1));
+    FlushServer();
+
+    // Disconnect the client. We shouldn't see the lost callback again.
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    GetWireClient()->Disconnect();
+}
+
+// Check that disconnecting the wire client inside the device loss callback does not trigger the
+// callback again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
+
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
+
+    // Disconnect the client inside the lost callback. We should see the callback
+    // only once.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+            GetWireClient()->Disconnect();
+        }));
+    FlushServer();
+}
+
+// Check that a device loss after a disconnect does not trigger the callback again.
+TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
+
+    // Disconnect the client. We should see the callback once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
+
+    // Lose the device on the server. The client callback shouldn't be
+    // called again.
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    FlushServer();
+}
+
+// Test that client objects are all destroyed if the WireClient is destroyed.
+TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
+    WGPUSamplerDescriptor desc = {};
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuDeviceCreateSampler(device, &desc);
+
+    WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCommandEncoder));
+
+    WGPUSampler apiSampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
+
+    FlushClient();
+
+    DeleteClient();
+
+    // Expect release on all objects created by the client.
+    Sequence s1, s2, s3;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
+    FlushClient();
+
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
+}
diff --git a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
new file mode 100644
index 0000000..a9c5522
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
@@ -0,0 +1,305 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    // Mock classes to add expectations on the wire calling callbacks
+    class MockDeviceErrorCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
+    void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
+        mockDeviceErrorCallback->Call(type, message, userdata);
+    }
+
+    class MockDevicePopErrorScopeCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>> mockDevicePopErrorScopeCallback;
+    void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
+                                           const char* message,
+                                           void* userdata) {
+        mockDevicePopErrorScopeCallback->Call(type, message, userdata);
+    }
+
+    class MockDeviceLoggingCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
+    void ToMockDeviceLoggingCallback(WGPULoggingType type, const char* message, void* userdata) {
+        mockDeviceLoggingCallback->Call(type, message, userdata);
+    }
+
+    class MockDeviceLostCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
+    void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
+                                  const char* message,
+                                  void* userdata) {
+        mockDeviceLostCallback->Call(reason, message, userdata);
+    }
+
+}  // anonymous namespace
+
+class WireErrorCallbackTests : public WireTest {
+  public:
+    WireErrorCallbackTests() {
+    }
+    ~WireErrorCallbackTests() override = default;
+
+    void SetUp() override {
+        WireTest::SetUp();
+
+        mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
+        mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
+        mockDevicePopErrorScopeCallback =
+            std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
+        mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+
+        mockDeviceErrorCallback = nullptr;
+        mockDeviceLoggingCallback = nullptr;
+        mockDevicePopErrorScopeCallback = nullptr;
+        mockDeviceLostCallback = nullptr;
+    }
+
+    void FlushServer() {
+        WireTest::FlushServer();
+
+        Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
+        Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
+    }
+};
+
+// Test the return wire for device error callbacks
+TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
+    wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
+                                                     "Some error message");
+
+    EXPECT_CALL(*mockDeviceErrorCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for device user warning callbacks
+TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
+    wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);
+
+    // Setting the injected warning callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
+
+    EXPECT_CALL(*mockDeviceLoggingCallback, Call(WGPULoggingType_Info, StrEq("Some message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for error scopes.
+TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    WGPUErrorCallback callback;
+    void* userdata;
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+        .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+    callback(WGPUErrorType_Validation, "Some error message", userdata);
+    FlushServer();
+}
+
+// Test the return wire for error scopes when callbacks return in a various orders.
+TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
+    // Two error scopes are popped, and the first one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+        FlushClient();
+
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
+        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
+        FlushClient();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
+            .Times(1);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
+            .Times(1);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
+        FlushServer();
+    }
+
+    // Two error scopes are popped, and the second one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+        FlushClient();
+
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
+        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
+        FlushClient();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
+            .Times(1);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
+            .Times(1);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
+        FlushServer();
+    }
+}
+
+// Test the return wire for error scopes in flight when the device is destroyed.
+TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    // Incomplete callback called in Device destructor. This is resolved after the end of this test.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
+        .Times(1);
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+}
+
+// Empty stack (We are emulating the errors that would be callback-ed from native).
+TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
+        WGPUErrorCallback callback;
+        void* userdata;
+        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        FlushClient();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
+            .Times(1);
+        callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
+        FlushServer();
+}
+
+// Test the return wire for device lost callback
+TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
+    wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "Some error message");
+
+    EXPECT_CALL(*mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
diff --git a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
new file mode 100644
index 0000000..9e1ac58
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
@@ -0,0 +1,241 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireExtensionTests : public WireTest {
+  public:
+    WireExtensionTests() {
+    }
+    ~WireExtensionTests() override = default;
+};
+
+// Serialize/Deserializes a chained struct correctly.
+TEST_F(WireExtensionTests, ChainedStruct) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
+
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt.chain.next = nullptr;
+    clientExt.clampDepth = true;
+
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
+
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
+
+// Serialize/Deserializes multiple chained structs correctly.
+TEST_F(WireExtensionTests, MutlipleChainedStructs) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
+
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt2.chain.next = nullptr;
+    clientExt2.clampDepth = false;
+
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
+
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext1 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext1->clampDepth, true);
+
+                const auto* ext2 =
+                    reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(ext1->chain.next);
+                EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
+                EXPECT_EQ(ext2->clampDepth, false);
+                EXPECT_EQ(ext2->chain.next, nullptr);
+
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext2 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
+                EXPECT_EQ(ext2->clampDepth, false);
+
+                const auto* ext1 =
+                    reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(ext2->chain.next);
+                EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext1->clampDepth, true);
+                EXPECT_EQ(ext1->chain.next, nullptr);
+
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
+
+// Test that a chained struct with Invalid sType passes through as Invalid.
+TEST_F(WireExtensionTests, InvalidSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
+
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_Invalid;
+    clientExt.chain.next = nullptr;
+
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
+
+// Test that a chained struct with unknown sType passes through as Invalid.
+TEST_F(WireExtensionTests, UnknownSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
+
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = static_cast<WGPUSType>(-1);
+    clientExt.chain.next = nullptr;
+
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
+
+// Test that if both an invalid and valid stype are passed on the chain, only the invalid
+// sType passes through as Invalid.
+TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
+
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_Invalid;
+    clientExt2.chain.next = nullptr;
+
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
+
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+
+                EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
+                EXPECT_EQ(ext->chain.next->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
+
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain->next);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
+
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
diff --git a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
new file mode 100644
index 0000000..fd3b258
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
@@ -0,0 +1,272 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireInjectDeviceTests : public WireTest {
+  public:
+    WireInjectDeviceTests() {
+    }
+    ~WireInjectDeviceTests() override = default;
+};
+
+// Test that reserving and injecting a device makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    WGPUBufferDescriptor bufferDesc = {};
+    wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
+    WGPUBuffer serverBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.device, reservation2.device);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectDeviceTests, InjectExistingID) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the server only borrows the device and does a single reference-release
+TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    // Injecting the device adds a reference
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Releasing the device removes a single reference and clears its error callbacks.
+    wgpuDeviceRelease(reservation.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that it is an error to get the primary queue of a device before it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    wgpuDeviceGetQueue(reservation.device);
+    FlushClient(false);
+}
+
+// Test that it is valid to get the primary queue of a device after it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    wgpuDeviceGetQueue(reservation.device);
+
+    WGPUQueue apiQueue = api.GetNewQueue();
+    EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the list of live devices can be reflected using GetDevice.
+TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
+    // Reserve two devices.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    // Inject both devices.
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Test that both devices can be reflected.
+    ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Release the first device
+    wgpuDeviceRelease(reservation1.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // The first device should no longer reflect, but the second should
+    ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// This is a regression test where a second device reservation invalidated pointers into the
+// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
+// objects instead.
+TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
+    // Reserve one device, inject it, and get the primary queue.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUCommandEncoder commandEncoder =
+        wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
+
+    WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
+        .WillOnce(Return(serverCommandEncoder));
+    FlushClient();
+
+    // Reserve a second device, and inject it.
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Release the encoder. This should work without error because it stores a stable
+    // pointer to its device's list of child objects. On destruction, it removes itself from the
+    // list.
+    wgpuCommandEncoderRelease(commandEncoder);
+    EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// Test that a device reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
+        ReservedDevice reservation = GetWireClient()->ReserveDevice();
+        wgpuDeviceRelease(reservation.device);
+        FlushClient(false);
+    }
+
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
+        ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+        GetWireClient()->ReclaimDeviceReservation(reservation1);
+
+        ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
+
+        // No errors should occur.
+        FlushClient();
+    }
+}
diff --git a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
new file mode 100644
index 0000000..eef7671
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
@@ -0,0 +1,119 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    class WireInjectInstanceTests : public WireTest {
+      public:
+        WireInjectInstanceTests() {
+        }
+        ~WireInjectInstanceTests() override = default;
+    };
+
+    // Test that reserving and injecting an instance makes calls on the client object forward to the
+    // server object correctly.
+    TEST_F(WireInjectInstanceTests, CallAfterReserveInject) {
+        ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+        WGPUInstance serverInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(serverInstance));
+        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
+                                                    reservation.generation));
+
+        WGPUSurfaceDescriptor surfaceDesc = {};
+        wgpuInstanceCreateSurface(reservation.instance, &surfaceDesc);
+        WGPUSurface serverSurface = api.GetNewSurface();
+        EXPECT_CALL(api, InstanceCreateSurface(serverInstance, NotNull()))
+            .WillOnce(Return(serverSurface));
+        FlushClient();
+    }
+
+    // Test that reserve correctly returns different IDs each time.
+    TEST_F(WireInjectInstanceTests, ReserveDifferentIDs) {
+        ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+        ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
+
+        ASSERT_NE(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.instance, reservation2.instance);
+    }
+
+    // Test that injecting the same id fails.
+    TEST_F(WireInjectInstanceTests, InjectExistingID) {
+        ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+        WGPUInstance serverInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(serverInstance));
+        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
+                                                    reservation.generation));
+
+        // ID already in use, call fails.
+        ASSERT_FALSE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
+                                                     reservation.generation));
+    }
+
+    // Test that the server only borrows the instance and does a single reference-release
+    TEST_F(WireInjectInstanceTests, InjectedInstanceLifetime) {
+        ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+        // Injecting the instance adds a reference
+        WGPUInstance serverInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(serverInstance));
+        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
+                                                    reservation.generation));
+
+        // Releasing the instance removes a single reference.
+        wgpuInstanceRelease(reservation.instance);
+        EXPECT_CALL(api, InstanceRelease(serverInstance));
+        FlushClient();
+
+        // Deleting the server doesn't release a second reference.
+        DeleteServer();
+        Mock::VerifyAndClearExpectations(&api);
+    }
+
+    // Test that a device reservation can be reclaimed. This is necessary to
+    // avoid leaking ObjectIDs for reservations that are never injected.
+    TEST_F(WireInjectInstanceTests, ReclaimInstanceReservation) {
+        // Test that doing a reservation and full release is an error.
+        {
+            ReservedInstance reservation = GetWireClient()->ReserveInstance();
+            wgpuInstanceRelease(reservation.instance);
+            FlushClient(false);
+        }
+
+        // Test that doing a reservation and then reclaiming it recycles the ID.
+        {
+            ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+            GetWireClient()->ReclaimInstanceReservation(reservation1);
+
+            ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
+
+            // The ID is the same, but the generation is still different.
+            ASSERT_EQ(reservation1.id, reservation2.id);
+            ASSERT_NE(reservation1.generation, reservation2.generation);
+
+            // No errors should occur.
+            FlushClient();
+        }
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
new file mode 100644
index 0000000..5bcc8a6
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
@@ -0,0 +1,116 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireInjectSwapChainTests : public WireTest {
+  public:
+    WireInjectSwapChainTests() {
+    }
+    ~WireInjectSwapChainTests() override = default;
+};
+
+// Test that reserving and injecting a swapchain makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    wgpuSwapChainPresent(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
+    ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+    ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectSwapChainTests, InjectExistingID) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                  reservation.generation, reservation.deviceId,
+                                                  reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the swapchain and does a single reference-release
+TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    // Injecting the swapchain adds a reference
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // Releasing the swapchain removes a single reference.
+    wgpuSwapChainRelease(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a swapchain reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
+        ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+        wgpuSwapChainRelease(reservation.swapchain);
+        FlushClient(false);
+    }
+
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
+        ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+        GetWireClient()->ReclaimSwapChainReservation(reservation1);
+
+        ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
+
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
+
+        // No errors should occur.
+        FlushClient();
+    }
+}
diff --git a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
new file mode 100644
index 0000000..991050b
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
@@ -0,0 +1,114 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireInjectTextureTests : public WireTest {
+  public:
+    WireInjectTextureTests() {
+    }
+    ~WireInjectTextureTests() override = default;
+};
+
+// Test that reserving and injecting a texture makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    wgpuTextureCreateView(reservation.texture, nullptr);
+    WGPUTextureView apiDummyView = api.GetNewTextureView();
+    EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiDummyView));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
+    ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+    ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.texture, reservation2.texture);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectTextureTests, InjectExistingID) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                                reservation.deviceId,
+                                                reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the texture and does a single reference-release
+TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    // Injecting the texture adds a reference
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // Releasing the texture removes a single reference.
+    wgpuTextureRelease(reservation.texture);
+    EXPECT_CALL(api, TextureRelease(apiTexture));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a texture reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
+        ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+        wgpuTextureRelease(reservation.texture);
+        FlushClient(false);
+    }
+
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
+        ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+        GetWireClient()->ReclaimTextureReservation(reservation1);
+
+        ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
+
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
+
+        // No errors should occur.
+        FlushClient();
+    }
+}
diff --git a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
new file mode 100644
index 0000000..ab6ec71
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
@@ -0,0 +1,286 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <webgpu/webgpu_cpp.h>
+#include <unordered_set>
+#include <vector>
+
+namespace {
+
+    using namespace testing;
+    using namespace dawn::wire;
+
+    class WireInstanceBasicTest : public WireTest {};
+    class WireInstanceTests : public WireTest {
+      protected:
+        void SetUp() override {
+            WireTest::SetUp();
+
+            auto reservation = GetWireClient()->ReserveInstance();
+            instance = wgpu::Instance::Acquire(reservation.instance);
+
+            apiInstance = api.GetNewInstance();
+            EXPECT_CALL(api, InstanceReference(apiInstance));
+            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
+                                                        reservation.generation));
+        }
+
+        void TearDown() override {
+            instance = nullptr;
+            WireTest::TearDown();
+        }
+
+        wgpu::Instance instance;
+        WGPUInstance apiInstance;
+    };
+
+    // Test that an Instance can be reserved and injected into the wire.
+    TEST_F(WireInstanceBasicTest, ReserveAndInject) {
+        auto reservation = GetWireClient()->ReserveInstance();
+        wgpu::Instance instance = wgpu::Instance::Acquire(reservation.instance);
+
+        WGPUInstance apiInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(apiInstance));
+        EXPECT_TRUE(
+            GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
+
+        instance = nullptr;
+
+        EXPECT_CALL(api, InstanceRelease(apiInstance));
+        FlushClient();
+    }
+
+    // Test that RequestAdapterOptions are passed from the client to the server.
+    TEST_F(WireInstanceTests, RequestAdapterPassesOptions) {
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+
+        for (wgpu::PowerPreference powerPreference :
+             {wgpu::PowerPreference::LowPower, wgpu::PowerPreference::HighPerformance}) {
+            wgpu::RequestAdapterOptions options = {};
+            options.powerPreference = powerPreference;
+
+            instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](const WGPURequestAdapterOptions* apiOptions) {
+                    EXPECT_EQ(apiOptions->powerPreference,
+                              static_cast<WGPUPowerPreference>(options.powerPreference));
+                    EXPECT_EQ(apiOptions->forceFallbackAdapter, options.forceFallbackAdapter);
+                })));
+            FlushClient();
+        }
+
+        // Delete the instance now, or it'll call the mock callback after it's deleted.
+        instance = nullptr;
+    }
+
+    // Test that RequestAdapter forwards the adapter information to the client.
+    TEST_F(WireInstanceTests, RequestAdapterSuccess) {
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        wgpu::AdapterProperties fakeProperties = {};
+        fakeProperties.vendorID = 0x134;
+        fakeProperties.deviceID = 0x918;
+        fakeProperties.name = "fake adapter";
+        fakeProperties.driverDescription = "hello world";
+        fakeProperties.backendType = wgpu::BackendType::D3D12;
+        fakeProperties.adapterType = wgpu::AdapterType::IntegratedGPU;
+
+        wgpu::SupportedLimits fakeLimits = {};
+        fakeLimits.limits.maxTextureDimension1D = 433;
+        fakeLimits.limits.maxVertexAttributes = 1243;
+
+        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+            wgpu::FeatureName::Depth32FloatStencil8,
+            wgpu::FeatureName::TextureCompressionBC,
+        };
+
+        // Expect the server to receive the message. Then, mock a fake reply.
+        WGPUAdapter apiAdapter = api.GetNewAdapter();
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                    .WillOnce(SetArgPointee<1>(
+                        *reinterpret_cast<WGPUAdapterProperties*>(&fakeProperties)));
+
+                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                        return true;
+                    })));
+
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                    .WillOnce(Return(fakeFeatures.size()));
+
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                        for (wgpu::FeatureName feature : fakeFeatures) {
+                            *(features++) = static_cast<WGPUFeatureName>(feature);
+                        }
+                        return fakeFeatures.size();
+                    })));
+                api.CallInstanceRequestAdapterCallback(
+                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
+            }));
+        FlushClient();
+
+        // Expect the callback in the client and all the adapter information to match.
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+
+                wgpu::AdapterProperties properties;
+                adapter.GetProperties(&properties);
+                EXPECT_EQ(properties.vendorID, fakeProperties.vendorID);
+                EXPECT_EQ(properties.deviceID, fakeProperties.deviceID);
+                EXPECT_STREQ(properties.name, fakeProperties.name);
+                EXPECT_STREQ(properties.driverDescription, fakeProperties.driverDescription);
+                EXPECT_EQ(properties.backendType, fakeProperties.backendType);
+                EXPECT_EQ(properties.adapterType, fakeProperties.adapterType);
+
+                wgpu::SupportedLimits limits;
+                EXPECT_TRUE(adapter.GetLimits(&limits));
+                EXPECT_EQ(limits.limits.maxTextureDimension1D,
+                          fakeLimits.limits.maxTextureDimension1D);
+                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+
+                std::vector<wgpu::FeatureName> features;
+                features.resize(adapter.EnumerateFeatures(nullptr));
+                ASSERT_EQ(features.size(), fakeFeatures.size());
+                EXPECT_EQ(adapter.EnumerateFeatures(&features[0]), features.size());
+
+                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+                for (wgpu::FeatureName feature : features) {
+                    EXPECT_EQ(featureSet.erase(feature), 1u);
+                }
+            })));
+        FlushServer();
+    }
+
+    // Test that features returned by the implementation that aren't supported
+    // in the wire are not exposed.
+    TEST_F(WireInstanceTests, RequestAdapterWireLacksFeatureSupport) {
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+            wgpu::FeatureName::Depth24UnormStencil8,
+            // Some value that is not a valid feature
+            static_cast<wgpu::FeatureName>(-2),
+        };
+
+        // Expect the server to receive the message. Then, mock a fake reply.
+        WGPUAdapter apiAdapter = api.GetNewAdapter();
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                        *properties = {};
+                        properties->name = "";
+                        properties->driverDescription = "";
+                    })));
+
+                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                        *limits = {};
+                        return true;
+                    })));
+
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                    .WillOnce(Return(fakeFeatures.size()));
+
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                        for (wgpu::FeatureName feature : fakeFeatures) {
+                            *(features++) = static_cast<WGPUFeatureName>(feature);
+                        }
+                        return fakeFeatures.size();
+                    })));
+                api.CallInstanceRequestAdapterCallback(
+                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
+            }));
+        FlushClient();
+
+        // Expect the callback in the client and all the adapter information to match.
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+
+                wgpu::FeatureName feature;
+                ASSERT_EQ(adapter.EnumerateFeatures(nullptr), 1u);
+                adapter.EnumerateFeatures(&feature);
+
+                EXPECT_EQ(feature, wgpu::FeatureName::Depth24UnormStencil8);
+            })));
+        FlushServer();
+    }
+
+    // Test that RequestAdapter errors forward to the client.
+    TEST_F(WireInstanceTests, RequestAdapterError) {
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        // Expect the server to receive the message. Then, mock an error.
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+            .WillOnce(InvokeWithoutArgs([&]() {
+                api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Error,
+                                                       nullptr, "Some error");
+            }));
+        FlushClient();
+
+        // Expect the callback in the client.
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Error, nullptr, StrEq("Some error"), this))
+            .Times(1);
+        FlushServer();
+    }
+
+    // Test that RequestAdapter receives unknown status if the instance is deleted
+    // before the callback happens.
+    TEST_F(WireInstanceTests, RequestAdapterInstanceDestroyedBeforeCallback) {
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+        instance = nullptr;
+    }
+
+    // Test that RequestAdapter receives unknown status if the wire is disconnected
+    // before the callback happens.
+    TEST_F(WireInstanceTests, RequestAdapterWireDisconnectBeforeCallback) {
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
+        auto* userdata = cb.MakeUserdata(this);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+        GetWireClient()->Disconnect();
+    }
+
+}  // anonymous namespace
diff --git a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
new file mode 100644
index 0000000..530e31d
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
@@ -0,0 +1,1052 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/client/ClientMemoryTransferService_mock.h"
+#include "dawn/wire/server/ServerMemoryTransferService_mock.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    // Mock class to add expectations on the wire calling callbacks
+    class MockBufferMapCallback {
+      public:
+        MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+    void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+        mockBufferMapCallback->Call(status, userdata);
+    }
+
+}  // anonymous namespace
+
+// WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping.
+// They test the basic success and error cases for buffer mapping, and they test
+// mocked failures of each fallible MemoryTransferService method that an embedder
+// could implement.
+// The test harness defines multiple helpers for expecting operations on Read/Write handles
+// and for mocking failures. The helpers are designed such that for a given run of a test,
+// a Serialization expection has a corresponding Deserialization expectation for which the
+// serialized data must match.
+// There are tests which check for Success for every mapping operation which mock an entire mapping
+// operation from map to unmap, and add all MemoryTransferService expectations.
+// Tests which check for errors perform the same mapping operations but insert mocked failures for
+// various mapping or MemoryTransferService operations.
+class WireMemoryTransferServiceTests : public WireTest {
+  public:
+    WireMemoryTransferServiceTests() {
+    }
+    ~WireMemoryTransferServiceTests() override = default;
+
+    client::MemoryTransferService* GetClientMemoryTransferService() override {
+        return &clientMemoryTransferService;
+    }
+
+    server::MemoryTransferService* GetServerMemoryTransferService() override {
+        return &serverMemoryTransferService;
+    }
+
+    void SetUp() override {
+        WireTest::SetUp();
+
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
+
+        // TODO(enga): Make this thread-safe.
+        mBufferContent++;
+        mMappedBufferContent = 0;
+        mUpdatedBufferContent++;
+        mSerializeCreateInfo++;
+        mReadHandleSerializeDataInfo++;
+        mWriteHandleSerializeDataInfo++;
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
+    }
+
+    void FlushClient(bool success = true) {
+        WireTest::FlushClient(success);
+        Mock::VerifyAndClearExpectations(&serverMemoryTransferService);
+    }
+
+    void FlushServer(bool success = true) {
+        WireTest::FlushServer(success);
+
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
+        Mock::VerifyAndClearExpectations(&clientMemoryTransferService);
+    }
+
+  protected:
+    using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle;
+    using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle;
+    using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
+    using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
+
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(WGPUBufferUsage usage = WGPUBufferUsage_None) {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.size = kBufferSize;
+        descriptor.usage = usage;
+
+        WGPUBuffer apiBuffer = api.GetNewBuffer();
+        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+
+        return std::make_pair(apiBuffer, buffer);
+    }
+
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBufferMapped(
+        WGPUBufferUsage usage = WGPUBufferUsage_None) {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.size = sizeof(mBufferContent);
+        descriptor.mappedAtCreation = true;
+        descriptor.usage = usage;
+
+        WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent)))
+            .WillOnce(Return(&mMappedBufferContent));
+
+        return std::make_pair(apiBuffer, buffer);
+    }
+
+    ClientReadHandle* ExpectReadHandleCreation() {
+        // Create the handle first so we can use it in later expectations.
+        ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle();
+
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
+
+        return handle;
+    }
+
+    void MockReadHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
+    }
+
+    void ExpectReadHandleSerialization(ClientReadHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
+
+    ServerReadHandle* ExpectServerReadHandleDeserialize() {
+        // Create the handle first so we can use it in later expectations.
+        ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle();
+
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) {
+                *readHandle = handle;
+                return true;
+            }));
+
+        return handle;
+    }
+
+    void MockServerReadHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(InvokeWithoutArgs([&]() { return false; }));
+    }
+
+    void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
+            .WillOnce(WithArg<4>([&](void* serializePointer) {
+                memcpy(serializePointer, &mReadHandleSerializeDataInfo,
+                       sizeof(mReadHandleSerializeDataInfo));
+                return sizeof(mReadHandleSerializeDataInfo);
+            }));
+    }
+
+    void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle,
+                                                     uint32_t* mappedData) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) {
+        // Create the handle first so we can use it in later expectations.
+        ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle();
+
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
+        if (mappedAtCreation) {
+            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle))
+                .WillOnce(Return(&mBufferContent));
+        }
+
+        return handle;
+    }
+
+    void MockWriteHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
+    }
+
+    void ExpectWriteHandleSerialization(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
+
+    ServerWriteHandle* ExpectServerWriteHandleDeserialization() {
+        // Create the handle first so it can be used in later expectations.
+        ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle();
+
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) {
+                *writeHandle = handle;
+                return true;
+            }));
+
+        return handle;
+    }
+
+    void MockServerWriteHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(Return(false));
+    }
+
+    void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService,
+                    OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeDataUpdate(handle, _, _, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
+                       sizeof(mWriteHandleSerializeDataInfo));
+                return sizeof(mWriteHandleSerializeDataInfo);
+            }));
+    }
+
+    void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
+                                                      uint32_t expectedData) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    // Arbitrary values used within tests to check if serialized data is correctly passed
+    // between the client and server. The static data changes between runs of the tests and
+    // test expectations will check that serialized values are passed to the respective
+    // deserialization function.
+    static uint32_t mSerializeCreateInfo;
+    static uint32_t mReadHandleSerializeDataInfo;
+    static uint32_t mWriteHandleSerializeDataInfo;
+
+    // Represents the buffer contents for the test.
+    static uint32_t mBufferContent;
+
+    static constexpr size_t kBufferSize = sizeof(mBufferContent);
+
+    // The client's zero-initialized buffer for writing.
+    uint32_t mMappedBufferContent = 0;
+
+    // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client
+    // performs a write. Test expectations should check that |mBufferContent ==
+    // mUpdatedBufferContent| after all writes are flushed.
+    static uint32_t mUpdatedBufferContent;
+
+    testing::StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
+    testing::StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
+};
+
+uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
+uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349;
+uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242;
+uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394;
+uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235;
+
+// Test successful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the ReadHandle from the client.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a failed callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) {
+    // Mock a ReadHandle creation failure
+    MockReadHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapRead;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapRead DeserializeReadHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading..
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // Mock a Deserialization failure.
+    MockServerReadHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test read handle DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client should receive the handle data update message from the server.
+    // Mock a deserialization failure.
+    MockClientReadHandleDeserializeDataUpdateFailure(clientHandle);
+
+    // Failed deserialization is a fatal failure and the client synchronously receives a
+    // DEVICE_LOST callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1);
+
+    FlushServer(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mapping for reading destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading..
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
+    }
+}
+
+// Test successful mapping for writing.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize data update and destroy the handle on Unmap()
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful MapWrite.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock an error callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapWrite;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapWrite DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // Mock a deserialization failure.
+    MockServerWriteHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test MapWrite DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a success callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize data update
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test MapWrite destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        // The handle is destroyed once the buffer is destroyed.
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
+    }
+}
+
+// Test successful buffer creation with mappedAtCreation = true.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    // After the handle is updated it can be destroyed.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test buffer creation with mappedAtCreation WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent)));
+}
+
+// Test buffer creation with mappedAtCreation DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    MockServerWriteHandleDeserializeFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    // Now bufferGetMappedRange won't be called if deserialize writeHandle fails
+
+    FlushClient(false);
+
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed.
+    // The server side writeHandle is destructed at buffer destruction.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mappedAtCreation=true destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
+    }
+}
+
+// Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch
+// data pointer to ReadHandle
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
+    // The client should create and serialize a ReadHandle and a WriteHandle on createBufferMapped.
+    ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientReadHandle);
+    ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientWriteHandle);
+
+    // The server should then deserialize a ReadHandle and a WriteHandle from the client.
+    ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize();
+    ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1);
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle))
+        .WillOnce(Return(&mBufferContent));
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1);
+    FlushClient();
+
+    // The ReadHandle will be destoryed on buffer destroy.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1);
+}
+
+// Test WriteHandle preserves after unmap for a buffer with mappedAtCreation and MapWrite usage
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
diff --git a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
new file mode 100644
index 0000000..ca68cdd
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
@@ -0,0 +1,179 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class WireOptionalTests : public WireTest {
+  public:
+    WireOptionalTests() {
+    }
+    ~WireOptionalTests() override = default;
+};
+
+// Test passing nullptr instead of objects - object as value version
+TEST_F(WireOptionalTests, OptionalObjectValue) {
+    WGPUBindGroupLayoutDescriptor bglDesc = {};
+    bglDesc.entryCount = 0;
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
+
+    WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
+        .WillOnce(Return(apiBindGroupLayout));
+
+    // The `sampler`, `textureView` and `buffer` members of a binding are optional.
+    WGPUBindGroupEntry entry;
+    entry.binding = 0;
+    entry.sampler = nullptr;
+    entry.textureView = nullptr;
+    entry.buffer = nullptr;
+    entry.nextInChain = nullptr;
+
+    WGPUBindGroupDescriptor bgDesc = {};
+    bgDesc.layout = bgl;
+    bgDesc.entryCount = 1;
+    bgDesc.entries = &entry;
+
+    wgpuDeviceCreateBindGroup(device, &bgDesc);
+
+    WGPUBindGroup apiDummyBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(
+                         apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr && desc->entryCount == 1 &&
+                                    desc->entries[0].binding == 0 &&
+                                    desc->entries[0].sampler == nullptr &&
+                                    desc->entries[0].buffer == nullptr &&
+                                    desc->entries[0].textureView == nullptr;
+                         })))
+        .WillOnce(Return(apiDummyBindGroup));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send optional pointers to structures
+TEST_F(WireOptionalTests, OptionalStructPointer) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
+
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
+
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
+
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
+
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
+
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+
+    // First case: depthStencil is not null.
+    pipelineDescriptor.depthStencil = &depthStencilState;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+
+    WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(
+        api,
+        DeviceCreateRenderPipeline(
+            apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                return desc->depthStencil != nullptr &&
+                       desc->depthStencil->nextInChain == nullptr &&
+                       desc->depthStencil->depthWriteEnabled == false &&
+                       desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilReadMask == 0xff &&
+                       desc->depthStencil->stencilWriteMask == 0xff &&
+                       desc->depthStencil->depthBias == 0 &&
+                       desc->depthStencil->depthBiasSlopeScale == 0.0 &&
+                       desc->depthStencil->depthBiasClamp == 0.0;
+            })))
+        .WillOnce(Return(apiDummyPipeline));
+
+    FlushClient();
+
+    // Second case: depthStencil is null.
+    pipelineDescriptor.depthStencil = nullptr;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->depthStencil == nullptr;
+                    })))
+        .WillOnce(Return(apiDummyPipeline));
+
+    FlushClient();
+}
diff --git a/src/dawn/tests/unittests/wire/WireQueueTests.cpp b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
new file mode 100644
index 0000000..31b9c1f
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
@@ -0,0 +1,140 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
+
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class WireQueueTests : public WireTest {
+  protected:
+    void SetUp() override {
+        WireTest::SetUp();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+        mockQueueWorkDoneCallback = nullptr;
+    }
+
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
+    }
+};
+
+// Test that a successful OnSubmittedWorkDone call is forwarded to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+    FlushServer();
+}
+
+// Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
+    FlushServer();
+}
+
+// Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireQueueTests* pTest;
+    WGPUQueue* pTestQueue;
+    size_t numRequests;
+};
+
+static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestQueue, nullptr);
+    mockQueueWorkDoneCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
+                                     testData->pTest);
+    }
+}
+
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &queue, 10};
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
+
+// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
+// since it is always destructed after the test TearDown, and we cannot create a new queue obj
+// with wgpuDeviceGetQueue
diff --git a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
new file mode 100644
index 0000000..9ffa624
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
@@ -0,0 +1,235 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/wire/WireClient.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+namespace {
+
+    // Mock class to add expectations on the wire calling callbacks
+    class MockCompilationInfoCallback {
+      public:
+        MOCK_METHOD(void,
+                    Call,
+                    (WGPUCompilationInfoRequestStatus status,
+                     const WGPUCompilationInfo* info,
+                     void* userdata));
+    };
+
+    std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
+    void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
+                                          const WGPUCompilationInfo* info,
+                                          void* userdata) {
+        mockCompilationInfoCallback->Call(status, info, userdata);
+    }
+
+}  // anonymous namespace
+
+class WireShaderModuleTests : public WireTest {
+  public:
+    WireShaderModuleTests() {
+    }
+    ~WireShaderModuleTests() override = default;
+
+    void SetUp() override {
+        WireTest::SetUp();
+
+        mockCompilationInfoCallback = std::make_unique<StrictMock<MockCompilationInfoCallback>>();
+        apiShaderModule = api.GetNewShaderModule();
+
+        WGPUShaderModuleDescriptor descriptor = {};
+        shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
+            .WillOnce(Return(apiShaderModule))
+            .RetiresOnSaturation();
+        FlushClient();
+    }
+
+    void TearDown() override {
+        WireTest::TearDown();
+
+        // Delete mock so that expectations are checked
+        mockCompilationInfoCallback = nullptr;
+    }
+
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
+    }
+
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
+    }
+
+  protected:
+    WGPUShaderModule shaderModule;
+    WGPUShaderModule apiShaderModule;
+};
+
+// Check getting CompilationInfo for a successfully created shader module
+TEST_F(WireShaderModuleTests, GetCompilationInfo) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Success,
+                     MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
+                         if (info->messageCount != compilationInfo.messageCount) {
+                             return false;
+                         }
+                         const WGPUCompilationMessage* infoMessage = &info->messages[0];
+                         return strcmp(infoMessage->message, message.message) == 0 &&
+                                infoMessage->nextInChain == message.nextInChain &&
+                                infoMessage->type == message.type &&
+                                infoMessage->lineNum == message.lineNum &&
+                                infoMessage->linePos == message.linePos &&
+                                infoMessage->offset == message.offset &&
+                                infoMessage->length == message.length;
+                     }),
+                     _))
+        .Times(1);
+    FlushServer();
+}
+
+// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a device
+// loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    GetWireClient()->Disconnect();
+}
+
+// Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
+// device loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
+    GetWireClient()->Disconnect();
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireShaderModuleTests* pTest;
+    WGPUShaderModule* pTestShaderModule;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
+                                                   const WGPUCompilationInfo* info,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestShaderModule, nullptr);
+
+    mockCompilationInfoCallback->Call(status, info, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
+                                           ToMockGetCompilationInfoCallback, nullptr);
+    }
+}
+
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &shaderModule, 10};
+
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
+
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &shaderModule, 10};
+
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
+        .Times(1 + testData.numRequests);
+    wgpuShaderModuleRelease(shaderModule);
+}
diff --git a/src/dawn/tests/unittests/wire/WireTest.cpp b/src/dawn/tests/unittests/wire/WireTest.cpp
new file mode 100644
index 0000000..cba143e
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireTest.cpp
@@ -0,0 +1,153 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/unittests/wire/WireTest.h"
+
+#include "dawn/dawn_proc.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+using namespace testing;
+using namespace dawn::wire;
+
+WireTest::WireTest() {
+}
+
+WireTest::~WireTest() {
+}
+
+client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
+    return nullptr;
+}
+
+server::MemoryTransferService* WireTest::GetServerMemoryTransferService() {
+    return nullptr;
+}
+
+void WireTest::SetUp() {
+    DawnProcTable mockProcs;
+    api.GetProcTable(&mockProcs);
+    WGPUDevice mockDevice = api.GetNewDevice();
+
+    // This SetCallback call cannot be ignored because it is done as soon as we start the server
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(_, _, _)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(_, _, _)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(_, _, _)).Times(Exactly(1));
+    SetupIgnoredCallExpectations();
+
+    mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
+    mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>(mWireServer.get());
+
+    WireServerDescriptor serverDesc = {};
+    serverDesc.procs = &mockProcs;
+    serverDesc.serializer = mS2cBuf.get();
+    serverDesc.memoryTransferService = GetServerMemoryTransferService();
+
+    mWireServer.reset(new WireServer(serverDesc));
+    mC2sBuf->SetHandler(mWireServer.get());
+
+    WireClientDescriptor clientDesc = {};
+    clientDesc.serializer = mC2sBuf.get();
+    clientDesc.memoryTransferService = GetClientMemoryTransferService();
+
+    mWireClient.reset(new WireClient(clientDesc));
+    mS2cBuf->SetHandler(mWireClient.get());
+
+    dawnProcSetProcs(&dawn::wire::client::GetProcs());
+
+    auto deviceReservation = mWireClient->ReserveDevice();
+    EXPECT_CALL(api, DeviceReference(mockDevice));
+    mWireServer->InjectDevice(mockDevice, deviceReservation.id, deviceReservation.generation);
+
+    device = deviceReservation.device;
+    apiDevice = mockDevice;
+
+    // The GetQueue is done on WireClient startup so we expect it now.
+    queue = wgpuDeviceGetQueue(device);
+    apiQueue = api.GetNewQueue();
+    EXPECT_CALL(api, DeviceGetQueue(apiDevice)).WillOnce(Return(apiQueue));
+    FlushClient();
+}
+
+void WireTest::TearDown() {
+    dawnProcSetProcs(nullptr);
+
+    // Derived classes should call the base TearDown() first. The client must
+    // be reset before any mocks are deleted.
+    // Incomplete client callbacks will be called on deletion, so the mocks
+    // cannot be null.
+    api.IgnoreAllReleaseCalls();
+    mWireClient = nullptr;
+
+    if (mWireServer && apiDevice) {
+        // These are called on server destruction to clear the callbacks. They must not be
+        // called after the server is destroyed.
+        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+            .Times(Exactly(1));
+        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(Exactly(1));
+        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+            .Times(Exactly(1));
+    }
+    mWireServer = nullptr;
+}
+
+// This should be called if |apiDevice| is no longer exists on the wire.
+// This signals that expectations in |TearDowb| shouldn't be added.
+void WireTest::DefaultApiDeviceWasReleased() {
+    apiDevice = nullptr;
+}
+
+void WireTest::FlushClient(bool success) {
+    ASSERT_EQ(mC2sBuf->Flush(), success);
+
+    Mock::VerifyAndClearExpectations(&api);
+    SetupIgnoredCallExpectations();
+}
+
+void WireTest::FlushServer(bool success) {
+    ASSERT_EQ(mS2cBuf->Flush(), success);
+}
+
+dawn::wire::WireServer* WireTest::GetWireServer() {
+    return mWireServer.get();
+}
+
+dawn::wire::WireClient* WireTest::GetWireClient() {
+    return mWireClient.get();
+}
+
+void WireTest::DeleteServer() {
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1);
+
+    if (mWireServer) {
+        // These are called on server destruction to clear the callbacks. They must not be
+        // called after the server is destroyed.
+        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+            .Times(Exactly(1));
+        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(Exactly(1));
+        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+            .Times(Exactly(1));
+    }
+    mWireServer = nullptr;
+}
+
+void WireTest::DeleteClient() {
+    mWireClient = nullptr;
+}
+
+void WireTest::SetupIgnoredCallExpectations() {
+    EXPECT_CALL(api, DeviceTick(_)).Times(AnyNumber());
+}
diff --git a/src/dawn/tests/unittests/wire/WireTest.h b/src/dawn/tests/unittests/wire/WireTest.h
new file mode 100644
index 0000000..235dcf5
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireTest.h
@@ -0,0 +1,150 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/mock_webgpu.h"
+#include "gtest/gtest.h"
+
+#include <memory>
+
+// Definition of a "Lambda predicate matcher" for GMock to allow checking deep structures
+// are passed correctly by the wire.
+
+// Helper templates to extract the argument type of a lambda.
+template <typename T>
+struct MatcherMethodArgument;
+
+template <typename Lambda, typename Arg>
+struct MatcherMethodArgument<bool (Lambda::*)(Arg) const> {
+    using Type = Arg;
+};
+
+template <typename Lambda>
+using MatcherLambdaArgument = typename MatcherMethodArgument<decltype(&Lambda::operator())>::Type;
+
+// The matcher itself, unfortunately it isn't able to return detailed information like other
+// matchers do.
+template <typename Lambda, typename Arg>
+class LambdaMatcherImpl : public testing::MatcherInterface<Arg> {
+  public:
+    explicit LambdaMatcherImpl(Lambda lambda) : mLambda(lambda) {
+    }
+
+    void DescribeTo(std::ostream* os) const override {
+        *os << "with a custom matcher";
+    }
+
+    bool MatchAndExplain(Arg value, testing::MatchResultListener* listener) const override {
+        if (!mLambda(value)) {
+            *listener << "which doesn't satisfy the custom predicate";
+            return false;
+        }
+        return true;
+    }
+
+  private:
+    Lambda mLambda;
+};
+
+// Use the MatchesLambda as follows:
+//
+//   EXPECT_CALL(foo, Bar(MatchesLambda([](ArgType arg) -> bool {
+//       return CheckPredicateOnArg(arg);
+//   })));
+template <typename Lambda>
+inline testing::Matcher<MatcherLambdaArgument<Lambda>> MatchesLambda(Lambda lambda) {
+    return MakeMatcher(new LambdaMatcherImpl<Lambda, MatcherLambdaArgument<Lambda>>(lambda));
+}
+
+class StringMessageMatcher : public testing::MatcherInterface<const char*> {
+  public:
+    explicit StringMessageMatcher() {
+    }
+
+    bool MatchAndExplain(const char* message,
+                         testing::MatchResultListener* listener) const override {
+        if (message == nullptr) {
+            *listener << "missing error message";
+            return false;
+        }
+        if (std::strlen(message) <= 1) {
+            *listener << "message is truncated";
+            return false;
+        }
+        return true;
+    }
+
+    void DescribeTo(std::ostream* os) const override {
+        *os << "valid error message";
+    }
+
+    void DescribeNegationTo(std::ostream* os) const override {
+        *os << "invalid error message";
+    }
+};
+
+inline testing::Matcher<const char*> ValidStringMessage() {
+    return MakeMatcher(new StringMessageMatcher());
+}
+
+namespace dawn::wire {
+    class WireClient;
+    class WireServer;
+    namespace client {
+        class MemoryTransferService;
+    }  // namespace client
+    namespace server {
+        class MemoryTransferService;
+    }  // namespace server
+}  // namespace dawn::wire
+
+namespace utils {
+    class TerribleCommandBuffer;
+}
+
+class WireTest : public testing::Test {
+  protected:
+    WireTest();
+    ~WireTest() override;
+
+    void SetUp() override;
+    void TearDown() override;
+
+    void FlushClient(bool success = true);
+    void FlushServer(bool success = true);
+
+    void DefaultApiDeviceWasReleased();
+
+    testing::StrictMock<MockProcTable> api;
+    WGPUDevice apiDevice;
+    WGPUQueue apiQueue;
+    WGPUDevice device;
+    WGPUQueue queue;
+
+    dawn::wire::WireServer* GetWireServer();
+    dawn::wire::WireClient* GetWireClient();
+
+    void DeleteServer();
+    void DeleteClient();
+
+  private:
+    void SetupIgnoredCallExpectations();
+
+    virtual dawn::wire::client::MemoryTransferService* GetClientMemoryTransferService();
+    virtual dawn::wire::server::MemoryTransferService* GetServerMemoryTransferService();
+
+    std::unique_ptr<dawn::wire::WireServer> mWireServer;
+    std::unique_ptr<dawn::wire::WireClient> mWireClient;
+    std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
+    std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+};
diff --git a/src/dawn/tests/unittests/wire/WireWGPUDevicePropertiesTests.cpp b/src/dawn/tests/unittests/wire/WireWGPUDevicePropertiesTests.cpp
new file mode 100644
index 0000000..9f7cdf5
--- /dev/null
+++ b/src/dawn/tests/unittests/wire/WireWGPUDevicePropertiesTests.cpp
@@ -0,0 +1,57 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/Wire.h"
+#include "gtest/gtest.h"
+
+#include <vector>
+
+class WireWGPUDevicePropertiesTests : public testing::Test {};
+
+// Test that the serialization and deserialization of WGPUDeviceProperties can work correctly.
+TEST_F(WireWGPUDevicePropertiesTests, SerializeWGPUDeviceProperties) {
+    WGPUDeviceProperties sentWGPUDeviceProperties = {};
+    sentWGPUDeviceProperties.textureCompressionBC = true;
+    // Set false to test that the serialization can handle both true and false correctly.
+    sentWGPUDeviceProperties.pipelineStatisticsQuery = false;
+    sentWGPUDeviceProperties.timestampQuery = true;
+
+    size_t sentWGPUDevicePropertiesSize =
+        dawn::wire::SerializedWGPUDevicePropertiesSize(&sentWGPUDeviceProperties);
+    std::vector<char> buffer;
+    buffer.resize(sentWGPUDevicePropertiesSize);
+    dawn::wire::SerializeWGPUDeviceProperties(&sentWGPUDeviceProperties, buffer.data());
+
+    WGPUDeviceProperties receivedWGPUDeviceProperties;
+    ASSERT_TRUE(dawn::wire::DeserializeWGPUDeviceProperties(&receivedWGPUDeviceProperties,
+                                                            buffer.data(), buffer.size()));
+    ASSERT_TRUE(receivedWGPUDeviceProperties.textureCompressionBC);
+    ASSERT_FALSE(receivedWGPUDeviceProperties.pipelineStatisticsQuery);
+    ASSERT_TRUE(receivedWGPUDeviceProperties.timestampQuery);
+}
+
+// Test that deserialization if the buffer is just one byte too small fails.
+TEST_F(WireWGPUDevicePropertiesTests, DeserializeBufferTooSmall) {
+    WGPUDeviceProperties sentWGPUDeviceProperties = {};
+
+    size_t sentWGPUDevicePropertiesSize =
+        dawn::wire::SerializedWGPUDevicePropertiesSize(&sentWGPUDeviceProperties);
+    std::vector<char> buffer;
+    buffer.resize(sentWGPUDevicePropertiesSize);
+    dawn::wire::SerializeWGPUDeviceProperties(&sentWGPUDeviceProperties, buffer.data());
+
+    WGPUDeviceProperties receivedWGPUDeviceProperties;
+    ASSERT_FALSE(dawn::wire::DeserializeWGPUDeviceProperties(&receivedWGPUDeviceProperties,
+                                                             buffer.data(), buffer.size() - 1));
+}
diff --git a/src/dawn/tests/white_box/BufferAllocatedSizeTests.cpp b/src/dawn/tests/white_box/BufferAllocatedSizeTests.cpp
new file mode 100644
index 0000000..991c145
--- /dev/null
+++ b/src/dawn/tests/white_box/BufferAllocatedSizeTests.cpp
@@ -0,0 +1,85 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/DawnNative.h"
+
+#include <algorithm>
+
+class BufferAllocatedSizeTests : public DawnTest {
+  protected:
+    wgpu::Buffer CreateBuffer(wgpu::BufferUsage usage, uint64_t size) {
+        wgpu::BufferDescriptor desc = {};
+        desc.usage = usage;
+        desc.size = size;
+        return device.CreateBuffer(&desc);
+    }
+
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    }
+};
+
+// Test expected allocated size for buffers with uniform usage
+TEST_P(BufferAllocatedSizeTests, UniformUsage) {
+    // Some backends have a minimum buffer size, so make sure
+    // we allocate above that.
+    constexpr uint32_t kMinBufferSize = 4u;
+
+    uint32_t requiredBufferAlignment = 1u;
+    if (IsD3D12()) {
+        requiredBufferAlignment = 256u;
+    } else if (IsMetal()) {
+        requiredBufferAlignment = 16u;
+    } else if (IsVulkan()) {
+        requiredBufferAlignment = 4u;
+    }
+
+    // Test uniform usage
+    {
+        const uint32_t bufferSize = kMinBufferSize;
+        wgpu::Buffer buffer = CreateBuffer(wgpu::BufferUsage::Uniform, bufferSize);
+        EXPECT_EQ(dawn::native::GetAllocatedSizeForTesting(buffer.Get()),
+                  Align(bufferSize, requiredBufferAlignment));
+    }
+
+    // Test uniform usage and with size just above requiredBufferAlignment allocates to the next
+    // multiple of |requiredBufferAlignment|
+    {
+        const uint32_t bufferSize = std::max(1u + requiredBufferAlignment, kMinBufferSize);
+        wgpu::Buffer buffer =
+            CreateBuffer(wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage, bufferSize);
+        EXPECT_EQ(dawn::native::GetAllocatedSizeForTesting(buffer.Get()),
+                  Align(bufferSize, requiredBufferAlignment));
+    }
+
+    // Test uniform usage and another usage
+    {
+        const uint32_t bufferSize = kMinBufferSize;
+        wgpu::Buffer buffer =
+            CreateBuffer(wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage, bufferSize);
+        EXPECT_EQ(dawn::native::GetAllocatedSizeForTesting(buffer.Get()),
+                  Align(bufferSize, requiredBufferAlignment));
+    }
+}
+
+DAWN_INSTANTIATE_TEST(BufferAllocatedSizeTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      OpenGLBackend(),
+                      OpenGLESBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
new file mode 100644
index 0000000..05baa9d
--- /dev/null
+++ b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
@@ -0,0 +1,1046 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Toggles.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+constexpr uint32_t kRTSize = 4;
+
+// Pooling tests are required to advance the GPU completed serial to reuse heaps.
+// This requires Tick() to be called at-least |kFrameDepth| times. This constant
+// should be updated if the internals of Tick() change.
+constexpr uint32_t kFrameDepth = 2;
+
+using namespace dawn::native::d3d12;
+
+class D3D12DescriptorHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        mD3DDevice = reinterpret_cast<Device*>(device.Get());
+
+        mSimpleVSModule = utils::CreateShaderModule(device, R"(
+
+            @stage(vertex) fn main(
+                @builtin(vertex_index) VertexIndex : u32
+            ) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0)
+                );
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+        mSimpleFSModule = utils::CreateShaderModule(device, R"(
+            struct U {
+                color : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> colorBuffer : U;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return colorBuffer.color;
+            })");
+    }
+
+    utils::BasicRenderPass MakeRenderPass(uint32_t width,
+                                          uint32_t height,
+                                          wgpu::TextureFormat format) {
+        DAWN_ASSERT(width > 0 && height > 0);
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture color = device.CreateTexture(&descriptor);
+
+        return utils::BasicRenderPass(width, height, color);
+    }
+
+    std::array<float, 4> GetSolidColor(uint32_t n) const {
+        ASSERT(n >> 24 == 0);
+        float b = (n & 0xFF) / 255.0f;
+        float g = ((n >> 8) & 0xFF) / 255.0f;
+        float r = ((n >> 16) & 0xFF) / 255.0f;
+        return {r, g, b, 1};
+    }
+
+    Device* mD3DDevice = nullptr;
+
+    wgpu::ShaderModule mSimpleVSModule;
+    wgpu::ShaderModule mSimpleFSModule;
+};
+
+class DummyStagingDescriptorAllocator {
+  public:
+    DummyStagingDescriptorAllocator(Device* device,
+                                    uint32_t descriptorCount,
+                                    uint32_t allocationsPerHeap)
+        : mAllocator(device,
+                     descriptorCount,
+                     allocationsPerHeap * descriptorCount,
+                     D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {
+    }
+
+    CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
+        dawn::native::ResultOrError<CPUDescriptorHeapAllocation> result =
+            mAllocator.AllocateCPUDescriptors();
+        return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
+    }
+
+    void Deallocate(CPUDescriptorHeapAllocation& allocation) {
+        mAllocator.Deallocate(&allocation);
+    }
+
+  private:
+    StagingDescriptorAllocator mAllocator;
+};
+
+// Verify the shader visible view heaps switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
+    // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
+    renderPipelineDescriptor.vertex.module = mSimpleVSModule;
+    renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetViewShaderVisibleDescriptorAllocator();
+    const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer, 0, sizeof(redColor)}}));
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
+}
+
+// Verify the shader visible sampler heaps does not switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating a
+    // sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
+    // because the sampler heap allocations are de-duplicated.
+    renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
+                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+            })");
+
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var sampler0 : sampler;
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                _ = sampler0;
+                return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+            })");
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+
+    const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, sampler}}));
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
+}
+
+// Verify shader-visible heaps can be recycled for multiple submits.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always unique.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.push_back(heap);
+        // CheckPassedSerials() will update the last internally completed serial.
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        // NextSerial() will increment the last internally submitted serial.
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
+    }
+
+    // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
+    // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in the
+    // check.
+    for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(heaps.front() == heap);
+        heaps.pop_front();
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
+    }
+
+    EXPECT_TRUE(heaps.empty());
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
+}
+
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    constexpr uint32_t kNumOfSwitches = 5;
+
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    // After |kNumOfSwitches|, no heaps are recycled.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
+
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    constexpr uint32_t kNumOfSwitches = 5;
+
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
+    }
+
+    // Switch-over |kNumOfSwitches| again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
+        heaps.erase(heap);
+    }
+
+    // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches * 2));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
+
+// Verify shader-visible heaps do not recycle in multiple submits.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate + Tick() and ensure heaps are always unique.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+        mD3DDevice->APITick();
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate new heaps.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+// Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
+TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    uint32_t kNumOfPooledHeaps = 5;
+    while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
+    }
+
+    // Switch-over the pool-allocated heaps.
+    for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+}
+
+// Verify encoding multiple heaps worth of bindgroups.
+// Shader-visible heaps will switch out |kNumOfHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
+    // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup that
+    // has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize| draws,
+    // the result is the arithmetic sum of the sequence after the framebuffer is blended by
+    // accumulation. By checking for this sum, we ensure each bindgroup was encoded correctly.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass =
+        MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+
+    pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        struct U {
+            heapSize : f32
+        }
+        @group(0) @binding(0) var<uniform> buffer0 : U;
+
+        @stage(fragment) fn main() -> @location(0) vec4<f32> {
+            return vec4<f32>(buffer0.heapSize, 0.0, 0.0, 1.0);
+        })");
+
+    wgpu::BlendState blend;
+    blend.color.operation = wgpu::BlendOperation::Add;
+    blend.color.srcFactor = wgpu::BlendFactor::One;
+    blend.color.dstFactor = wgpu::BlendFactor::One;
+    blend.alpha.operation = wgpu::BlendOperation::Add;
+    blend.alpha.srcFactor = wgpu::BlendFactor::One;
+    blend.alpha.dstFactor = wgpu::BlendFactor::One;
+
+    pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
+    pipelineDescriptor.cTargets[0].blend = &blend;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
+
+    std::vector<wgpu::BindGroup> bindGroups;
+    for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
+        const float color = i + 1;
+        wgpu::Buffer uniformBuffer =
+            utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
+    EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
+}
+
+// Verify encoding one bindgroup then a heaps worth in different submits.
+// Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
+// The first descriptor's memory will be reused when the second submit encodes |heapSize|
+// descriptors.
+TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode the first descriptor and submit.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(renderPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+
+    // Encode a heap worth of descriptors.
+    {
+        const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
+                                      ->GetShaderVisibleHeapSizeForTesting();
+
+        std::vector<wgpu::BindGroup> bindGroups;
+        for (uint32_t i = 0; i < heapSize - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
+            bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer}}));
+        }
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(renderPipeline);
+
+            for (uint32_t i = 0; i < heapSize; ++i) {
+                pass.SetBindGroup(0, bindGroups[i]);
+                pass.Draw(3);
+            }
+
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Verify encoding a heaps worth of bindgroups plus one more then reuse the first
+// bindgroup in the same submit.
+// Shader-visible heaps should switch out once then re-encode the first descriptor at a new offset
+// in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        const std::array<float, 4>& fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        // Encode a heap worth of descriptors plus one more.
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        // Re-encode the first bindgroup again.
+        pass.SetBindGroup(0, bindGroups[0]);
+        pass.Draw(3);
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Make sure the first bindgroup was encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the
+// first bindgroup again in the second submit.
+// Shader-visible heaps should switch out once then re-encode the
+// first descriptor at the same offset in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode heap worth of descriptors plus one more.
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(pipeline);
+
+            for (uint32_t i = 0; i < heapSize + 1; ++i) {
+                pass.SetBindGroup(0, bindGroups[i]);
+                pass.Draw(3);
+            }
+
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Re-encode the first bindgroup again.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(pipeline);
+
+            pass.SetBindGroup(0, bindGroups[0]);
+            pass.Draw(3);
+
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Make sure the first bindgroup was re-encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+}
+
+// Verify encoding many sampler and ubo worth of bindgroups.
+// Shader-visible heaps should switch out |kNumOfViewHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    // Create a solid filled texture.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = kRTSize;
+    descriptor.size.height = kRTSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                       wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    {
+        utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);
+
+        utils::ComboRenderPassDescriptor renderPassDesc({textureView});
+        renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+        renderPass.renderPassInfo.cColorAttachments[0].view = textureView;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.End();
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        RGBA8 filled(0, 255, 0, 255);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+    }
+
+    {
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            struct U {
+                transform : mat2x2<f32>
+            }
+            @group(0) @binding(0) var<uniform> buffer0 : U;
+
+            @stage(vertex) fn main(
+                @builtin(vertex_index) VertexIndex : u32
+            ) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0)
+                );
+                return vec4<f32>(buffer0.transform * (pos[VertexIndex]), 0.0, 1.0);
+            })");
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            struct U {
+                color : vec4<f32>
+            }
+            @group(0) @binding(1) var sampler0 : sampler;
+            @group(0) @binding(2) var texture0 : texture_2d<f32>;
+            @group(0) @binding(3) var<uniform> buffer0 : U;
+
+            @stage(fragment) fn main(
+                @builtin(position) FragCoord : vec4<f32>
+            ) -> @location(0) vec4<f32> {
+                return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color;
+            })");
+
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+        // Encode a heap worth of descriptors |kNumOfHeaps| times.
+        constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
+        wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
+            device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);
+
+        wgpu::SamplerDescriptor samplerDescriptor;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
+
+        ShaderVisibleDescriptorAllocator* viewAllocator =
+            mD3DDevice->GetViewShaderVisibleDescriptorAllocator();
+
+        ShaderVisibleDescriptorAllocator* samplerAllocator =
+            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+        const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting();
+        const HeapVersionID samplerHeapSerial =
+            samplerAllocator->GetShaderVisibleHeapSerialForTesting();
+
+        const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
+
+        // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
+        // group. This means the count of heaps switches is determined by the total number of views
+        // to encode. Compute the number of bindgroups to encode by counting the required views for
+        // |kNumOfViewHeaps| heaps worth.
+        constexpr uint32_t kViewsPerBindGroup = 3;
+        constexpr uint32_t kNumOfViewHeaps = 5;
+
+        const uint32_t numOfEncodedBindGroups =
+            (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;
+
+        std::vector<wgpu::BindGroup> bindGroups;
+        for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
+            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {{0, transformBuffer, 0, sizeof(transform)},
+                                                       {1, sampler},
+                                                       {2, textureView},
+                                                       {3, uniformBuffer, 0, sizeof(fillColor)}}));
+        }
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, transformBuffer, 0, sizeof(transform)},
+                                                   {1, sampler},
+                                                   {2, textureView},
+                                                   {3, lastUniformBuffer, 0, sizeof(redColor)}}));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Final accumulated color is result of sampled + UBO color.
+        RGBA8 filled(255, 255, 0, 255);
+        RGBA8 notFilled(0, 0, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);
+
+        EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
+        EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
+                  viewHeapSerial + HeapVersionID(kNumOfViewHeaps));
+
+        EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+        EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
+    }
+}
+
+// Verify a single allocate/deallocate.
+// One non-shader visible heap will be created.
+TEST_P(D3D12DescriptorHeapTests, Single) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
+
+    CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+    EXPECT_EQ(allocation.GetHeapIndex(), 0u);
+    EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+
+    allocator.Deallocate(allocation);
+    EXPECT_FALSE(allocation.IsValid());
+}
+
+// Verify allocating many times causes the pool to increase in size.
+// Creates |kNumOfHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, Sequential) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
+
+    // Allocate |kNumOfHeaps| worth.
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    std::set<uint32_t> allocatedHeaps;
+
+    std::vector<CPUDescriptorHeapAllocation> allocations;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+        allocatedHeaps.insert(allocation.GetHeapIndex());
+    }
+
+    EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
+
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+// Verify that re-allocating a number of allocations < pool size, all heaps are reused.
+// Creates and reuses |kNumofHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
+
+    constexpr uint32_t kNumofHeaps = 10;
+
+    std::list<CPUDescriptorHeapAllocation> allocations;
+    std::set<size_t> allocationPtrs;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        allocations.push_back(allocation);
+        EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+    }
+
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+
+    allocations.clear();
+
+    // Re-allocate all again.
+    std::set<size_t> reallocatedPtrs;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        allocations.push_back(allocation);
+        EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+        EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
+                              allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
+    }
+
+    // Deallocate all again.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+// Verify allocating then deallocating many times.
+TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
+
+    std::list<CPUDescriptorHeapAllocation> list3;
+    std::list<CPUDescriptorHeapAllocation> list5;
+    std::list<CPUDescriptorHeapAllocation> allocations;
+
+    constexpr uint32_t kNumofHeaps = 2;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 3 == 0) {
+            list3.push_back(allocation);
+        } else {
+            allocations.push_back(allocation);
+        }
+    }
+
+    // Deallocate every 3rd allocation.
+    for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
+        allocator.Deallocate(*it);
+    }
+
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 5 == 0) {
+            list5.push_back(allocation);
+        } else {
+            allocations.push_back(allocation);
+        }
+    }
+
+    // Deallocate every 5th allocation.
+    for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
+        allocator.Deallocate(*it);
+    }
+
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+    }
+
+    // Deallocate remaining.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
+                      D3D12Backend(),
+                      D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
diff --git a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
new file mode 100644
index 0000000..bff420f
--- /dev/null
+++ b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
@@ -0,0 +1,118 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+    class ExpectBetweenTimestamps : public detail::Expectation {
+      public:
+        ~ExpectBetweenTimestamps() override = default;
+
+        ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
+            mValue0 = value0;
+            mValue1 = value1;
+        }
+
+        // Expect the actual results are between mValue0 and mValue1.
+        testing::AssertionResult Check(const void* data, size_t size) override {
+            const uint64_t* actual = static_cast<const uint64_t*>(data);
+            for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
+                if (actual[i] < mValue0 || actual[i] > mValue1) {
+                    return testing::AssertionFailure()
+                           << "Expected data[" << i << "] to be between " << mValue0 << " and "
+                           << mValue1 << ", actual " << actual[i] << std::endl;
+                }
+            }
+
+            return testing::AssertionSuccess();
+        }
+
+      private:
+        uint64_t mValue0;
+        uint64_t mValue1;
+    };
+
+}  // anonymous namespace
+
+using namespace dawn::native::d3d12;
+
+class D3D12GPUTimestampCalibrationTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        // Requires that timestamp query feature is enabled and timestamp query conversion is
+        // disabled.
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
+                                 !HasToggleEnabled("disable_timestamp_query_conversion"));
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
+        }
+        return requiredFeatures;
+    }
+};
+
+// Check that the timestamps got by timestamp query are between the two timestamps from
+// GetClockCalibration() after the timestamp conversion is disabled.
+TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySetDescriptor querySetDescriptor;
+    querySetDescriptor.count = kQueryCount;
+    querySetDescriptor.type = wgpu::QueryType::Timestamp;
+    wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
+    bufferDescriptor.usage =
+        wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.WriteTimestamp(querySet, 0);
+    encoder.WriteTimestamp(querySet, 1);
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
+    uint64_t gpuTimestamp0, gpuTimestamp1;
+    uint64_t cpuTimestamp0, cpuTimestamp1;
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
+    queue.Submit(1, &commands);
+    WaitForAllOperations();
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
+
+    // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
+    // so that the timestamp in the querySet will be closer to both gpuTimestamps from
+    // GetClockCalibration.
+    wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
+    resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
+    queue.Submit(1, &resolveCommands);
+
+    EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
+                  new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
+}
+
+DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
+                      D3D12Backend({"disable_timestamp_query_conversion"}));
\ No newline at end of file
diff --git a/src/dawn/tests/white_box/D3D12ResidencyTests.cpp b/src/dawn/tests/white_box/D3D12ResidencyTests.cpp
new file mode 100644
index 0000000..d6008bd
--- /dev/null
+++ b/src/dawn/tests/white_box/D3D12ResidencyTests.cpp
@@ -0,0 +1,425 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <vector>
+
+constexpr uint32_t kRestrictedBudgetSize = 100000000;         // 100MB
+constexpr uint32_t kDirectlyAllocatedResourceSize = 5000000;  // 5MB
+constexpr uint32_t kSuballocatedResourceSize = 1000000;       // 1MB
+constexpr uint32_t kSourceBufferSize = 4;                     // 4B
+
+constexpr wgpu::BufferUsage kMapReadBufferUsage =
+    wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+constexpr wgpu::BufferUsage kMapWriteBufferUsage =
+    wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+constexpr wgpu::BufferUsage kNonMappableBufferUsage = wgpu::BufferUsage::CopyDst;
+
+class D3D12ResidencyTestBase : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+        // Restrict Dawn's budget to create an artificial budget.
+        dawn::native::d3d12::Device* d3dDevice =
+            dawn::native::d3d12::ToBackend(dawn::native::FromAPI((device.Get())));
+        d3dDevice->GetResidencyManager()->RestrictBudgetForTesting(kRestrictedBudgetSize);
+
+        // Initialize a source buffer on the GPU to serve as a source to quickly copy data to other
+        // buffers.
+        constexpr uint32_t one = 1;
+        mSourceBuffer =
+            utils::CreateBufferFromData(device, &one, sizeof(one), wgpu::BufferUsage::CopySrc);
+    }
+
+    std::vector<wgpu::Buffer> AllocateBuffers(uint32_t bufferSize,
+                                              uint32_t numberOfBuffers,
+                                              wgpu::BufferUsage usage) {
+        std::vector<wgpu::Buffer> buffers;
+
+        for (uint64_t i = 0; i < numberOfBuffers; i++) {
+            buffers.push_back(CreateBuffer(bufferSize, usage));
+        }
+
+        return buffers;
+    }
+
+    wgpu::Buffer CreateBuffer(uint32_t bufferSize, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+
+        descriptor.size = bufferSize;
+        descriptor.usage = usage;
+
+        return device.CreateBuffer(&descriptor);
+    }
+
+    void TouchBuffers(uint32_t beginIndex,
+                      uint32_t numBuffers,
+                      const std::vector<wgpu::Buffer>& bufferSet) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        // Perform a copy on the range of buffers to ensure the are moved to dedicated GPU memory.
+        for (uint32_t i = beginIndex; i < beginIndex + numBuffers; i++) {
+            encoder.CopyBufferToBuffer(mSourceBuffer, 0, bufferSet[i], 0, kSourceBufferSize);
+        }
+        wgpu::CommandBuffer copy = encoder.Finish();
+        queue.Submit(1, &copy);
+    }
+
+    wgpu::Buffer mSourceBuffer;
+};
+
+class D3D12ResourceResidencyTests : public D3D12ResidencyTestBase {
+  protected:
+    bool CheckAllocationMethod(wgpu::Buffer buffer,
+                               dawn::native::AllocationMethod allocationMethod) const {
+        dawn::native::d3d12::Buffer* d3dBuffer =
+            dawn::native::d3d12::ToBackend(dawn::native::FromAPI((buffer.Get())));
+        return d3dBuffer->CheckAllocationMethodForTesting(allocationMethod);
+    }
+
+    bool CheckIfBufferIsResident(wgpu::Buffer buffer) const {
+        dawn::native::d3d12::Buffer* d3dBuffer =
+            dawn::native::d3d12::ToBackend(dawn::native::FromAPI((buffer.Get())));
+        return d3dBuffer->CheckIsResidentForTesting();
+    }
+
+    bool IsUMA() const {
+        return dawn::native::d3d12::ToBackend(dawn::native::FromAPI(device.Get()))
+            ->GetDeviceInfo()
+            .isUMA;
+    }
+};
+
+class D3D12DescriptorResidencyTests : public D3D12ResidencyTestBase {};
+
+// Check that resources existing on suballocated heaps are made resident and evicted correctly.
+TEST_P(D3D12ResourceResidencyTests, OvercommitSmallResources) {
+    // TODO(http://crbug.com/dawn/416): Tests fails on Intel HD 630 bot.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() && IsBackendValidationEnabled());
+
+    // Create suballocated buffers to fill half the budget.
+    std::vector<wgpu::Buffer> bufferSet1 = AllocateBuffers(
+        kSuballocatedResourceSize, ((kRestrictedBudgetSize / 2) / kSuballocatedResourceSize),
+        kNonMappableBufferUsage);
+
+    // Check that all the buffers allocated are resident. Also make sure they were suballocated
+    // internally.
+    for (uint32_t i = 0; i < bufferSet1.size(); i++) {
+        EXPECT_TRUE(CheckIfBufferIsResident(bufferSet1[i]));
+        EXPECT_TRUE(
+            CheckAllocationMethod(bufferSet1[i], dawn::native::AllocationMethod::kSubAllocated));
+    }
+
+    // Create enough directly-allocated buffers to use the entire budget.
+    std::vector<wgpu::Buffer> bufferSet2 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kNonMappableBufferUsage);
+
+    // Check that everything in bufferSet1 is now evicted.
+    for (uint32_t i = 0; i < bufferSet1.size(); i++) {
+        EXPECT_FALSE(CheckIfBufferIsResident(bufferSet1[i]));
+    }
+
+    // Touch one of the non-resident buffers. This should cause the buffer to become resident.
+    constexpr uint32_t indexOfBufferInSet1 = 5;
+    TouchBuffers(indexOfBufferInSet1, 1, bufferSet1);
+    // Check that this buffer is now resident.
+    EXPECT_TRUE(CheckIfBufferIsResident(bufferSet1[indexOfBufferInSet1]));
+
+    // Touch everything in bufferSet2 again to evict the buffer made resident in the previous
+    // operation.
+    TouchBuffers(0, bufferSet2.size(), bufferSet2);
+    // Check that indexOfBufferInSet1 was evicted.
+    EXPECT_FALSE(CheckIfBufferIsResident(bufferSet1[indexOfBufferInSet1]));
+}
+
+// Check that resources existing on directly allocated heaps are made resident and evicted
+// correctly.
+TEST_P(D3D12ResourceResidencyTests, OvercommitLargeResources) {
+    // Create directly-allocated buffers to fill half the budget.
+    std::vector<wgpu::Buffer> bufferSet1 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize,
+        ((kRestrictedBudgetSize / 2) / kDirectlyAllocatedResourceSize), kNonMappableBufferUsage);
+
+    // Check that all the allocated buffers are resident. Also make sure they were directly
+    // allocated internally.
+    for (uint32_t i = 0; i < bufferSet1.size(); i++) {
+        EXPECT_TRUE(CheckIfBufferIsResident(bufferSet1[i]));
+        EXPECT_TRUE(CheckAllocationMethod(bufferSet1[i], dawn::native::AllocationMethod::kDirect));
+    }
+
+    // Create enough directly-allocated buffers to use the entire budget.
+    std::vector<wgpu::Buffer> bufferSet2 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kNonMappableBufferUsage);
+
+    // Check that everything in bufferSet1 is now evicted.
+    for (uint32_t i = 0; i < bufferSet1.size(); i++) {
+        EXPECT_FALSE(CheckIfBufferIsResident(bufferSet1[i]));
+    }
+
+    // Touch one of the non-resident buffers. This should cause the buffer to become resident.
+    constexpr uint32_t indexOfBufferInSet1 = 1;
+    TouchBuffers(indexOfBufferInSet1, 1, bufferSet1);
+    EXPECT_TRUE(CheckIfBufferIsResident(bufferSet1[indexOfBufferInSet1]));
+
+    // Touch everything in bufferSet2 again to evict the buffer made resident in the previous
+    // operation.
+    TouchBuffers(0, bufferSet2.size(), bufferSet2);
+    // Check that indexOfBufferInSet1 was evicted.
+    EXPECT_FALSE(CheckIfBufferIsResident(bufferSet1[indexOfBufferInSet1]));
+}
+
+// Check that calling MapAsync for reading makes the buffer resident and keeps it locked resident.
+TEST_P(D3D12ResourceResidencyTests, AsyncMappedBufferRead) {
+    // Create a mappable buffer.
+    wgpu::Buffer buffer = CreateBuffer(4, kMapReadBufferUsage);
+
+    uint32_t data = 12345;
+    queue.WriteBuffer(buffer, 0, &data, sizeof(uint32_t));
+
+    // The mappable buffer should be resident.
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    // Create and touch enough buffers to use the entire budget.
+    std::vector<wgpu::Buffer> bufferSet = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kMapReadBufferUsage);
+    TouchBuffers(0, bufferSet.size(), bufferSet);
+
+    // The mappable buffer should have been evicted.
+    EXPECT_FALSE(CheckIfBufferIsResident(buffer));
+
+    // Calling MapAsync for reading should make the buffer resident.
+    bool done = false;
+    buffer.MapAsync(
+        wgpu::MapMode::Read, 0, sizeof(uint32_t),
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            *static_cast<bool*>(userdata) = true;
+        },
+        &done);
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    while (!done) {
+        WaitABit();
+    }
+
+    // Touch enough resources such that the entire budget is used. The mappable buffer should remain
+    // locked resident.
+    TouchBuffers(0, bufferSet.size(), bufferSet);
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    // Unmap the buffer, allocate and touch enough resources such that the entire budget is used.
+    // This should evict the mappable buffer.
+    buffer.Unmap();
+    std::vector<wgpu::Buffer> bufferSet2 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kMapReadBufferUsage);
+    TouchBuffers(0, bufferSet2.size(), bufferSet2);
+    EXPECT_FALSE(CheckIfBufferIsResident(buffer));
+}
+
+// Check that calling MapAsync for writing makes the buffer resident and keeps it locked resident.
+TEST_P(D3D12ResourceResidencyTests, AsyncMappedBufferWrite) {
+    // Create a mappable buffer.
+    wgpu::Buffer buffer = CreateBuffer(4, kMapWriteBufferUsage);
+    // The mappable buffer should be resident.
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    // Create and touch enough buffers to use the entire budget.
+    std::vector<wgpu::Buffer> bufferSet1 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kMapReadBufferUsage);
+    TouchBuffers(0, bufferSet1.size(), bufferSet1);
+
+    // The mappable buffer should have been evicted.
+    EXPECT_FALSE(CheckIfBufferIsResident(buffer));
+
+    // Calling MapAsync for writing should make the buffer resident.
+    bool done = false;
+    buffer.MapAsync(
+        wgpu::MapMode::Write, 0, sizeof(uint32_t),
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+            *static_cast<bool*>(userdata) = true;
+        },
+        &done);
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    while (!done) {
+        WaitABit();
+    }
+
+    // Touch enough resources such that the entire budget is used. The mappable buffer should remain
+    // locked resident.
+    TouchBuffers(0, bufferSet1.size(), bufferSet1);
+    EXPECT_TRUE(CheckIfBufferIsResident(buffer));
+
+    // Unmap the buffer, allocate and touch enough resources such that the entire budget is used.
+    // This should evict the mappable buffer.
+    buffer.Unmap();
+    std::vector<wgpu::Buffer> bufferSet2 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+        kMapReadBufferUsage);
+    TouchBuffers(0, bufferSet2.size(), bufferSet2);
+    EXPECT_FALSE(CheckIfBufferIsResident(buffer));
+}
+
+// Check that overcommitting in a single submit works, then make sure the budget is enforced after.
+TEST_P(D3D12ResourceResidencyTests, OvercommitInASingleSubmit) {
+    // Create enough buffers to exceed the budget
+    constexpr uint32_t numberOfBuffersToOvercommit = 5;
+    std::vector<wgpu::Buffer> bufferSet1 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize,
+        (kRestrictedBudgetSize / kDirectlyAllocatedResourceSize) + numberOfBuffersToOvercommit,
+        kNonMappableBufferUsage);
+    // Touch the buffers, which creates an overcommitted command list.
+    TouchBuffers(0, bufferSet1.size(), bufferSet1);
+    // Ensure that all of these buffers are resident, even though we're exceeding the budget.
+    for (uint32_t i = 0; i < bufferSet1.size(); i++) {
+        EXPECT_TRUE(CheckIfBufferIsResident(bufferSet1[i]));
+    }
+
+    // Allocate another set of buffers that exceeds the budget.
+    std::vector<wgpu::Buffer> bufferSet2 = AllocateBuffers(
+        kDirectlyAllocatedResourceSize,
+        (kRestrictedBudgetSize / kDirectlyAllocatedResourceSize) + numberOfBuffersToOvercommit,
+        kNonMappableBufferUsage);
+    // Ensure the first <numberOfBuffersToOvercommit> buffers in the second buffer set were evicted,
+    // since they shouldn't fit in the budget.
+    for (uint32_t i = 0; i < numberOfBuffersToOvercommit; i++) {
+        EXPECT_FALSE(CheckIfBufferIsResident(bufferSet2[i]));
+    }
+}
+
+TEST_P(D3D12ResourceResidencyTests, SetExternalReservation) {
+    // Set an external reservation of 20% the budget. We should succesfully reserve the amount we
+    // request.
+    uint64_t amountReserved = dawn::native::d3d12::SetExternalMemoryReservation(
+        device.Get(), kRestrictedBudgetSize * .2, dawn::native::d3d12::MemorySegment::Local);
+    EXPECT_EQ(amountReserved, kRestrictedBudgetSize * .2);
+
+    // If we're on a non-UMA device, we should also check the NON_LOCAL memory segment.
+    if (!IsUMA()) {
+        amountReserved = dawn::native::d3d12::SetExternalMemoryReservation(
+            device.Get(), kRestrictedBudgetSize * .2, dawn::native::d3d12::MemorySegment::NonLocal);
+        EXPECT_EQ(amountReserved, kRestrictedBudgetSize * .2);
+    }
+}
+
+// Checks that when a descriptor heap is bound, it is locked resident. Also checks that when a
+// previous descriptor heap becomes unbound, it is unlocked, placed in the LRU and can be evicted.
+TEST_P(D3D12DescriptorResidencyTests, SwitchedViewHeapResidency) {
+    // TODO(crbug.com/dawn/739):
+    // unknown file: error: SEH exception with code 0x87d thrown in the test body.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP() && IsBackendValidationEnabled());
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
+    // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
+    renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex) fn main(
+                @builtin(vertex_index) VertexIndex : u32
+            ) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 3>(
+                    vec2<f32>(-1.0,  1.0),
+                    vec2<f32>( 1.0,  1.0),
+                    vec2<f32>(-1.0, -1.0)
+                );
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            })");
+
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+            struct U {
+                color : vec4<f32>
+            }
+            @group(0) @binding(0) var<uniform> colorBuffer : U;
+
+            @stage(fragment) fn main() -> @location(0) vec4<f32> {
+                return colorBuffer.color;
+            })");
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    constexpr uint32_t kSize = 512;
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kSize, kSize);
+
+    wgpu::Sampler sampler = device.CreateSampler();
+
+    dawn::native::d3d12::Device* d3dDevice =
+        dawn::native::d3d12::ToBackend(dawn::native::FromAPI(device.Get()));
+
+    dawn::native::d3d12::ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetViewShaderVisibleDescriptorAllocator();
+    const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+
+    const dawn::native::d3d12::HeapVersionID heapSerial =
+        allocator->GetShaderVisibleHeapSerialForTesting();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer, 0, sizeof(redColor)}}));
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Check the heap serial to ensure the heap has switched.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + dawn::native::d3d12::HeapVersionID(1));
+
+    // Check that currrently bound ShaderVisibleHeap is locked resident.
+    EXPECT_TRUE(allocator->IsShaderVisibleHeapLockedResidentForTesting());
+    // Check that the previously bound ShaderVisibleHeap was unlocked and was placed in the LRU
+    // cache.
+    EXPECT_TRUE(allocator->IsLastShaderVisibleHeapInLRUForTesting());
+    // Allocate enough buffers to exceed the budget, which will purge everything from the Residency
+    // LRU.
+    AllocateBuffers(kDirectlyAllocatedResourceSize,
+                    kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,
+                    kNonMappableBufferUsage);
+    // Check that currrently bound ShaderVisibleHeap remained locked resident.
+    EXPECT_TRUE(allocator->IsShaderVisibleHeapLockedResidentForTesting());
+    // Check that the previously bound ShaderVisibleHeap has been evicted from the LRU cache.
+    EXPECT_FALSE(allocator->IsLastShaderVisibleHeapInLRUForTesting());
+}
+
+DAWN_INSTANTIATE_TEST(D3D12ResourceResidencyTests, D3D12Backend());
+DAWN_INSTANTIATE_TEST(D3D12DescriptorResidencyTests,
+                      D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
diff --git a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
new file mode 100644
index 0000000..4ad2eac
--- /dev/null
+++ b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
@@ -0,0 +1,105 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+using namespace dawn::native::d3d12;
+
+class D3D12ResourceHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
+        if (!mIsBCFormatSupported) {
+            return {};
+        }
+
+        return {wgpu::FeatureName::TextureCompressionBC};
+    }
+
+    bool IsBCFormatSupported() const {
+        return mIsBCFormatSupported;
+    }
+
+  private:
+    bool mIsBCFormatSupported = false;
+};
+
+// Verify that creating a small compressed textures will be 4KB aligned.
+TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
+
+    // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 8;
+    descriptor.size.height = 8;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    // Create a smaller one that allows use of the smaller alignment.
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
+
+    // Create a larger one (>64KB) that forbids use the smaller alignment.
+    descriptor.size.width = 4096;
+    descriptor.size.height = 4096;
+
+    texture = device.CreateTexture(&descriptor);
+    d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
+}
+
+// Verify creating a UBO will always be 256B aligned.
+TEST_P(D3D12ResourceHeapTests, AlignUBO) {
+    // Create a small UBO
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4 * 1024;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_TRUE((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+                 static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)) == 0u);
+
+    // Create a larger UBO
+    descriptor.size = (4 * 1024 * 1024) + 255;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    buffer = device.CreateBuffer(&descriptor);
+    d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_TRUE((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+                 static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)) == 0u);
+}
+
+DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
diff --git a/src/dawn/tests/white_box/EGLImageWrappingTests.cpp b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
new file mode 100644
index 0000000..e415da5
--- /dev/null
+++ b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
@@ -0,0 +1,382 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <EGL/egl.h>
+
+namespace {
+
+    class EGLFunctions {
+      public:
+        EGLFunctions() {
+#ifdef DAWN_PLATFORM_WINDOWS
+            const char* eglLib = "libEGL.dll";
+#else
+            const char* eglLib = "libEGL.so";
+#endif
+            EXPECT_TRUE(mlibEGL.Open(eglLib));
+            CreateImage = reinterpret_cast<PFNEGLCREATEIMAGEPROC>(LoadProc("eglCreateImage"));
+            DestroyImage = reinterpret_cast<PFNEGLDESTROYIMAGEPROC>(LoadProc("eglDestroyImage"));
+            GetCurrentContext =
+                reinterpret_cast<PFNEGLGETCURRENTCONTEXTPROC>(LoadProc("eglGetCurrentContext"));
+            GetCurrentDisplay =
+                reinterpret_cast<PFNEGLGETCURRENTDISPLAYPROC>(LoadProc("eglGetCurrentDisplay"));
+        }
+
+      private:
+        void* LoadProc(const char* name) {
+            void* proc = mlibEGL.GetProc(name);
+            EXPECT_NE(proc, nullptr);
+            return proc;
+        }
+
+      public:
+        PFNEGLCREATEIMAGEPROC CreateImage;
+        PFNEGLDESTROYIMAGEPROC DestroyImage;
+        PFNEGLGETCURRENTCONTEXTPROC GetCurrentContext;
+        PFNEGLGETCURRENTDISPLAYPROC GetCurrentDisplay;
+
+      private:
+        DynamicLib mlibEGL;
+    };
+
+    class ScopedEGLImage {
+      public:
+        ScopedEGLImage(PFNEGLDESTROYIMAGEPROC destroyImage,
+                       PFNGLDELETETEXTURESPROC deleteTextures,
+                       EGLDisplay display,
+                       EGLImage image,
+                       GLuint texture)
+            : mDestroyImage(destroyImage),
+              mDeleteTextures(deleteTextures),
+              mDisplay(display),
+              mImage(image),
+              mTexture(texture) {
+        }
+
+        ScopedEGLImage(ScopedEGLImage&& other) {
+            if (mImage != nullptr) {
+                mDestroyImage(mDisplay, mImage);
+            }
+            if (mTexture != 0) {
+                mDeleteTextures(1, &mTexture);
+            }
+            mDestroyImage = std::move(other.mDestroyImage);
+            mDeleteTextures = std::move(other.mDeleteTextures);
+            mDisplay = std::move(other.mDisplay);
+            mImage = std::move(other.mImage);
+            mTexture = std::move(other.mTexture);
+        }
+
+        ~ScopedEGLImage() {
+            if (mTexture != 0) {
+                mDeleteTextures(1, &mTexture);
+            }
+            if (mImage != nullptr) {
+                mDestroyImage(mDisplay, mImage);
+            }
+        }
+
+        EGLImage getImage() const {
+            return mImage;
+        }
+
+        GLuint getTexture() const {
+            return mTexture;
+        }
+
+      private:
+        PFNEGLDESTROYIMAGEPROC mDestroyImage = nullptr;
+        PFNGLDELETETEXTURESPROC mDeleteTextures = nullptr;
+        EGLDisplay mDisplay = nullptr;
+        EGLImage mImage = nullptr;
+        GLuint mTexture = 0;
+    };
+
+}  // anonymous namespace
+
+class EGLImageTestBase : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
+    }
+
+  public:
+    ScopedEGLImage CreateEGLImage(uint32_t width,
+                                  uint32_t height,
+                                  GLenum internalFormat,
+                                  GLenum format,
+                                  GLenum type,
+                                  void* data,
+                                  size_t size) {
+        dawn::native::opengl::Device* openglDevice =
+            dawn::native::opengl::ToBackend(dawn::native::FromAPI(device.Get()));
+        const dawn::native::opengl::OpenGLFunctions& gl = openglDevice->gl;
+        GLuint tex;
+        gl.GenTextures(1, &tex);
+        gl.BindTexture(GL_TEXTURE_2D, tex);
+        gl.TexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, data);
+        EGLAttrib attribs[1] = {EGL_NONE};
+        EGLClientBuffer buffer = reinterpret_cast<EGLClientBuffer>(static_cast<intptr_t>(tex));
+        EGLDisplay dpy = egl.GetCurrentDisplay();
+        EGLContext ctx = egl.GetCurrentContext();
+        EGLImage eglImage = egl.CreateImage(dpy, ctx, EGL_GL_TEXTURE_2D, buffer, attribs);
+        EXPECT_NE(nullptr, eglImage);
+
+        return ScopedEGLImage(egl.DestroyImage, gl.DeleteTextures, dpy, eglImage, tex);
+    }
+    wgpu::Texture WrapEGLImage(const wgpu::TextureDescriptor* descriptor, EGLImage eglImage) {
+        dawn::native::opengl::ExternalImageDescriptorEGLImage externDesc;
+        externDesc.cTextureDescriptor = reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
+        externDesc.image = eglImage;
+        WGPUTexture texture = dawn::native::opengl::WrapExternalEGLImage(device.Get(), &externDesc);
+        return wgpu::Texture::Acquire(texture);
+    }
+    EGLFunctions egl;
+};
+
+// A small fixture used to initialize default data for the EGLImage validation tests.
+// These tests are skipped if the harness is using the wire.
+class EGLImageValidationTests : public EGLImageTestBase {
+  public:
+    EGLImageValidationTests() {
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.size = {10, 10, 1};
+        descriptor.sampleCount = 1;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    }
+
+    ScopedEGLImage CreateDefaultEGLImage() {
+        return CreateEGLImage(10, 10, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, nullptr, 0);
+    }
+
+  protected:
+    wgpu::TextureDescriptor descriptor;
+};
+
+// Test a successful wrapping of an EGLImage in a texture
+TEST_P(EGLImageValidationTests, Success) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage());
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Test a successful wrapping of an EGLImage in a texture with DawnTextureInternalUsageDescriptor
+TEST_P(EGLImageValidationTests, SuccessWithInternalUsageDescriptor) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    descriptor.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage());
+    ASSERT_NE(texture.Get(), nullptr);
+}
+
+// Test an error occurs if an invalid sType is the nextInChain
+TEST_P(EGLImageValidationTests, InvalidTextureDescriptor) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
+    descriptor.nextInChain = &chainedDescriptor;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor dimension isn't 2D
+TEST_P(EGLImageValidationTests, InvalidTextureDimension) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the texture usage is not RenderAttachment
+TEST_P(EGLImageValidationTests, InvalidTextureUsage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    wgpu::Texture texture;
+    ASSERT_DEVICE_ERROR(texture = WrapEGLImage(&descriptor, image.getImage()));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+    descriptor.usage = wgpu::TextureUsage::StorageBinding;
+
+    ASSERT_DEVICE_ERROR(texture = WrapEGLImage(&descriptor, image.getImage()));
+
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor mip level count isn't 1
+TEST_P(EGLImageValidationTests, InvalidMipLevelCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.mipLevelCount = 2;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor depth isn't 1
+TEST_P(EGLImageValidationTests, InvalidDepth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.depthOrArrayLayers = 2;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor sample count isn't 1
+TEST_P(EGLImageValidationTests, InvalidSampleCount) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.sampleCount = 4;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor width doesn't match the surface's
+TEST_P(EGLImageValidationTests, InvalidWidth) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.width = 11;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor height doesn't match the surface's
+TEST_P(EGLImageValidationTests, InvalidHeight) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    descriptor.size.height = 11;
+
+    ScopedEGLImage image = CreateDefaultEGLImage();
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapEGLImage(&descriptor, image.getImage()));
+    ASSERT_EQ(texture.Get(), nullptr);
+}
+
+// Fixture to test using EGLImages through different usages.
+// These tests are skipped if the harness is using the wire.
+class EGLImageUsageTests : public EGLImageTestBase {
+  public:
+    // Test that clearing using BeginRenderPass writes correct data in the eglImage.
+    void DoClearTest(EGLImage eglImage,
+                     GLuint texture,
+                     wgpu::TextureFormat format,
+                     GLenum glFormat,
+                     GLenum glType,
+                     void* data,
+                     size_t dataSize) {
+        dawn::native::opengl::Device* openglDevice =
+            dawn::native::opengl::ToBackend(dawn::native::FromAPI(device.Get()));
+        const dawn::native::opengl::OpenGLFunctions& gl = openglDevice->gl;
+
+        // Get a texture view for the eglImage
+        wgpu::TextureDescriptor textureDescriptor;
+        textureDescriptor.dimension = wgpu::TextureDimension::e2D;
+        textureDescriptor.format = format;
+        textureDescriptor.size = {1, 1, 1};
+        textureDescriptor.sampleCount = 1;
+        textureDescriptor.mipLevelCount = 1;
+        textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+        textureDescriptor.nextInChain = &internalDesc;
+        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+        internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+        wgpu::Texture eglImageTexture = WrapEGLImage(&textureDescriptor, eglImage);
+        ASSERT_NE(eglImageTexture, nullptr);
+
+        wgpu::TextureView eglImageView = eglImageTexture.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPassDescriptor({eglImageView}, {});
+        renderPassDescriptor.cColorAttachments[0].clearValue = {1 / 255.0f, 2 / 255.0f, 3 / 255.0f,
+                                                                4 / 255.0f};
+
+        // Execute commands to clear the eglImage
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Check the correct data was written
+        std::vector<uint8_t> result(dataSize);
+        GLuint fbo;
+        gl.GenFramebuffers(1, &fbo);
+        gl.BindFramebuffer(GL_FRAMEBUFFER, fbo);
+        gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture,
+                                0);
+        gl.ReadPixels(0, 0, 1, 1, glFormat, glType, result.data());
+        gl.BindFramebuffer(GL_FRAMEBUFFER, 0);
+        gl.DeleteFramebuffers(1, &fbo);
+        ASSERT_EQ(0, memcmp(result.data(), data, dataSize));
+    }
+};
+
+// Test clearing a R8 EGLImage
+TEST_P(EGLImageUsageTests, ClearR8EGLImage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedEGLImage eglImage = CreateEGLImage(1, 1, GL_R8, GL_RED, GL_UNSIGNED_BYTE, nullptr, 0);
+
+    uint8_t data = 0x01;
+    DoClearTest(eglImage.getImage(), eglImage.getTexture(), wgpu::TextureFormat::R8Unorm, GL_RED,
+                GL_UNSIGNED_BYTE, &data, sizeof(data));
+}
+
+// Test clearing a RG8 EGLImage
+TEST_P(EGLImageUsageTests, ClearRG8EGLImage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedEGLImage eglImage = CreateEGLImage(1, 1, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, nullptr, 0);
+
+    uint16_t data = 0x0201;
+    DoClearTest(eglImage.getImage(), eglImage.getTexture(), wgpu::TextureFormat::RG8Unorm, GL_RG,
+                GL_UNSIGNED_BYTE, &data, sizeof(data));
+}
+
+// Test clearing an RGBA8 EGLImage
+TEST_P(EGLImageUsageTests, ClearRGBA8EGLImage) {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    ScopedEGLImage eglImage = CreateEGLImage(1, 1, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, nullptr, 0);
+
+    uint32_t data = 0x04030201;
+    DoClearTest(eglImage.getImage(), eglImage.getTexture(), wgpu::TextureFormat::RGBA8Unorm,
+                GL_RGBA, GL_UNSIGNED_BYTE, &data, sizeof(data));
+}
+
+DAWN_INSTANTIATE_TEST(EGLImageValidationTests, OpenGLESBackend());
+DAWN_INSTANTIATE_TEST(EGLImageUsageTests, OpenGLESBackend());
diff --git a/src/dawn/tests/white_box/InternalResourceUsageTests.cpp b/src/dawn/tests/white_box/InternalResourceUsageTests.cpp
new file mode 100644
index 0000000..5e46b50
--- /dev/null
+++ b/src/dawn/tests/white_box/InternalResourceUsageTests.cpp
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/dawn_platform.h"
+
+class InternalResourceUsageTests : public DawnTest {
+  protected:
+    wgpu::Buffer CreateBuffer(wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = 4;
+        descriptor.usage = usage;
+
+        return device.CreateBuffer(&descriptor);
+    }
+};
+
+// Verify it is an error to create a buffer with a buffer usage that should only be used
+// internally.
+TEST_P(InternalResourceUsageTests, InternalBufferUsage) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    ASSERT_DEVICE_ERROR(CreateBuffer(dawn::native::kReadOnlyStorageBuffer));
+
+    ASSERT_DEVICE_ERROR(CreateBuffer(dawn::native::kInternalStorageBuffer));
+}
+
+DAWN_INSTANTIATE_TEST(InternalResourceUsageTests, NullBackend());
+
+class InternalBindingTypeTests : public DawnTest {};
+
+// Verify it is an error to create a bind group layout with a buffer binding type that should only
+// be used internally.
+TEST_P(InternalBindingTypeTests, InternalStorageBufferBindingType) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    wgpu::BindGroupLayoutEntry bglEntry;
+    bglEntry.binding = 0;
+    bglEntry.buffer.type = dawn::native::kInternalStorageBufferBinding;
+    bglEntry.visibility = wgpu::ShaderStage::Compute;
+
+    wgpu::BindGroupLayoutDescriptor bglDesc;
+    bglDesc.entryCount = 1;
+    bglDesc.entries = &bglEntry;
+    ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&bglDesc));
+}
+
+DAWN_INSTANTIATE_TEST(InternalBindingTypeTests, NullBackend());
diff --git a/src/dawn/tests/white_box/InternalStorageBufferBindingTests.cpp b/src/dawn/tests/white_box/InternalStorageBufferBindingTests.cpp
new file mode 100644
index 0000000..2599614
--- /dev/null
+++ b/src/dawn/tests/white_box/InternalStorageBufferBindingTests.cpp
@@ -0,0 +1,112 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/dawn_platform.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+class InternalStorageBufferBindingTests : public DawnTest {
+  protected:
+    static constexpr uint32_t kNumValues = 4;
+    static constexpr uint32_t kIterations = 4;
+
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    }
+
+    wgpu::ComputePipeline CreateComputePipelineWithInternalStorage() {
+        wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+            struct Buf {
+                data : array<u32, 4>
+            }
+
+            @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+            @stage(compute) @workgroup_size(1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+                buf.data[GlobalInvocationID.x] = buf.data[GlobalInvocationID.x] + 0x1234u;
+            }
+        )");
+
+        // Create binding group layout with internal storage buffer binding type
+        dawn::native::BindGroupLayoutEntry bglEntry;
+        bglEntry.binding = 0;
+        bglEntry.buffer.type = dawn::native::kInternalStorageBufferBinding;
+        bglEntry.visibility = wgpu::ShaderStage::Compute;
+
+        dawn::native::BindGroupLayoutDescriptor bglDesc;
+        bglDesc.entryCount = 1;
+        bglDesc.entries = &bglEntry;
+
+        dawn::native::DeviceBase* nativeDevice = dawn::native::FromAPI(device.Get());
+
+        Ref<dawn::native::BindGroupLayoutBase> bglRef =
+            nativeDevice->CreateBindGroupLayout(&bglDesc, true).AcquireSuccess();
+
+        wgpu::BindGroupLayout bgl =
+            wgpu::BindGroupLayout::Acquire(dawn::native::ToAPI(bglRef.Detach()));
+
+        // Create pipeline layout
+        wgpu::PipelineLayoutDescriptor plDesc;
+        plDesc.bindGroupLayoutCount = 1;
+        plDesc.bindGroupLayouts = &bgl;
+        wgpu::PipelineLayout layout = device.CreatePipelineLayout(&plDesc);
+
+        wgpu::ComputePipelineDescriptor pipelineDesc = {};
+        pipelineDesc.layout = layout;
+        pipelineDesc.compute.module = module;
+        pipelineDesc.compute.entryPoint = "main";
+
+        return device.CreateComputePipeline(&pipelineDesc);
+    }
+};
+
+// Test that query resolve buffer can be bound as internal storage buffer, multiple dispatches to
+// increment values in the query resolve buffer are synchronized.
+TEST_P(InternalStorageBufferBindingTests, QueryResolveBufferBoundAsInternalStorageBuffer) {
+    std::vector<uint32_t> data(kNumValues, 0);
+    std::vector<uint32_t> expected(kNumValues, 0x1234u * kIterations);
+
+    uint64_t bufferSize = static_cast<uint64_t>(data.size() * sizeof(uint32_t));
+    wgpu::Buffer buffer =
+        utils::CreateBufferFromData(device, data.data(), bufferSize,
+                                    wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc);
+
+    wgpu::ComputePipeline pipeline = CreateComputePipelineWithInternalStorage();
+
+    wgpu::BindGroup bindGroup =
+        utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, buffer, 0, bufferSize}});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    for (uint32_t i = 0; i < kIterations; ++i) {
+        pass.Dispatch(kNumValues);
+    }
+    pass.End();
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), buffer, 0, kNumValues);
+}
+
+DAWN_INSTANTIATE_TEST(InternalStorageBufferBindingTests,
+                      D3D12Backend(),
+                      MetalBackend(),
+                      VulkanBackend());
diff --git a/src/dawn/tests/white_box/MetalAutoreleasePoolTests.mm b/src/dawn/tests/white_box/MetalAutoreleasePoolTests.mm
new file mode 100644
index 0000000..766f861
--- /dev/null
+++ b/src/dawn/tests/white_box/MetalAutoreleasePoolTests.mm
@@ -0,0 +1,62 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/metal/DeviceMTL.h"
+
+using namespace dawn::native::metal;
+
+class MetalAutoreleasePoolTests : public DawnTest {
+  private:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+        mMtlDevice = reinterpret_cast<Device*>(device.Get());
+    }
+
+  protected:
+    Device* mMtlDevice = nullptr;
+};
+
+// Test that the MTLCommandBuffer owned by the pending command context can
+// outlive an autoreleasepool block.
+TEST_P(MetalAutoreleasePoolTests, CommandBufferOutlivesAutorelease) {
+    @autoreleasepool {
+        // Get the recording context which will allocate a MTLCommandBuffer.
+        // It will get autoreleased at the end of this block.
+        mMtlDevice->GetPendingCommandContext();
+    }
+
+    // Submitting the command buffer should succeed.
+    ASSERT_TRUE(mMtlDevice->SubmitPendingCommandBuffer().IsSuccess());
+}
+
+// Test that the MTLBlitCommandEncoder owned by the pending command context
+// can outlive an autoreleasepool block.
+TEST_P(MetalAutoreleasePoolTests, EncoderOutlivesAutorelease) {
+    @autoreleasepool {
+        // Get the recording context which will allocate a MTLCommandBuffer.
+        // Begin a blit encoder.
+        // Both will get autoreleased at the end of this block.
+        mMtlDevice->GetPendingCommandContext()->EnsureBlit();
+    }
+
+    // Submitting the command buffer should succeed.
+    mMtlDevice->GetPendingCommandContext()->EndBlit();
+    ASSERT_TRUE(mMtlDevice->SubmitPendingCommandBuffer().IsSuccess());
+}
+
+DAWN_INSTANTIATE_TEST(MetalAutoreleasePoolTests, MetalBackend());
diff --git a/src/dawn/tests/white_box/QueryInternalShaderTests.cpp b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
new file mode 100644
index 0000000..96d3b2f
--- /dev/null
+++ b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
@@ -0,0 +1,224 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/QueryHelper.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace {
+
+    void EncodeConvertTimestampsToNanoseconds(wgpu::CommandEncoder encoder,
+                                              wgpu::Buffer timestamps,
+                                              wgpu::Buffer availability,
+                                              wgpu::Buffer params) {
+        ASSERT_TRUE(
+            dawn::native::EncodeConvertTimestampsToNanoseconds(
+                dawn::native::FromAPI(encoder.Get()), dawn::native::FromAPI(timestamps.Get()),
+                dawn::native::FromAPI(availability.Get()), dawn::native::FromAPI(params.Get()))
+                .IsSuccess());
+    }
+
+    class InternalShaderExpectation : public detail::Expectation {
+      public:
+        ~InternalShaderExpectation() override = default;
+
+        InternalShaderExpectation(const uint64_t* values, const unsigned int count) {
+            mExpected.assign(values, values + count);
+        }
+
+        // Expect the actual results are approximately equal to the expected values.
+        testing::AssertionResult Check(const void* data, size_t size) override {
+            DAWN_ASSERT(size == sizeof(uint64_t) * mExpected.size());
+            // The computations in the shader use a multiplier that's a 16bit integer plus a shift
+            // that maximize the multiplier. This means that for the range of periods we care about
+            // (1 to 2^16-1 ns per tick), the high order bit of the multiplier will always be set.
+            // Intuitively this means that we have 15 bits of precision in the computation so we
+            // expect that for the error tolerance.
+            constexpr static float kErrorToleranceRatio = 1.0 / (1 << 15);  // about 3e-5.
+
+            const uint64_t* actual = static_cast<const uint64_t*>(data);
+            for (size_t i = 0; i < mExpected.size(); ++i) {
+                if (mExpected[i] == 0) {
+                    if (actual[i] != 0) {
+                        return testing::AssertionFailure()
+                               << "Expected data[" << i << "] to be 0, actual " << actual[i]
+                               << std::endl;
+                    }
+                    continue;
+                }
+
+                float errorRate =
+                    abs(static_cast<int64_t>(mExpected[i] - actual[i])) / float(mExpected[i]);
+                if (errorRate > kErrorToleranceRatio) {
+                    return testing::AssertionFailure()
+                           << "Expected data[" << i << "] to be " << mExpected[i] << ", actual "
+                           << actual[i] << ". Error rate " << errorRate << " is larger than "
+                           << kErrorToleranceRatio << std::endl;
+                }
+            }
+
+            return testing::AssertionSuccess();
+        }
+
+      private:
+        std::vector<uint64_t> mExpected;
+    };
+
+}  // anonymous namespace
+
+constexpr static uint64_t kSentinelValue = ~uint64_t(0u);
+
+class QueryInternalShaderTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("disable_timestamp_query_conversion"));
+    }
+
+    // Original timestamp values in query set for testing
+    const std::vector<uint64_t> querySetValues = {
+        kSentinelValue,  // garbage data which is not written at beginning
+        10079569507,     // t0
+        10394415012,     // t1
+        kSentinelValue,  // garbage data which is not written between timestamps
+        11713454943,     // t2
+        38912556941,     // t3 (big value)
+        10080295766,     // t4 (reset)
+        12159966783,     // t5 (after reset)
+        12651224612,     // t6
+        39872473956,     // t7
+    };
+
+    const uint32_t kQueryCount = querySetValues.size();
+
+    // Timestamps available state
+    const std::vector<uint32_t> availabilities = {0, 1, 1, 0, 1, 1, 1, 1, 1, 1};
+
+    const std::vector<uint64_t> GetExpectedResults(const std::vector<uint64_t>& origin,
+                                                   uint32_t start,
+                                                   uint32_t firstQuery,
+                                                   uint32_t queryCount,
+                                                   float period) {
+        std::vector<uint64_t> expected(origin.begin(), origin.end());
+        for (size_t i = 0; i < queryCount; i++) {
+            if (availabilities[firstQuery + i] == 0) {
+                // Not a available timestamp, write 0
+                expected[start + i] = 0u;
+            } else {
+                // Maybe the timestamp * period is larger than the maximum of uint64, so cast the
+                // delta value to double (higher precision than float)
+                expected[start + i] =
+                    static_cast<uint64_t>(static_cast<double>(origin[start + i]) * period);
+            }
+        }
+        return expected;
+    }
+
+    void RunTest(uint32_t firstQuery,
+                 uint32_t queryCount,
+                 uint32_t destinationOffset,
+                 float period) {
+        ASSERT(destinationOffset % 256 == 0);
+
+        uint64_t size = queryCount * sizeof(uint64_t) + destinationOffset;
+
+        // The resolve buffer storing original timestamps and the converted values
+        wgpu::BufferDescriptor timestampsDesc;
+        timestampsDesc.size = size;
+        timestampsDesc.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
+                               wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer timestampsBuffer = device.CreateBuffer(&timestampsDesc);
+
+        // Set sentinel values to check the slots before the destination offset should not be
+        // converted
+        std::vector<uint64_t> timestampValues(size / sizeof(uint64_t), 1u);
+        uint32_t start = destinationOffset / sizeof(uint64_t);
+        for (uint32_t i = 0; i < queryCount; i++) {
+            timestampValues[start + i] = querySetValues[firstQuery + i];
+        }
+        // Write sentinel values and orignal timestamps to timestamps buffer
+        queue.WriteBuffer(timestampsBuffer, 0, timestampValues.data(), size);
+
+        // The buffer indicating which values are available timestamps
+        wgpu::Buffer availabilityBuffer =
+            utils::CreateBufferFromData(device, availabilities.data(),
+                                        kQueryCount * sizeof(uint32_t), wgpu::BufferUsage::Storage);
+
+        // The params uniform buffer
+        dawn::native::TimestampParams params(firstQuery, queryCount, destinationOffset, period);
+        wgpu::Buffer paramsBuffer = utils::CreateBufferFromData(device, &params, sizeof(params),
+                                                                wgpu::BufferUsage::Uniform);
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        EncodeConvertTimestampsToNanoseconds(encoder, timestampsBuffer, availabilityBuffer,
+                                             paramsBuffer);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        const std::vector<uint64_t> expected =
+            GetExpectedResults(timestampValues, start, firstQuery, queryCount, period);
+
+        EXPECT_BUFFER(timestampsBuffer, 0, size,
+                      new InternalShaderExpectation(expected.data(), size / sizeof(uint64_t)))
+            << "Conversion test for period:" << period << " firstQuery:" << firstQuery
+            << " queryCount:" << queryCount << " destinationOffset:" << destinationOffset;
+    }
+};
+
+// Test the accuracy of timestamp compute shader which uses unsigned 32-bit integers to simulate
+// unsigned 64-bit integers (timestamps) multiplied by float (period).
+// The arguments pass to timestamp internal pipeline:
+// - The timestamps buffer contains the original timestamps resolved from query set (created
+//   manually here), and will be used to store the results processed by the compute shader.
+//   Expect 0 for unavailable timestamps and nanoseconds for available timestamps in an expected
+//   error tolerance ratio.
+// - The availability buffer passes the data of which slot in timestamps buffer is an initialized
+//   timestamp.
+// - The params buffer passes the timestamp count, the offset in timestamps buffer and the
+//   timestamp period (here use GPU frequency (HZ) on Intel D3D12 to calculate the period in
+//   ns for testing).
+TEST_P(QueryInternalShaderTests, TimestampComputeShader) {
+    // TODO(crbug.com/dawn/741): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    constexpr std::array<float, 5> kPeriodsToTest = {
+        1,
+        7,
+        // A gpu frequency on Intel D3D12 (ticks/second)
+        83.333,
+        1042,
+        65535,
+    };
+
+    for (float period : kPeriodsToTest) {
+        // Convert timestamps in timestamps buffer with offset 0
+        // Test for ResolveQuerySet(querySet, 0, kQueryCount, timestampsBuffer, 0)
+        RunTest(0, kQueryCount, 0, period);
+
+        // Convert timestamps in timestamps buffer with offset 256
+        // Test for ResolveQuerySet(querySet, 1, kQueryCount - 1, timestampsBuffer, 256)
+        RunTest(1, kQueryCount - 1, 256, period);
+
+        // Convert partial timestamps in timestamps buffer with offset 256
+        // Test for ResolveQuerySet(querySet, 1, 4, timestampsBuffer, 256)
+        RunTest(1, 4, 256, period);
+    }
+}
+
+DAWN_INSTANTIATE_TEST(QueryInternalShaderTests, D3D12Backend(), MetalBackend(), VulkanBackend());
diff --git a/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
new file mode 100644
index 0000000..1249973
--- /dev/null
+++ b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
@@ -0,0 +1,124 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/DawnTest.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace {
+
+    class VulkanErrorInjectorTests : public DawnTest {
+      public:
+        void SetUp() override {
+            DawnTest::SetUp();
+            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+        }
+
+      protected:
+        dawn::native::vulkan::Device* mDeviceVk;
+    };
+
+}  // anonymous namespace
+
+TEST_P(VulkanErrorInjectorTests, InjectErrorOnCreateBuffer) {
+    VkBufferCreateInfo createInfo = {};
+    createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.size = 16;
+    createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    // Check that making a buffer works.
+    {
+        VkBuffer buffer = VK_NULL_HANDLE;
+        EXPECT_EQ(
+            mDeviceVk->fn.CreateBuffer(mDeviceVk->GetVkDevice(), &createInfo, nullptr, &buffer),
+            VK_SUCCESS);
+        mDeviceVk->fn.DestroyBuffer(mDeviceVk->GetVkDevice(), buffer, nullptr);
+    }
+
+    auto CreateTestBuffer = [&]() -> bool {
+        VkBuffer buffer = VK_NULL_HANDLE;
+        dawn::native::MaybeError err = CheckVkSuccess(
+            mDeviceVk->fn.CreateBuffer(mDeviceVk->GetVkDevice(), &createInfo, nullptr, &buffer),
+            "vkCreateBuffer");
+        if (err.IsError()) {
+            // The handle should never be written to, even for mock failures.
+            EXPECT_EQ(buffer, VK_NULL_HANDLE);
+            err.AcquireError();
+            return false;
+        }
+        EXPECT_NE(buffer, VK_NULL_HANDLE);
+
+        // We never use the buffer, only test mocking errors on creation. Cleanup now.
+        mDeviceVk->fn.DestroyBuffer(mDeviceVk->GetVkDevice(), buffer, nullptr);
+
+        return true;
+    };
+
+    // Check that making a buffer inside CheckVkSuccess works.
+    {
+        EXPECT_TRUE(CreateTestBuffer());
+
+        // The error injector call count should be empty
+        EXPECT_EQ(dawn::native::AcquireErrorInjectorCallCount(), 0u);
+    }
+
+    // Test error injection works.
+    dawn::native::EnableErrorInjector();
+    {
+        EXPECT_TRUE(CreateTestBuffer());
+        EXPECT_TRUE(CreateTestBuffer());
+
+        // The error injector call count should be two.
+        EXPECT_EQ(dawn::native::AcquireErrorInjectorCallCount(), 2u);
+
+        // Inject an error at index 0. The first should fail, the second succeed.
+        {
+            dawn::native::InjectErrorAt(0u);
+            EXPECT_FALSE(CreateTestBuffer());
+            EXPECT_TRUE(CreateTestBuffer());
+
+            dawn::native::ClearErrorInjector();
+        }
+
+        // Inject an error at index 1. The second should fail, the first succeed.
+        {
+            dawn::native::InjectErrorAt(1u);
+            EXPECT_TRUE(CreateTestBuffer());
+            EXPECT_FALSE(CreateTestBuffer());
+
+            dawn::native::ClearErrorInjector();
+        }
+
+        // Inject an error and then clear the injector. Calls should be successful.
+        {
+            dawn::native::InjectErrorAt(0u);
+            dawn::native::DisableErrorInjector();
+
+            EXPECT_TRUE(CreateTestBuffer());
+            EXPECT_TRUE(CreateTestBuffer());
+
+            dawn::native::ClearErrorInjector();
+        }
+    }
+}
+
+DAWN_INSTANTIATE_TEST(VulkanErrorInjectorTests, VulkanBackend());
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
new file mode 100644
index 0000000..7a087e3
--- /dev/null
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
@@ -0,0 +1,887 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/white_box/VulkanImageWrappingTests.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/tests/DawnTest.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace dawn::native { namespace vulkan {
+
+    using ExternalTexture = VulkanImageWrappingTestBackend::ExternalTexture;
+    using ExternalSemaphore = VulkanImageWrappingTestBackend::ExternalSemaphore;
+
+    namespace {
+
+        class VulkanImageWrappingTestBase : public DawnTest {
+          protected:
+            std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+                return {wgpu::FeatureName::DawnInternalUsages};
+            }
+
+          public:
+            void SetUp() override {
+                DawnTest::SetUp();
+                DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+
+                mBackend = VulkanImageWrappingTestBackend::Create(device);
+
+                defaultDescriptor.dimension = wgpu::TextureDimension::e2D;
+                defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+                defaultDescriptor.size = {1, 1, 1};
+                defaultDescriptor.sampleCount = 1;
+                defaultDescriptor.mipLevelCount = 1;
+                defaultDescriptor.usage = wgpu::TextureUsage::RenderAttachment |
+                                          wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+                defaultTexture = mBackend->CreateTexture(1, 1, defaultDescriptor.format,
+                                                         defaultDescriptor.usage);
+            }
+
+            void TearDown() override {
+                if (UsesWire()) {
+                    DawnTest::TearDown();
+                    return;
+                }
+
+                defaultTexture = nullptr;
+                mBackend = nullptr;
+                DawnTest::TearDown();
+            }
+
+            wgpu::Texture WrapVulkanImage(
+                wgpu::Device dawnDevice,
+                const wgpu::TextureDescriptor* textureDescriptor,
+                const ExternalTexture* externalTexture,
+                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                bool isInitialized = true,
+                bool expectValid = true) {
+                ExternalImageDescriptorVkForTesting descriptor;
+                return WrapVulkanImage(dawnDevice, textureDescriptor, externalTexture,
+                                       std::move(semaphores), descriptor.releasedOldLayout,
+                                       descriptor.releasedNewLayout, isInitialized, expectValid);
+            }
+
+            wgpu::Texture WrapVulkanImage(
+                wgpu::Device dawnDevice,
+                const wgpu::TextureDescriptor* textureDescriptor,
+                const ExternalTexture* externalTexture,
+                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                VkImageLayout releasedOldLayout,
+                VkImageLayout releasedNewLayout,
+                bool isInitialized = true,
+                bool expectValid = true) {
+                ExternalImageDescriptorVkForTesting descriptor;
+                descriptor.cTextureDescriptor =
+                    reinterpret_cast<const WGPUTextureDescriptor*>(textureDescriptor);
+                descriptor.isInitialized = isInitialized;
+                descriptor.releasedOldLayout = releasedOldLayout;
+                descriptor.releasedNewLayout = releasedNewLayout;
+
+                wgpu::Texture texture = mBackend->WrapImage(dawnDevice, externalTexture, descriptor,
+                                                            std::move(semaphores));
+
+                if (expectValid) {
+                    EXPECT_NE(texture, nullptr) << "Failed to wrap image, are external memory / "
+                                                   "semaphore extensions supported?";
+                } else {
+                    EXPECT_EQ(texture, nullptr);
+                }
+
+                return texture;
+            }
+
+            // Exports the signal from a wrapped texture and ignores it
+            // We have to export the signal before destroying the wrapped texture else it's an
+            // assertion failure
+            void IgnoreSignalSemaphore(wgpu::Texture wrappedTexture) {
+                ExternalImageExportInfoVkForTesting exportInfo;
+                bool result =
+                    mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
+                ASSERT(result);
+            }
+
+          protected:
+            std::unique_ptr<VulkanImageWrappingTestBackend> mBackend;
+
+            wgpu::TextureDescriptor defaultDescriptor;
+            std::unique_ptr<ExternalTexture> defaultTexture;
+        };
+
+    }  // anonymous namespace
+
+    using VulkanImageWrappingValidationTests = VulkanImageWrappingTestBase;
+
+    // Test no error occurs if the import is valid
+    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImport) {
+        wgpu::Texture texture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+        EXPECT_NE(texture.Get(), nullptr);
+        IgnoreSignalSemaphore(texture);
+    }
+
+    // Test no error occurs if the import is valid with DawnTextureInternalUsageDescriptor
+    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImportWithInternalUsageDescriptor) {
+        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+        defaultDescriptor.nextInChain = &internalDesc;
+        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+        internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+        wgpu::Texture texture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+        EXPECT_NE(texture.Get(), nullptr);
+        IgnoreSignalSemaphore(texture);
+    }
+
+    // Test an error occurs if an invalid sType is the nextInChain
+    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDescriptor) {
+        wgpu::ChainedStruct chainedDescriptor;
+        chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
+        defaultDescriptor.nextInChain = &chainedDescriptor;
+
+        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+        EXPECT_EQ(texture.Get(), nullptr);
+    }
+
+    // Test an error occurs if the descriptor dimension isn't 2D
+    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDimension) {
+        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+
+        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+        EXPECT_EQ(texture.Get(), nullptr);
+    }
+
+    // Test an error occurs if the descriptor mip level count isn't 1
+    TEST_P(VulkanImageWrappingValidationTests, InvalidMipLevelCount) {
+        defaultDescriptor.mipLevelCount = 2;
+
+        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+        EXPECT_EQ(texture.Get(), nullptr);
+    }
+
+    // Test an error occurs if the descriptor depth isn't 1
+    TEST_P(VulkanImageWrappingValidationTests, InvalidDepth) {
+        defaultDescriptor.size.depthOrArrayLayers = 2;
+
+        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+        EXPECT_EQ(texture.Get(), nullptr);
+    }
+
+    // Test an error occurs if the descriptor sample count isn't 1
+    TEST_P(VulkanImageWrappingValidationTests, InvalidSampleCount) {
+        defaultDescriptor.sampleCount = 4;
+
+        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+        EXPECT_EQ(texture.Get(), nullptr);
+    }
+
+    // Test an error occurs if we try to export the signal semaphore twice
+    TEST_P(VulkanImageWrappingValidationTests, DoubleSignalSemaphoreExport) {
+        wgpu::Texture texture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+        ASSERT_NE(texture.Get(), nullptr);
+        IgnoreSignalSemaphore(texture);
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_DEVICE_ERROR(
+            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+        ASSERT_FALSE(success);
+        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+    }
+
+    // Test an error occurs if we try to export the signal semaphore from a normal texture
+    TEST_P(VulkanImageWrappingValidationTests, NormalTextureSignalSemaphoreExport) {
+        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+        ASSERT_NE(texture.Get(), nullptr);
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_DEVICE_ERROR(
+            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+        ASSERT_FALSE(success);
+        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+    }
+
+    // Test an error occurs if we try to export the signal semaphore from a destroyed texture
+    TEST_P(VulkanImageWrappingValidationTests, DestroyedTextureSignalSemaphoreExport) {
+        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+        ASSERT_NE(texture.Get(), nullptr);
+        texture.Destroy();
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_DEVICE_ERROR(
+            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+        ASSERT_FALSE(success);
+        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+    }
+
+    // Fixture to test using external memory textures through different usages.
+    // These tests are skipped if the harness is using the wire.
+    class VulkanImageWrappingUsageTests : public VulkanImageWrappingTestBase {
+      public:
+        void SetUp() override {
+            VulkanImageWrappingTestBase::SetUp();
+            if (UsesWire()) {
+                return;
+            }
+
+            // Create another device based on the original
+            backendAdapter =
+                dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get())->GetAdapter());
+            deviceDescriptor.nextInChain = &togglesDesc;
+            togglesDesc.forceEnabledToggles = GetParam().forceEnabledWorkarounds.data();
+            togglesDesc.forceEnabledTogglesCount = GetParam().forceEnabledWorkarounds.size();
+            togglesDesc.forceDisabledToggles = GetParam().forceDisabledWorkarounds.data();
+            togglesDesc.forceDisabledTogglesCount = GetParam().forceDisabledWorkarounds.size();
+
+            secondDeviceVk =
+                dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+            secondDevice = wgpu::Device::Acquire(dawn::native::ToAPI(secondDeviceVk));
+        }
+
+      protected:
+        dawn::native::vulkan::Adapter* backendAdapter;
+        dawn::native::DeviceDescriptor deviceDescriptor;
+        dawn::native::DawnTogglesDeviceDescriptor togglesDesc;
+
+        wgpu::Device secondDevice;
+        dawn::native::vulkan::Device* secondDeviceVk;
+
+        // Clear a texture on a given device
+        void ClearImage(wgpu::Device dawnDevice,
+                        wgpu::Texture wrappedTexture,
+                        wgpu::Color clearColor) {
+            wgpu::TextureView wrappedView = wrappedTexture.CreateView();
+
+            // Submit a clear operation
+            utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
+            renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
+            renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+
+            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+            pass.End();
+
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            wgpu::Queue queue = dawnDevice.GetQueue();
+            queue.Submit(1, &commands);
+        }
+
+        // Submits a 1x1x1 copy from source to destination
+        void SimpleCopyTextureToTexture(wgpu::Device dawnDevice,
+                                        wgpu::Queue dawnQueue,
+                                        wgpu::Texture source,
+                                        wgpu::Texture destination) {
+            wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
+            wgpu::ImageCopyTexture copyDst =
+                utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+
+            wgpu::Extent3D copySize = {1, 1, 1};
+
+            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+            wgpu::CommandBuffer commands = encoder.Finish();
+
+            dawnQueue.Submit(1, &commands);
+        }
+    };
+
+    // Clear an image in |secondDevice|
+    // Verify clear color is visible in |device|
+    TEST_P(VulkanImageWrappingUsageTests, ClearImageAcrossDevices) {
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |secondDevice|
+        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |device|, making sure we wait on signalFd
+        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+        // Verify |device| sees the changes from |secondDevice|
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+        IgnoreSignalSemaphore(nextWrappedTexture);
+    }
+
+    // Clear an image in |secondDevice|
+    // Verify clear color is not visible in |device| if we import the texture as not cleared
+    TEST_P(VulkanImageWrappingUsageTests, UninitializedTextureIsCleared) {
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |secondDevice|
+        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |device|, making sure we wait on signalFd
+        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout, false);
+
+        // Verify |device| doesn't see the changes from |secondDevice|
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), nextWrappedTexture, 0, 0);
+
+        IgnoreSignalSemaphore(nextWrappedTexture);
+    }
+
+    // Import a texture into |secondDevice|
+    // Clear the texture on |secondDevice|
+    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
+    // Verify the clear color from |secondDevice| is visible in |copyDstTexture|
+    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureSrcSync) {
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |secondDevice|
+        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |device|, making sure we wait on |signalFd|
+        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+        // Create a second texture on |device|
+        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+        // Copy |deviceWrappedTexture| into |copyDstTexture|
+        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+        // Verify |copyDstTexture| sees changes from |secondDevice|
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+        IgnoreSignalSemaphore(deviceWrappedTexture);
+    }
+
+    // Import a texture into |device|
+    // Clear texture with color A on |device|
+    // Import same texture into |secondDevice|, waiting on the copy signal
+    // Clear the new texture with color B on |secondDevice|
+    // Copy color B using Texture to Texture copy on |secondDevice|
+    // Import texture back into |device|, waiting on color B signal
+    // Verify texture contains color B
+    // If texture destination isn't synchronized, |secondDevice| could copy color B
+    // into the texture first, then |device| writes color A
+    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureDstSync) {
+        // Import the image on |device|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |device|
+        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |secondDevice|, making sure we wait on |signalFd|
+        wgpu::Texture secondDeviceWrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
+                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
+                            exportInfo.releasedNewLayout);
+
+        // Create a texture with color B on |secondDevice|
+        wgpu::Texture copySrcTexture = secondDevice.CreateTexture(&defaultDescriptor);
+        ClearImage(secondDevice, copySrcTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        // Copy color B on |secondDevice|
+        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, copySrcTexture,
+                                   secondDeviceWrappedTexture);
+
+        // Re-import back into |device|, waiting on |secondDevice|'s signal
+        ExternalImageExportInfoVkForTesting secondExportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+        wgpu::Texture nextWrappedTexture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
+                            std::move(secondExportInfo.semaphores),
+                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+        // Verify |nextWrappedTexture| contains the color from our copy
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+        IgnoreSignalSemaphore(nextWrappedTexture);
+    }
+
+    // Import a texture from |secondDevice|
+    // Clear the texture on |secondDevice|
+    // Issue a copy of the imported texture inside |device| to |copyDstBuffer|
+    // Verify the clear color from |secondDevice| is visible in |copyDstBuffer|
+    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToBufferSrcSync) {
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |secondDevice|
+        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |device|, making sure we wait on |signalFd|
+        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+        // Create a destination buffer on |device|
+        wgpu::BufferDescriptor bufferDesc;
+        bufferDesc.size = 4;
+        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&bufferDesc);
+
+        // Copy |deviceWrappedTexture| into |copyDstBuffer|
+        wgpu::ImageCopyTexture copySrc =
+            utils::CreateImageCopyTexture(deviceWrappedTexture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, 256);
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Verify |copyDstBuffer| sees changes from |secondDevice|
+        uint32_t expected = 0x04030201;
+        EXPECT_BUFFER_U32_EQ(expected, copyDstBuffer, 0);
+
+        IgnoreSignalSemaphore(deviceWrappedTexture);
+    }
+
+    // Import a texture into |device|
+    // Clear texture with color A on |device|
+    // Import same texture into |secondDevice|, waiting on the copy signal
+    // Copy color B using Buffer to Texture copy on |secondDevice|
+    // Import texture back into |device|, waiting on color B signal
+    // Verify texture contains color B
+    // If texture destination isn't synchronized, |secondDevice| could copy color B
+    // into the texture first, then |device| writes color A
+    TEST_P(VulkanImageWrappingUsageTests, CopyBufferToTextureDstSync) {
+        // Import the image on |device|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |device|
+        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |secondDevice|, making sure we wait on |signalFd|
+        wgpu::Texture secondDeviceWrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
+                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
+                            exportInfo.releasedNewLayout);
+
+        // Copy color B on |secondDevice|
+        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+        // Create a buffer on |secondDevice|
+        wgpu::Buffer copySrcBuffer =
+            utils::CreateBufferFromData(secondDevice, wgpu::BufferUsage::CopySrc, {0x04030201});
+
+        // Copy |copySrcBuffer| into |secondDeviceWrappedTexture|
+        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, 256);
+        wgpu::ImageCopyTexture copyDst =
+            utils::CreateImageCopyTexture(secondDeviceWrappedTexture, 0, {0, 0, 0});
+
+        wgpu::Extent3D copySize = {1, 1, 1};
+
+        wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        secondDeviceQueue.Submit(1, &commands);
+
+        // Re-import back into |device|, waiting on |secondDevice|'s signal
+        ExternalImageExportInfoVkForTesting secondExportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+        wgpu::Texture nextWrappedTexture =
+            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
+                            std::move(secondExportInfo.semaphores),
+                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+        // Verify |nextWrappedTexture| contains the color from our copy
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+        IgnoreSignalSemaphore(nextWrappedTexture);
+    }
+
+    // Import a texture from |secondDevice|
+    // Clear the texture on |secondDevice|
+    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
+    // Issue second copy to |secondCopyDstTexture|
+    // Verify the clear color from |secondDevice| is visible in both copies
+    TEST_P(VulkanImageWrappingUsageTests, DoubleTextureUsage) {
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        // Clear |wrappedTexture| on |secondDevice|
+        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image to |device|, making sure we wait on |signalFd|
+        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+        // Create a second texture on |device|
+        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+        // Create a third texture on |device|
+        wgpu::Texture secondCopyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+        // Copy |deviceWrappedTexture| into |copyDstTexture|
+        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+        // Copy |deviceWrappedTexture| into |secondCopyDstTexture|
+        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, secondCopyDstTexture);
+
+        // Verify |copyDstTexture| sees changes from |secondDevice|
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+        // Verify |secondCopyDstTexture| sees changes from |secondDevice|
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), secondCopyDstTexture, 0, 0);
+
+        IgnoreSignalSemaphore(deviceWrappedTexture);
+    }
+
+    // Tex A on device 3 (external export)
+    // Tex B on device 2 (external export)
+    // Tex C on device 1 (external export)
+    // Clear color for A on device 3
+    // Copy A->B on device 3
+    // Copy B->C on device 2 (wait on B from previous op)
+    // Copy C->D on device 1 (wait on C from previous op)
+    // Verify D has same color as A
+    TEST_P(VulkanImageWrappingUsageTests, ChainTextureCopy) {
+        // device 1 = |device|
+        // device 2 = |secondDevice|
+        // Create device 3
+        dawn::native::vulkan::Device* thirdDeviceVk =
+            dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+        wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn::native::ToAPI(thirdDeviceVk));
+
+        // Make queue for device 2 and 3
+        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+        wgpu::Queue thirdDeviceQueue = thirdDevice.GetQueue();
+
+        // Create textures A, B, C
+        std::unique_ptr<ExternalTexture> textureA =
+            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+        std::unique_ptr<ExternalTexture> textureB =
+            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+        std::unique_ptr<ExternalTexture> textureC =
+            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+
+        // Import TexA, TexB on device 3
+        wgpu::Texture wrappedTexADevice3 =
+            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureA.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+        wgpu::Texture wrappedTexBDevice3 =
+            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureB.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+        // Clear TexA
+        ClearImage(thirdDevice, wrappedTexADevice3,
+                   {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+        // Copy A->B
+        SimpleCopyTextureToTexture(thirdDevice, thirdDeviceQueue, wrappedTexADevice3,
+                                   wrappedTexBDevice3);
+
+        ExternalImageExportInfoVkForTesting exportInfoTexBDevice3;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexBDevice3, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfoTexBDevice3));
+        IgnoreSignalSemaphore(wrappedTexADevice3);
+
+        // Import TexB, TexC on device 2
+        wgpu::Texture wrappedTexBDevice2 = WrapVulkanImage(
+            secondDevice, &defaultDescriptor, textureB.get(),
+            std::move(exportInfoTexBDevice3.semaphores), exportInfoTexBDevice3.releasedOldLayout,
+            exportInfoTexBDevice3.releasedNewLayout);
+
+        wgpu::Texture wrappedTexCDevice2 =
+            WrapVulkanImage(secondDevice, &defaultDescriptor, textureC.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+        // Copy B->C on device 2
+        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, wrappedTexBDevice2,
+                                   wrappedTexCDevice2);
+
+        ExternalImageExportInfoVkForTesting exportInfoTexCDevice2;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexCDevice2, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfoTexCDevice2));
+        IgnoreSignalSemaphore(wrappedTexBDevice2);
+
+        // Import TexC on device 1
+        wgpu::Texture wrappedTexCDevice1 = WrapVulkanImage(
+            device, &defaultDescriptor, textureC.get(), std::move(exportInfoTexCDevice2.semaphores),
+            exportInfoTexCDevice2.releasedOldLayout, exportInfoTexCDevice2.releasedNewLayout);
+
+        // Create TexD on device 1
+        wgpu::Texture texD = device.CreateTexture(&defaultDescriptor);
+
+        // Copy C->D on device 1
+        SimpleCopyTextureToTexture(device, queue, wrappedTexCDevice1, texD);
+
+        // Verify D matches clear color
+        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), texD, 0, 0);
+
+        IgnoreSignalSemaphore(wrappedTexCDevice1);
+    }
+
+    // Tests a larger image is preserved when importing
+    TEST_P(VulkanImageWrappingUsageTests, LargerImage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = 640;
+        descriptor.size.height = 480;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+
+        // Fill memory with textures
+        std::vector<wgpu::Texture> textures;
+        for (int i = 0; i < 20; i++) {
+            textures.push_back(device.CreateTexture(&descriptor));
+        }
+
+        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+        // Make an image on |secondDevice|
+        std::unique_ptr<ExternalTexture> texture = mBackend->CreateTexture(
+            descriptor.size.width, descriptor.size.height, descriptor.format, descriptor.usage);
+
+        // Import the image on |secondDevice|
+        wgpu::Texture wrappedTexture =
+            WrapVulkanImage(secondDevice, &descriptor, texture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
+                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+        // Draw a non-trivial picture
+        uint32_t width = 640, height = 480, pixelSize = 4;
+        uint32_t bytesPerRow = Align(width * pixelSize, kTextureBytesPerRowAlignment);
+        std::vector<unsigned char> data(bytesPerRow * (height - 1) + width * pixelSize);
+
+        for (uint32_t row = 0; row < height; row++) {
+            for (uint32_t col = 0; col < width; col++) {
+                float normRow = static_cast<float>(row) / height;
+                float normCol = static_cast<float>(col) / width;
+                float dist = sqrt(normRow * normRow + normCol * normCol) * 3;
+                dist = dist - static_cast<int>(dist);
+                data[4 * (row * width + col)] = static_cast<unsigned char>(dist * 255);
+                data[4 * (row * width + col) + 1] = static_cast<unsigned char>(dist * 255);
+                data[4 * (row * width + col) + 2] = static_cast<unsigned char>(dist * 255);
+                data[4 * (row * width + col) + 3] = 255;
+            }
+        }
+
+        // Write the picture
+        {
+            wgpu::Buffer copySrcBuffer = utils::CreateBufferFromData(
+                secondDevice, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
+            wgpu::ImageCopyBuffer copySrc =
+                utils::CreateImageCopyBuffer(copySrcBuffer, 0, bytesPerRow);
+            wgpu::ImageCopyTexture copyDst =
+                utils::CreateImageCopyTexture(wrappedTexture, 0, {0, 0, 0});
+            wgpu::Extent3D copySize = {width, height, 1};
+
+            wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
+            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+            wgpu::CommandBuffer commands = encoder.Finish();
+            secondDeviceQueue.Submit(1, &commands);
+        }
+        ExternalImageExportInfoVkForTesting exportInfo;
+        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                          &exportInfo));
+
+        // Import the image on |device|
+        wgpu::Texture nextWrappedTexture =
+            WrapVulkanImage(device, &descriptor, texture.get(), std::move(exportInfo.semaphores),
+                            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+        // Copy the image into a buffer for comparison
+        wgpu::BufferDescriptor copyDesc;
+        copyDesc.size = data.size();
+        copyDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&copyDesc);
+        {
+            wgpu::ImageCopyTexture copySrc =
+                utils::CreateImageCopyTexture(nextWrappedTexture, 0, {0, 0, 0});
+            wgpu::ImageCopyBuffer copyDst =
+                utils::CreateImageCopyBuffer(copyDstBuffer, 0, bytesPerRow);
+
+            wgpu::Extent3D copySize = {width, height, 1};
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+            wgpu::CommandBuffer commands = encoder.Finish();
+            queue.Submit(1, &commands);
+        }
+
+        // Check the image is not corrupted on |device|
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(data.data()), copyDstBuffer, 0,
+                                   data.size() / 4);
+
+        IgnoreSignalSemaphore(nextWrappedTexture);
+    }
+
+    // Test that texture descriptor view formats are passed to the backend for wrapped external
+    // textures, and that contents may be reinterpreted as sRGB.
+    TEST_P(VulkanImageWrappingUsageTests, SRGBReinterpretation) {
+        wgpu::TextureViewDescriptor viewDesc = {};
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.size = {2, 2, 1};
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        textureDesc.viewFormatCount = 1;
+        textureDesc.viewFormats = &viewDesc.format;
+
+        std::unique_ptr<ExternalTexture> backendTexture = mBackend->CreateTexture(
+            textureDesc.size.width, textureDesc.size.height, textureDesc.format, textureDesc.usage);
+
+        // Import the image on |device|
+        wgpu::Texture texture =
+            WrapVulkanImage(device, &textureDesc, backendTexture.get(), {},
+                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+        ASSERT_NE(texture.Get(), nullptr);
+
+        wgpu::ImageCopyTexture dst = {};
+        dst.texture = texture;
+        std::array<RGBA8, 4> rgbaTextureData = {
+            RGBA8(180, 0, 0, 255),
+            RGBA8(0, 84, 0, 127),
+            RGBA8(0, 0, 62, 100),
+            RGBA8(62, 180, 84, 90),
+        };
+
+        wgpu::TextureDataLayout dataLayout = {};
+        dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
+
+        queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
+                           &dataLayout, &textureDesc.size);
+
+        wgpu::TextureView textureView = texture.CreateView(&viewDesc);
+
+        utils::ComboRenderPipelineDescriptor pipelineDesc;
+        pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+            @stage(vertex)
+            fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+                var pos = array<vec2<f32>, 6>(
+                                            vec2<f32>(-1.0, -1.0),
+                                            vec2<f32>(-1.0,  1.0),
+                                            vec2<f32>( 1.0, -1.0),
+                                            vec2<f32>(-1.0,  1.0),
+                                            vec2<f32>( 1.0, -1.0),
+                                            vec2<f32>( 1.0,  1.0));
+                return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+            }
+        )");
+        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+            @group(0) @binding(0) var texture : texture_2d<f32>;
+
+            @stage(fragment)
+            fn main(@builtin(position) coord: vec4<f32>) -> @location(0) vec4<f32> {
+                return textureLoad(texture, vec2<i32>(coord.xy), 0);
+            }
+        )");
+
+        utils::BasicRenderPass renderPass =
+            utils::CreateBasicRenderPass(device, textureDesc.size.width, textureDesc.size.height,
+                                         wgpu::TextureFormat::RGBA8Unorm);
+        pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+            wgpu::BindGroup bindGroup =
+                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
+
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+            pass.SetPipeline(pipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(6);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        EXPECT_PIXEL_RGBA8_BETWEEN(  //
+            RGBA8(116, 0, 0, 255),   //
+            RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_BETWEEN(  //
+            RGBA8(0, 23, 0, 127),    //
+            RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
+        EXPECT_PIXEL_RGBA8_BETWEEN(  //
+            RGBA8(0, 0, 12, 100),    //
+            RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
+        EXPECT_PIXEL_RGBA8_BETWEEN(  //
+            RGBA8(12, 116, 23, 90),  //
+            RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
+
+        IgnoreSignalSemaphore(texture);
+    }
+
+    DAWN_INSTANTIATE_TEST(VulkanImageWrappingValidationTests, VulkanBackend());
+    DAWN_INSTANTIATE_TEST(VulkanImageWrappingUsageTests, VulkanBackend());
+
+}}  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.h b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
new file mode 100644
index 0000000..d4173a7
--- /dev/null
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
@@ -0,0 +1,76 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TESTS_VULKANIMAGEWRAPPINGTESTS_H_
+#define TESTS_VULKANIMAGEWRAPPINGTESTS_H_
+
+// This must be above all other includes otherwise VulkanBackend.h includes vulkan.h before we had
+// time to wrap it with vulkan_platform.h
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/webgpu_cpp.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+    struct ExternalImageDescriptorVkForTesting;
+    struct ExternalImageExportInfoVkForTesting;
+
+    class VulkanImageWrappingTestBackend {
+      public:
+        static std::unique_ptr<VulkanImageWrappingTestBackend> Create(const wgpu::Device& device);
+        virtual ~VulkanImageWrappingTestBackend() = default;
+
+        class ExternalTexture : NonCopyable {
+          public:
+            virtual ~ExternalTexture() = default;
+        };
+        class ExternalSemaphore : NonCopyable {
+          public:
+            virtual ~ExternalSemaphore() = default;
+        };
+
+        virtual std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                               uint32_t height,
+                                                               wgpu::TextureFormat format,
+                                                               wgpu::TextureUsage usage) = 0;
+        virtual wgpu::Texture WrapImage(
+            const wgpu::Device& device,
+            const ExternalTexture* texture,
+            const ExternalImageDescriptorVkForTesting& descriptor,
+            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) = 0;
+
+        virtual bool ExportImage(const wgpu::Texture& texture,
+                                 VkImageLayout layout,
+                                 ExternalImageExportInfoVkForTesting* exportInfo) = 0;
+    };
+
+    struct ExternalImageDescriptorVkForTesting : public ExternalImageDescriptorVk {
+      public:
+        ExternalImageDescriptorVkForTesting();
+    };
+
+    struct ExternalImageExportInfoVkForTesting : public ExternalImageExportInfoVk {
+      public:
+        ExternalImageExportInfoVkForTesting();
+        std::vector<std::unique_ptr<VulkanImageWrappingTestBackend::ExternalSemaphore>> semaphores;
+    };
+
+}  // namespace dawn::native::vulkan
+
+#endif  // TESTS_VULKANIMAGEWRAPPINGTESTS_H_
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
new file mode 100644
index 0000000..6c54924
--- /dev/null
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
@@ -0,0 +1,189 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/white_box/VulkanImageWrappingTests.h"
+
+#include <fcntl.h>
+#include <gbm.h>
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+namespace dawn::native::vulkan {
+
+    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+        : ExternalImageDescriptorVk(ExternalImageType::DmaBuf) {
+    }
+    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+        : ExternalImageExportInfoVk(ExternalImageType::DmaBuf) {
+    }
+
+    class ExternalSemaphoreDmaBuf : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+      public:
+        ExternalSemaphoreDmaBuf(int handle) : mHandle(handle) {
+        }
+        ~ExternalSemaphoreDmaBuf() override {
+            if (mHandle != -1) {
+                close(mHandle);
+            }
+        }
+        int AcquireHandle() {
+            int handle = mHandle;
+            mHandle = -1;
+            return handle;
+        }
+
+      private:
+        int mHandle = -1;
+    };
+
+    class ExternalTextureDmaBuf : public VulkanImageWrappingTestBackend::ExternalTexture {
+      public:
+        ExternalTextureDmaBuf(gbm_bo* bo, int fd, uint32_t stride, uint64_t drmModifier)
+            : mGbmBo(bo), mFd(fd), stride(stride), drmModifier(drmModifier) {
+        }
+
+        ~ExternalTextureDmaBuf() override {
+            if (mFd != -1) {
+                close(mFd);
+            }
+            if (mGbmBo != nullptr) {
+                gbm_bo_destroy(mGbmBo);
+            }
+        }
+
+        int Dup() const {
+            return dup(mFd);
+        }
+
+      private:
+        gbm_bo* mGbmBo = nullptr;
+        int mFd = -1;
+
+      public:
+        const uint32_t stride;
+        const uint64_t drmModifier;
+    };
+
+    class VulkanImageWrappingTestBackendDmaBuf : public VulkanImageWrappingTestBackend {
+      public:
+        VulkanImageWrappingTestBackendDmaBuf(const wgpu::Device& device) {
+        }
+
+        ~VulkanImageWrappingTestBackendDmaBuf() {
+            if (mGbmDevice != nullptr) {
+                gbm_device_destroy(mGbmDevice);
+                mGbmDevice = nullptr;
+            }
+        }
+
+        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                       uint32_t height,
+                                                       wgpu::TextureFormat format,
+                                                       wgpu::TextureUsage usage) override {
+            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+
+            gbm_bo* bo = CreateGbmBo(width, height, true);
+
+            return std::make_unique<ExternalTextureDmaBuf>(
+                bo, gbm_bo_get_fd(bo), gbm_bo_get_stride_for_plane(bo, 0), gbm_bo_get_modifier(bo));
+        }
+
+        wgpu::Texture WrapImage(
+            const wgpu::Device& device,
+            const ExternalTexture* texture,
+            const ExternalImageDescriptorVkForTesting& descriptor,
+            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+            const ExternalTextureDmaBuf* textureDmaBuf =
+                static_cast<const ExternalTextureDmaBuf*>(texture);
+            std::vector<int> waitFDs;
+            for (auto& semaphore : semaphores) {
+                waitFDs.push_back(
+                    static_cast<ExternalSemaphoreDmaBuf*>(semaphore.get())->AcquireHandle());
+            }
+
+            ExternalImageDescriptorDmaBuf descriptorDmaBuf;
+            *static_cast<ExternalImageDescriptorVk*>(&descriptorDmaBuf) = descriptor;
+
+            descriptorDmaBuf.memoryFD = textureDmaBuf->Dup();
+            descriptorDmaBuf.waitFDs = std::move(waitFDs);
+
+            descriptorDmaBuf.stride = textureDmaBuf->stride;
+            descriptorDmaBuf.drmModifier = textureDmaBuf->drmModifier;
+
+            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorDmaBuf);
+        }
+
+        bool ExportImage(const wgpu::Texture& texture,
+                         VkImageLayout layout,
+                         ExternalImageExportInfoVkForTesting* exportInfo) override {
+            ExternalImageExportInfoDmaBuf infoDmaBuf;
+            bool success = ExportVulkanImage(texture.Get(), layout, &infoDmaBuf);
+
+            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoDmaBuf;
+            for (int fd : infoDmaBuf.semaphoreHandles) {
+                EXPECT_NE(fd, -1);
+                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreDmaBuf>(fd));
+            }
+
+            return success;
+        }
+
+        void CreateGbmDevice() {
+            // Render nodes [1] are the primary interface for communicating with the GPU on
+            // devices that support DRM. The actual filename of the render node is
+            // implementation-specific, so we must scan through all possible filenames to find
+            // one that we can use [2].
+            //
+            // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
+            // [2]
+            // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
+            const uint32_t kRenderNodeStart = 128;
+            const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
+            const std::string kRenderNodeTemplate = "/dev/dri/renderD";
+
+            int renderNodeFd = -1;
+            for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
+                std::string renderNode = kRenderNodeTemplate + std::to_string(i);
+                renderNodeFd = open(renderNode.c_str(), O_RDWR);
+                if (renderNodeFd >= 0)
+                    break;
+            }
+            EXPECT_GE(renderNodeFd, 0) << "Failed to get file descriptor for render node";
+
+            gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
+            EXPECT_NE(gbmDevice, nullptr) << "Failed to create GBM device";
+            mGbmDevice = gbmDevice;
+        }
+
+      private:
+        gbm_bo* CreateGbmBo(uint32_t width, uint32_t height, bool linear) {
+            uint32_t flags = GBM_BO_USE_RENDERING;
+            if (linear)
+                flags |= GBM_BO_USE_LINEAR;
+            gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, width, height, GBM_FORMAT_XBGR8888, flags);
+            EXPECT_NE(gbmBo, nullptr) << "Failed to create GBM buffer object";
+            return gbmBo;
+        }
+
+        gbm_device* mGbmDevice = nullptr;
+    };
+
+    // static
+    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+        const wgpu::Device& device) {
+        auto backend = std::make_unique<VulkanImageWrappingTestBackendDmaBuf>(device);
+        backend->CreateGbmDevice();
+        return backend;
+    }
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
new file mode 100644
index 0000000..b41abdb
--- /dev/null
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
@@ -0,0 +1,277 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/white_box/VulkanImageWrappingTests.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+namespace dawn::native::vulkan {
+
+    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+        : ExternalImageDescriptorVk(ExternalImageType::OpaqueFD) {
+    }
+    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+        : ExternalImageExportInfoVk(ExternalImageType::OpaqueFD) {
+    }
+
+    class ExternalSemaphoreOpaqueFD : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+      public:
+        ExternalSemaphoreOpaqueFD(int handle) : mHandle(handle) {
+        }
+        ~ExternalSemaphoreOpaqueFD() override {
+            if (mHandle != -1) {
+                close(mHandle);
+            }
+        }
+        int AcquireHandle() {
+            int handle = mHandle;
+            mHandle = -1;
+            return handle;
+        }
+
+      private:
+        int mHandle = -1;
+    };
+
+    class ExternalTextureOpaqueFD : public VulkanImageWrappingTestBackend::ExternalTexture {
+      public:
+        ExternalTextureOpaqueFD(dawn::native::vulkan::Device* device,
+                                int fd,
+                                VkDeviceMemory allocation,
+                                VkImage handle,
+                                VkDeviceSize allocationSize,
+                                uint32_t memoryTypeIndex)
+            : mDevice(device),
+              mFd(fd),
+              mAllocation(allocation),
+              mHandle(handle),
+              allocationSize(allocationSize),
+              memoryTypeIndex(memoryTypeIndex) {
+        }
+
+        ~ExternalTextureOpaqueFD() override {
+            if (mFd != -1) {
+                close(mFd);
+            }
+            if (mAllocation != VK_NULL_HANDLE) {
+                mDevice->GetFencedDeleter()->DeleteWhenUnused(mAllocation);
+            }
+            if (mHandle != VK_NULL_HANDLE) {
+                mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            }
+        }
+
+        int Dup() const {
+            return dup(mFd);
+        }
+
+      private:
+        dawn::native::vulkan::Device* mDevice;
+        int mFd = -1;
+        VkDeviceMemory mAllocation = VK_NULL_HANDLE;
+        VkImage mHandle = VK_NULL_HANDLE;
+
+      public:
+        const VkDeviceSize allocationSize;
+        const uint32_t memoryTypeIndex;
+    };
+
+    class VulkanImageWrappingTestBackendOpaqueFD : public VulkanImageWrappingTestBackend {
+      public:
+        VulkanImageWrappingTestBackendOpaqueFD(const wgpu::Device& device) : mDevice(device) {
+            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+        }
+
+        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                       uint32_t height,
+                                                       wgpu::TextureFormat format,
+                                                       wgpu::TextureUsage usage) override {
+            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+            VkFormat vulkanFormat = VK_FORMAT_R8G8B8A8_UNORM;
+
+            VkImage handle;
+            ::VkResult result = CreateImage(mDeviceVk, width, height, vulkanFormat, &handle);
+            EXPECT_EQ(result, VK_SUCCESS) << "Failed to create external image";
+
+            VkDeviceMemory allocation;
+            VkDeviceSize allocationSize;
+            uint32_t memoryTypeIndex;
+            ::VkResult resultBool =
+                AllocateMemory(mDeviceVk, handle, &allocation, &allocationSize, &memoryTypeIndex);
+            EXPECT_EQ(resultBool, VK_SUCCESS) << "Failed to allocate external memory";
+
+            result = BindMemory(mDeviceVk, handle, allocation);
+            EXPECT_EQ(result, VK_SUCCESS) << "Failed to bind image memory";
+
+            int fd = GetMemoryFd(mDeviceVk, allocation);
+
+            return std::make_unique<ExternalTextureOpaqueFD>(mDeviceVk, fd, allocation, handle,
+                                                             allocationSize, memoryTypeIndex);
+        }
+
+        wgpu::Texture WrapImage(
+            const wgpu::Device& device,
+            const ExternalTexture* texture,
+            const ExternalImageDescriptorVkForTesting& descriptor,
+            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+            const ExternalTextureOpaqueFD* textureOpaqueFD =
+                static_cast<const ExternalTextureOpaqueFD*>(texture);
+            std::vector<int> waitFDs;
+            for (auto& semaphore : semaphores) {
+                waitFDs.push_back(
+                    static_cast<ExternalSemaphoreOpaqueFD*>(semaphore.get())->AcquireHandle());
+            }
+
+            ExternalImageDescriptorOpaqueFD descriptorOpaqueFD;
+            *static_cast<ExternalImageDescriptorVk*>(&descriptorOpaqueFD) = descriptor;
+            descriptorOpaqueFD.memoryFD = textureOpaqueFD->Dup();
+            descriptorOpaqueFD.allocationSize = textureOpaqueFD->allocationSize;
+            descriptorOpaqueFD.memoryTypeIndex = textureOpaqueFD->memoryTypeIndex;
+            descriptorOpaqueFD.waitFDs = std::move(waitFDs);
+
+            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorOpaqueFD);
+        }
+
+        bool ExportImage(const wgpu::Texture& texture,
+                         VkImageLayout layout,
+                         ExternalImageExportInfoVkForTesting* exportInfo) override {
+            ExternalImageExportInfoOpaqueFD infoOpaqueFD;
+            bool success = ExportVulkanImage(texture.Get(), layout, &infoOpaqueFD);
+
+            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoOpaqueFD;
+            for (int fd : infoOpaqueFD.semaphoreHandles) {
+                EXPECT_NE(fd, -1);
+                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreOpaqueFD>(fd));
+            }
+
+            return success;
+        }
+
+      private:
+        // Creates a VkImage with external memory
+        ::VkResult CreateImage(dawn::native::vulkan::Device* deviceVk,
+                               uint32_t width,
+                               uint32_t height,
+                               VkFormat format,
+                               VkImage* image) {
+            VkExternalMemoryImageCreateInfoKHR externalInfo;
+            externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
+            externalInfo.pNext = nullptr;
+            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+            auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                         VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+            VkImageCreateInfo createInfo;
+            createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+            createInfo.pNext = &externalInfo;
+            createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+            createInfo.imageType = VK_IMAGE_TYPE_2D;
+            createInfo.format = format;
+            createInfo.extent = {width, height, 1};
+            createInfo.mipLevels = 1;
+            createInfo.arrayLayers = 1;
+            createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+            createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+            createInfo.usage = usage;
+            createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+            createInfo.queueFamilyIndexCount = 0;
+            createInfo.pQueueFamilyIndices = nullptr;
+            createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+            return deviceVk->fn.CreateImage(deviceVk->GetVkDevice(), &createInfo, nullptr,
+                                            &**image);
+        }
+
+        // Allocates memory for an image
+        ::VkResult AllocateMemory(dawn::native::vulkan::Device* deviceVk,
+                                  VkImage handle,
+                                  VkDeviceMemory* allocation,
+                                  VkDeviceSize* allocationSize,
+                                  uint32_t* memoryTypeIndex) {
+            // Create the image memory and associate it with the container
+            VkMemoryRequirements requirements;
+            deviceVk->fn.GetImageMemoryRequirements(deviceVk->GetVkDevice(), handle, &requirements);
+
+            // Import memory from file descriptor
+            VkExportMemoryAllocateInfoKHR externalInfo;
+            externalInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
+            externalInfo.pNext = nullptr;
+            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+            int bestType = deviceVk->GetResourceMemoryAllocator()->FindBestTypeIndex(
+                requirements, MemoryKind::Opaque);
+            VkMemoryAllocateInfo allocateInfo;
+            allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+            allocateInfo.pNext = &externalInfo;
+            allocateInfo.allocationSize = requirements.size;
+            allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
+
+            *allocationSize = allocateInfo.allocationSize;
+            *memoryTypeIndex = allocateInfo.memoryTypeIndex;
+
+            return deviceVk->fn.AllocateMemory(deviceVk->GetVkDevice(), &allocateInfo, nullptr,
+                                               &**allocation);
+        }
+
+        // Binds memory to an image
+        ::VkResult BindMemory(dawn::native::vulkan::Device* deviceVk,
+                              VkImage handle,
+                              VkDeviceMemory memory) {
+            return deviceVk->fn.BindImageMemory(deviceVk->GetVkDevice(), handle, memory, 0);
+        }
+
+        // Extracts a file descriptor representing memory on a device
+        int GetMemoryFd(dawn::native::vulkan::Device* deviceVk, VkDeviceMemory memory) {
+            VkMemoryGetFdInfoKHR getFdInfo;
+            getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+            getFdInfo.pNext = nullptr;
+            getFdInfo.memory = memory;
+            getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+            int memoryFd = -1;
+            deviceVk->fn.GetMemoryFdKHR(deviceVk->GetVkDevice(), &getFdInfo, &memoryFd);
+
+            EXPECT_GE(memoryFd, 0) << "Failed to get file descriptor for external memory";
+            return memoryFd;
+        }
+
+        // Prepares and exports memory for an image on a given device
+        void CreateBindExportImage(dawn::native::vulkan::Device* deviceVk,
+                                   uint32_t width,
+                                   uint32_t height,
+                                   VkFormat format,
+                                   VkImage* handle,
+                                   VkDeviceMemory* allocation,
+                                   VkDeviceSize* allocationSize,
+                                   uint32_t* memoryTypeIndex,
+                                   int* memoryFd) {
+        }
+
+        wgpu::Device mDevice;
+        dawn::native::vulkan::Device* mDeviceVk;
+    };
+
+    // static
+    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+        const wgpu::Device& device) {
+        return std::make_unique<VulkanImageWrappingTestBackendOpaqueFD>(device);
+    }
+
+}  // namespace dawn::native::vulkan
diff --git a/src/dawn/utils/BUILD.gn b/src/dawn/utils/BUILD.gn
new file mode 100644
index 0000000..e281b41
--- /dev/null
+++ b/src/dawn/utils/BUILD.gn
@@ -0,0 +1,193 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_features.gni")
+
+###############################################################################
+# GLFW wrapping target
+###############################################################################
+
+# GLFW does not support ChromeOS, Android or Fuchsia, so provide a small mock
+# library that can be linked into the Dawn tests on these platforms. Otherwise,
+# use the real library from third_party/.
+if (dawn_supports_glfw_for_windowing) {
+  group("glfw") {
+    public_deps = [ "${dawn_root}/third_party/gn/glfw" ]
+  }
+} else if (is_fuchsia) {
+  # The mock implementation of GLFW on Fuchsia
+  config("glfw_public_config") {
+    # Allow inclusion of <GLFW/glfw3.h>
+    include_dirs = [ "${dawn_glfw_dir}/include" ]
+
+    # The GLFW/glfw3.h header includes <GL/gl.h> by default, but the latter
+    # does not exist on Fuchsia. Defining GLFW_INCLUDE_NONE helps work around
+    # the issue, but it needs to be defined for any file that includes the
+    # header.
+    defines = [
+      "GLFW_INCLUDE_NONE",
+      "GLFW_INCLUDE_VULKAN",
+    ]
+  }
+
+  static_library("glfw") {
+    sources = [
+      # NOTE: The header below is required to pass "gn check".
+      "${dawn_glfw_dir}/include/GLFW/glfw3.h",
+      "Glfw3Fuchsia.cpp",
+    ]
+    public_configs = [ ":glfw_public_config" ]
+    deps = [ "${dawn_root}/src/dawn/common" ]
+  }
+} else {
+  # Just skip GLFW on other systems
+  group("glfw") {
+  }
+}
+
+###############################################################################
+# Utils for tests and samples
+###############################################################################
+
+static_library("utils") {
+  configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+  sources = [
+    "ComboRenderBundleEncoderDescriptor.cpp",
+    "ComboRenderBundleEncoderDescriptor.h",
+    "ComboRenderPipelineDescriptor.cpp",
+    "ComboRenderPipelineDescriptor.h",
+    "PlatformDebugLogger.h",
+    "ScopedAutoreleasePool.h",
+    "SystemUtils.cpp",
+    "SystemUtils.h",
+    "TerribleCommandBuffer.cpp",
+    "TerribleCommandBuffer.h",
+    "TestUtils.cpp",
+    "TestUtils.h",
+    "TextureUtils.cpp",
+    "TextureUtils.h",
+    "Timer.h",
+    "WGPUHelpers.cpp",
+    "WGPUHelpers.h",
+    "WireHelper.cpp",
+    "WireHelper.h",
+  ]
+  deps = [
+    "${dawn_root}/src/dawn:proc",
+    "${dawn_root}/src/dawn/common",
+    "${dawn_root}/src/dawn/native:headers",
+    "${dawn_root}/src/dawn/wire",
+    "${dawn_spirv_tools_dir}:spvtools_opt",
+  ]
+  libs = []
+  frameworks = []
+
+  if (is_win && !dawn_is_winuwp) {
+    sources += [ "WindowsDebugLogger.cpp" ]
+  } else {
+    sources += [ "EmptyDebugLogger.cpp" ]
+  }
+
+  if (is_win) {
+    sources += [ "WindowsTimer.cpp" ]
+  } else if (is_mac) {
+    sources += [
+      "OSXTimer.cpp",
+      "ObjCUtils.h",
+      "ObjCUtils.mm",
+    ]
+    frameworks += [ "QuartzCore.framework" ]
+  } else {
+    sources += [ "PosixTimer.cpp" ]
+  }
+
+  if (is_mac) {
+    sources += [ "ScopedAutoreleasePool.mm" ]
+  } else {
+    sources += [ "ScopedAutoreleasePool.cpp" ]
+  }
+
+  if (dawn_supports_glfw_for_windowing) {
+    sources += [
+      "GLFWUtils.cpp",
+      "GLFWUtils.h",
+    ]
+    deps += [ ":glfw" ]
+
+    if (dawn_enable_metal) {
+      sources += [ "GLFWUtils_metal.mm" ]
+      frameworks += [ "Metal.framework" ]
+    }
+  }
+
+  public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+}
+
+###############################################################################
+# Dawn samples, only in standalone builds
+###############################################################################
+
+if (dawn_standalone) {
+  # Library to handle the interaction of Dawn with GLFW windows in samples
+  static_library("bindings") {
+    configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+    sources = [
+      "BackendBinding.cpp",
+      "BackendBinding.h",
+    ]
+
+    public_deps = [ "${dawn_root}/include/dawn:headers" ]
+
+    deps = [
+      ":glfw",
+      "${dawn_root}/src/dawn/common",
+      "${dawn_root}/src/dawn/native",
+    ]
+    libs = []
+    frameworks = []
+
+    if (dawn_enable_d3d12) {
+      sources += [ "D3D12Binding.cpp" ]
+    }
+
+    if (dawn_enable_metal) {
+      sources += [ "MetalBinding.mm" ]
+      frameworks += [
+        "Metal.framework",
+        "QuartzCore.framework",
+      ]
+
+      # Suppress warnings that Metal isn't in the deployment target of Chrome
+      if (is_mac) {
+        cflags_objcc = [ "-Wno-unguarded-availability" ]
+      }
+    }
+
+    if (dawn_enable_null) {
+      sources += [ "NullBinding.cpp" ]
+    }
+
+    if (dawn_enable_opengl) {
+      sources += [ "OpenGLBinding.cpp" ]
+    }
+
+    if (dawn_enable_vulkan) {
+      sources += [ "VulkanBinding.cpp" ]
+    }
+  }
+}
diff --git a/src/dawn/utils/BackendBinding.cpp b/src/dawn/utils/BackendBinding.cpp
new file mode 100644
index 0000000..f97e6b9
--- /dev/null
+++ b/src/dawn/utils/BackendBinding.cpp
@@ -0,0 +1,109 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Compiler.h"
+
+#include "GLFW/glfw3.h"
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+#    include "dawn/native/OpenGLBackend.h"
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
+
+namespace utils {
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+    BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+    BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+    BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+    BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+    BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+
+    BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
+        : mWindow(window), mDevice(device) {
+    }
+
+    void DiscoverAdapter(dawn::native::Instance* instance,
+                         GLFWwindow* window,
+                         wgpu::BackendType type) {
+        DAWN_UNUSED(type);
+        DAWN_UNUSED(window);
+
+        if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+            glfwMakeContextCurrent(window);
+            auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+            if (type == wgpu::BackendType::OpenGL) {
+                dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
+                adapterOptions.getProc = getProc;
+                instance->DiscoverAdapters(&adapterOptions);
+            } else {
+                dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
+                adapterOptions.getProc = getProc;
+                instance->DiscoverAdapters(&adapterOptions);
+            }
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
+        } else {
+            instance->DiscoverDefaultAdapters();
+        }
+    }
+
+    BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
+        switch (type) {
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+            case wgpu::BackendType::D3D12:
+                return CreateD3D12Binding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+            case wgpu::BackendType::Metal:
+                return CreateMetalBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+            case wgpu::BackendType::Null:
+                return CreateNullBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+            case wgpu::BackendType::OpenGL:
+                return CreateOpenGLBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+            case wgpu::BackendType::OpenGLES:
+                return CreateOpenGLBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+            case wgpu::BackendType::Vulkan:
+                return CreateVulkanBinding(window, device);
+#endif
+
+            default:
+                return nullptr;
+        }
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/BackendBinding.h b/src/dawn/utils/BackendBinding.h
new file mode 100644
index 0000000..352d294
--- /dev/null
+++ b/src/dawn/utils/BackendBinding.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_BACKENDBINDING_H_
+#define UTILS_BACKENDBINDING_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+
+struct GLFWwindow;
+
+namespace utils {
+
+    class BackendBinding {
+      public:
+        virtual ~BackendBinding() = default;
+
+        virtual uint64_t GetSwapChainImplementation() = 0;
+        virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
+
+      protected:
+        BackendBinding(GLFWwindow* window, WGPUDevice device);
+
+        GLFWwindow* mWindow = nullptr;
+        WGPUDevice mDevice = nullptr;
+    };
+
+    void DiscoverAdapter(dawn::native::Instance* instance,
+                         GLFWwindow* window,
+                         wgpu::BackendType type);
+    BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
+
+}  // namespace utils
+
+#endif  // UTILS_BACKENDBINDING_H_
diff --git a/src/dawn/utils/CMakeLists.txt b/src/dawn/utils/CMakeLists.txt
new file mode 100644
index 0000000..6e4d6f7
--- /dev/null
+++ b/src/dawn/utils/CMakeLists.txt
@@ -0,0 +1,102 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_utils STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_utils PRIVATE
+    "ComboRenderBundleEncoderDescriptor.cpp"
+    "ComboRenderBundleEncoderDescriptor.h"
+    "ComboRenderPipelineDescriptor.cpp"
+    "ComboRenderPipelineDescriptor.h"
+    "PlatformDebugLogger.h"
+    "ScopedAutoreleasePool.cpp"
+    "ScopedAutoreleasePool.h"
+    "SystemUtils.cpp"
+    "SystemUtils.h"
+    "TerribleCommandBuffer.cpp"
+    "TerribleCommandBuffer.h"
+    "TestUtils.cpp"
+    "TestUtils.h"
+    "TextureUtils.cpp"
+    "TextureUtils.h"
+    "Timer.h"
+    "WGPUHelpers.cpp"
+    "WGPUHelpers.h"
+    "WireHelper.cpp"
+    "WireHelper.h"
+)
+target_link_libraries(dawn_utils
+    PUBLIC dawncpp_headers
+    PRIVATE dawn_internal_config
+            dawn_common
+            dawn_native
+            dawn_proc
+            dawn_wire
+            SPIRV-Tools-opt
+)
+
+if(WIN32 AND NOT WINDOWS_STORE)
+    target_sources(dawn_utils PRIVATE "WindowsDebugLogger.cpp")
+else()
+    target_sources(dawn_utils PRIVATE "EmptyDebugLogger.cpp")
+endif()
+
+if(WIN32)
+    target_sources(dawn_utils PRIVATE "WindowsTimer.cpp")
+elseif(APPLE)
+    target_sources(dawn_utils PRIVATE
+        "OSXTimer.cpp"
+        "ObjCUtils.h"
+        "ObjCUtils.mm"
+    )
+    target_link_libraries(dawn_utils PRIVATE "-framework QuartzCore")
+elseif(UNIX)
+    target_sources(dawn_utils PRIVATE "PosixTimer.cpp")
+endif()
+
+if (DAWN_ENABLE_METAL)
+    target_link_libraries(dawn_utils PRIVATE "-framework Metal")
+endif()
+
+if(DAWN_SUPPORTS_GLFW_FOR_WINDOWING)
+    target_sources(dawn_utils PRIVATE
+        "BackendBinding.cpp"
+        "BackendBinding.h"
+        "GLFWUtils.cpp"
+        "GLFWUtils.h"
+    )
+    target_link_libraries(dawn_utils PRIVATE glfw)
+
+    if (DAWN_ENABLE_D3D12)
+        target_sources(dawn_utils PRIVATE "D3D12Binding.cpp")
+    endif()
+
+    if (DAWN_ENABLE_METAL)
+        target_sources(dawn_utils PRIVATE
+            "GLFWUtils_metal.mm"
+            "MetalBinding.mm"
+        )
+    endif()
+
+    if (DAWN_ENABLE_NULL)
+        target_sources(dawn_utils PRIVATE "NullBinding.cpp")
+    endif()
+
+    if (DAWN_ENABLE_OPENGL)
+        target_sources(dawn_utils PRIVATE "OpenGLBinding.cpp")
+    endif()
+
+    if (DAWN_ENABLE_VULKAN)
+        target_sources(dawn_utils PRIVATE "VulkanBinding.cpp")
+    endif()
+endif()
diff --git a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
new file mode 100644
index 0000000..9c413d2
--- /dev/null
+++ b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
@@ -0,0 +1,28 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace utils {
+
+    ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
+        wgpu::RenderBundleEncoderDescriptor* descriptor = this;
+
+        descriptor->colorFormatsCount = 0;
+        descriptor->colorFormats = &cColorFormats[0];
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
new file mode 100644
index 0000000..c1ef12b
--- /dev/null
+++ b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
@@ -0,0 +1,35 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
+#define UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Constants.h"
+
+#include <array>
+
+namespace utils {
+
+    class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
+      public:
+        ComboRenderBundleEncoderDescriptor();
+
+        std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
+    };
+
+}  // namespace utils
+
+#endif  // UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
diff --git a/src/dawn/utils/ComboRenderPipelineDescriptor.cpp b/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
new file mode 100644
index 0000000..1114af4
--- /dev/null
+++ b/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
@@ -0,0 +1,145 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace utils {
+
+    ComboVertexState::ComboVertexState() {
+        vertexBufferCount = 0;
+
+        // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+        wgpu::VertexAttribute vertexAttribute;
+        vertexAttribute.shaderLocation = 0;
+        vertexAttribute.offset = 0;
+        vertexAttribute.format = wgpu::VertexFormat::Float32;
+        for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+            cAttributes[i] = vertexAttribute;
+        }
+        for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+            cVertexBuffers[i].arrayStride = 0;
+            cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+            cVertexBuffers[i].attributeCount = 0;
+            cVertexBuffers[i].attributes = nullptr;
+        }
+        // cVertexBuffers[i].attributes points to somewhere in cAttributes.
+        // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+        // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
+        // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
+        // cVertexBuffers[2].attributes should point to &cAttributes[5].
+        cVertexBuffers[0].attributes = &cAttributes[0];
+    }
+
+    ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
+        wgpu::RenderPipelineDescriptor* descriptor = this;
+
+        // Set defaults for the vertex state.
+        {
+            wgpu::VertexState* vertex = &descriptor->vertex;
+            vertex->module = nullptr;
+            vertex->entryPoint = "main";
+            vertex->bufferCount = 0;
+
+            // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+            for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+                cAttributes[i].shaderLocation = 0;
+                cAttributes[i].offset = 0;
+                cAttributes[i].format = wgpu::VertexFormat::Float32;
+            }
+            for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+                cBuffers[i].arrayStride = 0;
+                cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+                cBuffers[i].attributeCount = 0;
+                cBuffers[i].attributes = nullptr;
+            }
+            // cBuffers[i].attributes points to somewhere in cAttributes.
+            // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+            // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
+            // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
+            // cBuffers[2].attributes should point to &cAttributes[5].
+            cBuffers[0].attributes = &cAttributes[0];
+            vertex->buffers = &cBuffers[0];
+        }
+
+        // Set the defaults for the primitive state
+        {
+            wgpu::PrimitiveState* primitive = &descriptor->primitive;
+            primitive->topology = wgpu::PrimitiveTopology::TriangleList;
+            primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
+            primitive->frontFace = wgpu::FrontFace::CCW;
+            primitive->cullMode = wgpu::CullMode::None;
+        }
+
+        // Set the defaults for the depth-stencil state
+        {
+            wgpu::StencilFaceState stencilFace;
+            stencilFace.compare = wgpu::CompareFunction::Always;
+            stencilFace.failOp = wgpu::StencilOperation::Keep;
+            stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+            stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+            cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            cDepthStencil.depthWriteEnabled = false;
+            cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+            cDepthStencil.stencilBack = stencilFace;
+            cDepthStencil.stencilFront = stencilFace;
+            cDepthStencil.stencilReadMask = 0xff;
+            cDepthStencil.stencilWriteMask = 0xff;
+            cDepthStencil.depthBias = 0;
+            cDepthStencil.depthBiasSlopeScale = 0.0;
+            cDepthStencil.depthBiasClamp = 0.0;
+        }
+
+        // Set the defaults for the multisample state
+        {
+            wgpu::MultisampleState* multisample = &descriptor->multisample;
+            multisample->count = 1;
+            multisample->mask = 0xFFFFFFFF;
+            multisample->alphaToCoverageEnabled = false;
+        }
+
+        // Set the defaults for the fragment state
+        {
+            cFragment.module = nullptr;
+            cFragment.entryPoint = "main";
+            cFragment.targetCount = 1;
+            cFragment.targets = &cTargets[0];
+            descriptor->fragment = &cFragment;
+
+            wgpu::BlendComponent blendComponent;
+            blendComponent.srcFactor = wgpu::BlendFactor::One;
+            blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+            blendComponent.operation = wgpu::BlendOperation::Add;
+
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
+                cTargets[i].blend = nullptr;
+                cTargets[i].writeMask = wgpu::ColorWriteMask::All;
+
+                cBlends[i].color = blendComponent;
+                cBlends[i].alpha = blendComponent;
+            }
+        }
+    }
+
+    wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
+        wgpu::TextureFormat format) {
+        this->depthStencil = &cDepthStencil;
+        cDepthStencil.format = format;
+        return &cDepthStencil;
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/ComboRenderPipelineDescriptor.h b/src/dawn/utils/ComboRenderPipelineDescriptor.h
new file mode 100644
index 0000000..1e4662f
--- /dev/null
+++ b/src/dawn/utils/ComboRenderPipelineDescriptor.h
@@ -0,0 +1,64 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
+#define UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Constants.h"
+
+#include <array>
+
+namespace utils {
+
+    // Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
+    class ComboVertexState {
+      public:
+        ComboVertexState();
+
+        ComboVertexState(const ComboVertexState&) = delete;
+        ComboVertexState& operator=(const ComboVertexState&) = delete;
+        ComboVertexState(ComboVertexState&&) = delete;
+        ComboVertexState& operator=(ComboVertexState&&) = delete;
+
+        uint32_t vertexBufferCount;
+        std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
+        std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+    };
+
+    class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
+      public:
+        ComboRenderPipelineDescriptor();
+
+        ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
+        ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
+        ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
+        ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
+
+        wgpu::DepthStencilState* EnableDepthStencil(
+            wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
+
+        std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
+        std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+        std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
+        std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
+
+        wgpu::FragmentState cFragment;
+        wgpu::DepthStencilState cDepthStencil;
+    };
+
+}  // namespace utils
+
+#endif  // UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
diff --git a/src/dawn/utils/D3D12Binding.cpp b/src/dawn/utils/D3D12Binding.cpp
new file mode 100644
index 0000000..9ed65b2
--- /dev/null
+++ b/src/dawn/utils/D3D12Binding.cpp
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/D3D12Backend.h"
+
+#include "GLFW/glfw3.h"
+#define GLFW_EXPOSE_NATIVE_WIN32
+#include "GLFW/glfw3native.h"
+
+#include <memory>
+
+namespace utils {
+
+    class D3D12Binding : public BackendBinding {
+      public:
+        D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+        }
+
+        uint64_t GetSwapChainImplementation() override {
+            if (mSwapchainImpl.userData == nullptr) {
+                HWND win32Window = glfwGetWin32Window(mWindow);
+                mSwapchainImpl =
+                    dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
+            }
+            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+        }
+
+        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+            ASSERT(mSwapchainImpl.userData != nullptr);
+            return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+        }
+
+      private:
+        DawnSwapChainImplementation mSwapchainImpl = {};
+    };
+
+    BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
+        return new D3D12Binding(window, device);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/EmptyDebugLogger.cpp b/src/dawn/utils/EmptyDebugLogger.cpp
new file mode 100644
index 0000000..b52b38f
--- /dev/null
+++ b/src/dawn/utils/EmptyDebugLogger.cpp
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/PlatformDebugLogger.h"
+
+namespace utils {
+
+    class EmptyDebugLogger : public PlatformDebugLogger {
+      public:
+        EmptyDebugLogger() = default;
+        ~EmptyDebugLogger() override = default;
+    };
+
+    PlatformDebugLogger* CreatePlatformDebugLogger() {
+        return new EmptyDebugLogger();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/GLFWUtils.cpp b/src/dawn/utils/GLFWUtils.cpp
new file mode 100644
index 0000000..de77ccd
--- /dev/null
+++ b/src/dawn/utils/GLFWUtils.cpp
@@ -0,0 +1,88 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/GLFWUtils.h"
+
+#include "GLFW/glfw3.h"
+#include "dawn/common/Platform.h"
+
+#include <cstdlib>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    define GLFW_EXPOSE_NATIVE_WIN32
+#elif defined(DAWN_USE_X11)
+#    define GLFW_EXPOSE_NATIVE_X11
+#endif
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+    void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
+        if (type == wgpu::BackendType::OpenGL) {
+            // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+            // texture views.
+            glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+            glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+            glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+            glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+        } else if (type == wgpu::BackendType::OpenGLES) {
+            glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+            glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+            glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+            glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+        } else {
+            // Without this GLFW will initialize a GL context on the window, which prevents using
+            // the window with other APIs (by crashing in weird ways).
+            glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+        }
+    }
+
+    wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window) {
+        std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+            SetupWindowAndGetSurfaceDescriptorForTesting(window);
+
+        wgpu::SurfaceDescriptor descriptor;
+        descriptor.nextInChain = chainedDescriptor.get();
+        wgpu::Surface surface = instance.CreateSurface(&descriptor);
+
+        return surface;
+    }
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+        GLFWwindow* window) {
+        std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
+            std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
+        desc->hwnd = glfwGetWin32Window(window);
+        desc->hinstance = GetModuleHandle(nullptr);
+        return std::move(desc);
+    }
+#elif defined(DAWN_USE_X11)
+    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+        GLFWwindow* window) {
+        std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
+            std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
+        desc->display = glfwGetX11Display();
+        desc->window = glfwGetX11Window(window);
+        return std::move(desc);
+    }
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+    // SetupWindowAndGetSurfaceDescriptorForTesting defined in GLFWUtils_metal.mm
+#else
+    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(GLFWwindow*) {
+        return nullptr;
+    }
+#endif
+
+}  // namespace utils
diff --git a/src/dawn/utils/GLFWUtils.h b/src/dawn/utils/GLFWUtils.h
new file mode 100644
index 0000000..f2299cb
--- /dev/null
+++ b/src/dawn/utils/GLFWUtils.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_GLFWUTILS_H_
+#define UTILS_GLFWUTILS_H_
+
+#include "dawn/webgpu_cpp.h"
+
+#include <memory>
+
+struct GLFWwindow;
+
+namespace utils {
+
+    // Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
+    // the specified backend.
+    void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
+
+    // Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
+    // calls `instance.CreateSurface` with the correct descriptor for this window.
+    // Returns a null wgpu::Surface on failure.
+    wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window);
+
+    // Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
+    // CreateSurface so the descriptor can be modified for testing.
+    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+        GLFWwindow* window);
+
+}  // namespace utils
+
+#endif  // UTILS_GLFWUTILS_H_
diff --git a/src/dawn/utils/GLFWUtils_metal.mm b/src/dawn/utils/GLFWUtils_metal.mm
new file mode 100644
index 0000000..b574002
--- /dev/null
+++ b/src/dawn/utils/GLFWUtils_metal.mm
@@ -0,0 +1,54 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+#    error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
+#endif  // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#include "dawn/utils/GLFWUtils.h"
+
+#import <QuartzCore/CAMetalLayer.h>
+#include "GLFW/glfw3.h"
+
+#include <cstdlib>
+
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+        GLFWwindow* window) {
+        if (@available(macOS 10.11, *)) {
+            NSWindow* nsWindow = glfwGetCocoaWindow(window);
+            NSView* view = [nsWindow contentView];
+
+            // Create a CAMetalLayer that covers the whole window that will be passed to
+            // CreateSurface.
+            [view setWantsLayer:YES];
+            [view setLayer:[CAMetalLayer layer]];
+
+            // Use retina if the window was created with retina support.
+            [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
+
+            std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
+                std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
+            desc->layer = [view layer];
+            return std::move(desc);
+        }
+
+        return nullptr;
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/Glfw3Fuchsia.cpp b/src/dawn/utils/Glfw3Fuchsia.cpp
new file mode 100644
index 0000000..4caa7ac
--- /dev/null
+++ b/src/dawn/utils/Glfw3Fuchsia.cpp
@@ -0,0 +1,100 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A mock GLFW implementation that supports Fuchsia, but only implements
+// the functions called from Dawn.
+
+// NOTE: This must be included before GLFW/glfw3.h because the latter will
+// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
+// the first header to do so for sanity reasons (e.g. undefining weird
+// macros on Windows and Linux).
+// clang-format off
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/common/Assert.h"
+#include <GLFW/glfw3.h>
+// clang-format on
+
+#include <dlfcn.h>
+
+int glfwInit(void) {
+    return GLFW_TRUE;
+}
+
+void glfwDefaultWindowHints(void) {
+}
+
+void glfwWindowHint(int hint, int value) {
+    DAWN_UNUSED(hint);
+    DAWN_UNUSED(value);
+}
+
+struct GLFWwindow {
+    PFN_vkGetInstanceProcAddr GetInstanceProcAddress = nullptr;
+    void* vulkan_loader = nullptr;
+
+    GLFWwindow() {
+        vulkan_loader = ::dlopen("libvulkan.so", RTLD_NOW);
+        ASSERT(vulkan_loader != nullptr);
+        GetInstanceProcAddress = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+            dlsym(vulkan_loader, "vkGetInstanceProcAddr"));
+        ASSERT(GetInstanceProcAddress != nullptr);
+    }
+
+    ~GLFWwindow() {
+        if (vulkan_loader) {
+            ::dlclose(vulkan_loader);
+        }
+        vulkan_loader = nullptr;
+    }
+};
+
+GLFWwindow* glfwCreateWindow(int width,
+                             int height,
+                             const char* title,
+                             GLFWmonitor* monitor,
+                             GLFWwindow* share) {
+    ASSERT(monitor == nullptr);
+    ASSERT(share == nullptr);
+    DAWN_UNUSED(width);
+    DAWN_UNUSED(height);
+    DAWN_UNUSED(title);
+    return new GLFWwindow();
+}
+
+VkResult glfwCreateWindowSurface(VkInstance instance,
+                                 GLFWwindow* window,
+                                 const VkAllocationCallbacks* allocator,
+                                 VkSurfaceKHR* surface) {
+    // IMPORTANT: This assumes that the VkInstance was created with a Fuchsia
+    // swapchain layer enabled, as well as the corresponding extension that
+    // is queried here to perform the surface creation. Dawn should do all
+    // required steps in VulkanInfo.cpp, VulkanFunctions.cpp and BackendVk.cpp.
+
+    auto vkCreateImagePipeSurfaceFUCHSIA = reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
+        window->GetInstanceProcAddress(instance, "vkCreateImagePipeSurfaceFUCHSIA"));
+    ASSERT(vkCreateImagePipeSurfaceFUCHSIA != nullptr);
+    if (!vkCreateImagePipeSurfaceFUCHSIA) {
+        *surface = VK_NULL_HANDLE;
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+    }
+
+    const struct VkImagePipeSurfaceCreateInfoFUCHSIA create_info = {
+        VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA,
+        nullptr,            // pNext
+        0,                  // flags, ignored for now
+        ZX_HANDLE_INVALID,  // imagePipeHandle, a null handle matches the framebuffer.
+    };
+
+    return vkCreateImagePipeSurfaceFUCHSIA(instance, &create_info, nullptr, surface);
+}
diff --git a/src/dawn/utils/MetalBinding.mm b/src/dawn/utils/MetalBinding.mm
new file mode 100644
index 0000000..b35245c
--- /dev/null
+++ b/src/dawn/utils/MetalBinding.mm
@@ -0,0 +1,135 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/MetalBackend.h"
+
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include "GLFW/glfw3.h"
+#include "GLFW/glfw3native.h"
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace utils {
+    class SwapChainImplMTL {
+      public:
+        using WSIContext = DawnWSIContextMetal;
+
+        SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {
+        }
+
+        ~SwapChainImplMTL() {
+            [mCurrentTexture release];
+            [mCurrentDrawable release];
+        }
+
+        void Init(DawnWSIContextMetal* ctx) {
+            mMtlDevice = ctx->device;
+            mCommandQueue = ctx->queue;
+        }
+
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage usage,
+                                     uint32_t width,
+                                     uint32_t height) {
+            if (format != WGPUTextureFormat_BGRA8Unorm) {
+                return "unsupported format";
+            }
+            ASSERT(width > 0);
+            ASSERT(height > 0);
+
+            NSView* contentView = [mNsWindow contentView];
+            [contentView setWantsLayer:YES];
+
+            CGSize size = {};
+            size.width = width;
+            size.height = height;
+
+            mLayer = [CAMetalLayer layer];
+            [mLayer setDevice:mMtlDevice];
+            [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
+            [mLayer setDrawableSize:size];
+
+            constexpr uint32_t kFramebufferOnlyTextureUsages =
+                WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
+            bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
+            if (hasOnlyFramebufferUsages) {
+                [mLayer setFramebufferOnly:YES];
+            }
+
+            [contentView setLayer:mLayer];
+
+            return DAWN_SWAP_CHAIN_NO_ERROR;
+        }
+
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+            [mCurrentDrawable release];
+            mCurrentDrawable = [mLayer nextDrawable];
+            [mCurrentDrawable retain];
+
+            [mCurrentTexture release];
+            mCurrentTexture = mCurrentDrawable.texture;
+            [mCurrentTexture retain];
+
+            nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
+
+            return DAWN_SWAP_CHAIN_NO_ERROR;
+        }
+
+        DawnSwapChainError Present() {
+            id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
+            [commandBuffer presentDrawable:mCurrentDrawable];
+            [commandBuffer commit];
+
+            return DAWN_SWAP_CHAIN_NO_ERROR;
+        }
+
+      private:
+        id mNsWindow = nil;
+        id<MTLDevice> mMtlDevice = nil;
+        id<MTLCommandQueue> mCommandQueue = nil;
+
+        CAMetalLayer* mLayer = nullptr;
+        id<CAMetalDrawable> mCurrentDrawable = nil;
+        id<MTLTexture> mCurrentTexture = nil;
+    };
+
+    class MetalBinding : public BackendBinding {
+      public:
+        MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+        }
+
+        uint64_t GetSwapChainImplementation() override {
+            if (mSwapchainImpl.userData == nullptr) {
+                mSwapchainImpl = CreateSwapChainImplementation(
+                    new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
+            }
+            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+        }
+
+        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+            return WGPUTextureFormat_BGRA8Unorm;
+        }
+
+      private:
+        DawnSwapChainImplementation mSwapchainImpl = {};
+    };
+
+    BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
+        return new MetalBinding(window, device);
+    }
+}
diff --git a/src/dawn/utils/NullBinding.cpp b/src/dawn/utils/NullBinding.cpp
new file mode 100644
index 0000000..c33b6dd
--- /dev/null
+++ b/src/dawn/utils/NullBinding.cpp
@@ -0,0 +1,47 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/NullBackend.h"
+
+#include <memory>
+
+namespace utils {
+
+    class NullBinding : public BackendBinding {
+      public:
+        NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+        }
+
+        uint64_t GetSwapChainImplementation() override {
+            if (mSwapchainImpl.userData == nullptr) {
+                mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
+            }
+            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+        }
+        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+            return WGPUTextureFormat_RGBA8Unorm;
+        }
+
+      private:
+        DawnSwapChainImplementation mSwapchainImpl = {};
+    };
+
+    BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
+        return new NullBinding(window, device);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/OSXTimer.cpp b/src/dawn/utils/OSXTimer.cpp
new file mode 100644
index 0000000..818b27b
--- /dev/null
+++ b/src/dawn/utils/OSXTimer.cpp
@@ -0,0 +1,77 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <CoreServices/CoreServices.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+
+namespace utils {
+
+    class OSXTimer : public Timer {
+      public:
+        OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
+        }
+
+        ~OSXTimer() override = default;
+
+        void Start() override {
+            mStartTime = mach_absolute_time();
+            // Cache secondCoeff
+            GetSecondCoeff();
+            mRunning = true;
+        }
+
+        void Stop() override {
+            mStopTime = mach_absolute_time();
+            mRunning = false;
+        }
+
+        double GetElapsedTime() const override {
+            if (mRunning) {
+                return mSecondCoeff * (mach_absolute_time() - mStartTime);
+            } else {
+                return mSecondCoeff * (mStopTime - mStartTime);
+            }
+        }
+
+        double GetAbsoluteTime() override {
+            return GetSecondCoeff() * mach_absolute_time();
+        }
+
+      private:
+        double GetSecondCoeff() {
+            // If this is the first time we've run, get the timebase.
+            if (mSecondCoeff == 0.0) {
+                mach_timebase_info_data_t timebaseInfo;
+                mach_timebase_info(&timebaseInfo);
+
+                mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
+            }
+
+            return mSecondCoeff;
+        }
+
+        bool mRunning;
+        uint64_t mStartTime;
+        uint64_t mStopTime;
+        double mSecondCoeff;
+    };
+
+    Timer* CreateTimer() {
+        return new OSXTimer();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/ObjCUtils.h b/src/dawn/utils/ObjCUtils.h
new file mode 100644
index 0000000..17b3956
--- /dev/null
+++ b/src/dawn/utils/ObjCUtils.h
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_OBJCUTILS_H_
+#define UTILS_OBJCUTILS_H_
+
+// Contains helper function to manipulate ObjC objects. This helps having C++ files do a little bit
+// of ObjectiveC calls, when they cannot be converted to ObjectiveC++ because they are used on
+// multiple platforms.
+
+namespace utils {
+
+    // The returned CALayer is autoreleased.
+    void* CreateDummyCALayer();
+
+}  // namespace utils
+
+#endif  // UTILS_OBJCUTILS_H_
diff --git a/src/dawn/utils/ObjCUtils.mm b/src/dawn/utils/ObjCUtils.mm
new file mode 100644
index 0000000..c006976
--- /dev/null
+++ b/src/dawn/utils/ObjCUtils.mm
@@ -0,0 +1,25 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ObjCUtils.h"
+
+#include <QuartzCore/CALayer.h>
+
+namespace utils {
+
+    void* CreateDummyCALayer() {
+        return [CALayer layer];
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/OpenGLBinding.cpp b/src/dawn/utils/OpenGLBinding.cpp
new file mode 100644
index 0000000..35972af
--- /dev/null
+++ b/src/dawn/utils/OpenGLBinding.cpp
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/OpenGLBackend.h"
+
+#include <cstdio>
+#include "GLFW/glfw3.h"
+
+namespace utils {
+
+    class OpenGLBinding : public BackendBinding {
+      public:
+        OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+        }
+
+        uint64_t GetSwapChainImplementation() override {
+            if (mSwapchainImpl.userData == nullptr) {
+                mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
+                    mDevice,
+                    [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
+                    mWindow);
+            }
+            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+        }
+
+        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+            return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+        }
+
+      private:
+        DawnSwapChainImplementation mSwapchainImpl = {};
+    };
+
+    BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
+        return new OpenGLBinding(window, device);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/PlatformDebugLogger.h b/src/dawn/utils/PlatformDebugLogger.h
new file mode 100644
index 0000000..33c46de
--- /dev/null
+++ b/src/dawn/utils/PlatformDebugLogger.h
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_PLATFORMDEBUGLOGGER_H_
+#define UTILS_PLATFORMDEBUGLOGGER_H_
+
+namespace utils {
+
+    class PlatformDebugLogger {
+      public:
+        virtual ~PlatformDebugLogger() = default;
+    };
+
+    PlatformDebugLogger* CreatePlatformDebugLogger();
+
+}  // namespace utils
+
+#endif  // UTILS_PLATFORMDEBUGLOGGER_H_
diff --git a/src/dawn/utils/PosixTimer.cpp b/src/dawn/utils/PosixTimer.cpp
new file mode 100644
index 0000000..18eb5e6
--- /dev/null
+++ b/src/dawn/utils/PosixTimer.cpp
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <stdint.h>
+#include <time.h>
+
+namespace utils {
+
+    namespace {
+
+        uint64_t GetCurrentTimeNs() {
+            struct timespec currentTime;
+            clock_gettime(CLOCK_MONOTONIC, &currentTime);
+            return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
+        }
+
+    }  // anonymous namespace
+
+    class PosixTimer : public Timer {
+      public:
+        PosixTimer() : Timer(), mRunning(false) {
+        }
+
+        ~PosixTimer() override = default;
+
+        void Start() override {
+            mStartTimeNs = GetCurrentTimeNs();
+            mRunning = true;
+        }
+
+        void Stop() override {
+            mStopTimeNs = GetCurrentTimeNs();
+            mRunning = false;
+        }
+
+        double GetElapsedTime() const override {
+            uint64_t endTimeNs;
+            if (mRunning) {
+                endTimeNs = GetCurrentTimeNs();
+            } else {
+                endTimeNs = mStopTimeNs;
+            }
+
+            return (endTimeNs - mStartTimeNs) * 1e-9;
+        }
+
+        double GetAbsoluteTime() override {
+            return GetCurrentTimeNs() * 1e-9;
+        }
+
+      private:
+        bool mRunning;
+        uint64_t mStartTimeNs;
+        uint64_t mStopTimeNs;
+    };
+
+    Timer* CreateTimer() {
+        return new PosixTimer();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/ScopedAutoreleasePool.cpp b/src/dawn/utils/ScopedAutoreleasePool.cpp
new file mode 100644
index 0000000..2f5f050
--- /dev/null
+++ b/src/dawn/utils/ScopedAutoreleasePool.cpp
@@ -0,0 +1,34 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ScopedAutoreleasePool.h"
+
+#include "dawn/common/Compiler.h"
+
+namespace utils {
+
+    ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
+        DAWN_UNUSED(mPool);
+    }
+
+    ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
+
+    ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+    }
+
+    ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+        return *this;
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/ScopedAutoreleasePool.h b/src/dawn/utils/ScopedAutoreleasePool.h
new file mode 100644
index 0000000..bd00a1a
--- /dev/null
+++ b/src/dawn/utils/ScopedAutoreleasePool.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_SCOPEDAUTORELEASEPOOL_H_
+#define UTILS_SCOPEDAUTORELEASEPOOL_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+
+namespace utils {
+
+    /**
+     * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
+     * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
+     * is a no-op.
+     *
+     * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
+     * expects a pool to always be available in each thread. If a pool is not available, then
+     * autoreleased objects will never be released and will leak.
+     *
+     * In long-running blocks of code or loops, it is important to periodically create and drain
+     * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
+     * per-test. In graphics applications it's advised to create an autoreleasepool around the
+     * frame loop. Ex.)
+     *   void frame() {
+     *     // Any protocol objects will be reclaimed when this object falls out of scope.
+     *     utils::ScopedAutoreleasePool pool;
+     *
+     *     // do rendering ...
+     *   }
+     */
+    class [[nodiscard]] ScopedAutoreleasePool {
+      public:
+        ScopedAutoreleasePool();
+        ~ScopedAutoreleasePool();
+
+        ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
+        ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
+
+        ScopedAutoreleasePool(ScopedAutoreleasePool &&);
+        ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
+
+      private:
+        void* mPool = nullptr;
+    };
+
+}  // namespace utils
+
+#endif  // UTILS_SCOPEDAUTORELEASEPOOL_H_
diff --git a/src/dawn/utils/ScopedAutoreleasePool.mm b/src/dawn/utils/ScopedAutoreleasePool.mm
new file mode 100644
index 0000000..c4cb9a2
--- /dev/null
+++ b/src/dawn/utils/ScopedAutoreleasePool.mm
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ScopedAutoreleasePool.h"
+
+#import <Foundation/Foundation.h>
+
+namespace utils {
+
+    ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {
+    }
+
+    ScopedAutoreleasePool::~ScopedAutoreleasePool() {
+        if (mPool != nullptr) {
+            [static_cast<NSAutoreleasePool*>(mPool) release];
+            mPool = nullptr;
+        }
+    }
+
+    ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+        mPool = rhs.mPool;
+        rhs.mPool = nullptr;
+    }
+
+    ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+        if (&rhs != this) {
+            mPool = rhs.mPool;
+            rhs.mPool = nullptr;
+        }
+        return *this;
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/SystemUtils.cpp b/src/dawn/utils/SystemUtils.cpp
new file mode 100644
index 0000000..9010e2b
--- /dev/null
+++ b/src/dawn/utils/SystemUtils.cpp
@@ -0,0 +1,39 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Platform.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+#    include <Windows.h>
+#elif defined(DAWN_PLATFORM_POSIX)
+#    include <unistd.h>
+#else
+#    error "Unsupported platform."
+#endif
+
+namespace utils {
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+    void USleep(unsigned int usecs) {
+        Sleep(static_cast<DWORD>(usecs / 1000));
+    }
+#elif defined(DAWN_PLATFORM_POSIX)
+    void USleep(unsigned int usecs) {
+        usleep(usecs);
+    }
+#else
+#    error "Implement USleep for your platform."
+#endif
+
+}  // namespace utils
diff --git a/src/dawn/utils/SystemUtils.h b/src/dawn/utils/SystemUtils.h
new file mode 100644
index 0000000..1f42cc5
--- /dev/null
+++ b/src/dawn/utils/SystemUtils.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_SYSTEMUTILS_H_
+#define UTILS_SYSTEMUTILS_H_
+
+namespace utils {
+
+    void USleep(unsigned int usecs);
+}
+
+#endif  // UTILS_SYSTEMUTILS_H_
diff --git a/src/dawn/utils/TerribleCommandBuffer.cpp b/src/dawn/utils/TerribleCommandBuffer.cpp
new file mode 100644
index 0000000..b99243b
--- /dev/null
+++ b/src/dawn/utils/TerribleCommandBuffer.cpp
@@ -0,0 +1,59 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/TerribleCommandBuffer.h"
+
+#include "dawn/common/Assert.h"
+
+namespace utils {
+
+    TerribleCommandBuffer::TerribleCommandBuffer() {
+    }
+
+    TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
+        : mHandler(handler) {
+    }
+
+    void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
+        mHandler = handler;
+    }
+
+    size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
+        return sizeof(mBuffer);
+    }
+
+    void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
+        // Note: This returns non-null even if size is zero.
+        if (size > sizeof(mBuffer)) {
+            return nullptr;
+        }
+        char* result = &mBuffer[mOffset];
+        if (sizeof(mBuffer) - size < mOffset) {
+            if (!Flush()) {
+                return nullptr;
+            }
+            return GetCmdSpace(size);
+        }
+
+        mOffset += size;
+        return result;
+    }
+
+    bool TerribleCommandBuffer::Flush() {
+        bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
+        mOffset = 0;
+        return success;
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/TerribleCommandBuffer.h b/src/dawn/utils/TerribleCommandBuffer.h
new file mode 100644
index 0000000..6960b2b
--- /dev/null
+++ b/src/dawn/utils/TerribleCommandBuffer.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TERRIBLE_COMMAND_BUFFER_H_
+#define UTILS_TERRIBLE_COMMAND_BUFFER_H_
+
+#include "dawn/wire/Wire.h"
+
+namespace utils {
+
+    class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
+      public:
+        TerribleCommandBuffer();
+        TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
+
+        void SetHandler(dawn::wire::CommandHandler* handler);
+
+        size_t GetMaximumAllocationSize() const override;
+
+        void* GetCmdSpace(size_t size) override;
+        bool Flush() override;
+
+      private:
+        dawn::wire::CommandHandler* mHandler = nullptr;
+        size_t mOffset = 0;
+        char mBuffer[1000000];
+    };
+
+}  // namespace utils
+
+#endif  // UTILS_TERRIBLE_COMMAND_BUFFER_H_
diff --git a/src/dawn/utils/TestUtils.cpp b/src/dawn/utils/TestUtils.cpp
new file mode 100644
index 0000000..31535f2
--- /dev/null
+++ b/src/dawn/utils/TestUtils.cpp
@@ -0,0 +1,181 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/TestUtils.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <vector>
+
+namespace utils {
+
+    uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+        const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
+        const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        ASSERT(width % blockWidth == 0);
+        return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
+    }
+
+    TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
+        wgpu::TextureFormat format,
+        wgpu::Extent3D textureSizeAtLevel0,
+        uint32_t mipmapLevel,
+        wgpu::TextureDimension dimension,
+        uint32_t rowsPerImage) {
+        // Compressed texture formats not supported in this function yet.
+        ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
+
+        TextureDataCopyLayout layout;
+
+        layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
+                          std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
+                          textureSizeAtLevel0.depthOrArrayLayers};
+
+        if (dimension == wgpu::TextureDimension::e3D) {
+            layout.mipSize.depthOrArrayLayers =
+                std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
+        }
+
+        layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
+
+        if (rowsPerImage == wgpu::kCopyStrideUndefined) {
+            rowsPerImage = layout.mipSize.height;
+        }
+        layout.rowsPerImage = rowsPerImage;
+
+        uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+        layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+
+        layout.byteLength =
+            RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
+
+        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+        layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+        layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+        layout.texelBlockCount = layout.byteLength / bytesPerTexel;
+
+        return layout;
+    }
+
+    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                                 uint64_t rowsPerImage,
+                                 wgpu::Extent3D copyExtent,
+                                 wgpu::TextureFormat textureFormat) {
+        uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
+        ASSERT(copyExtent.width % blockWidth == 0);
+        uint32_t widthInBlocks = copyExtent.width / blockWidth;
+        ASSERT(copyExtent.height % blockHeight == 0);
+        uint32_t heightInBlocks = copyExtent.height / blockHeight;
+        return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
+                                   copyExtent.depthOrArrayLayers, blockSize);
+    }
+
+    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                                 uint64_t rowsPerImage,
+                                 uint64_t widthInBlocks,
+                                 uint64_t heightInBlocks,
+                                 uint64_t depth,
+                                 uint64_t bytesPerBlock) {
+        if (depth == 0) {
+            return 0;
+        }
+
+        uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
+        uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
+        if (heightInBlocks != 0) {
+            uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
+            uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
+            requiredBytesInCopy += lastImageBytes;
+        }
+        return requiredBytesInCopy;
+    }
+
+    uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+                                       uint64_t rowsPerImage,
+                                       wgpu::Extent3D copyExtent,
+                                       wgpu::TextureFormat textureFormat) {
+        return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
+               utils::GetTexelBlockSizeInBytes(textureFormat);
+    }
+
+    void UnalignDynamicUploader(wgpu::Device device) {
+        std::vector<uint8_t> data = {1};
+
+        wgpu::TextureDescriptor descriptor = {};
+        descriptor.size = {1, 1, 1};
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+        wgpu::Extent3D copyExtent = {1, 1, 1};
+
+        // WriteTexture with exactly 1 byte of data.
+        device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
+                                       &copyExtent);
+    }
+
+    uint32_t VertexFormatSize(wgpu::VertexFormat format) {
+        switch (format) {
+            case wgpu::VertexFormat::Uint8x2:
+            case wgpu::VertexFormat::Sint8x2:
+            case wgpu::VertexFormat::Unorm8x2:
+            case wgpu::VertexFormat::Snorm8x2:
+                return 2;
+            case wgpu::VertexFormat::Uint8x4:
+            case wgpu::VertexFormat::Sint8x4:
+            case wgpu::VertexFormat::Unorm8x4:
+            case wgpu::VertexFormat::Snorm8x4:
+            case wgpu::VertexFormat::Uint16x2:
+            case wgpu::VertexFormat::Sint16x2:
+            case wgpu::VertexFormat::Unorm16x2:
+            case wgpu::VertexFormat::Snorm16x2:
+            case wgpu::VertexFormat::Float16x2:
+            case wgpu::VertexFormat::Float32:
+            case wgpu::VertexFormat::Uint32:
+            case wgpu::VertexFormat::Sint32:
+                return 4;
+            case wgpu::VertexFormat::Uint16x4:
+            case wgpu::VertexFormat::Sint16x4:
+            case wgpu::VertexFormat::Unorm16x4:
+            case wgpu::VertexFormat::Snorm16x4:
+            case wgpu::VertexFormat::Float16x4:
+            case wgpu::VertexFormat::Float32x2:
+            case wgpu::VertexFormat::Uint32x2:
+            case wgpu::VertexFormat::Sint32x2:
+                return 8;
+            case wgpu::VertexFormat::Float32x3:
+            case wgpu::VertexFormat::Uint32x3:
+            case wgpu::VertexFormat::Sint32x3:
+                return 12;
+            case wgpu::VertexFormat::Float32x4:
+            case wgpu::VertexFormat::Uint32x4:
+            case wgpu::VertexFormat::Sint32x4:
+                return 16;
+            case wgpu::VertexFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/TestUtils.h b/src/dawn/utils/TestUtils.h
new file mode 100644
index 0000000..02b0daf
--- /dev/null
+++ b/src/dawn/utils/TestUtils.h
@@ -0,0 +1,67 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TESTHELPERS_H_
+#define UTILS_TESTHELPERS_H_
+
+#include <dawn/webgpu_cpp.h>
+
+namespace utils {
+
+    struct TextureDataCopyLayout {
+        uint64_t byteLength;
+        uint64_t texelBlockCount;
+        uint32_t bytesPerRow;
+        uint32_t rowsPerImage;
+        uint32_t texelBlocksPerRow;
+        uint32_t bytesPerImage;
+        uint32_t texelBlocksPerImage;
+        wgpu::Extent3D mipSize;
+    };
+
+    uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
+    TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
+        wgpu::TextureFormat format,
+        wgpu::Extent3D textureSizeAtLevel0,
+        uint32_t mipmapLevel,
+        wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D,
+        uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+
+    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                                 uint64_t rowsPerImage,
+                                 wgpu::Extent3D copyExtent,
+                                 wgpu::TextureFormat textureFormat);
+    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                                 uint64_t rowsPerImage,
+                                 uint64_t widthInBlocks,
+                                 uint64_t heightInBlocks,
+                                 uint64_t depth,
+                                 uint64_t bytesPerBlock);
+
+    uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+                                       uint64_t rowsPerImage,
+                                       wgpu::Extent3D copyExtent,
+                                       wgpu::TextureFormat textureFormat);
+
+    // A helper function used for testing DynamicUploader offset alignment.
+    // A call of this function will do a Queue::WriteTexture with 1 byte of data,
+    // so that assuming that WriteTexture uses DynamicUploader, the first RingBuffer
+    // in it will contain 1 byte of data.
+    void UnalignDynamicUploader(wgpu::Device device);
+
+    uint32_t VertexFormatSize(wgpu::VertexFormat format);
+
+}  // namespace utils
+
+#endif  // UTILS_TESTHELPERS_H_
diff --git a/src/dawn/utils/TextureUtils.cpp b/src/dawn/utils/TextureUtils.cpp
new file mode 100644
index 0000000..312c1db
--- /dev/null
+++ b/src/dawn/utils/TextureUtils.cpp
@@ -0,0 +1,770 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "TextureUtils.h"
+
+namespace utils {
+    bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+            case wgpu::TextureFormat::RGBA32Float:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::Depth16Unorm:
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth32Float:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
+        if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
+            IsASTCTextureFormat(textureFormat)) {
+            return false;
+        }
+
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+            case wgpu::TextureFormat::R8Snorm:
+            case wgpu::TextureFormat::RG8Snorm:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return false;
+
+            default:
+                return true;
+        }
+    }
+
+    bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::RG8Sint:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat) {
+        return textureFormat == wgpu::TextureFormat::Stencil8;
+    }
+
+    uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Snorm:
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::Stencil8:
+                return 1u;
+
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Snorm:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::RG8Sint:
+                return 2u;
+
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RG11B10Ufloat:
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+                return 4u;
+
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+                return 8u;
+
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+                return 16u;
+
+            case wgpu::TextureFormat::Depth16Unorm:
+                return 2u;
+
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+            case wgpu::TextureFormat::Depth32Float:
+                return 4u;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC4RSnorm:
+                return 8u;
+
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return 16u;
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+                return 8u;
+
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return 16u;
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return 16u;
+
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+
+            // Block size of a multi-planar format depends on aspect.
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+            case wgpu::TextureFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Snorm:
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Snorm:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::RG8Sint:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RG11B10Ufloat:
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth16Unorm:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+            case wgpu::TextureFormat::Stencil8:
+                return 1u;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return 4u;
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+                return 4u;
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+                return 5u;
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+                return 6u;
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+                return 8u;
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+                return 10u;
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return 12u;
+
+            // Block size of a multi-planar format depends on aspect.
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+            case wgpu::TextureFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Snorm:
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Snorm:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::RG8Sint:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RG11B10Ufloat:
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth16Unorm:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+            case wgpu::TextureFormat::Stencil8:
+                return 1u;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+                return 4u;
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+                return 4u;
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+                return 5u;
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+                return 6u;
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+                return 8u;
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+                return 10u;
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+                return 12u;
+
+            // Block size of a multi-planar format depends on aspect.
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+            case wgpu::TextureFormat::Undefined:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Snorm:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Snorm:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Snorm:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RG11B10Ufloat:
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return "f32";
+
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA32Uint:
+                return "u32";
+
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::RG8Sint:
+            case wgpu::TextureFormat::R32Sint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::RG32Sint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA32Sint:
+                return "i32";
+
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::R8Unorm:
+            case wgpu::TextureFormat::R8Uint:
+            case wgpu::TextureFormat::R8Sint:
+            case wgpu::TextureFormat::R16Uint:
+            case wgpu::TextureFormat::R16Sint:
+            case wgpu::TextureFormat::R16Float:
+            case wgpu::TextureFormat::R32Float:
+            case wgpu::TextureFormat::R32Uint:
+            case wgpu::TextureFormat::R32Sint:
+                return 1u;
+            case wgpu::TextureFormat::RG8Unorm:
+            case wgpu::TextureFormat::RG8Uint:
+            case wgpu::TextureFormat::RG8Sint:
+            case wgpu::TextureFormat::RG16Uint:
+            case wgpu::TextureFormat::RG16Sint:
+            case wgpu::TextureFormat::RG16Float:
+            case wgpu::TextureFormat::RG32Float:
+            case wgpu::TextureFormat::RG32Uint:
+            case wgpu::TextureFormat::RG32Sint:
+                return 2u;
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+            case wgpu::TextureFormat::RGBA8Uint:
+            case wgpu::TextureFormat::RGBA8Sint:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+            case wgpu::TextureFormat::RGB10A2Unorm:
+            case wgpu::TextureFormat::RGBA16Uint:
+            case wgpu::TextureFormat::RGBA16Sint:
+            case wgpu::TextureFormat::RGBA16Float:
+            case wgpu::TextureFormat::RGBA32Float:
+            case wgpu::TextureFormat::RGBA32Uint:
+            case wgpu::TextureFormat::RGBA32Sint:
+                return 4u;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
+        switch (textureFormat) {
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return "rgba8unorm";
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return "rgba8snorm";
+            case wgpu::TextureFormat::RGBA8Uint:
+                return "rgba8uint";
+            case wgpu::TextureFormat::RGBA8Sint:
+                return "rgba8sint";
+            case wgpu::TextureFormat::RGBA16Uint:
+                return "rgba16uint";
+            case wgpu::TextureFormat::RGBA16Sint:
+                return "rgba16sint";
+            case wgpu::TextureFormat::RGBA16Float:
+                return "rgba16float";
+            case wgpu::TextureFormat::R32Uint:
+                return "r32uint";
+            case wgpu::TextureFormat::R32Sint:
+                return "r32sint";
+            case wgpu::TextureFormat::R32Float:
+                return "r32float";
+            case wgpu::TextureFormat::RG32Uint:
+                return "rg32uint";
+            case wgpu::TextureFormat::RG32Sint:
+                return "rg32sint";
+            case wgpu::TextureFormat::RG32Float:
+                return "rg32float";
+            case wgpu::TextureFormat::RGBA32Uint:
+                return "rgba32uint";
+            case wgpu::TextureFormat::RGBA32Sint:
+                return "rgba32sint";
+            case wgpu::TextureFormat::RGBA32Float:
+                return "rgba32float";
+
+            // The below do not currently exist in the WGSL spec, but are used
+            // for tests that expect compilation failure.
+            case wgpu::TextureFormat::R8Unorm:
+                return "r8unorm";
+            case wgpu::TextureFormat::R8Snorm:
+                return "r8snorm";
+            case wgpu::TextureFormat::R8Uint:
+                return "r8uint";
+            case wgpu::TextureFormat::R8Sint:
+                return "r8sint";
+            case wgpu::TextureFormat::R16Uint:
+                return "r16uint";
+            case wgpu::TextureFormat::R16Sint:
+                return "r16sint";
+            case wgpu::TextureFormat::R16Float:
+                return "r16float";
+            case wgpu::TextureFormat::RG8Unorm:
+                return "rg8unorm";
+            case wgpu::TextureFormat::RG8Snorm:
+                return "rg8snorm";
+            case wgpu::TextureFormat::RG8Uint:
+                return "rg8uint";
+            case wgpu::TextureFormat::RG8Sint:
+                return "rg8sint";
+            case wgpu::TextureFormat::RG16Uint:
+                return "rg16uint";
+            case wgpu::TextureFormat::RG16Sint:
+                return "rg16sint";
+            case wgpu::TextureFormat::RG16Float:
+                return "rg16float";
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return "rgb10a2unorm";
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return "rg11b10ufloat";
+
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    wgpu::TextureDimension ViewDimensionToTextureDimension(
+        const wgpu::TextureViewDimension dimension) {
+        switch (dimension) {
+            case wgpu::TextureViewDimension::e2D:
+            case wgpu::TextureViewDimension::e2DArray:
+            case wgpu::TextureViewDimension::Cube:
+            case wgpu::TextureViewDimension::CubeArray:
+                return wgpu::TextureDimension::e2D;
+            case wgpu::TextureViewDimension::e3D:
+                return wgpu::TextureDimension::e3D;
+            // TODO(crbug.com/dawn/814): Implement for 1D texture.
+            case wgpu::TextureViewDimension::e1D:
+            default:
+                UNREACHABLE();
+                break;
+        }
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/TextureUtils.h b/src/dawn/utils/TextureUtils.h
new file mode 100644
index 0000000..f9dab08
--- /dev/null
+++ b/src/dawn/utils/TextureUtils.h
@@ -0,0 +1,251 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TEXTURE_UTILS_H_
+#define UTILS_TEXTURE_UTILS_H_
+
+#include <array>
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Assert.h"
+
+namespace utils {
+    static constexpr std::array<wgpu::TextureFormat, 95> kAllTextureFormats = {
+        wgpu::TextureFormat::R8Unorm,
+        wgpu::TextureFormat::R8Snorm,
+        wgpu::TextureFormat::R8Uint,
+        wgpu::TextureFormat::R8Sint,
+        wgpu::TextureFormat::R16Uint,
+        wgpu::TextureFormat::R16Sint,
+        wgpu::TextureFormat::R16Float,
+        wgpu::TextureFormat::RG8Unorm,
+        wgpu::TextureFormat::RG8Snorm,
+        wgpu::TextureFormat::RG8Uint,
+        wgpu::TextureFormat::RG8Sint,
+        wgpu::TextureFormat::R32Float,
+        wgpu::TextureFormat::R32Uint,
+        wgpu::TextureFormat::R32Sint,
+        wgpu::TextureFormat::RG16Uint,
+        wgpu::TextureFormat::RG16Sint,
+        wgpu::TextureFormat::RG16Float,
+        wgpu::TextureFormat::RGBA8Unorm,
+        wgpu::TextureFormat::RGBA8UnormSrgb,
+        wgpu::TextureFormat::RGBA8Snorm,
+        wgpu::TextureFormat::RGBA8Uint,
+        wgpu::TextureFormat::RGBA8Sint,
+        wgpu::TextureFormat::BGRA8Unorm,
+        wgpu::TextureFormat::BGRA8UnormSrgb,
+        wgpu::TextureFormat::RGB10A2Unorm,
+        wgpu::TextureFormat::RG11B10Ufloat,
+        wgpu::TextureFormat::RGB9E5Ufloat,
+        wgpu::TextureFormat::RG32Float,
+        wgpu::TextureFormat::RG32Uint,
+        wgpu::TextureFormat::RG32Sint,
+        wgpu::TextureFormat::RGBA16Uint,
+        wgpu::TextureFormat::RGBA16Sint,
+        wgpu::TextureFormat::RGBA16Float,
+        wgpu::TextureFormat::RGBA32Float,
+        wgpu::TextureFormat::RGBA32Uint,
+        wgpu::TextureFormat::RGBA32Sint,
+        wgpu::TextureFormat::Depth16Unorm,
+        wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth24Plus,
+        wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24UnormStencil8,
+        wgpu::TextureFormat::Depth32FloatStencil8,
+        wgpu::TextureFormat::Stencil8,
+        wgpu::TextureFormat::BC1RGBAUnorm,
+        wgpu::TextureFormat::BC1RGBAUnormSrgb,
+        wgpu::TextureFormat::BC2RGBAUnorm,
+        wgpu::TextureFormat::BC2RGBAUnormSrgb,
+        wgpu::TextureFormat::BC3RGBAUnorm,
+        wgpu::TextureFormat::BC3RGBAUnormSrgb,
+        wgpu::TextureFormat::BC4RUnorm,
+        wgpu::TextureFormat::BC4RSnorm,
+        wgpu::TextureFormat::BC5RGUnorm,
+        wgpu::TextureFormat::BC5RGSnorm,
+        wgpu::TextureFormat::BC6HRGBUfloat,
+        wgpu::TextureFormat::BC6HRGBFloat,
+        wgpu::TextureFormat::BC7RGBAUnorm,
+        wgpu::TextureFormat::BC7RGBAUnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8Unorm,
+        wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8A1Unorm,
+        wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+        wgpu::TextureFormat::ETC2RGBA8Unorm,
+        wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+        wgpu::TextureFormat::EACR11Unorm,
+        wgpu::TextureFormat::EACR11Snorm,
+        wgpu::TextureFormat::EACRG11Unorm,
+        wgpu::TextureFormat::EACRG11Snorm,
+        wgpu::TextureFormat::ASTC4x4Unorm,
+        wgpu::TextureFormat::ASTC4x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x4Unorm,
+        wgpu::TextureFormat::ASTC5x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x5Unorm,
+        wgpu::TextureFormat::ASTC5x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x5Unorm,
+        wgpu::TextureFormat::ASTC6x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x6Unorm,
+        wgpu::TextureFormat::ASTC6x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x5Unorm,
+        wgpu::TextureFormat::ASTC8x5UnormSrgb,
+        wgpu::TextureFormat::ASTC8x6Unorm,
+        wgpu::TextureFormat::ASTC8x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x8Unorm,
+        wgpu::TextureFormat::ASTC8x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x5Unorm,
+        wgpu::TextureFormat::ASTC10x5UnormSrgb,
+        wgpu::TextureFormat::ASTC10x6Unorm,
+        wgpu::TextureFormat::ASTC10x6UnormSrgb,
+        wgpu::TextureFormat::ASTC10x8Unorm,
+        wgpu::TextureFormat::ASTC10x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x10Unorm,
+        wgpu::TextureFormat::ASTC10x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x10Unorm,
+        wgpu::TextureFormat::ASTC12x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x12Unorm,
+        wgpu::TextureFormat::ASTC12x12UnormSrgb};
+
+    static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
+        wgpu::TextureFormat::R8Unorm,        wgpu::TextureFormat::R8Snorm,
+        wgpu::TextureFormat::R8Uint,         wgpu::TextureFormat::R8Sint,
+        wgpu::TextureFormat::R16Uint,        wgpu::TextureFormat::R16Sint,
+        wgpu::TextureFormat::R16Float,       wgpu::TextureFormat::RG8Unorm,
+        wgpu::TextureFormat::RG8Snorm,       wgpu::TextureFormat::RG8Uint,
+        wgpu::TextureFormat::RG8Sint,        wgpu::TextureFormat::R32Float,
+        wgpu::TextureFormat::R32Uint,        wgpu::TextureFormat::R32Sint,
+        wgpu::TextureFormat::RG16Uint,       wgpu::TextureFormat::RG16Sint,
+        wgpu::TextureFormat::RG16Float,      wgpu::TextureFormat::RGBA8Unorm,
+        wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
+        wgpu::TextureFormat::RGBA8Uint,      wgpu::TextureFormat::RGBA8Sint,
+        wgpu::TextureFormat::BGRA8Unorm,     wgpu::TextureFormat::BGRA8UnormSrgb,
+        wgpu::TextureFormat::RGB10A2Unorm,   wgpu::TextureFormat::RG11B10Ufloat,
+        wgpu::TextureFormat::RGB9E5Ufloat,   wgpu::TextureFormat::RG32Float,
+        wgpu::TextureFormat::RG32Uint,       wgpu::TextureFormat::RG32Sint,
+        wgpu::TextureFormat::RGBA16Uint,     wgpu::TextureFormat::RGBA16Sint,
+        wgpu::TextureFormat::RGBA16Float,    wgpu::TextureFormat::RGBA32Float,
+        wgpu::TextureFormat::RGBA32Uint,     wgpu::TextureFormat::RGBA32Sint,
+        wgpu::TextureFormat::Depth16Unorm,   wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth24Plus,    wgpu::TextureFormat::Depth24PlusStencil8,
+    };
+
+    static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
+        wgpu::TextureFormat::BC1RGBAUnorm,  wgpu::TextureFormat::BC1RGBAUnormSrgb,
+        wgpu::TextureFormat::BC2RGBAUnorm,  wgpu::TextureFormat::BC2RGBAUnormSrgb,
+        wgpu::TextureFormat::BC3RGBAUnorm,  wgpu::TextureFormat::BC3RGBAUnormSrgb,
+        wgpu::TextureFormat::BC4RUnorm,     wgpu::TextureFormat::BC4RSnorm,
+        wgpu::TextureFormat::BC5RGUnorm,    wgpu::TextureFormat::BC5RGSnorm,
+        wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+        wgpu::TextureFormat::BC7RGBAUnorm,  wgpu::TextureFormat::BC7RGBAUnormSrgb};
+
+    static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
+        wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+        wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+        wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
+        wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm};
+
+    static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
+        wgpu::TextureFormat::ASTC4x4Unorm,   wgpu::TextureFormat::ASTC4x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x4Unorm,   wgpu::TextureFormat::ASTC5x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x5Unorm,   wgpu::TextureFormat::ASTC5x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x5Unorm,   wgpu::TextureFormat::ASTC6x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x6Unorm,   wgpu::TextureFormat::ASTC6x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x5Unorm,   wgpu::TextureFormat::ASTC8x5UnormSrgb,
+        wgpu::TextureFormat::ASTC8x6Unorm,   wgpu::TextureFormat::ASTC8x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x8Unorm,   wgpu::TextureFormat::ASTC8x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x5Unorm,  wgpu::TextureFormat::ASTC10x5UnormSrgb,
+        wgpu::TextureFormat::ASTC10x6Unorm,  wgpu::TextureFormat::ASTC10x6UnormSrgb,
+        wgpu::TextureFormat::ASTC10x8Unorm,  wgpu::TextureFormat::ASTC10x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
+    };
+
+    static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
+        wgpu::TextureFormat::BC1RGBAUnorm,    wgpu::TextureFormat::BC1RGBAUnormSrgb,
+        wgpu::TextureFormat::BC2RGBAUnorm,    wgpu::TextureFormat::BC2RGBAUnormSrgb,
+        wgpu::TextureFormat::BC3RGBAUnorm,    wgpu::TextureFormat::BC3RGBAUnormSrgb,
+        wgpu::TextureFormat::BC4RUnorm,       wgpu::TextureFormat::BC4RSnorm,
+        wgpu::TextureFormat::BC5RGUnorm,      wgpu::TextureFormat::BC5RGSnorm,
+        wgpu::TextureFormat::BC6HRGBUfloat,   wgpu::TextureFormat::BC6HRGBFloat,
+        wgpu::TextureFormat::BC7RGBAUnorm,    wgpu::TextureFormat::BC7RGBAUnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+        wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+        wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+        wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
+        wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm,
+        wgpu::TextureFormat::ASTC4x4Unorm,    wgpu::TextureFormat::ASTC4x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x4Unorm,    wgpu::TextureFormat::ASTC5x4UnormSrgb,
+        wgpu::TextureFormat::ASTC5x5Unorm,    wgpu::TextureFormat::ASTC5x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x5Unorm,    wgpu::TextureFormat::ASTC6x5UnormSrgb,
+        wgpu::TextureFormat::ASTC6x6Unorm,    wgpu::TextureFormat::ASTC6x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x5Unorm,    wgpu::TextureFormat::ASTC8x5UnormSrgb,
+        wgpu::TextureFormat::ASTC8x6Unorm,    wgpu::TextureFormat::ASTC8x6UnormSrgb,
+        wgpu::TextureFormat::ASTC8x8Unorm,    wgpu::TextureFormat::ASTC8x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x5Unorm,   wgpu::TextureFormat::ASTC10x5UnormSrgb,
+        wgpu::TextureFormat::ASTC10x6Unorm,   wgpu::TextureFormat::ASTC10x6UnormSrgb,
+        wgpu::TextureFormat::ASTC10x8Unorm,   wgpu::TextureFormat::ASTC10x8UnormSrgb,
+        wgpu::TextureFormat::ASTC10x10Unorm,  wgpu::TextureFormat::ASTC10x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x10Unorm,  wgpu::TextureFormat::ASTC12x10UnormSrgb,
+        wgpu::TextureFormat::ASTC12x12Unorm,  wgpu::TextureFormat::ASTC12x12UnormSrgb};
+    static_assert(kCompressedFormats.size() ==
+                      kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
+                  "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
+
+    static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
+        wgpu::TextureFormat::Depth16Unorm,         wgpu::TextureFormat::Depth32Float,
+        wgpu::TextureFormat::Depth24Plus,          wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
+    };
+    static constexpr std::array<wgpu::TextureFormat, 4> kStencilFormats = {
+        wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24UnormStencil8,
+        wgpu::TextureFormat::Depth32FloatStencil8,
+        wgpu::TextureFormat::Stencil8,
+    };
+    static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
+        wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth24UnormStencil8,
+        wgpu::TextureFormat::Depth32FloatStencil8,
+    };
+
+    bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+
+    bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
+    bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
+    bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
+
+    bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
+    bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
+
+    bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
+    bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);
+    bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat);
+
+    uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+    uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
+    uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
+
+    const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
+    const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
+    uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
+
+    wgpu::TextureDimension ViewDimensionToTextureDimension(
+        const wgpu::TextureViewDimension dimension);
+}  // namespace utils
+
+#endif
diff --git a/src/dawn/utils/Timer.h b/src/dawn/utils/Timer.h
new file mode 100644
index 0000000..86587dd
--- /dev/null
+++ b/src/dawn/utils/Timer.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TIMER_H_
+#define UTILS_TIMER_H_
+
+namespace utils {
+
+    class Timer {
+      public:
+        virtual ~Timer() {
+        }
+
+        // Timer functionality: Use start() and stop() to record the duration and use
+        // getElapsedTime() to query that duration.  If getElapsedTime() is called in between, it
+        // will report the elapsed time since start().
+        virtual void Start() = 0;
+        virtual void Stop() = 0;
+        virtual double GetElapsedTime() const = 0;
+
+        // Timestamp functionality: Use getAbsoluteTime() to get an absolute time with an unknown
+        // origin. This time moves forward regardless of start()/stop().
+        virtual double GetAbsoluteTime() = 0;
+    };
+
+    Timer* CreateTimer();
+
+}  // namespace utils
+
+#endif  // UTILS_TIMER_H_
diff --git a/src/dawn/utils/VulkanBinding.cpp b/src/dawn/utils/VulkanBinding.cpp
new file mode 100644
index 0000000..fc94090
--- /dev/null
+++ b/src/dawn/utils/VulkanBinding.cpp
@@ -0,0 +1,57 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/VulkanBackend.h"
+
+// Include GLFW after VulkanBackend so that it declares the Vulkan-specific functions
+#include "GLFW/glfw3.h"
+
+#include <memory>
+
+namespace utils {
+
+    class VulkanBinding : public BackendBinding {
+      public:
+        VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+        }
+
+        uint64_t GetSwapChainImplementation() override {
+            if (mSwapchainImpl.userData == nullptr) {
+                VkSurfaceKHR surface = VK_NULL_HANDLE;
+                if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
+                                            nullptr, &surface) != VK_SUCCESS) {
+                    ASSERT(false);
+                }
+
+                mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
+            }
+            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+        }
+        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+            ASSERT(mSwapchainImpl.userData != nullptr);
+            return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+        }
+
+      private:
+        DawnSwapChainImplementation mSwapchainImpl = {};
+    };
+
+    BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
+        return new VulkanBinding(window, device);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/WGPUHelpers.cpp b/src/dawn/utils/WGPUHelpers.cpp
new file mode 100644
index 0000000..537cdcb
--- /dev/null
+++ b/src/dawn/utils/WGPUHelpers.cpp
@@ -0,0 +1,391 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+
+#include "spirv-tools/optimizer.hpp"
+
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <mutex>
+#include <sstream>
+
+namespace utils {
+    wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
+        // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
+        // aren't RAII, we don't return directly on success and instead always go through the code
+        // path that destroys the SPIRV-Tools objects.
+        wgpu::ShaderModule result = nullptr;
+
+        spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+        ASSERT(context != nullptr);
+
+        spv_binary spirv = nullptr;
+        spv_diagnostic diagnostic = nullptr;
+        if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
+            ASSERT(spirv != nullptr);
+            ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
+
+            wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
+            spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
+            spirvDesc.code = spirv->code;
+
+            wgpu::ShaderModuleDescriptor descriptor;
+            descriptor.nextInChain = &spirvDesc;
+            result = device.CreateShaderModule(&descriptor);
+        } else {
+            ASSERT(diagnostic != nullptr);
+            dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
+                               << diagnostic->position.line + 1 << ":"
+                               << diagnostic->position.column + 1 << ": " << diagnostic->error;
+        }
+
+        spvDiagnosticDestroy(diagnostic);
+        spvBinaryDestroy(spirv);
+        spvContextDestroy(context);
+
+        return result;
+    }
+
+    wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
+        wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = source;
+        wgpu::ShaderModuleDescriptor descriptor;
+        descriptor.nextInChain = &wgslDesc;
+        return device.CreateShaderModule(&descriptor);
+    }
+
+    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                      const void* data,
+                                      uint64_t size,
+                                      wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
+        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+        device.GetQueue().WriteBuffer(buffer, 0, data, size);
+        return buffer;
+    }
+
+    ComboRenderPassDescriptor::ComboRenderPassDescriptor(
+        std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+        wgpu::TextureView depthStencil) {
+        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+            cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+            cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
+            cColorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+        }
+
+        cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
+        cDepthStencilAttachmentInfo.stencilClearValue = 0;
+        cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+        cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+        colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
+        uint32_t colorAttachmentIndex = 0;
+        for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
+            if (colorAttachment.Get() != nullptr) {
+                cColorAttachments[colorAttachmentIndex].view = colorAttachment;
+            }
+            ++colorAttachmentIndex;
+        }
+        colorAttachments = cColorAttachments.data();
+
+        if (depthStencil.Get() != nullptr) {
+            cDepthStencilAttachmentInfo.view = depthStencil;
+            depthStencilAttachment = &cDepthStencilAttachmentInfo;
+        } else {
+            depthStencilAttachment = nullptr;
+        }
+    }
+
+    ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
+        *this = other;
+    }
+
+    const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
+        const ComboRenderPassDescriptor& otherRenderPass) {
+        cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
+        cColorAttachments = otherRenderPass.cColorAttachments;
+        colorAttachmentCount = otherRenderPass.colorAttachmentCount;
+
+        colorAttachments = cColorAttachments.data();
+
+        if (otherRenderPass.depthStencilAttachment != nullptr) {
+            // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
+            depthStencilAttachment = &cDepthStencilAttachmentInfo;
+        } else {
+            depthStencilAttachment = nullptr;
+        }
+
+        return *this;
+    }
+    void ComboRenderPassDescriptor::UnsetDepthStencilLoadStoreOpsForFormat(
+        wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth16Unorm:
+                cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+                cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+                break;
+            case wgpu::TextureFormat::Stencil8:
+                cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+                cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+                break;
+            default:
+                break;
+        }
+    }
+
+    BasicRenderPass::BasicRenderPass()
+        : width(0),
+          height(0),
+          color(nullptr),
+          colorFormat(wgpu::TextureFormat::RGBA8Unorm),
+          renderPassInfo({}) {
+    }
+
+    BasicRenderPass::BasicRenderPass(uint32_t texWidth,
+                                     uint32_t texHeight,
+                                     wgpu::Texture colorAttachment,
+                                     wgpu::TextureFormat textureFormat)
+        : width(texWidth),
+          height(texHeight),
+          color(colorAttachment),
+          colorFormat(textureFormat),
+          renderPassInfo({colorAttachment.CreateView()}) {
+    }
+
+    BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
+                                          uint32_t width,
+                                          uint32_t height,
+                                          wgpu::TextureFormat format) {
+        DAWN_ASSERT(width > 0 && height > 0);
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture color = device.CreateTexture(&descriptor);
+
+        return BasicRenderPass(width, height, color);
+    }
+
+    wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+                                                uint64_t offset,
+                                                uint32_t bytesPerRow,
+                                                uint32_t rowsPerImage) {
+        wgpu::ImageCopyBuffer imageCopyBuffer = {};
+        imageCopyBuffer.buffer = buffer;
+        imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
+
+        return imageCopyBuffer;
+    }
+
+    wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
+                                                  uint32_t mipLevel,
+                                                  wgpu::Origin3D origin,
+                                                  wgpu::TextureAspect aspect) {
+        wgpu::ImageCopyTexture imageCopyTexture;
+        imageCopyTexture.texture = texture;
+        imageCopyTexture.mipLevel = mipLevel;
+        imageCopyTexture.origin = origin;
+        imageCopyTexture.aspect = aspect;
+
+        return imageCopyTexture;
+    }
+
+    wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+                                                    uint32_t bytesPerRow,
+                                                    uint32_t rowsPerImage) {
+        wgpu::TextureDataLayout textureDataLayout;
+        textureDataLayout.offset = offset;
+        textureDataLayout.bytesPerRow = bytesPerRow;
+        textureDataLayout.rowsPerImage = rowsPerImage;
+
+        return textureDataLayout;
+    }
+
+    wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+                                                 const wgpu::BindGroupLayout* bindGroupLayout) {
+        wgpu::PipelineLayoutDescriptor descriptor;
+        if (bindGroupLayout != nullptr) {
+            descriptor.bindGroupLayoutCount = 1;
+            descriptor.bindGroupLayouts = bindGroupLayout;
+        } else {
+            descriptor.bindGroupLayoutCount = 0;
+            descriptor.bindGroupLayouts = nullptr;
+        }
+        return device.CreatePipelineLayout(&descriptor);
+    }
+
+    wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+                                            std::vector<wgpu::BindGroupLayout> bgls) {
+        wgpu::PipelineLayoutDescriptor descriptor;
+        descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
+        descriptor.bindGroupLayouts = bgls.data();
+        return device.CreatePipelineLayout(&descriptor);
+    }
+
+    wgpu::BindGroupLayout MakeBindGroupLayout(
+        const wgpu::Device& device,
+        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
+        std::vector<wgpu::BindGroupLayoutEntry> entries;
+        for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+            entries.push_back(entry);
+        }
+
+        wgpu::BindGroupLayoutDescriptor descriptor;
+        descriptor.entryCount = static_cast<uint32_t>(entries.size());
+        descriptor.entries = entries.data();
+        return device.CreateBindGroupLayout(&descriptor);
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::BufferBindingType bufferType,
+        bool bufferHasDynamicOffset,
+        uint64_t bufferMinBindingSize) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        buffer.type = bufferType;
+        buffer.hasDynamicOffset = bufferHasDynamicOffset;
+        buffer.minBindingSize = bufferMinBindingSize;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::SamplerBindingType samplerType) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        sampler.type = samplerType;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::TextureSampleType textureSampleType,
+        wgpu::TextureViewDimension textureViewDimension,
+        bool textureMultisampled) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        texture.sampleType = textureSampleType;
+        texture.viewDimension = textureViewDimension;
+        texture.multisampled = textureMultisampled;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::StorageTextureAccess storageTextureAccess,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension textureViewDimension) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        storageTexture.access = storageTextureAccess;
+        storageTexture.format = format;
+        storageTexture.viewDimension = textureViewDimension;
+    }
+
+    // ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
+    // of declaring a new one every time it's needed.
+    wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::ExternalTextureBindingLayout* bindingLayout) {
+        binding = entryBinding;
+        visibility = entryVisibility;
+        nextInChain = bindingLayout;
+    }
+
+    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+        const wgpu::BindGroupLayoutEntry& entry)
+        : wgpu::BindGroupLayoutEntry(entry) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                             const wgpu::Sampler& sampler)
+        : binding(binding), sampler(sampler) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                             const wgpu::TextureView& textureView)
+        : binding(binding), textureView(textureView) {
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(
+        uint32_t binding,
+        const wgpu::ExternalTexture& externalTexture)
+        : binding(binding) {
+        externalTextureBindingEntry.externalTexture = externalTexture;
+    }
+
+    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                             const wgpu::Buffer& buffer,
+                                                             uint64_t offset,
+                                                             uint64_t size)
+        : binding(binding), buffer(buffer), offset(offset), size(size) {
+    }
+
+    wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+        wgpu::BindGroupEntry result;
+
+        result.binding = binding;
+        result.sampler = sampler;
+        result.textureView = textureView;
+        result.buffer = buffer;
+        result.offset = offset;
+        result.size = size;
+        if (externalTextureBindingEntry.externalTexture != nullptr) {
+            result.nextInChain = &externalTextureBindingEntry;
+        }
+
+        return result;
+    }
+
+    wgpu::BindGroup MakeBindGroup(
+        const wgpu::Device& device,
+        const wgpu::BindGroupLayout& layout,
+        std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+        std::vector<wgpu::BindGroupEntry> entries;
+        for (const BindingInitializationHelper& helper : entriesInitializer) {
+            entries.push_back(helper.GetAsBinding());
+        }
+
+        wgpu::BindGroupDescriptor descriptor;
+        descriptor.layout = layout;
+        descriptor.entryCount = entries.size();
+        descriptor.entries = entries.data();
+
+        return device.CreateBindGroup(&descriptor);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/WGPUHelpers.h b/src/dawn/utils/WGPUHelpers.h
new file mode 100644
index 0000000..bfffae2
--- /dev/null
+++ b/src/dawn/utils/WGPUHelpers.h
@@ -0,0 +1,182 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_DAWNHELPERS_H_
+#define UTILS_DAWNHELPERS_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include <array>
+#include <initializer_list>
+#include <vector>
+
+#include "dawn/common/Constants.h"
+#include "dawn/utils/TextureUtils.h"
+
+namespace utils {
+
+    enum Expectation { Success, Failure };
+
+    wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
+    wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
+
+    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                      const void* data,
+                                      uint64_t size,
+                                      wgpu::BufferUsage usage);
+
+    template <typename T>
+    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                      wgpu::BufferUsage usage,
+                                      std::initializer_list<T> data) {
+        return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
+    }
+
+    wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+                                                uint64_t offset,
+                                                uint32_t bytesPerRow,
+                                                uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+    wgpu::ImageCopyTexture CreateImageCopyTexture(
+        wgpu::Texture texture,
+        uint32_t level,
+        wgpu::Origin3D origin,
+        wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
+    wgpu::TextureDataLayout CreateTextureDataLayout(
+        uint64_t offset,
+        uint32_t bytesPerRow,
+        uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+
+    struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
+      public:
+        ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+                                  wgpu::TextureView depthStencil = wgpu::TextureView());
+
+        ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
+        const ComboRenderPassDescriptor& operator=(
+            const ComboRenderPassDescriptor& otherRenderPass);
+
+        void UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format);
+
+        std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
+        wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
+    };
+
+    struct BasicRenderPass {
+      public:
+        BasicRenderPass();
+        BasicRenderPass(uint32_t width,
+                        uint32_t height,
+                        wgpu::Texture color,
+                        wgpu::TextureFormat texture = kDefaultColorFormat);
+
+        static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+        uint32_t width;
+        uint32_t height;
+        wgpu::Texture color;
+        wgpu::TextureFormat colorFormat;
+        utils::ComboRenderPassDescriptor renderPassInfo;
+    };
+    BasicRenderPass CreateBasicRenderPass(
+        const wgpu::Device& device,
+        uint32_t width,
+        uint32_t height,
+        wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
+
+    wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+                                                 const wgpu::BindGroupLayout* bindGroupLayout);
+
+    wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+                                            std::vector<wgpu::BindGroupLayout> bgls);
+
+    extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
+
+    // Helpers to make creating bind group layouts look nicer:
+    //
+    //   utils::MakeBindGroupLayout(device, {
+    //       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+    //       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+    //       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+    //   });
+
+    struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
+        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                               wgpu::ShaderStage entryVisibility,
+                                               wgpu::BufferBindingType bufferType,
+                                               bool bufferHasDynamicOffset = false,
+                                               uint64_t bufferMinBindingSize = 0);
+        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                               wgpu::ShaderStage entryVisibility,
+                                               wgpu::SamplerBindingType samplerType);
+        BindingLayoutEntryInitializationHelper(
+            uint32_t entryBinding,
+            wgpu::ShaderStage entryVisibility,
+            wgpu::TextureSampleType textureSampleType,
+            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+            bool textureMultisampled = false);
+        BindingLayoutEntryInitializationHelper(
+            uint32_t entryBinding,
+            wgpu::ShaderStage entryVisibility,
+            wgpu::StorageTextureAccess storageTextureAccess,
+            wgpu::TextureFormat format,
+            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                               wgpu::ShaderStage entryVisibility,
+                                               wgpu::ExternalTextureBindingLayout* bindingLayout);
+
+        BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
+    };
+
+    wgpu::BindGroupLayout MakeBindGroupLayout(
+        const wgpu::Device& device,
+        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
+
+    // Helpers to make creating bind groups look nicer:
+    //
+    //   utils::MakeBindGroup(device, layout, {
+    //       {0, mySampler},
+    //       {1, myBuffer, offset, size},
+    //       {3, myTextureView}
+    //   });
+
+    // Structure with one constructor per-type of bindings, so that the initializer_list accepts
+    // bindings with the right type and no extra information.
+    struct BindingInitializationHelper {
+        BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
+        BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
+        BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
+        BindingInitializationHelper(uint32_t binding,
+                                    const wgpu::Buffer& buffer,
+                                    uint64_t offset = 0,
+                                    uint64_t size = wgpu::kWholeSize);
+
+        wgpu::BindGroupEntry GetAsBinding() const;
+
+        uint32_t binding;
+        wgpu::Sampler sampler;
+        wgpu::TextureView textureView;
+        wgpu::Buffer buffer;
+        wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
+        uint64_t offset = 0;
+        uint64_t size = 0;
+    };
+
+    wgpu::BindGroup MakeBindGroup(
+        const wgpu::Device& device,
+        const wgpu::BindGroupLayout& layout,
+        std::initializer_list<BindingInitializationHelper> entriesInitializer);
+
+}  // namespace utils
+
+#endif  // UTILS_DAWNHELPERS_H_
diff --git a/src/dawn/utils/WindowsDebugLogger.cpp b/src/dawn/utils/WindowsDebugLogger.cpp
new file mode 100644
index 0000000..159c71a
--- /dev/null
+++ b/src/dawn/utils/WindowsDebugLogger.cpp
@@ -0,0 +1,111 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/PlatformDebugLogger.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/windows_with_undefs.h"
+
+#include <array>
+#include <thread>
+
+namespace utils {
+
+    class WindowsDebugLogger : public PlatformDebugLogger {
+      public:
+        WindowsDebugLogger() : PlatformDebugLogger() {
+            if (IsDebuggerPresent()) {
+                // This condition is true when running inside Visual Studio or some other debugger.
+                // Messages are already printed there so we don't need to do anything.
+                return;
+            }
+
+            mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
+            ASSERT(mShouldExitHandle != nullptr);
+
+            mThread = std::thread(
+                [](HANDLE shouldExit) {
+                    // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
+                    // for the layout of this struct.
+                    struct {
+                        DWORD process_id;
+                        char data[4096 - sizeof(DWORD)];
+                    }* dbWinBuffer = nullptr;
+
+                    HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
+                                                     0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
+                    ASSERT(file != nullptr);
+                    ASSERT(file != INVALID_HANDLE_VALUE);
+
+                    dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
+                        MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
+                    ASSERT(dbWinBuffer != nullptr);
+
+                    HANDLE dbWinBufferReady =
+                        CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
+                    ASSERT(dbWinBufferReady != nullptr);
+
+                    HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
+                    ASSERT(dbWinDataReady != nullptr);
+
+                    std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
+                    while (true) {
+                        SetEvent(dbWinBufferReady);
+                        DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
+                                                            FALSE, INFINITE);
+                        if (wait == WAIT_OBJECT_0) {
+                            break;
+                        }
+                        ASSERT(wait == WAIT_OBJECT_0 + 1);
+                        fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
+                                dbWinBuffer->data);
+                        fflush(stderr);
+                    }
+
+                    CloseHandle(dbWinDataReady);
+                    CloseHandle(dbWinBufferReady);
+                    UnmapViewOfFile(dbWinBuffer);
+                    CloseHandle(file);
+                },
+                mShouldExitHandle);
+        }
+
+        ~WindowsDebugLogger() override {
+            if (IsDebuggerPresent()) {
+                // This condition is true when running inside Visual Studio or some other debugger.
+                // Messages are already printed there so we don't need to do anything.
+                return;
+            }
+
+            if (mShouldExitHandle != nullptr) {
+                BOOL result = SetEvent(mShouldExitHandle);
+                ASSERT(result != 0);
+                CloseHandle(mShouldExitHandle);
+            }
+
+            if (mThread.joinable()) {
+                mThread.join();
+            }
+        }
+
+      private:
+        std::thread mThread;
+        HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
+    };
+
+    PlatformDebugLogger* CreatePlatformDebugLogger() {
+        return new WindowsDebugLogger();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/WindowsTimer.cpp b/src/dawn/utils/WindowsTimer.cpp
new file mode 100644
index 0000000..ca165d0
--- /dev/null
+++ b/src/dawn/utils/WindowsTimer.cpp
@@ -0,0 +1,89 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <windows.h>
+
+namespace utils {
+
+    class WindowsTimer : public Timer {
+      public:
+        WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
+        }
+
+        ~WindowsTimer() override = default;
+
+        void Start() override {
+            LARGE_INTEGER curTime;
+            QueryPerformanceCounter(&curTime);
+            mStartTime = curTime.QuadPart;
+
+            // Cache the frequency
+            GetFrequency();
+
+            mRunning = true;
+        }
+
+        void Stop() override {
+            LARGE_INTEGER curTime;
+            QueryPerformanceCounter(&curTime);
+            mStopTime = curTime.QuadPart;
+
+            mRunning = false;
+        }
+
+        double GetElapsedTime() const override {
+            LONGLONG endTime;
+            if (mRunning) {
+                LARGE_INTEGER curTime;
+                QueryPerformanceCounter(&curTime);
+                endTime = curTime.QuadPart;
+            } else {
+                endTime = mStopTime;
+            }
+
+            return static_cast<double>(endTime - mStartTime) / mFrequency;
+        }
+
+        double GetAbsoluteTime() override {
+            LARGE_INTEGER curTime;
+            QueryPerformanceCounter(&curTime);
+
+            return static_cast<double>(curTime.QuadPart) / GetFrequency();
+        }
+
+      private:
+        LONGLONG GetFrequency() {
+            if (mFrequency == 0) {
+                LARGE_INTEGER frequency = {};
+                QueryPerformanceFrequency(&frequency);
+
+                mFrequency = frequency.QuadPart;
+            }
+
+            return mFrequency;
+        }
+
+        bool mRunning;
+        LONGLONG mStartTime;
+        LONGLONG mStopTime;
+        LONGLONG mFrequency;
+    };
+
+    Timer* CreateTimer() {
+        return new WindowsTimer();
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/WireHelper.cpp b/src/dawn/utils/WireHelper.cpp
new file mode 100644
index 0000000..73eed81
--- /dev/null
+++ b/src/dawn/utils/WireHelper.cpp
@@ -0,0 +1,178 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/WireHelper.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <cstring>
+#include <fstream>
+#include <iomanip>
+#include <set>
+#include <sstream>
+
+namespace utils {
+
+    namespace {
+
+        class WireServerTraceLayer : public dawn::wire::CommandHandler {
+          public:
+            WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
+                : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
+                const char* sep = GetPathSeparator();
+                if (mDir.size() > 0 && mDir.back() != *sep) {
+                    mDir += sep;
+                }
+            }
+
+            void BeginWireTrace(const char* name) {
+                std::string filename = name;
+                // Replace slashes in gtest names with underscores so everything is in one
+                // directory.
+                std::replace(filename.begin(), filename.end(), '/', '_');
+                std::replace(filename.begin(), filename.end(), '\\', '_');
+
+                // Prepend the filename with the directory.
+                filename = mDir + filename;
+
+                ASSERT(!mFile.is_open());
+                mFile.open(filename,
+                           std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
+
+                // Write the initial 8 bytes. This means the fuzzer should never inject an
+                // error.
+                const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
+                mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex),
+                            sizeof(injectedErrorIndex));
+            }
+
+            const volatile char* HandleCommands(const volatile char* commands,
+                                                size_t size) override {
+                if (mFile.is_open()) {
+                    mFile.write(const_cast<const char*>(commands), size);
+                }
+                return mHandler->HandleCommands(commands, size);
+            }
+
+          private:
+            std::string mDir;
+            dawn::wire::CommandHandler* mHandler;
+            std::ofstream mFile;
+        };
+
+        class WireHelperDirect : public WireHelper {
+          public:
+            WireHelperDirect() {
+                dawnProcSetProcs(&dawn::native::GetProcs());
+            }
+
+            std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+                ASSERT(backendDevice != nullptr);
+                return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
+            }
+
+            void BeginWireTrace(const char* name) override {
+            }
+
+            bool FlushClient() override {
+                return true;
+            }
+
+            bool FlushServer() override {
+                return true;
+            }
+        };
+
+        class WireHelperProxy : public WireHelper {
+          public:
+            explicit WireHelperProxy(const char* wireTraceDir) {
+                mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+                mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+                dawn::wire::WireServerDescriptor serverDesc = {};
+                serverDesc.procs = &dawn::native::GetProcs();
+                serverDesc.serializer = mS2cBuf.get();
+
+                mWireServer.reset(new dawn::wire::WireServer(serverDesc));
+                mC2sBuf->SetHandler(mWireServer.get());
+
+                if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
+                    mWireServerTraceLayer.reset(
+                        new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
+                    mC2sBuf->SetHandler(mWireServerTraceLayer.get());
+                }
+
+                dawn::wire::WireClientDescriptor clientDesc = {};
+                clientDesc.serializer = mC2sBuf.get();
+
+                mWireClient.reset(new dawn::wire::WireClient(clientDesc));
+                mS2cBuf->SetHandler(mWireClient.get());
+                dawnProcSetProcs(&dawn::wire::client::GetProcs());
+            }
+
+            std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+                ASSERT(backendDevice != nullptr);
+
+                auto reservation = mWireClient->ReserveDevice();
+                mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
+                dawn::native::GetProcs().deviceRelease(backendDevice);
+
+                return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
+            }
+
+            void BeginWireTrace(const char* name) override {
+                if (mWireServerTraceLayer) {
+                    return mWireServerTraceLayer->BeginWireTrace(name);
+                }
+            }
+
+            bool FlushClient() override {
+                return mC2sBuf->Flush();
+            }
+
+            bool FlushServer() override {
+                return mS2cBuf->Flush();
+            }
+
+          private:
+            std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+            std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
+            std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
+            std::unique_ptr<dawn::wire::WireServer> mWireServer;
+            std::unique_ptr<dawn::wire::WireClient> mWireClient;
+        };
+
+    }  // anonymous namespace
+
+    std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
+        if (useWire) {
+            return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
+        } else {
+            return std::unique_ptr<WireHelper>(new WireHelperDirect());
+        }
+    }
+
+    WireHelper::~WireHelper() {
+        dawnProcSetProcs(nullptr);
+    }
+
+}  // namespace utils
diff --git a/src/dawn/utils/WireHelper.h b/src/dawn/utils/WireHelper.h
new file mode 100644
index 0000000..78aa802
--- /dev/null
+++ b/src/dawn/utils/WireHelper.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_WIREHELPER_H_
+#define UTILS_WIREHELPER_H_
+
+#include "dawn/webgpu_cpp.h"
+
+#include <cstdint>
+#include <memory>
+
+namespace utils {
+
+    class WireHelper {
+      public:
+        virtual ~WireHelper();
+
+        // Registers the device on the wire, if present.
+        // Returns a pair of the client device and backend device.
+        // The function should take ownership of |backendDevice|.
+        virtual std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) = 0;
+
+        virtual void BeginWireTrace(const char* name) = 0;
+
+        virtual bool FlushClient() = 0;
+        virtual bool FlushServer() = 0;
+    };
+
+    std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir = nullptr);
+
+}  // namespace utils
+
+#endif  // UTILS_WIREHELPER_H_
diff --git a/src/dawn/wire/BUILD.gn b/src/dawn/wire/BUILD.gn
new file mode 100644
index 0000000..bff2136
--- /dev/null
+++ b/src/dawn/wire/BUILD.gn
@@ -0,0 +1,111 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+
+# Public dawn wire headers so they can be publically visible for dependencies of
+# dawn/wire
+source_set("headers") {
+  public_deps = [ "${dawn_root}/include/dawn:headers" ]
+  all_dependent_configs = [ "${dawn_root}/include/dawn:public" ]
+  sources = [
+    "${dawn_root}/include/dawn/wire/Wire.h",
+    "${dawn_root}/include/dawn/wire/WireClient.h",
+    "${dawn_root}/include/dawn/wire/WireServer.h",
+    "${dawn_root}/include/dawn/wire/dawn_wire_export.h",
+  ]
+}
+
+dawn_json_generator("gen") {
+  target = "wire"
+  outputs = [
+    "src/dawn/wire/ObjectType_autogen.h",
+    "src/dawn/wire/WireCmd_autogen.h",
+    "src/dawn/wire/WireCmd_autogen.cpp",
+    "src/dawn/wire/client/ApiObjects_autogen.h",
+    "src/dawn/wire/client/ApiProcs_autogen.cpp",
+    "src/dawn/wire/client/ClientBase_autogen.h",
+    "src/dawn/wire/client/ClientHandlers_autogen.cpp",
+    "src/dawn/wire/client/ClientPrototypes_autogen.inc",
+    "src/dawn/wire/server/ServerBase_autogen.h",
+    "src/dawn/wire/server/ServerDoers_autogen.cpp",
+    "src/dawn/wire/server/ServerHandlers_autogen.cpp",
+    "src/dawn/wire/server/ServerPrototypes_autogen.inc",
+  ]
+}
+
+dawn_component("wire") {
+  DEFINE_PREFIX = "DAWN_WIRE"
+
+  deps = [
+    ":gen",
+    "${dawn_root}/src/dawn/common",
+  ]
+
+  configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+  sources = get_target_outputs(":gen")
+  sources += [
+    "BufferConsumer.h",
+    "BufferConsumer_impl.h",
+    "ChunkedCommandHandler.cpp",
+    "ChunkedCommandHandler.h",
+    "ChunkedCommandSerializer.cpp",
+    "ChunkedCommandSerializer.h",
+    "SupportedFeatures.cpp",
+    "SupportedFeatures.h",
+    "Wire.cpp",
+    "WireClient.cpp",
+    "WireDeserializeAllocator.cpp",
+    "WireDeserializeAllocator.h",
+    "WireResult.h",
+    "WireServer.cpp",
+    "client/Adapter.cpp",
+    "client/Adapter.h",
+    "client/ApiObjects.h",
+    "client/Buffer.cpp",
+    "client/Buffer.h",
+    "client/Client.cpp",
+    "client/Client.h",
+    "client/ClientDoers.cpp",
+    "client/ClientInlineMemoryTransferService.cpp",
+    "client/Device.cpp",
+    "client/Device.h",
+    "client/Instance.cpp",
+    "client/Instance.h",
+    "client/LimitsAndFeatures.cpp",
+    "client/LimitsAndFeatures.h",
+    "client/ObjectAllocator.h",
+    "client/Queue.cpp",
+    "client/Queue.h",
+    "client/RequestTracker.h",
+    "client/ShaderModule.cpp",
+    "client/ShaderModule.h",
+    "server/ObjectStorage.h",
+    "server/Server.cpp",
+    "server/Server.h",
+    "server/ServerAdapter.cpp",
+    "server/ServerBuffer.cpp",
+    "server/ServerDevice.cpp",
+    "server/ServerInlineMemoryTransferService.cpp",
+    "server/ServerInstance.cpp",
+    "server/ServerQueue.cpp",
+    "server/ServerShaderModule.cpp",
+  ]
+
+  # Make headers publicly visible
+  public_deps = [ ":headers" ]
+}
diff --git a/src/dawn/wire/BufferConsumer.h b/src/dawn/wire/BufferConsumer.h
new file mode 100644
index 0000000..b11a68a
--- /dev/null
+++ b/src/dawn/wire/BufferConsumer.h
@@ -0,0 +1,85 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_H_
+#define DAWNWIRE_BUFFERCONSUMER_H_
+
+#include "dawn/wire/WireResult.h"
+
+#include <cstddef>
+
+namespace dawn::wire {
+
+    // BufferConsumer is a utility class that allows reading bytes from a buffer
+    // while simultaneously decrementing the amount of remaining space by exactly
+    // the amount read. It helps prevent bugs where incrementing a pointer and
+    // decrementing a size value are not kept in sync.
+    // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
+    template <typename BufferT>
+    class BufferConsumer {
+        static_assert(sizeof(BufferT) == 1,
+                      "BufferT must be 1-byte, but may have const/volatile qualifiers.");
+
+      public:
+        BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
+        }
+
+        BufferT* Buffer() const {
+            return mBuffer;
+        }
+        size_t AvailableSize() const {
+            return mSize;
+        }
+
+      protected:
+        template <typename T, typename N>
+        WireResult NextN(N count, T** data);
+
+        template <typename T>
+        WireResult Next(T** data);
+
+        template <typename T>
+        WireResult Peek(T** data);
+
+      private:
+        BufferT* mBuffer;
+        size_t mSize;
+    };
+
+    class SerializeBuffer : public BufferConsumer<char> {
+      public:
+        using BufferConsumer::BufferConsumer;
+        using BufferConsumer::Next;
+        using BufferConsumer::NextN;
+    };
+
+    class DeserializeBuffer : public BufferConsumer<const volatile char> {
+      public:
+        using BufferConsumer::BufferConsumer;
+        using BufferConsumer::Peek;
+
+        template <typename T, typename N>
+        WireResult ReadN(N count, const volatile T** data) {
+            return NextN(count, data);
+        }
+
+        template <typename T>
+        WireResult Read(const volatile T** data) {
+            return Next(data);
+        }
+    };
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_BUFFERCONSUMER_H_
diff --git a/src/dawn/wire/BufferConsumer_impl.h b/src/dawn/wire/BufferConsumer_impl.h
new file mode 100644
index 0000000..eef5d72
--- /dev/null
+++ b/src/dawn/wire/BufferConsumer_impl.h
@@ -0,0 +1,73 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+#define DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+
+#include "dawn/wire/BufferConsumer.h"
+
+#include <limits>
+#include <type_traits>
+
+namespace dawn::wire {
+
+    template <typename BufferT>
+    template <typename T>
+    WireResult BufferConsumer<BufferT>::Peek(T** data) {
+        if (sizeof(T) > mSize) {
+            return WireResult::FatalError;
+        }
+
+        *data = reinterpret_cast<T*>(mBuffer);
+        return WireResult::Success;
+    }
+
+    template <typename BufferT>
+    template <typename T>
+    WireResult BufferConsumer<BufferT>::Next(T** data) {
+        if (sizeof(T) > mSize) {
+            return WireResult::FatalError;
+        }
+
+        *data = reinterpret_cast<T*>(mBuffer);
+        mBuffer += sizeof(T);
+        mSize -= sizeof(T);
+        return WireResult::Success;
+    }
+
+    template <typename BufferT>
+    template <typename T, typename N>
+    WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
+        static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
+
+        constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+        if (count > kMaxCountWithoutOverflows) {
+            return WireResult::FatalError;
+        }
+
+        // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
+        size_t totalSize = sizeof(T) * count;
+        if (totalSize > mSize) {
+            return WireResult::FatalError;
+        }
+
+        *data = reinterpret_cast<T*>(mBuffer);
+        mBuffer += totalSize;
+        mSize -= totalSize;
+        return WireResult::Success;
+    }
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_BUFFERCONSUMER_IMPL_H_
diff --git a/src/dawn/wire/CMakeLists.txt b/src/dawn/wire/CMakeLists.txt
new file mode 100644
index 0000000..e490a4d
--- /dev/null
+++ b/src/dawn/wire/CMakeLists.txt
@@ -0,0 +1,83 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+    TARGET "wire"
+    PRINT_NAME "Dawn wire"
+    RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
+)
+
+add_library(dawn_wire ${DAWN_DUMMY_FILE})
+
+target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_wire PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/wire/Wire.h"
+    "${DAWN_INCLUDE_DIR}/dawn/wire/WireClient.h"
+    "${DAWN_INCLUDE_DIR}/dawn/wire/WireServer.h"
+    "${DAWN_INCLUDE_DIR}/dawn/wire/dawn_wire_export.h"
+    ${DAWN_WIRE_GEN_SOURCES}
+    "BufferConsumer.h"
+    "BufferConsumer_impl.h"
+    "ChunkedCommandHandler.cpp"
+    "ChunkedCommandHandler.h"
+    "ChunkedCommandSerializer.cpp"
+    "ChunkedCommandSerializer.h"
+    "SupportedFeatures.cpp"
+    "SupportedFeatures.h"
+    "Wire.cpp"
+    "WireClient.cpp"
+    "WireDeserializeAllocator.cpp"
+    "WireDeserializeAllocator.h"
+    "WireResult.h"
+    "WireServer.cpp"
+    "client/Adapter.cpp"
+    "client/Adapter.h"
+    "client/ApiObjects.h"
+    "client/Buffer.cpp"
+    "client/Buffer.h"
+    "client/Client.cpp"
+    "client/Client.h"
+    "client/ClientDoers.cpp"
+    "client/ClientInlineMemoryTransferService.cpp"
+    "client/Device.cpp"
+    "client/Device.h"
+    "client/Instance.cpp"
+    "client/Instance.h"
+    "client/LimitsAndFeatures.cpp"
+    "client/LimitsAndFeatures.h"
+    "client/ObjectAllocator.h"
+    "client/Queue.cpp"
+    "client/Queue.h"
+    "client/RequestTracker.h"
+    "client/ShaderModule.cpp"
+    "client/ShaderModule.h"
+    "server/ObjectStorage.h"
+    "server/Server.cpp"
+    "server/Server.h"
+    "server/ServerAdapter.cpp"
+    "server/ServerBuffer.cpp"
+    "server/ServerDevice.cpp"
+    "server/ServerInlineMemoryTransferService.cpp"
+    "server/ServerInstance.cpp"
+    "server/ServerQueue.cpp"
+    "server/ServerShaderModule.cpp"
+)
+target_link_libraries(dawn_wire
+    PUBLIC dawn_headers
+    PRIVATE dawn_common dawn_internal_config
+)
diff --git a/src/dawn/wire/ChunkedCommandHandler.cpp b/src/dawn/wire/ChunkedCommandHandler.cpp
new file mode 100644
index 0000000..8113686
--- /dev/null
+++ b/src/dawn/wire/ChunkedCommandHandler.cpp
@@ -0,0 +1,79 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/ChunkedCommandHandler.h"
+
+#include "dawn/common/Alloc.h"
+
+#include <algorithm>
+#include <cstring>
+
+namespace dawn::wire {
+
+    ChunkedCommandHandler::~ChunkedCommandHandler() = default;
+
+    const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
+                                                               size_t size) {
+        if (mChunkedCommandRemainingSize > 0) {
+            // If there is a chunked command in flight, append the command data.
+            // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
+            // in-flight chunked command, and then pass the rest along to a second call to
+            // |HandleCommandsImpl|.
+            size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
+
+            memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
+                   const_cast<const char*>(commands), chunkSize);
+            mChunkedCommandPutOffset += chunkSize;
+            mChunkedCommandRemainingSize -= chunkSize;
+
+            commands += chunkSize;
+            size -= chunkSize;
+
+            if (mChunkedCommandRemainingSize == 0) {
+                // Once the chunked command is complete, pass the data to the command handler
+                // implemenation.
+                auto chunkedCommandData = std::move(mChunkedCommandData);
+                if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) ==
+                    nullptr) {
+                    // |HandleCommandsImpl| returns nullptr on error. Forward any errors
+                    // out.
+                    return nullptr;
+                }
+            }
+        }
+
+        return HandleCommandsImpl(commands, size);
+    }
+
+    ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
+        const volatile char* commands,
+        size_t commandSize,
+        size_t initialSize) {
+        ASSERT(!mChunkedCommandData);
+
+        // Reserve space for all the command data we're expecting, and copy the initial data
+        // to the start of the memory.
+        mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
+        if (!mChunkedCommandData) {
+            return ChunkedCommandsResult::Error;
+        }
+
+        memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
+        mChunkedCommandPutOffset = initialSize;
+        mChunkedCommandRemainingSize = commandSize - initialSize;
+
+        return ChunkedCommandsResult::Consumed;
+    }
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/ChunkedCommandHandler.h b/src/dawn/wire/ChunkedCommandHandler.h
new file mode 100644
index 0000000..162feca
--- /dev/null
+++ b/src/dawn/wire/ChunkedCommandHandler.h
@@ -0,0 +1,71 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
+#define DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/Wire.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <cstdint>
+#include <memory>
+
+namespace dawn::wire {
+
+    class ChunkedCommandHandler : public CommandHandler {
+      public:
+        const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
+        ~ChunkedCommandHandler() override;
+
+      protected:
+        enum class ChunkedCommandsResult {
+            Passthrough,
+            Consumed,
+            Error,
+        };
+
+        // Returns |true| if the commands were entirely consumed into the chunked command vector
+        // and should be handled later once we receive all the command data.
+        // Returns |false| if commands should be handled now immediately.
+        ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
+            uint64_t commandSize64 =
+                reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
+
+            if (commandSize64 > std::numeric_limits<size_t>::max()) {
+                return ChunkedCommandsResult::Error;
+            }
+            size_t commandSize = static_cast<size_t>(commandSize64);
+            if (size < commandSize) {
+                return BeginChunkedCommandData(commands, commandSize, size);
+            }
+            return ChunkedCommandsResult::Passthrough;
+        }
+
+      private:
+        virtual const volatile char* HandleCommandsImpl(const volatile char* commands,
+                                                        size_t size) = 0;
+
+        ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
+                                                      size_t commandSize,
+                                                      size_t initialSize);
+
+        size_t mChunkedCommandRemainingSize = 0;
+        size_t mChunkedCommandPutOffset = 0;
+        std::unique_ptr<char[]> mChunkedCommandData;
+    };
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
diff --git a/src/dawn/wire/ChunkedCommandSerializer.cpp b/src/dawn/wire/ChunkedCommandSerializer.cpp
new file mode 100644
index 0000000..b2e4a56
--- /dev/null
+++ b/src/dawn/wire/ChunkedCommandSerializer.cpp
@@ -0,0 +1,38 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/ChunkedCommandSerializer.h"
+
+namespace dawn::wire {
+
+    ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
+        : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {
+    }
+
+    void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
+                                                           size_t remainingSize) {
+        while (remainingSize > 0) {
+            size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
+            void* dst = mSerializer->GetCmdSpace(chunkSize);
+            if (dst == nullptr) {
+                return;
+            }
+            memcpy(dst, allocatedBuffer, chunkSize);
+
+            allocatedBuffer += chunkSize;
+            remainingSize -= chunkSize;
+        }
+    }
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/ChunkedCommandSerializer.h b/src/dawn/wire/ChunkedCommandSerializer.h
new file mode 100644
index 0000000..92b8a44
--- /dev/null
+++ b/src/dawn/wire/ChunkedCommandSerializer.h
@@ -0,0 +1,114 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
+#define DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/Wire.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <algorithm>
+#include <cstring>
+#include <memory>
+
+namespace dawn::wire {
+
+    class ChunkedCommandSerializer {
+      public:
+        ChunkedCommandSerializer(CommandSerializer* serializer);
+
+        template <typename Cmd>
+        void SerializeCommand(const Cmd& cmd) {
+            SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
+        }
+
+        template <typename Cmd, typename ExtraSizeSerializeFn>
+        void SerializeCommand(const Cmd& cmd,
+                              size_t extraSize,
+                              ExtraSizeSerializeFn&& SerializeExtraSize) {
+            SerializeCommandImpl(
+                cmd,
+                [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
+                    return cmd.Serialize(requiredSize, serializeBuffer);
+                },
+                extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+        }
+
+        template <typename Cmd>
+        void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
+            SerializeCommand(cmd, objectIdProvider, 0,
+                             [](SerializeBuffer*) { return WireResult::Success; });
+        }
+
+        template <typename Cmd, typename ExtraSizeSerializeFn>
+        void SerializeCommand(const Cmd& cmd,
+                              const ObjectIdProvider& objectIdProvider,
+                              size_t extraSize,
+                              ExtraSizeSerializeFn&& SerializeExtraSize) {
+            SerializeCommandImpl(
+                cmd,
+                [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
+                                    SerializeBuffer* serializeBuffer) {
+                    return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
+                },
+                extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+        }
+
+      private:
+        template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
+        void SerializeCommandImpl(const Cmd& cmd,
+                                  SerializeCmdFn&& SerializeCmd,
+                                  size_t extraSize,
+                                  ExtraSizeSerializeFn&& SerializeExtraSize) {
+            size_t commandSize = cmd.GetRequiredSize();
+            size_t requiredSize = commandSize + extraSize;
+
+            if (requiredSize <= mMaxAllocationSize) {
+                char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
+                if (allocatedBuffer != nullptr) {
+                    SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
+                    WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+                    WireResult r2 = SerializeExtraSize(&serializeBuffer);
+                    if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+                        mSerializer->OnSerializeError();
+                    }
+                }
+                return;
+            }
+
+            auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
+            if (!cmdSpace) {
+                return;
+            }
+            SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
+            WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+            WireResult r2 = SerializeExtraSize(&serializeBuffer);
+            if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+                mSerializer->OnSerializeError();
+                return;
+            }
+            SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+        }
+
+        void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
+
+        CommandSerializer* mSerializer;
+        size_t mMaxAllocationSize;
+    };
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
diff --git a/src/dawn/wire/SupportedFeatures.cpp b/src/dawn/wire/SupportedFeatures.cpp
new file mode 100644
index 0000000..2d5a9f8
--- /dev/null
+++ b/src/dawn/wire/SupportedFeatures.cpp
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire {
+
+    // Note: Upon updating this list, please also update serialization/deserialization
+    // of limit structs on Adapter/Device initialization.
+    bool IsFeatureSupported(WGPUFeatureName feature) {
+        switch (feature) {
+            case WGPUFeatureName_Undefined:
+            case WGPUFeatureName_Force32:
+            case WGPUFeatureName_DawnNative:
+                return false;
+            case WGPUFeatureName_Depth24UnormStencil8:
+            case WGPUFeatureName_Depth32FloatStencil8:
+            case WGPUFeatureName_TimestampQuery:
+            case WGPUFeatureName_PipelineStatisticsQuery:
+            case WGPUFeatureName_TextureCompressionBC:
+            case WGPUFeatureName_TextureCompressionETC2:
+            case WGPUFeatureName_TextureCompressionASTC:
+            case WGPUFeatureName_IndirectFirstInstance:
+            case WGPUFeatureName_DepthClamping:
+            case WGPUFeatureName_DawnShaderFloat16:
+            case WGPUFeatureName_DawnInternalUsages:
+            case WGPUFeatureName_DawnMultiPlanarFormats:
+                return true;
+        }
+
+        // Catch-all, for unsupported features.
+        // "default:" is not used so we get compiler errors for
+        // newly added, unhandled features, but still catch completely
+        // unknown enums.
+        return false;
+    }
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/SupportedFeatures.h b/src/dawn/wire/SupportedFeatures.h
new file mode 100644
index 0000000..9c173e1
--- /dev/null
+++ b/src/dawn/wire/SupportedFeatures.h
@@ -0,0 +1,26 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SUPPORTEDFEATURES_H_
+#define DAWNWIRE_SUPPORTEDFEATURES_H_
+
+#include <dawn/webgpu.h>
+
+namespace dawn::wire {
+
+    bool IsFeatureSupported(WGPUFeatureName feature);
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_SUPPORTEDFEATURES_H_
diff --git a/src/dawn/wire/Wire.cpp b/src/dawn/wire/Wire.cpp
new file mode 100644
index 0000000..af3e6be
--- /dev/null
+++ b/src/dawn/wire/Wire.cpp
@@ -0,0 +1,28 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/Wire.h"
+
+namespace dawn::wire {
+
+    CommandSerializer::CommandSerializer() = default;
+    CommandSerializer::~CommandSerializer() = default;
+
+    void CommandSerializer::OnSerializeError() {
+    }
+
+    CommandHandler::CommandHandler() = default;
+    CommandHandler::~CommandHandler() = default;
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/WireClient.cpp b/src/dawn/wire/WireClient.cpp
new file mode 100644
index 0000000..0446da8
--- /dev/null
+++ b/src/dawn/wire/WireClient.cpp
@@ -0,0 +1,82 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire {
+
+    WireClient::WireClient(const WireClientDescriptor& descriptor)
+        : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {
+    }
+
+    WireClient::~WireClient() {
+        mImpl.reset();
+    }
+
+    const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
+        return mImpl->HandleCommands(commands, size);
+    }
+
+    ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
+        return mImpl->ReserveTexture(device);
+    }
+
+    ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
+        return mImpl->ReserveSwapChain(device);
+    }
+
+    ReservedDevice WireClient::ReserveDevice() {
+        return mImpl->ReserveDevice();
+    }
+
+    ReservedInstance WireClient::ReserveInstance() {
+        return mImpl->ReserveInstance();
+    }
+
+    void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
+        mImpl->ReclaimTextureReservation(reservation);
+    }
+
+    void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+        mImpl->ReclaimSwapChainReservation(reservation);
+    }
+
+    void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+        mImpl->ReclaimDeviceReservation(reservation);
+    }
+
+    void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+        mImpl->ReclaimInstanceReservation(reservation);
+    }
+
+    void WireClient::Disconnect() {
+        mImpl->Disconnect();
+    }
+
+    namespace client {
+        MemoryTransferService::MemoryTransferService() = default;
+
+        MemoryTransferService::~MemoryTransferService() = default;
+
+        MemoryTransferService::ReadHandle::ReadHandle() = default;
+
+        MemoryTransferService::ReadHandle::~ReadHandle() = default;
+
+        MemoryTransferService::WriteHandle::WriteHandle() = default;
+
+        MemoryTransferService::WriteHandle::~WriteHandle() = default;
+    }  // namespace client
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/WireDeserializeAllocator.cpp b/src/dawn/wire/WireDeserializeAllocator.cpp
new file mode 100644
index 0000000..e0a3432
--- /dev/null
+++ b/src/dawn/wire/WireDeserializeAllocator.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireDeserializeAllocator.h"
+
+#include <algorithm>
+
+namespace dawn::wire {
+    WireDeserializeAllocator::WireDeserializeAllocator() {
+        Reset();
+    }
+
+    WireDeserializeAllocator::~WireDeserializeAllocator() {
+        Reset();
+    }
+
+    void* WireDeserializeAllocator::GetSpace(size_t size) {
+        // Return space in the current buffer if possible first.
+        if (mRemainingSize >= size) {
+            char* buffer = mCurrentBuffer;
+            mCurrentBuffer += size;
+            mRemainingSize -= size;
+            return buffer;
+        }
+
+        // Otherwise allocate a new buffer and try again.
+        size_t allocationSize = std::max(size, size_t(2048));
+        char* allocation = static_cast<char*>(malloc(allocationSize));
+        if (allocation == nullptr) {
+            return nullptr;
+        }
+
+        mAllocations.push_back(allocation);
+        mCurrentBuffer = allocation;
+        mRemainingSize = allocationSize;
+        return GetSpace(size);
+    }
+
+    void WireDeserializeAllocator::Reset() {
+        for (auto allocation : mAllocations) {
+            free(allocation);
+        }
+        mAllocations.clear();
+
+        // The initial buffer is the inline buffer so that some allocations can be skipped
+        mCurrentBuffer = mStaticBuffer;
+        mRemainingSize = sizeof(mStaticBuffer);
+    }
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/WireDeserializeAllocator.h b/src/dawn/wire/WireDeserializeAllocator.h
new file mode 100644
index 0000000..cc2ad7b
--- /dev/null
+++ b/src/dawn/wire/WireDeserializeAllocator.h
@@ -0,0 +1,43 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
+#define DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
+
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <vector>
+
+namespace dawn::wire {
+    // A really really simple implementation of the DeserializeAllocator. It's main feature
+    // is that it has some inline storage so as to avoid allocations for the majority of
+    // commands.
+    class WireDeserializeAllocator : public DeserializeAllocator {
+      public:
+        WireDeserializeAllocator();
+        virtual ~WireDeserializeAllocator();
+
+        void* GetSpace(size_t size) override;
+
+        void Reset();
+
+      private:
+        size_t mRemainingSize = 0;
+        char* mCurrentBuffer = nullptr;
+        char mStaticBuffer[2048];
+        std::vector<char*> mAllocations;
+    };
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
diff --git a/src/dawn/wire/WireResult.h b/src/dawn/wire/WireResult.h
new file mode 100644
index 0000000..4025c2c
--- /dev/null
+++ b/src/dawn/wire/WireResult.h
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRERESULT_H_
+#define DAWNWIRE_WIRERESULT_H_
+
+#include "dawn/common/Compiler.h"
+
+namespace dawn::wire {
+
+    enum class [[nodiscard]] WireResult{
+        Success,
+        FatalError,
+    };
+
+// Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
+#define WIRE_TRY(EXPR)                                          \
+    do {                                                        \
+        WireResult exprResult = EXPR;                           \
+        if (DAWN_UNLIKELY(exprResult != WireResult::Success)) { \
+            return exprResult;                                  \
+        }                                                       \
+    } while (0)
+
+}  // namespace dawn::wire
+
+#endif  // DAWNWIRE_WIRERESULT_H_
diff --git a/src/dawn/wire/WireServer.cpp b/src/dawn/wire/WireServer.cpp
new file mode 100644
index 0000000..bf9b0a1
--- /dev/null
+++ b/src/dawn/wire/WireServer.cpp
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire {
+
+    WireServer::WireServer(const WireServerDescriptor& descriptor)
+        : mImpl(new server::Server(*descriptor.procs,
+                                   descriptor.serializer,
+                                   descriptor.memoryTransferService)) {
+    }
+
+    WireServer::~WireServer() {
+        mImpl.reset();
+    }
+
+    const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
+        return mImpl->HandleCommands(commands, size);
+    }
+
+    bool WireServer::InjectTexture(WGPUTexture texture,
+                                   uint32_t id,
+                                   uint32_t generation,
+                                   uint32_t deviceId,
+                                   uint32_t deviceGeneration) {
+        return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
+    }
+
+    bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
+                                     uint32_t id,
+                                     uint32_t generation,
+                                     uint32_t deviceId,
+                                     uint32_t deviceGeneration) {
+        return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
+    }
+
+    bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+        return mImpl->InjectDevice(device, id, generation);
+    }
+
+    bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+        return mImpl->InjectInstance(instance, id, generation);
+    }
+
+    WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
+        return mImpl->GetDevice(id, generation);
+    }
+
+    namespace server {
+        MemoryTransferService::MemoryTransferService() = default;
+
+        MemoryTransferService::~MemoryTransferService() = default;
+
+        MemoryTransferService::ReadHandle::ReadHandle() = default;
+
+        MemoryTransferService::ReadHandle::~ReadHandle() = default;
+
+        MemoryTransferService::WriteHandle::WriteHandle() = default;
+
+        MemoryTransferService::WriteHandle::~WriteHandle() = default;
+
+        void MemoryTransferService::WriteHandle::SetTarget(void* data) {
+            mTargetData = data;
+        }
+        void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
+            mDataLength = dataLength;
+        }
+    }  // namespace server
+
+}  // namespace dawn::wire
diff --git a/src/dawn/wire/client/Adapter.cpp b/src/dawn/wire/client/Adapter.cpp
new file mode 100644
index 0000000..b2dcc87
--- /dev/null
+++ b/src/dawn/wire/client/Adapter.cpp
@@ -0,0 +1,133 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Adapter.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+    Adapter::~Adapter() {
+        mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+            request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
+                              "Adapter destroyed before callback", request->userdata);
+        });
+    }
+
+    void Adapter::CancelCallbacksForDisconnect() {
+        mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+            request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
+                              request->userdata);
+        });
+    }
+
+    bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+        return mLimitsAndFeatures.GetLimits(limits);
+    }
+
+    bool Adapter::HasFeature(WGPUFeatureName feature) const {
+        return mLimitsAndFeatures.HasFeature(feature);
+    }
+
+    size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
+        return mLimitsAndFeatures.EnumerateFeatures(features);
+    }
+
+    void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
+        return mLimitsAndFeatures.SetLimits(limits);
+    }
+
+    void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+        return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+    }
+
+    void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
+        mProperties = *properties;
+        mProperties.nextInChain = nullptr;
+    }
+
+    void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+        *properties = mProperties;
+    }
+
+    void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                                WGPURequestDeviceCallback callback,
+                                void* userdata) {
+        if (client->IsDisconnected()) {
+            callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
+            return;
+        }
+
+        auto* allocation = client->DeviceAllocator().New(client);
+        uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
+
+        AdapterRequestDeviceCmd cmd;
+        cmd.adapterId = this->id;
+        cmd.requestSerial = serial;
+        cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+        cmd.descriptor = descriptor;
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
+                                                uint64_t requestSerial,
+                                                WGPURequestDeviceStatus status,
+                                                const char* message,
+                                                const WGPUSupportedLimits* limits,
+                                                uint32_t featuresCount,
+                                                const WGPUFeatureName* features) {
+        // May have been deleted or recreated so this isn't an error.
+        if (adapter == nullptr) {
+            return true;
+        }
+        return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits,
+                                                featuresCount, features);
+    }
+
+    bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
+                                          WGPURequestDeviceStatus status,
+                                          const char* message,
+                                          const WGPUSupportedLimits* limits,
+                                          uint32_t featuresCount,
+                                          const WGPUFeatureName* features) {
+        RequestDeviceData request;
+        if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
+
+        // If the return status is a failure we should give a null device to the callback and
+        // free the allocation.
+        if (status != WGPURequestDeviceStatus_Success) {
+            client->DeviceAllocator().Free(device);
+            request.callback(status, nullptr, message, request.userdata);
+            return true;
+        }
+
+        device->SetLimits(limits);
+        device->SetFeatures(features, featuresCount);
+
+        request.callback(status, ToAPI(device), message, request.userdata);
+        return true;
+    }
+
+    WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
+        dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
+        return nullptr;
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Adapter.h b/src/dawn/wire/client/Adapter.h
new file mode 100644
index 0000000..8753843
--- /dev/null
+++ b/src/dawn/wire/client/Adapter.h
@@ -0,0 +1,70 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_ADAPTER_H_
+#define DAWNWIRE_CLIENT_ADAPTER_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/LimitsAndFeatures.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+    class Adapter final : public ObjectBase {
+      public:
+        using ObjectBase::ObjectBase;
+
+        ~Adapter();
+        void CancelCallbacksForDisconnect() override;
+
+        bool GetLimits(WGPUSupportedLimits* limits) const;
+        bool HasFeature(WGPUFeatureName feature) const;
+        size_t EnumerateFeatures(WGPUFeatureName* features) const;
+        void SetLimits(const WGPUSupportedLimits* limits);
+        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+        void SetProperties(const WGPUAdapterProperties* properties);
+        void GetProperties(WGPUAdapterProperties* properties) const;
+        void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                           WGPURequestDeviceCallback callback,
+                           void* userdata);
+
+        bool OnRequestDeviceCallback(uint64_t requestSerial,
+                                     WGPURequestDeviceStatus status,
+                                     const char* message,
+                                     const WGPUSupportedLimits* limits,
+                                     uint32_t featuresCount,
+                                     const WGPUFeatureName* features);
+
+        // Unimplementable. Only availale in dawn_native.
+        WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
+
+      private:
+        LimitsAndFeatures mLimitsAndFeatures;
+        WGPUAdapterProperties mProperties;
+
+        struct RequestDeviceData {
+            WGPURequestDeviceCallback callback = nullptr;
+            ObjectId deviceObjectId;
+            void* userdata = nullptr;
+        };
+        RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_ADAPTER_H_
diff --git a/src/dawn/wire/client/ApiObjects.h b/src/dawn/wire/client/ApiObjects.h
new file mode 100644
index 0000000..080da48
--- /dev/null
+++ b/src/dawn/wire/client/ApiObjects.h
@@ -0,0 +1,29 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_APIOBJECTS_H_
+#define DAWNWIRE_CLIENT_APIOBJECTS_H_
+
+#include "dawn/wire/client/ObjectBase.h"
+
+#include "dawn/wire/client/Adapter.h"
+#include "dawn/wire/client/Buffer.h"
+#include "dawn/wire/client/Device.h"
+#include "dawn/wire/client/Instance.h"
+#include "dawn/wire/client/Queue.h"
+#include "dawn/wire/client/ShaderModule.h"
+
+#include "dawn/wire/client/ApiObjects_autogen.h"
+
+#endif  // DAWNWIRE_CLIENT_APIOBJECTS_H_
diff --git a/src/dawn/wire/client/Buffer.cpp b/src/dawn/wire/client/Buffer.cpp
new file mode 100644
index 0000000..21db737
--- /dev/null
+++ b/src/dawn/wire/client/Buffer.cpp
@@ -0,0 +1,406 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Buffer.h"
+
+#include "dawn/wire/BufferConsumer_impl.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+    // static
+    WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
+        Client* wireClient = device->client;
+
+        bool mappable =
+            (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
+            descriptor->mappedAtCreation;
+        if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
+            device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
+            return device->CreateErrorBuffer();
+        }
+
+        std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
+        std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+
+        DeviceCreateBufferCmd cmd;
+        cmd.deviceId = device->id;
+        cmd.descriptor = descriptor;
+        cmd.readHandleCreateInfoLength = 0;
+        cmd.readHandleCreateInfo = nullptr;
+        cmd.writeHandleCreateInfoLength = 0;
+        cmd.writeHandleCreateInfo = nullptr;
+
+        if (mappable) {
+            if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
+                // Create the read handle on buffer creation.
+                readHandle.reset(
+                    wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
+                if (readHandle == nullptr) {
+                    device->InjectError(WGPUErrorType_OutOfMemory,
+                                        "Failed to create buffer mapping");
+                    return device->CreateErrorBuffer();
+                }
+                cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
+            }
+
+            if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 ||
+                descriptor->mappedAtCreation) {
+                // Create the write handle on buffer creation.
+                writeHandle.reset(
+                    wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
+                if (writeHandle == nullptr) {
+                    device->InjectError(WGPUErrorType_OutOfMemory,
+                                        "Failed to create buffer mapping");
+                    return device->CreateErrorBuffer();
+                }
+                cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+            }
+        }
+
+        // Create the buffer and send the creation command.
+        // This must happen after any potential device->CreateErrorBuffer()
+        // as server expects allocating ids to be monotonically increasing
+        auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
+        Buffer* buffer = bufferObjectAndSerial->object.get();
+        buffer->mDevice = device;
+        buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
+        buffer->mSize = descriptor->size;
+        buffer->mDestructWriteHandleOnUnmap = false;
+
+        if (descriptor->mappedAtCreation) {
+            // If the buffer is mapped at creation, a write handle is created and will be
+            // destructed on unmap if the buffer doesn't have MapWrite usage
+            // The buffer is mapped right now.
+            buffer->mMapState = MapState::MappedAtCreation;
+
+            // This flag is for write handle created by mappedAtCreation
+            // instead of MapWrite usage. We don't have such a case for read handle
+            buffer->mDestructWriteHandleOnUnmap =
+                (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
+
+            buffer->mMapOffset = 0;
+            buffer->mMapSize = buffer->mSize;
+            ASSERT(writeHandle != nullptr);
+            buffer->mMappedData = writeHandle->GetData();
+        }
+
+        cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+
+        wireClient->SerializeCommand(
+            cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
+            [&](SerializeBuffer* serializeBuffer) {
+                if (readHandle != nullptr) {
+                    char* readHandleBuffer;
+                    WIRE_TRY(
+                        serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
+                    // Serialize the ReadHandle into the space after the command.
+                    readHandle->SerializeCreate(readHandleBuffer);
+                    buffer->mReadHandle = std::move(readHandle);
+                }
+                if (writeHandle != nullptr) {
+                    char* writeHandleBuffer;
+                    WIRE_TRY(serializeBuffer->NextN(cmd.writeHandleCreateInfoLength,
+                                                    &writeHandleBuffer));
+                    // Serialize the WriteHandle into the space after the command.
+                    writeHandle->SerializeCreate(writeHandleBuffer);
+                    buffer->mWriteHandle = std::move(writeHandle);
+                }
+
+                return WireResult::Success;
+            });
+        return ToAPI(buffer);
+    }
+
+    // static
+    WGPUBuffer Buffer::CreateError(Device* device) {
+        auto* allocation = device->client->BufferAllocator().New(device->client);
+        allocation->object->mDevice = device;
+        allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
+
+        DeviceCreateErrorBufferCmd cmd;
+        cmd.self = ToAPI(device);
+        cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+        device->client->SerializeCommand(cmd);
+
+        return ToAPI(allocation->object.get());
+    }
+
+    Buffer::~Buffer() {
+        ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+        FreeMappedData();
+    }
+
+    void Buffer::CancelCallbacksForDisconnect() {
+        ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+    }
+
+    void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
+        mRequests.CloseAll([status](MapRequestData* request) {
+            if (request->callback != nullptr) {
+                request->callback(status, request->userdata);
+            }
+        });
+    }
+
+    void Buffer::MapAsync(WGPUMapModeFlags mode,
+                          size_t offset,
+                          size_t size,
+                          WGPUBufferMapCallback callback,
+                          void* userdata) {
+        if (client->IsDisconnected()) {
+            return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
+        }
+
+        // Handle the defaulting of size required by WebGPU.
+        if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
+            size = mSize - offset;
+        }
+
+        // Create the request structure that will hold information while this mapping is
+        // in flight.
+        MapRequestData request = {};
+        request.callback = callback;
+        request.userdata = userdata;
+        request.offset = offset;
+        request.size = size;
+        if (mode & WGPUMapMode_Read) {
+            request.type = MapRequestType::Read;
+        } else if (mode & WGPUMapMode_Write) {
+            request.type = MapRequestType::Write;
+        }
+
+        uint64_t serial = mRequests.Add(std::move(request));
+
+        // Serialize the command to send to the server.
+        BufferMapAsyncCmd cmd;
+        cmd.bufferId = this->id;
+        cmd.requestSerial = serial;
+        cmd.mode = mode;
+        cmd.offset = offset;
+        cmd.size = size;
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
+                                    uint32_t status,
+                                    uint64_t readDataUpdateInfoLength,
+                                    const uint8_t* readDataUpdateInfo) {
+        MapRequestData request;
+        if (!mRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        auto FailRequest = [&request]() -> bool {
+            if (request.callback != nullptr) {
+                request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
+            }
+            return false;
+        };
+
+        // Take into account the client-side status of the request if the server says it is a
+        // success.
+        if (status == WGPUBufferMapAsyncStatus_Success) {
+            status = request.clientStatus;
+        }
+
+        if (status == WGPUBufferMapAsyncStatus_Success) {
+            switch (request.type) {
+                case MapRequestType::Read: {
+                    if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
+                        // This is the size of data deserialized from the command stream, which must
+                        // be CPU-addressable.
+                        return FailRequest();
+                    }
+
+                    // Validate to prevent bad map request; buffer destroyed during map request
+                    if (mReadHandle == nullptr) {
+                        return FailRequest();
+                    }
+                    // Update user map data with server returned data
+                    if (!mReadHandle->DeserializeDataUpdate(
+                            readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
+                            request.offset, request.size)) {
+                        return FailRequest();
+                    }
+                    mMapState = MapState::MappedForRead;
+                    mMappedData = const_cast<void*>(mReadHandle->GetData());
+                    break;
+                }
+                case MapRequestType::Write: {
+                    if (mWriteHandle == nullptr) {
+                        return FailRequest();
+                    }
+                    mMapState = MapState::MappedForWrite;
+                    mMappedData = mWriteHandle->GetData();
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+            }
+
+            mMapOffset = request.offset;
+            mMapSize = request.size;
+        }
+
+        if (request.callback) {
+            request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
+        }
+
+        return true;
+    }
+
+    void* Buffer::GetMappedRange(size_t offset, size_t size) {
+        if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
+            return nullptr;
+        }
+        return static_cast<uint8_t*>(mMappedData) + offset;
+    }
+
+    const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
+        if (!(IsMappedForWriting() || IsMappedForReading()) ||
+            !CheckGetMappedRangeOffsetSize(offset, size)) {
+            return nullptr;
+        }
+        return static_cast<uint8_t*>(mMappedData) + offset;
+    }
+
+    void Buffer::Unmap() {
+        // Invalidate the local pointer, and cancel all other in-flight requests that would
+        // turn into errors anyway (you can't double map). This prevents race when the following
+        // happens, where the application code would have unmapped a buffer but still receive a
+        // callback:
+        //   - Client -> Server: MapRequest1, Unmap, MapRequest2
+        //   - Server -> Client: Result of MapRequest1
+        //   - Unmap locally on the client
+        //   - Server -> Client: Result of MapRequest2
+
+        // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
+        if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
+            mWriteHandle != nullptr) {
+            // Writes need to be flushed before Unmap is sent. Unmap calls all associated
+            // in-flight callbacks which may read the updated data.
+
+            // Get the serialization size of data update writes.
+            size_t writeDataUpdateInfoLength =
+                mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
+
+            BufferUpdateMappedDataCmd cmd;
+            cmd.bufferId = id;
+            cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
+            cmd.writeDataUpdateInfo = nullptr;
+            cmd.offset = mMapOffset;
+            cmd.size = mMapSize;
+
+            client->SerializeCommand(
+                cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+                    char* writeHandleBuffer;
+                    WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
+
+                    // Serialize flush metadata into the space after the command.
+                    // This closes the handle for writing.
+                    mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
+
+                    return WireResult::Success;
+                });
+
+            // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
+            // for mappedAtCreation usage. It is destroyed on unmap after flush to server
+            // instead of at buffer destruction.
+            if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
+                mWriteHandle = nullptr;
+                if (mReadHandle) {
+                    // If it's both mappedAtCreation and MapRead we need to reset
+                    // mMappedData to readHandle's GetData(). This could be changed to
+                    // merging read/write handle in future
+                    mMappedData = const_cast<void*>(mReadHandle->GetData());
+                }
+            }
+        }
+
+        // Free map access tokens
+        mMapState = MapState::Unmapped;
+        mMapOffset = 0;
+        mMapSize = 0;
+
+        // Tag all mapping requests still in flight as unmapped before callback.
+        mRequests.ForAll([](MapRequestData* request) {
+            if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+                request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
+            }
+        });
+
+        BufferUnmapCmd cmd;
+        cmd.self = ToAPI(this);
+        client->SerializeCommand(cmd);
+    }
+
+    void Buffer::Destroy() {
+        // Remove the current mapping and destroy Read/WriteHandles.
+        FreeMappedData();
+
+        // Tag all mapping requests still in flight as destroyed before callback.
+        mRequests.ForAll([](MapRequestData* request) {
+            if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+                request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
+            }
+        });
+
+        BufferDestroyCmd cmd;
+        cmd.self = ToAPI(this);
+        client->SerializeCommand(cmd);
+    }
+
+    bool Buffer::IsMappedForReading() const {
+        return mMapState == MapState::MappedForRead;
+    }
+
+    bool Buffer::IsMappedForWriting() const {
+        return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
+    }
+
+    bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
+        if (offset % 8 != 0 || size % 4 != 0) {
+            return false;
+        }
+
+        if (size > mMapSize || offset < mMapOffset) {
+            return false;
+        }
+
+        size_t offsetInMappedRange = offset - mMapOffset;
+        return offsetInMappedRange <= mMapSize - size;
+    }
+
+    void Buffer::FreeMappedData() {
+#if defined(DAWN_ENABLE_ASSERTS)
+        // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
+        // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
+        // interaction of mapping and GC.
+        if (mMappedData) {
+            memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
+        }
+#endif  // defined(DAWN_ENABLE_ASSERTS)
+
+        mMapOffset = 0;
+        mMapSize = 0;
+        mReadHandle = nullptr;
+        mWriteHandle = nullptr;
+        mMappedData = nullptr;
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Buffer.h b/src/dawn/wire/client/Buffer.h
new file mode 100644
index 0000000..186c82b
--- /dev/null
+++ b/src/dawn/wire/client/Buffer.h
@@ -0,0 +1,109 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_BUFFER_H_
+#define DAWNWIRE_CLIENT_BUFFER_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+    class Device;
+
+    class Buffer final : public ObjectBase {
+      public:
+        using ObjectBase::ObjectBase;
+
+        static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
+        static WGPUBuffer CreateError(Device* device);
+
+        ~Buffer();
+
+        bool OnMapAsyncCallback(uint64_t requestSerial,
+                                uint32_t status,
+                                uint64_t readDataUpdateInfoLength,
+                                const uint8_t* readDataUpdateInfo);
+        void MapAsync(WGPUMapModeFlags mode,
+                      size_t offset,
+                      size_t size,
+                      WGPUBufferMapCallback callback,
+                      void* userdata);
+        void* GetMappedRange(size_t offset, size_t size);
+        const void* GetConstMappedRange(size_t offset, size_t size);
+        void Unmap();
+
+        void Destroy();
+
+      private:
+        void CancelCallbacksForDisconnect() override;
+        void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
+
+        bool IsMappedForReading() const;
+        bool IsMappedForWriting() const;
+        bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+
+        void FreeMappedData();
+
+        Device* mDevice;
+
+        enum class MapRequestType { None, Read, Write };
+
+        enum class MapState {
+            Unmapped,
+            MappedForRead,
+            MappedForWrite,
+            MappedAtCreation,
+        };
+
+        // We want to defer all the validation to the server, which means we could have multiple
+        // map request in flight at a single time and need to track them separately.
+        // On well-behaved applications, only one request should exist at a single time.
+        struct MapRequestData {
+            WGPUBufferMapCallback callback = nullptr;
+            void* userdata = nullptr;
+            size_t offset = 0;
+            size_t size = 0;
+
+            // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
+            // precedence over the success value returned from the server. However Error statuses
+            // from the server take precedence over the client-side status.
+            WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
+
+            MapRequestType type = MapRequestType::None;
+        };
+        RequestTracker<MapRequestData> mRequests;
+        uint64_t mSize = 0;
+
+        // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
+        // requests.
+        // TODO(enga): Use a tagged pointer to save space.
+        std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
+        std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
+        MapState mMapState = MapState::Unmapped;
+        bool mDestructWriteHandleOnUnmap = false;
+
+        void* mMappedData = nullptr;
+        size_t mMapOffset = 0;
+        size_t mMapSize = 0;
+
+        std::weak_ptr<bool> mDeviceIsAlive;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_BUFFER_H_
diff --git a/src/dawn/wire/client/Client.cpp b/src/dawn/wire/client/Client.cpp
new file mode 100644
index 0000000..5db8444
--- /dev/null
+++ b/src/dawn/wire/client/Client.cpp
@@ -0,0 +1,171 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Client.h"
+
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+    namespace {
+
+        class NoopCommandSerializer final : public CommandSerializer {
+          public:
+            static NoopCommandSerializer* GetInstance() {
+                static NoopCommandSerializer gNoopCommandSerializer;
+                return &gNoopCommandSerializer;
+            }
+
+            ~NoopCommandSerializer() = default;
+
+            size_t GetMaximumAllocationSize() const final {
+                return 0;
+            }
+            void* GetCmdSpace(size_t size) final {
+                return nullptr;
+            }
+            bool Flush() final {
+                return false;
+            }
+        };
+
+    }  // anonymous namespace
+
+    Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
+        : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
+        if (mMemoryTransferService == nullptr) {
+            // If a MemoryTransferService is not provided, fall back to inline memory.
+            mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+            mMemoryTransferService = mOwnedMemoryTransferService.get();
+        }
+    }
+
+    Client::~Client() {
+        DestroyAllObjects();
+    }
+
+    void Client::DestroyAllObjects() {
+        for (auto& objectList : mObjects) {
+            ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
+            if (objectType == ObjectType::Device) {
+                continue;
+            }
+            while (!objectList.empty()) {
+                ObjectBase* object = objectList.head()->value();
+
+                DestroyObjectCmd cmd;
+                cmd.objectType = objectType;
+                cmd.objectId = object->id;
+                SerializeCommand(cmd);
+                FreeObject(objectType, object);
+            }
+        }
+
+        while (!mObjects[ObjectType::Device].empty()) {
+            ObjectBase* object = mObjects[ObjectType::Device].head()->value();
+
+            DestroyObjectCmd cmd;
+            cmd.objectType = ObjectType::Device;
+            cmd.objectId = object->id;
+            SerializeCommand(cmd);
+            FreeObject(ObjectType::Device, object);
+        }
+    }
+
+    ReservedTexture Client::ReserveTexture(WGPUDevice device) {
+        auto* allocation = TextureAllocator().New(this);
+
+        ReservedTexture result;
+        result.texture = ToAPI(allocation->object.get());
+        result.id = allocation->object->id;
+        result.generation = allocation->generation;
+        result.deviceId = FromAPI(device)->id;
+        result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+        return result;
+    }
+
+    ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
+        auto* allocation = SwapChainAllocator().New(this);
+
+        ReservedSwapChain result;
+        result.swapchain = ToAPI(allocation->object.get());
+        result.id = allocation->object->id;
+        result.generation = allocation->generation;
+        result.deviceId = FromAPI(device)->id;
+        result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+        return result;
+    }
+
+    ReservedDevice Client::ReserveDevice() {
+        auto* allocation = DeviceAllocator().New(this);
+
+        ReservedDevice result;
+        result.device = ToAPI(allocation->object.get());
+        result.id = allocation->object->id;
+        result.generation = allocation->generation;
+        return result;
+    }
+
+    ReservedInstance Client::ReserveInstance() {
+        auto* allocation = InstanceAllocator().New(this);
+
+        ReservedInstance result;
+        result.instance = ToAPI(allocation->object.get());
+        result.id = allocation->object->id;
+        result.generation = allocation->generation;
+        return result;
+    }
+
+    void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
+        TextureAllocator().Free(FromAPI(reservation.texture));
+    }
+
+    void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+        SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+    }
+
+    void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+        DeviceAllocator().Free(FromAPI(reservation.device));
+    }
+
+    void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+        InstanceAllocator().Free(FromAPI(reservation.instance));
+    }
+
+    void Client::Disconnect() {
+        mDisconnected = true;
+        mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
+
+        auto& deviceList = mObjects[ObjectType::Device];
+        {
+            for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
+                 device = device->next()) {
+                static_cast<Device*>(device->value())
+                    ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
+            }
+        }
+        for (auto& objectList : mObjects) {
+            for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
+                 object = object->next()) {
+                object->value()->CancelCallbacksForDisconnect();
+            }
+        }
+    }
+
+    bool Client::IsDisconnected() const {
+        return mDisconnected;
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Client.h b/src/dawn/wire/client/Client.h
new file mode 100644
index 0000000..fc84b3b
--- /dev/null
+++ b/src/dawn/wire/client/Client.h
@@ -0,0 +1,95 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_CLIENT_H_
+#define DAWNWIRE_CLIENT_CLIENT_H_
+
+#include <dawn/webgpu.h>
+#include <dawn/wire/Wire.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/common/NonCopyable.h"
+#include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireDeserializeAllocator.h"
+#include "dawn/wire/client/ClientBase_autogen.h"
+
+namespace dawn::wire::client {
+
+    class Device;
+    class MemoryTransferService;
+
+    class Client : public ClientBase {
+      public:
+        Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
+        ~Client() override;
+
+        // ChunkedCommandHandler implementation
+        const volatile char* HandleCommandsImpl(const volatile char* commands,
+                                                size_t size) override;
+
+        MemoryTransferService* GetMemoryTransferService() const {
+            return mMemoryTransferService;
+        }
+
+        ReservedTexture ReserveTexture(WGPUDevice device);
+        ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+        ReservedDevice ReserveDevice();
+        ReservedInstance ReserveInstance();
+
+        void ReclaimTextureReservation(const ReservedTexture& reservation);
+        void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+        void ReclaimDeviceReservation(const ReservedDevice& reservation);
+        void ReclaimInstanceReservation(const ReservedInstance& reservation);
+
+        template <typename Cmd>
+        void SerializeCommand(const Cmd& cmd) {
+            mSerializer.SerializeCommand(cmd, *this);
+        }
+
+        template <typename Cmd, typename ExtraSizeSerializeFn>
+        void SerializeCommand(const Cmd& cmd,
+                              size_t extraSize,
+                              ExtraSizeSerializeFn&& SerializeExtraSize) {
+            mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
+        }
+
+        void Disconnect();
+        bool IsDisconnected() const;
+
+        template <typename T>
+        void TrackObject(T* object) {
+            mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
+        }
+
+      private:
+        void DestroyAllObjects();
+
+#include "dawn/wire/client/ClientPrototypes_autogen.inc"
+
+        ChunkedCommandSerializer mSerializer;
+        WireDeserializeAllocator mAllocator;
+        MemoryTransferService* mMemoryTransferService = nullptr;
+        std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+
+        PerObjectType<LinkedList<ObjectBase>> mObjects;
+        bool mDisconnected = false;
+    };
+
+    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_CLIENT_H_
diff --git a/src/dawn/wire/client/ClientDoers.cpp b/src/dawn/wire/client/ClientDoers.cpp
new file mode 100644
index 0000000..7b99dc6
--- /dev/null
+++ b/src/dawn/wire/client/ClientDoers.cpp
@@ -0,0 +1,133 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+#include <limits>
+
+namespace dawn::wire::client {
+
+    bool Client::DoDeviceUncapturedErrorCallback(Device* device,
+                                                 WGPUErrorType errorType,
+                                                 const char* message) {
+        switch (errorType) {
+            case WGPUErrorType_NoError:
+            case WGPUErrorType_Validation:
+            case WGPUErrorType_OutOfMemory:
+            case WGPUErrorType_Unknown:
+            case WGPUErrorType_DeviceLost:
+                break;
+            default:
+                return false;
+        }
+        if (device == nullptr) {
+            // The device might have been deleted or recreated so this isn't an error.
+            return true;
+        }
+        device->HandleError(errorType, message);
+        return true;
+    }
+
+    bool Client::DoDeviceLoggingCallback(Device* device,
+                                         WGPULoggingType loggingType,
+                                         const char* message) {
+        if (device == nullptr) {
+            // The device might have been deleted or recreated so this isn't an error.
+            return true;
+        }
+        device->HandleLogging(loggingType, message);
+        return true;
+    }
+
+    bool Client::DoDeviceLostCallback(Device* device,
+                                      WGPUDeviceLostReason reason,
+                                      char const* message) {
+        if (device == nullptr) {
+            // The device might have been deleted or recreated so this isn't an error.
+            return true;
+        }
+        device->HandleDeviceLost(reason, message);
+        return true;
+    }
+
+    bool Client::DoDevicePopErrorScopeCallback(Device* device,
+                                               uint64_t requestSerial,
+                                               WGPUErrorType errorType,
+                                               const char* message) {
+        if (device == nullptr) {
+            // The device might have been deleted or recreated so this isn't an error.
+            return true;
+        }
+        return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
+    }
+
+    bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
+                                          uint64_t requestSerial,
+                                          uint32_t status,
+                                          uint64_t readDataUpdateInfoLength,
+                                          const uint8_t* readDataUpdateInfo) {
+        // The buffer might have been deleted or recreated so this isn't an error.
+        if (buffer == nullptr) {
+            return true;
+        }
+        return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
+                                          readDataUpdateInfo);
+    }
+
+    bool Client::DoQueueWorkDoneCallback(Queue* queue,
+                                         uint64_t requestSerial,
+                                         WGPUQueueWorkDoneStatus status) {
+        // The queue might have been deleted or recreated so this isn't an error.
+        if (queue == nullptr) {
+            return true;
+        }
+        return queue->OnWorkDoneCallback(requestSerial, status);
+    }
+
+    bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
+                                                            uint64_t requestSerial,
+                                                            WGPUCreatePipelineAsyncStatus status,
+                                                            const char* message) {
+        // The device might have been deleted or recreated so this isn't an error.
+        if (device == nullptr) {
+            return true;
+        }
+        return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+    }
+
+    bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
+                                                           uint64_t requestSerial,
+                                                           WGPUCreatePipelineAsyncStatus status,
+                                                           const char* message) {
+        // The device might have been deleted or recreated so this isn't an error.
+        if (device == nullptr) {
+            return true;
+        }
+        return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+    }
+
+    bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
+                                                          uint64_t requestSerial,
+                                                          WGPUCompilationInfoRequestStatus status,
+                                                          const WGPUCompilationInfo* info) {
+        // The shader module might have been deleted or recreated so this isn't an error.
+        if (shaderModule == nullptr) {
+            return true;
+        }
+        return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp b/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
new file mode 100644
index 0000000..e04ce80
--- /dev/null
+++ b/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
@@ -0,0 +1,131 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Assert.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+#include <cstring>
+
+namespace dawn::wire::client {
+
+    class InlineMemoryTransferService : public MemoryTransferService {
+        class ReadHandleImpl : public ReadHandle {
+          public:
+            explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+                : mStagingData(std::move(stagingData)), mSize(size) {
+            }
+
+            ~ReadHandleImpl() override = default;
+
+            size_t SerializeCreateSize() override {
+                return 0;
+            }
+
+            void SerializeCreate(void*) override {
+            }
+
+            const void* GetData() override {
+                return mStagingData.get();
+            }
+
+            bool DeserializeDataUpdate(const void* deserializePointer,
+                                       size_t deserializeSize,
+                                       size_t offset,
+                                       size_t size) override {
+                if (deserializeSize != size || deserializePointer == nullptr) {
+                    return false;
+                }
+
+                if (offset > mSize || size > mSize - offset) {
+                    return false;
+                }
+
+                void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
+                memcpy(start, deserializePointer, size);
+                return true;
+            }
+
+          private:
+            std::unique_ptr<uint8_t[]> mStagingData;
+            size_t mSize;
+        };
+
+        class WriteHandleImpl : public WriteHandle {
+          public:
+            explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+                : mStagingData(std::move(stagingData)), mSize(size) {
+            }
+
+            ~WriteHandleImpl() override = default;
+
+            size_t SerializeCreateSize() override {
+                return 0;
+            }
+
+            void SerializeCreate(void*) override {
+            }
+
+            void* GetData() override {
+                return mStagingData.get();
+            }
+
+            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+                ASSERT(offset <= mSize);
+                ASSERT(size <= mSize - offset);
+                return size;
+            }
+
+            void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
+                ASSERT(mStagingData != nullptr);
+                ASSERT(serializePointer != nullptr);
+                ASSERT(offset <= mSize);
+                ASSERT(size <= mSize - offset);
+                memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
+            }
+
+          private:
+            std::unique_ptr<uint8_t[]> mStagingData;
+            size_t mSize;
+        };
+
+      public:
+        InlineMemoryTransferService() {
+        }
+        ~InlineMemoryTransferService() override = default;
+
+        ReadHandle* CreateReadHandle(size_t size) override {
+            auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+            if (stagingData) {
+                return new ReadHandleImpl(std::move(stagingData), size);
+            }
+            return nullptr;
+        }
+
+        WriteHandle* CreateWriteHandle(size_t size) override {
+            auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+            if (stagingData) {
+                memset(stagingData.get(), 0, size);
+                return new WriteHandleImpl(std::move(stagingData), size);
+            }
+            return nullptr;
+        }
+    };
+
+    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+        return std::make_unique<InlineMemoryTransferService>();
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp b/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
new file mode 100644
index 0000000..44ca3ed
--- /dev/null
+++ b/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
@@ -0,0 +1,105 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/ClientMemoryTransferService_mock.h"
+
+#include <cstdio>
+#include "dawn/common/Assert.h"
+
+namespace dawn::wire::client {
+
+    MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+        : ReadHandle(), mService(service) {
+    }
+
+    MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+        mService->OnReadHandleDestroy(this);
+    }
+
+    size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
+        return mService->OnReadHandleSerializeCreateSize(this);
+    }
+
+    void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
+        mService->OnReadHandleSerializeCreate(this, serializePointer);
+    }
+
+    const void* MockMemoryTransferService::MockReadHandle::GetData() {
+        return mService->OnReadHandleGetData(this);
+    }
+
+    bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
+        const void* deserializePointer,
+        size_t deserializeSize,
+        size_t offset,
+        size_t size) {
+        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+        return mService->OnReadHandleDeserializeDataUpdate(
+            this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
+            size);
+    }
+
+    MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+        : WriteHandle(), mService(service) {
+    }
+
+    MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+        mService->OnWriteHandleDestroy(this);
+    }
+
+    size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
+        return mService->OnWriteHandleSerializeCreateSize(this);
+    }
+
+    void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
+        mService->OnWriteHandleSerializeCreate(this, serializePointer);
+    }
+
+    void* MockMemoryTransferService::MockWriteHandle::GetData() {
+        return mService->OnWriteHandleGetData(this);
+    }
+
+    size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
+                                                                                 size_t size) {
+        return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
+    }
+
+    void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
+                                                                         size_t offset,
+                                                                         size_t size) {
+        mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
+    }
+
+    MockMemoryTransferService::MockMemoryTransferService() = default;
+    MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+    MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(
+        size_t size) {
+        return OnCreateReadHandle(size);
+    }
+
+    MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(
+        size_t size) {
+        return OnCreateWriteHandle(size);
+    }
+
+    MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+        return new MockReadHandle(this);
+    }
+
+    MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+        return new MockWriteHandle(this);
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientMemoryTransferService_mock.h b/src/dawn/wire/client/ClientMemoryTransferService_mock.h
new file mode 100644
index 0000000..0974f40
--- /dev/null
+++ b/src/dawn/wire/client/ClientMemoryTransferService_mock.h
@@ -0,0 +1,99 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
+#define DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
+
+#include <gmock/gmock.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+    class MockMemoryTransferService : public MemoryTransferService {
+      public:
+        class MockReadHandle : public ReadHandle {
+          public:
+            explicit MockReadHandle(MockMemoryTransferService* service);
+            ~MockReadHandle() override;
+
+            size_t SerializeCreateSize() override;
+            void SerializeCreate(void* serializePointer) override;
+            const void* GetData() override;
+            bool DeserializeDataUpdate(const void* deserializePointer,
+                                       size_t deserializeSize,
+                                       size_t offset,
+                                       size_t size) override;
+
+          private:
+            MockMemoryTransferService* mService;
+        };
+
+        class MockWriteHandle : public WriteHandle {
+          public:
+            explicit MockWriteHandle(MockMemoryTransferService* service);
+            ~MockWriteHandle() override;
+
+            size_t SerializeCreateSize() override;
+            void SerializeCreate(void* serializePointer) override;
+            void* GetData() override;
+            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+            void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
+
+          private:
+            MockMemoryTransferService* mService;
+        };
+
+        MockMemoryTransferService();
+        ~MockMemoryTransferService() override;
+
+        ReadHandle* CreateReadHandle(size_t) override;
+        WriteHandle* CreateWriteHandle(size_t) override;
+
+        MockReadHandle* NewReadHandle();
+        MockWriteHandle* NewWriteHandle();
+
+        MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
+        MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
+
+        MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
+        MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
+        MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
+        MOCK_METHOD(bool,
+                    OnReadHandleDeserializeDataUpdate,
+                    (const ReadHandle*,
+                     const uint32_t* deserializePointer,
+                     size_t deserializeSize,
+                     size_t offset,
+                     size_t size));
+        MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
+
+        MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
+        MOCK_METHOD(void,
+                    OnWriteHandleSerializeCreate,
+                    (const void* WriteHandle, void* serializePointer));
+        MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
+        MOCK_METHOD(size_t,
+                    OnWriteHandleSizeOfSerializeDataUpdate,
+                    (const void* WriteHandle, size_t offset, size_t size));
+        MOCK_METHOD(size_t,
+                    OnWriteHandleSerializeDataUpdate,
+                    (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
+        MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/src/dawn/wire/client/Device.cpp b/src/dawn/wire/client/Device.cpp
new file mode 100644
index 0000000..9378bd5
--- /dev/null
+++ b/src/dawn/wire/client/Device.cpp
@@ -0,0 +1,325 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Device.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/wire/client/ApiObjects_autogen.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/ObjectAllocator.h"
+
+namespace dawn::wire::client {
+
+    Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
+        : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
+#if defined(DAWN_ENABLE_ASSERTS)
+        mErrorCallback = [](WGPUErrorType, char const*, void*) {
+            static bool calledOnce = false;
+            if (!calledOnce) {
+                calledOnce = true;
+                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+                                      "probably not intended. If you really want to ignore errors "
+                                      "and suppress this message, set the callback to null.";
+            }
+        };
+
+        mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+            static bool calledOnce = false;
+            if (!calledOnce) {
+                calledOnce = true;
+                dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+                                      "intended. If you really want to ignore device lost "
+                                      "and suppress this message, set the callback to null.";
+            }
+        };
+#endif  // DAWN_ENABLE_ASSERTS
+    }
+
+    Device::~Device() {
+        mErrorScopes.CloseAll([](ErrorScopeData* request) {
+            request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
+                              request->userdata);
+        });
+
+        mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+            if (request->createComputePipelineAsyncCallback != nullptr) {
+                request->createComputePipelineAsyncCallback(
+                    WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                    "Device destroyed before callback", request->userdata);
+            } else {
+                ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+                request->createRenderPipelineAsyncCallback(
+                    WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                    "Device destroyed before callback", request->userdata);
+            }
+        });
+    }
+
+    bool Device::GetLimits(WGPUSupportedLimits* limits) const {
+        return mLimitsAndFeatures.GetLimits(limits);
+    }
+
+    bool Device::HasFeature(WGPUFeatureName feature) const {
+        return mLimitsAndFeatures.HasFeature(feature);
+    }
+
+    size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
+        return mLimitsAndFeatures.EnumerateFeatures(features);
+    }
+
+    void Device::SetLimits(const WGPUSupportedLimits* limits) {
+        return mLimitsAndFeatures.SetLimits(limits);
+    }
+
+    void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+        return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+    }
+
+    void Device::HandleError(WGPUErrorType errorType, const char* message) {
+        if (mErrorCallback) {
+            mErrorCallback(errorType, message, mErrorUserdata);
+        }
+    }
+
+    void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
+        if (mLoggingCallback) {
+            // Since client always run in single thread, calling the callback directly is safe.
+            mLoggingCallback(loggingType, message, mLoggingUserdata);
+        }
+    }
+
+    void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
+        if (mDeviceLostCallback && !mDidRunLostCallback) {
+            mDidRunLostCallback = true;
+            mDeviceLostCallback(reason, message, mDeviceLostUserdata);
+        }
+    }
+
+    void Device::CancelCallbacksForDisconnect() {
+        mErrorScopes.CloseAll([](ErrorScopeData* request) {
+            request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
+        });
+
+        mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+            if (request->createComputePipelineAsyncCallback != nullptr) {
+                request->createComputePipelineAsyncCallback(
+                    WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
+                    request->userdata);
+            } else {
+                ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+                request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+                                                           nullptr, "Device lost",
+                                                           request->userdata);
+            }
+        });
+    }
+
+    std::weak_ptr<bool> Device::GetAliveWeakPtr() {
+        return mIsAlive;
+    }
+
+    void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
+        mErrorCallback = errorCallback;
+        mErrorUserdata = errorUserdata;
+    }
+
+    void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
+        mLoggingCallback = callback;
+        mLoggingUserdata = userdata;
+    }
+
+    void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
+        mDeviceLostCallback = callback;
+        mDeviceLostUserdata = userdata;
+    }
+
+    bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
+        // TODO(crbug.com/dawn/1324) Replace bool return with void when users are updated.
+        if (client->IsDisconnected()) {
+            callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
+            return true;
+        }
+
+        uint64_t serial = mErrorScopes.Add({callback, userdata});
+        DevicePopErrorScopeCmd cmd;
+        cmd.deviceId = this->id;
+        cmd.requestSerial = serial;
+        client->SerializeCommand(cmd);
+        return true;
+    }
+
+    bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
+                                         WGPUErrorType type,
+                                         const char* message) {
+        switch (type) {
+            case WGPUErrorType_NoError:
+            case WGPUErrorType_Validation:
+            case WGPUErrorType_OutOfMemory:
+            case WGPUErrorType_Unknown:
+            case WGPUErrorType_DeviceLost:
+                break;
+            default:
+                return false;
+        }
+
+        ErrorScopeData request;
+        if (!mErrorScopes.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        request.callback(type, message, request.userdata);
+        return true;
+    }
+
+    void Device::InjectError(WGPUErrorType type, const char* message) {
+        DeviceInjectErrorCmd cmd;
+        cmd.self = ToAPI(this);
+        cmd.type = type;
+        cmd.message = message;
+        client->SerializeCommand(cmd);
+    }
+
+    WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
+        return Buffer::Create(this, descriptor);
+    }
+
+    WGPUBuffer Device::CreateErrorBuffer() {
+        return Buffer::CreateError(this);
+    }
+
+    WGPUQueue Device::GetQueue() {
+        // The queue is lazily created because if a Device is created by
+        // Reserve/Inject, we cannot send the GetQueue message until
+        // it has been injected on the Server. It cannot happen immediately
+        // on construction.
+        if (mQueue == nullptr) {
+            // Get the primary queue for this device.
+            auto* allocation = client->QueueAllocator().New(client);
+            mQueue = allocation->object.get();
+
+            DeviceGetQueueCmd cmd;
+            cmd.self = ToAPI(this);
+            cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+
+            client->SerializeCommand(cmd);
+        }
+
+        mQueue->refcount++;
+        return ToAPI(mQueue);
+    }
+
+    void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+                                            WGPUCreateComputePipelineAsyncCallback callback,
+                                            void* userdata) {
+        if (client->IsDisconnected()) {
+            return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                            "GPU device disconnected", userdata);
+        }
+
+        auto* allocation = client->ComputePipelineAllocator().New(client);
+
+        CreatePipelineAsyncRequest request = {};
+        request.createComputePipelineAsyncCallback = callback;
+        request.userdata = userdata;
+        request.pipelineObjectID = allocation->object->id;
+
+        uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+        DeviceCreateComputePipelineAsyncCmd cmd;
+        cmd.deviceId = this->id;
+        cmd.descriptor = descriptor;
+        cmd.requestSerial = serial;
+        cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+                                                      WGPUCreatePipelineAsyncStatus status,
+                                                      const char* message) {
+        CreatePipelineAsyncRequest request;
+        if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        auto pipelineAllocation =
+            client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
+
+        // If the return status is a failure we should give a null pipeline to the callback and
+        // free the allocation.
+        if (status != WGPUCreatePipelineAsyncStatus_Success) {
+            client->ComputePipelineAllocator().Free(pipelineAllocation);
+            request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
+            return true;
+        }
+
+        WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
+        request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+        return true;
+    }
+
+    void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+                                           WGPUCreateRenderPipelineAsyncCallback callback,
+                                           void* userdata) {
+        if (client->IsDisconnected()) {
+            return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                            "GPU device disconnected", userdata);
+        }
+
+        auto* allocation = client->RenderPipelineAllocator().New(client);
+
+        CreatePipelineAsyncRequest request = {};
+        request.createRenderPipelineAsyncCallback = callback;
+        request.userdata = userdata;
+        request.pipelineObjectID = allocation->object->id;
+
+        uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+        DeviceCreateRenderPipelineAsyncCmd cmd;
+        cmd.deviceId = this->id;
+        cmd.descriptor = descriptor;
+        cmd.requestSerial = serial;
+        cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+                                                     WGPUCreatePipelineAsyncStatus status,
+                                                     const char* message) {
+        CreatePipelineAsyncRequest request;
+        if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        auto pipelineAllocation =
+            client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+
+        // If the return status is a failure we should give a null pipeline to the callback and
+        // free the allocation.
+        if (status != WGPUCreatePipelineAsyncStatus_Success) {
+            client->RenderPipelineAllocator().Free(pipelineAllocation);
+            request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
+            return true;
+        }
+
+        WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
+        request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+        return true;
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Device.h b/src/dawn/wire/client/Device.h
new file mode 100644
index 0000000..56e2af2
--- /dev/null
+++ b/src/dawn/wire/client/Device.h
@@ -0,0 +1,111 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_DEVICE_H_
+#define DAWNWIRE_CLIENT_DEVICE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/ApiObjects_autogen.h"
+#include "dawn/wire/client/LimitsAndFeatures.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+#include <memory>
+
+namespace dawn::wire::client {
+
+    class Client;
+    class Queue;
+
+    class Device final : public ObjectBase {
+      public:
+        Device(Client* client, uint32_t refcount, uint32_t id);
+        ~Device();
+
+        void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
+        void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
+        void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
+        void InjectError(WGPUErrorType type, const char* message);
+        void PushErrorScope(WGPUErrorFilter filter);
+        bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
+        WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
+        WGPUBuffer CreateErrorBuffer();
+        WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
+        void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+                                        WGPUCreateComputePipelineAsyncCallback callback,
+                                        void* userdata);
+        void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+                                       WGPUCreateRenderPipelineAsyncCallback callback,
+                                       void* userdata);
+
+        void HandleError(WGPUErrorType errorType, const char* message);
+        void HandleLogging(WGPULoggingType loggingType, const char* message);
+        void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
+        bool OnPopErrorScopeCallback(uint64_t requestSerial,
+                                     WGPUErrorType type,
+                                     const char* message);
+        bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+                                                  WGPUCreatePipelineAsyncStatus status,
+                                                  const char* message);
+        bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+                                                 WGPUCreatePipelineAsyncStatus status,
+                                                 const char* message);
+
+        bool GetLimits(WGPUSupportedLimits* limits) const;
+        bool HasFeature(WGPUFeatureName feature) const;
+        size_t EnumerateFeatures(WGPUFeatureName* features) const;
+        void SetLimits(const WGPUSupportedLimits* limits);
+        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+
+        WGPUQueue GetQueue();
+
+        void CancelCallbacksForDisconnect() override;
+
+        std::weak_ptr<bool> GetAliveWeakPtr();
+
+      private:
+        LimitsAndFeatures mLimitsAndFeatures;
+        struct ErrorScopeData {
+            WGPUErrorCallback callback = nullptr;
+            void* userdata = nullptr;
+        };
+        RequestTracker<ErrorScopeData> mErrorScopes;
+
+        struct CreatePipelineAsyncRequest {
+            WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
+            WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
+            void* userdata = nullptr;
+            ObjectId pipelineObjectID;
+        };
+        RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
+
+        WGPUErrorCallback mErrorCallback = nullptr;
+        WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
+        WGPULoggingCallback mLoggingCallback = nullptr;
+        bool mDidRunLostCallback = false;
+        void* mErrorUserdata = nullptr;
+        void* mDeviceLostUserdata = nullptr;
+        void* mLoggingUserdata = nullptr;
+
+        Queue* mQueue = nullptr;
+
+        std::shared_ptr<bool> mIsAlive;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_DEVICE_H_
diff --git a/src/dawn/wire/client/Instance.cpp b/src/dawn/wire/client/Instance.cpp
new file mode 100644
index 0000000..de27d47
--- /dev/null
+++ b/src/dawn/wire/client/Instance.cpp
@@ -0,0 +1,101 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Instance.h"
+
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+    Instance::~Instance() {
+        mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+            request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
+                              "Instance destroyed before callback", request->userdata);
+        });
+    }
+
+    void Instance::CancelCallbacksForDisconnect() {
+        mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+            request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
+                              request->userdata);
+        });
+    }
+
+    void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
+                                  WGPURequestAdapterCallback callback,
+                                  void* userdata) {
+        if (client->IsDisconnected()) {
+            callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
+            return;
+        }
+
+        auto* allocation = client->AdapterAllocator().New(client);
+        uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
+
+        InstanceRequestAdapterCmd cmd;
+        cmd.instanceId = this->id;
+        cmd.requestSerial = serial;
+        cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+        cmd.options = options;
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
+                                                  uint64_t requestSerial,
+                                                  WGPURequestAdapterStatus status,
+                                                  const char* message,
+                                                  const WGPUAdapterProperties* properties,
+                                                  const WGPUSupportedLimits* limits,
+                                                  uint32_t featuresCount,
+                                                  const WGPUFeatureName* features) {
+        // May have been deleted or recreated so this isn't an error.
+        if (instance == nullptr) {
+            return true;
+        }
+        return instance->OnRequestAdapterCallback(requestSerial, status, message, properties,
+                                                  limits, featuresCount, features);
+    }
+
+    bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
+                                            WGPURequestAdapterStatus status,
+                                            const char* message,
+                                            const WGPUAdapterProperties* properties,
+                                            const WGPUSupportedLimits* limits,
+                                            uint32_t featuresCount,
+                                            const WGPUFeatureName* features) {
+        RequestAdapterData request;
+        if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
+
+        // If the return status is a failure we should give a null adapter to the callback and
+        // free the allocation.
+        if (status != WGPURequestAdapterStatus_Success) {
+            client->AdapterAllocator().Free(adapter);
+            request.callback(status, nullptr, message, request.userdata);
+            return true;
+        }
+
+        adapter->SetProperties(properties);
+        adapter->SetLimits(limits);
+        adapter->SetFeatures(features, featuresCount);
+
+        request.callback(status, ToAPI(adapter), message, request.userdata);
+        return true;
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Instance.h b/src/dawn/wire/client/Instance.h
new file mode 100644
index 0000000..9c4cfc9
--- /dev/null
+++ b/src/dawn/wire/client/Instance.h
@@ -0,0 +1,56 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_INSTANCE_H_
+#define DAWNWIRE_CLIENT_INSTANCE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+    class Instance final : public ObjectBase {
+      public:
+        using ObjectBase::ObjectBase;
+
+        ~Instance();
+        void CancelCallbacksForDisconnect() override;
+
+        void RequestAdapter(const WGPURequestAdapterOptions* options,
+                            WGPURequestAdapterCallback callback,
+                            void* userdata);
+        bool OnRequestAdapterCallback(uint64_t requestSerial,
+                                      WGPURequestAdapterStatus status,
+                                      const char* message,
+                                      const WGPUAdapterProperties* properties,
+                                      const WGPUSupportedLimits* limits,
+                                      uint32_t featuresCount,
+                                      const WGPUFeatureName* features);
+
+      private:
+        struct RequestAdapterData {
+            WGPURequestAdapterCallback callback = nullptr;
+            ObjectId adapterObjectId;
+            void* userdata = nullptr;
+        };
+        RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_INSTANCE_H_
diff --git a/src/dawn/wire/client/LimitsAndFeatures.cpp b/src/dawn/wire/client/LimitsAndFeatures.cpp
new file mode 100644
index 0000000..a2c753c
--- /dev/null
+++ b/src/dawn/wire/client/LimitsAndFeatures.cpp
@@ -0,0 +1,63 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/LimitsAndFeatures.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire::client {
+
+    bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
+        ASSERT(limits != nullptr);
+        if (limits->nextInChain != nullptr) {
+            return false;
+        }
+        *limits = mLimits;
+        return true;
+    }
+
+    bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
+        return mFeatures.count(feature) != 0;
+    }
+
+    size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
+        if (features != nullptr) {
+            for (WGPUFeatureName f : mFeatures) {
+                *features = f;
+                ++features;
+            }
+        }
+        return mFeatures.size();
+    }
+
+    void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
+        ASSERT(limits != nullptr);
+        mLimits = *limits;
+        mLimits.nextInChain = nullptr;
+    }
+
+    void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+        ASSERT(features != nullptr || featuresCount == 0);
+        for (uint32_t i = 0; i < featuresCount; ++i) {
+            // Filter out features that the server supports, but the client does not.
+            // (Could be different versions)
+            if (!IsFeatureSupported(features[i])) {
+                continue;
+            }
+            mFeatures.insert(features[i]);
+        }
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/LimitsAndFeatures.h b/src/dawn/wire/client/LimitsAndFeatures.h
new file mode 100644
index 0000000..e6c07e5
--- /dev/null
+++ b/src/dawn/wire/client/LimitsAndFeatures.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
+#define DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
+
+#include <dawn/webgpu.h>
+
+#include <unordered_set>
+
+namespace dawn::wire::client {
+
+    class LimitsAndFeatures {
+      public:
+        bool GetLimits(WGPUSupportedLimits* limits) const;
+        bool HasFeature(WGPUFeatureName feature) const;
+        size_t EnumerateFeatures(WGPUFeatureName* features) const;
+
+        void SetLimits(const WGPUSupportedLimits* limits);
+        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+
+      private:
+        WGPUSupportedLimits mLimits;
+        std::unordered_set<WGPUFeatureName> mFeatures;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
diff --git a/src/dawn/wire/client/ObjectAllocator.h b/src/dawn/wire/client/ObjectAllocator.h
new file mode 100644
index 0000000..b14e91e
--- /dev/null
+++ b/src/dawn/wire/client/ObjectAllocator.h
@@ -0,0 +1,110 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
+#define DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+namespace dawn::wire::client {
+
+    template <typename T>
+    class ObjectAllocator {
+      public:
+        struct ObjectAndSerial {
+            ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
+                : object(std::move(object)), generation(generation) {
+            }
+            std::unique_ptr<T> object;
+            uint32_t generation;
+        };
+
+        ObjectAllocator() {
+            // ID 0 is nullptr
+            mObjects.emplace_back(nullptr, 0);
+        }
+
+        template <typename Client>
+        ObjectAndSerial* New(Client* client) {
+            uint32_t id = GetNewId();
+            auto object = std::make_unique<T>(client, 1, id);
+            client->TrackObject(object.get());
+
+            if (id >= mObjects.size()) {
+                ASSERT(id == mObjects.size());
+                mObjects.emplace_back(std::move(object), 0);
+            } else {
+                ASSERT(mObjects[id].object == nullptr);
+
+                mObjects[id].generation++;
+                // The generation should never overflow. We don't recycle ObjectIds that would
+                // overflow their next generation.
+                ASSERT(mObjects[id].generation != 0);
+
+                mObjects[id].object = std::move(object);
+            }
+
+            return &mObjects[id];
+        }
+        void Free(T* obj) {
+            ASSERT(obj->IsInList());
+            if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
+                // Only recycle this ObjectId if the generation won't overflow on the next
+                // allocation.
+                FreeId(obj->id);
+            }
+            mObjects[obj->id].object = nullptr;
+        }
+
+        T* GetObject(uint32_t id) {
+            if (id >= mObjects.size()) {
+                return nullptr;
+            }
+            return mObjects[id].object.get();
+        }
+
+        uint32_t GetGeneration(uint32_t id) {
+            if (id >= mObjects.size()) {
+                return 0;
+            }
+            return mObjects[id].generation;
+        }
+
+      private:
+        uint32_t GetNewId() {
+            if (mFreeIds.empty()) {
+                return mCurrentId++;
+            }
+            uint32_t id = mFreeIds.back();
+            mFreeIds.pop_back();
+            return id;
+        }
+        void FreeId(uint32_t id) {
+            mFreeIds.push_back(id);
+        }
+
+        // 0 is an ID reserved to represent nullptr
+        uint32_t mCurrentId = 1;
+        std::vector<uint32_t> mFreeIds;
+        std::vector<ObjectAndSerial> mObjects;
+    };
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
diff --git a/src/dawn/wire/client/ObjectBase.h b/src/dawn/wire/client/ObjectBase.h
new file mode 100644
index 0000000..8a4c04d
--- /dev/null
+++ b/src/dawn/wire/client/ObjectBase.h
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_OBJECTBASE_H_
+#define DAWNWIRE_CLIENT_OBJECTBASE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/wire/ObjectType_autogen.h"
+
+namespace dawn::wire::client {
+
+    class Client;
+
+    // All objects on the client side have:
+    //  - A pointer to the Client to get where to serialize commands
+    //  - The external reference count
+    //  - An ID that is used to refer to this object when talking with the server side
+    //  - A next/prev pointer. They are part of a linked list of objects of the same type.
+    struct ObjectBase : public LinkNode<ObjectBase> {
+        ObjectBase(Client* client, uint32_t refcount, uint32_t id)
+            : client(client), refcount(refcount), id(id) {
+        }
+
+        ~ObjectBase() {
+            RemoveFromList();
+        }
+
+        virtual void CancelCallbacksForDisconnect() {
+        }
+
+        Client* const client;
+        uint32_t refcount;
+        const uint32_t id;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_OBJECTBASE_H_
diff --git a/src/dawn/wire/client/Queue.cpp b/src/dawn/wire/client/Queue.cpp
new file mode 100644
index 0000000..37d97d7
--- /dev/null
+++ b/src/dawn/wire/client/Queue.cpp
@@ -0,0 +1,98 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Queue.h"
+
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+    Queue::~Queue() {
+        ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
+    }
+
+    bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
+        OnWorkDoneData request;
+        if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        request.callback(status, request.userdata);
+        return true;
+    }
+
+    void Queue::OnSubmittedWorkDone(uint64_t signalValue,
+                                    WGPUQueueWorkDoneCallback callback,
+                                    void* userdata) {
+        if (client->IsDisconnected()) {
+            callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
+            return;
+        }
+
+        uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
+
+        QueueOnSubmittedWorkDoneCmd cmd;
+        cmd.queueId = this->id;
+        cmd.signalValue = signalValue;
+        cmd.requestSerial = serial;
+
+        client->SerializeCommand(cmd);
+    }
+
+    void Queue::WriteBuffer(WGPUBuffer cBuffer,
+                            uint64_t bufferOffset,
+                            const void* data,
+                            size_t size) {
+        Buffer* buffer = FromAPI(cBuffer);
+
+        QueueWriteBufferCmd cmd;
+        cmd.queueId = id;
+        cmd.bufferId = buffer->id;
+        cmd.bufferOffset = bufferOffset;
+        cmd.data = static_cast<const uint8_t*>(data);
+        cmd.size = size;
+
+        client->SerializeCommand(cmd);
+    }
+
+    void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
+                             const void* data,
+                             size_t dataSize,
+                             const WGPUTextureDataLayout* dataLayout,
+                             const WGPUExtent3D* writeSize) {
+        QueueWriteTextureCmd cmd;
+        cmd.queueId = id;
+        cmd.destination = destination;
+        cmd.data = static_cast<const uint8_t*>(data);
+        cmd.dataSize = dataSize;
+        cmd.dataLayout = dataLayout;
+        cmd.writeSize = writeSize;
+
+        client->SerializeCommand(cmd);
+    }
+
+    void Queue::CancelCallbacksForDisconnect() {
+        ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
+    }
+
+    void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
+        mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
+            if (request->callback != nullptr) {
+                request->callback(status, request->userdata);
+            }
+        });
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Queue.h b/src/dawn/wire/client/Queue.h
new file mode 100644
index 0000000..d205387
--- /dev/null
+++ b/src/dawn/wire/client/Queue.h
@@ -0,0 +1,57 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_QUEUE_H_
+#define DAWNWIRE_CLIENT_QUEUE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+    class Queue final : public ObjectBase {
+      public:
+        using ObjectBase::ObjectBase;
+        ~Queue();
+
+        bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
+
+        // Dawn API
+        void OnSubmittedWorkDone(uint64_t signalValue,
+                                 WGPUQueueWorkDoneCallback callback,
+                                 void* userdata);
+        void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
+        void WriteTexture(const WGPUImageCopyTexture* destination,
+                          const void* data,
+                          size_t dataSize,
+                          const WGPUTextureDataLayout* dataLayout,
+                          const WGPUExtent3D* writeSize);
+
+      private:
+        void CancelCallbacksForDisconnect() override;
+        void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
+
+        struct OnWorkDoneData {
+            WGPUQueueWorkDoneCallback callback = nullptr;
+            void* userdata = nullptr;
+        };
+        RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_QUEUE_H_
diff --git a/src/dawn/wire/client/RequestTracker.h b/src/dawn/wire/client/RequestTracker.h
new file mode 100644
index 0000000..c57ae92
--- /dev/null
+++ b/src/dawn/wire/client/RequestTracker.h
@@ -0,0 +1,82 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+#define DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/NonCopyable.h"
+
+#include <cstdint>
+#include <map>
+
+namespace dawn::wire::client {
+
+    class Device;
+    class MemoryTransferService;
+
+    template <typename Request>
+    class RequestTracker : NonCopyable {
+      public:
+        ~RequestTracker() {
+            ASSERT(mRequests.empty());
+        }
+
+        uint64_t Add(Request&& request) {
+            mSerial++;
+            mRequests.emplace(mSerial, request);
+            return mSerial;
+        }
+
+        bool Acquire(uint64_t serial, Request* request) {
+            auto it = mRequests.find(serial);
+            if (it == mRequests.end()) {
+                return false;
+            }
+            *request = std::move(it->second);
+            mRequests.erase(it);
+            return true;
+        }
+
+        template <typename CloseFunc>
+        void CloseAll(CloseFunc&& closeFunc) {
+            // Call closeFunc on all requests while handling reentrancy where the callback of some
+            // requests may add some additional requests. We guarantee all callbacks for requests
+            // are called exactly onces, so keep closing new requests if the first batch added more.
+            // It is fine to loop infinitely here if that's what the application makes use do.
+            while (!mRequests.empty()) {
+                // Move mRequests to a local variable so that further reentrant modifications of
+                // mRequests don't invalidate the iterators.
+                auto allRequests = std::move(mRequests);
+                for (auto& [_, request] : allRequests) {
+                    closeFunc(&request);
+                }
+            }
+        }
+
+        template <typename F>
+        void ForAll(F&& f) {
+            for (auto& [_, request] : mRequests) {
+                f(&request);
+            }
+        }
+
+      private:
+        uint64_t mSerial;
+        std::map<uint64_t, Request> mRequests;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_REQUESTTRACKER_H_
diff --git a/src/dawn/wire/client/ShaderModule.cpp b/src/dawn/wire/client/ShaderModule.cpp
new file mode 100644
index 0000000..ce25ef7
--- /dev/null
+++ b/src/dawn/wire/client/ShaderModule.cpp
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/ShaderModule.h"
+
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+    ShaderModule::~ShaderModule() {
+        ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
+    }
+
+    void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
+        if (client->IsDisconnected()) {
+            callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
+            return;
+        }
+
+        uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
+
+        ShaderModuleGetCompilationInfoCmd cmd;
+        cmd.shaderModuleId = this->id;
+        cmd.requestSerial = serial;
+
+        client->SerializeCommand(cmd);
+    }
+
+    bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
+                                                  WGPUCompilationInfoRequestStatus status,
+                                                  const WGPUCompilationInfo* info) {
+        CompilationInfoRequest request;
+        if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
+            return false;
+        }
+
+        request.callback(status, info, request.userdata);
+        return true;
+    }
+
+    void ShaderModule::CancelCallbacksForDisconnect() {
+        ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
+    }
+
+    void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
+        mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
+            if (request->callback != nullptr) {
+                request->callback(status, nullptr, request->userdata);
+            }
+        });
+    }
+
+}  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ShaderModule.h b/src/dawn/wire/client/ShaderModule.h
new file mode 100644
index 0000000..fba76b4
--- /dev/null
+++ b/src/dawn/wire/client/ShaderModule.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_SHADER_MODULE_H_
+#define DAWNWIRE_CLIENT_SHADER_MODULE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+    class ShaderModule final : public ObjectBase {
+      public:
+        using ObjectBase::ObjectBase;
+        ~ShaderModule();
+
+        void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
+        bool GetCompilationInfoCallback(uint64_t requestSerial,
+                                        WGPUCompilationInfoRequestStatus status,
+                                        const WGPUCompilationInfo* info);
+
+      private:
+        void CancelCallbacksForDisconnect() override;
+        void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
+
+        struct CompilationInfoRequest {
+            WGPUCompilationInfoCallback callback = nullptr;
+            void* userdata = nullptr;
+        };
+        RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+    };
+
+}  // namespace dawn::wire::client
+
+#endif  // DAWNWIRE_CLIENT_SHADER_MODULE_H_
diff --git a/src/dawn/wire/server/ObjectStorage.h b/src/dawn/wire/server/ObjectStorage.h
new file mode 100644
index 0000000..b9cba68
--- /dev/null
+++ b/src/dawn/wire/server/ObjectStorage.h
@@ -0,0 +1,228 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_OBJECTSTORAGE_H_
+#define DAWNWIRE_SERVER_OBJECTSTORAGE_H_
+
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <map>
+#include <unordered_set>
+
+namespace dawn::wire::server {
+
+    struct DeviceInfo {
+        std::unordered_set<uint64_t> childObjectTypesAndIds;
+        Server* server;
+        ObjectHandle self;
+    };
+
+    // Whether this object has been allocated, or reserved for async object creation.
+    // Used by the KnownObjects queries
+    enum class AllocationState : uint32_t {
+        Free,
+        Reserved,
+        Allocated,
+    };
+
+    template <typename T>
+    struct ObjectDataBase {
+        // The backend-provided handle and generation to this object.
+        T handle;
+        uint32_t generation = 0;
+
+        AllocationState state;
+
+        // This points to an allocation that is owned by the device.
+        DeviceInfo* deviceInfo = nullptr;
+    };
+
+    // Stores what the backend knows about the type.
+    template <typename T>
+    struct ObjectData : public ObjectDataBase<T> {};
+
+    enum class BufferMapWriteState { Unmapped, Mapped, MapError };
+
+    template <>
+    struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
+        // TODO(enga): Use a tagged pointer to save space.
+        std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
+        std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
+        BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
+        WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
+        // Indicate if writeHandle needs to be destroyed on unmap
+        bool mappedAtCreation = false;
+    };
+
+    // Pack the ObjectType and ObjectId as a single value for storage in
+    // an std::unordered_set. This lets us avoid providing our own hash and
+    // equality comparison operators.
+    inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
+        static_assert(sizeof(ObjectType) * 8 <= 32);
+        static_assert(sizeof(ObjectId) * 8 <= 32);
+        return (static_cast<uint64_t>(type) << 32) + id;
+    }
+
+    inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
+        ObjectType type = static_cast<ObjectType>(payload >> 32);
+        ObjectId id = payload & 0xFFFFFFFF;
+        return std::make_pair(type, id);
+    }
+
+    template <>
+    struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
+        // Store |info| as a separate allocation so that its address does not move.
+        // The pointer to |info| is stored in device child objects.
+        std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
+    };
+
+    // Keeps track of the mapping between client IDs and backend objects.
+    template <typename T>
+    class KnownObjects {
+      public:
+        using Data = ObjectData<T>;
+
+        KnownObjects() {
+            // Reserve ID 0 so that it can be used to represent nullptr for optional object values
+            // in the wire format. However don't tag it as allocated so that it is an error to ask
+            // KnownObjects for ID 0.
+            Data reservation;
+            reservation.handle = nullptr;
+            reservation.state = AllocationState::Free;
+            mKnown.push_back(std::move(reservation));
+        }
+
+        // Get a backend objects for a given client ID.
+        // Returns nullptr if the ID hasn't previously been allocated.
+        const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
+            if (id >= mKnown.size()) {
+                return nullptr;
+            }
+
+            const Data* data = &mKnown[id];
+
+            if (data->state != expected) {
+                return nullptr;
+            }
+
+            return data;
+        }
+        Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
+            if (id >= mKnown.size()) {
+                return nullptr;
+            }
+
+            Data* data = &mKnown[id];
+
+            if (data->state != expected) {
+                return nullptr;
+            }
+
+            return data;
+        }
+
+        // Allocates the data for a given ID and returns it.
+        // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
+        // reserved for nullptr). Invalidates all the Data*
+        Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
+            if (id == 0 || id > mKnown.size()) {
+                return nullptr;
+            }
+
+            Data data;
+            data.state = state;
+            data.handle = nullptr;
+
+            if (id >= mKnown.size()) {
+                mKnown.push_back(std::move(data));
+                return &mKnown.back();
+            }
+
+            if (mKnown[id].state != AllocationState::Free) {
+                return nullptr;
+            }
+
+            mKnown[id] = std::move(data);
+            return &mKnown[id];
+        }
+
+        // Marks an ID as deallocated
+        void Free(uint32_t id) {
+            ASSERT(id < mKnown.size());
+            mKnown[id].state = AllocationState::Free;
+        }
+
+        std::vector<T> AcquireAllHandles() {
+            std::vector<T> objects;
+            for (Data& data : mKnown) {
+                if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+                    objects.push_back(data.handle);
+                    data.state = AllocationState::Free;
+                    data.handle = nullptr;
+                }
+            }
+
+            return objects;
+        }
+
+        std::vector<T> GetAllHandles() {
+            std::vector<T> objects;
+            for (Data& data : mKnown) {
+                if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+                    objects.push_back(data.handle);
+                }
+            }
+
+            return objects;
+        }
+
+      private:
+        std::vector<Data> mKnown;
+    };
+
+    // ObjectIds are lost in deserialization. Store the ids of deserialized
+    // objects here so they can be used in command handlers. This is useful
+    // for creating ReturnWireCmds which contain client ids
+    template <typename T>
+    class ObjectIdLookupTable {
+      public:
+        void Store(T key, ObjectId id) {
+            mTable[key] = id;
+        }
+
+        // Return the cached ObjectId, or 0 (null handle)
+        ObjectId Get(T key) const {
+            const auto it = mTable.find(key);
+            if (it != mTable.end()) {
+                return it->second;
+            }
+            return 0;
+        }
+
+        void Remove(T key) {
+            auto it = mTable.find(key);
+            if (it != mTable.end()) {
+                mTable.erase(it);
+            }
+        }
+
+      private:
+        std::map<T, ObjectId> mTable;
+    };
+
+}  // namespace dawn::wire::server
+
+#endif  // DAWNWIRE_SERVER_OBJECTSTORAGE_H_
diff --git a/src/dawn/wire/server/Server.cpp b/src/dawn/wire/server/Server.cpp
new file mode 100644
index 0000000..b0d4ba2
--- /dev/null
+++ b/src/dawn/wire/server/Server.cpp
@@ -0,0 +1,213 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+#include "dawn/wire/WireServer.h"
+
+namespace dawn::wire::server {
+
+    Server::Server(const DawnProcTable& procs,
+                   CommandSerializer* serializer,
+                   MemoryTransferService* memoryTransferService)
+        : mSerializer(serializer),
+          mProcs(procs),
+          mMemoryTransferService(memoryTransferService),
+          mIsAlive(std::make_shared<bool>(true)) {
+        if (mMemoryTransferService == nullptr) {
+            // If a MemoryTransferService is not provided, fallback to inline memory.
+            mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+            mMemoryTransferService = mOwnedMemoryTransferService.get();
+        }
+    }
+
+    Server::~Server() {
+        // Un-set the error and lost callbacks since we cannot forward them
+        // after the server has been destroyed.
+        for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
+            ClearDeviceCallbacks(device);
+        }
+        DestroyAllObjects(mProcs);
+    }
+
+    bool Server::InjectTexture(WGPUTexture texture,
+                               uint32_t id,
+                               uint32_t generation,
+                               uint32_t deviceId,
+                               uint32_t deviceGeneration) {
+        ASSERT(texture != nullptr);
+        ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr || device->generation != deviceGeneration) {
+            return false;
+        }
+
+        ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
+        if (data == nullptr) {
+            return false;
+        }
+
+        data->handle = texture;
+        data->generation = generation;
+        data->state = AllocationState::Allocated;
+        data->deviceInfo = device->info.get();
+
+        if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
+            return false;
+        }
+
+        // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+        // message from the client. Add a reference to counterbalance the eventual release.
+        mProcs.textureReference(texture);
+
+        return true;
+    }
+
+    bool Server::InjectSwapChain(WGPUSwapChain swapchain,
+                                 uint32_t id,
+                                 uint32_t generation,
+                                 uint32_t deviceId,
+                                 uint32_t deviceGeneration) {
+        ASSERT(swapchain != nullptr);
+        ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr || device->generation != deviceGeneration) {
+            return false;
+        }
+
+        ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
+        if (data == nullptr) {
+            return false;
+        }
+
+        data->handle = swapchain;
+        data->generation = generation;
+        data->state = AllocationState::Allocated;
+        data->deviceInfo = device->info.get();
+
+        if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
+            return false;
+        }
+
+        // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+        // message from the client. Add a reference to counterbalance the eventual release.
+        mProcs.swapChainReference(swapchain);
+
+        return true;
+    }
+
+    bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+        ASSERT(device != nullptr);
+        ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
+        if (data == nullptr) {
+            return false;
+        }
+
+        data->handle = device;
+        data->generation = generation;
+        data->state = AllocationState::Allocated;
+        data->info->server = this;
+        data->info->self = ObjectHandle{id, generation};
+
+        // The device is externally owned so it shouldn't be destroyed when we receive a destroy
+        // message from the client. Add a reference to counterbalance the eventual release.
+        mProcs.deviceReference(device);
+
+        // Set callbacks to forward errors to the client.
+        SetForwardingDeviceCallbacks(data);
+        return true;
+    }
+
+    bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+        ASSERT(instance != nullptr);
+        ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
+        if (data == nullptr) {
+            return false;
+        }
+
+        data->handle = instance;
+        data->generation = generation;
+        data->state = AllocationState::Allocated;
+
+        // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
+        // message from the client. Add a reference to counterbalance the eventual release.
+        mProcs.instanceReference(instance);
+
+        return true;
+    }
+
+    WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
+        ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
+        if (data == nullptr || data->generation != generation) {
+            return nullptr;
+        }
+        return data->handle;
+    }
+
+    void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
+        // Note: these callbacks are manually inlined here since they do not acquire and
+        // free their userdata. Also unlike other callbacks, these are cleared and unset when
+        // the server is destroyed, so we don't need to check if the server is still alive
+        // inside them.
+        mProcs.deviceSetUncapturedErrorCallback(
+            deviceObject->handle,
+            [](WGPUErrorType type, const char* message, void* userdata) {
+                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+                info->server->OnUncapturedError(info->self, type, message);
+            },
+            deviceObject->info.get());
+        // Set callback to post warning and other infomation to client.
+        // Almost the same with UncapturedError.
+        mProcs.deviceSetLoggingCallback(
+            deviceObject->handle,
+            [](WGPULoggingType type, const char* message, void* userdata) {
+                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+                info->server->OnLogging(info->self, type, message);
+            },
+            deviceObject->info.get());
+        mProcs.deviceSetDeviceLostCallback(
+            deviceObject->handle,
+            [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
+                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+                info->server->OnDeviceLost(info->self, reason, message);
+            },
+            deviceObject->info.get());
+    }
+
+    void Server::ClearDeviceCallbacks(WGPUDevice device) {
+        // Un-set the error and lost callbacks since we cannot forward them
+        // after the server has been destroyed.
+        mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
+        mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
+        mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
+    }
+
+    bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+        auto [_, inserted] = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
+        if (!inserted) {
+            // An object of this type and id already exists.
+            return false;
+        }
+        return true;
+    }
+
+    bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+        auto& children = info->childObjectTypesAndIds;
+        auto it = children.find(PackObjectTypeAndId(type, id));
+        if (it == children.end()) {
+            // An object of this type and id was already deleted.
+            return false;
+        }
+        children.erase(it);
+        return true;
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/Server.h b/src/dawn/wire/server/Server.h
new file mode 100644
index 0000000..9c7a02a
--- /dev/null
+++ b/src/dawn/wire/server/Server.h
@@ -0,0 +1,243 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_SERVER_H_
+#define DAWNWIRE_SERVER_SERVER_H_
+
+#include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/server/ServerBase_autogen.h"
+
+namespace dawn::wire::server {
+
+    class Server;
+    class MemoryTransferService;
+
+    // CallbackUserdata and its derived classes are intended to be created by
+    // Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
+    // callbacks.
+    // It contains a pointer back to the Server so that the callback can call the
+    // Server to perform operations like serialization, and it contains a weak pointer
+    // |serverIsAlive|. If the weak pointer has expired, it means the server has
+    // been destroyed and the callback must not use the Server pointer.
+    // To assist with checking |serverIsAlive| and lifetime management of the userdata,
+    // |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
+    // return early if |serverIsAlive| has expired, and then forward the arguments
+    // to userdata->server->MyCallbackHandler.
+    //
+    // Example Usage:
+    //
+    // struct MyUserdata : CallbackUserdata { uint32_t foo; };
+    //
+    // auto userdata = MakeUserdata<MyUserdata>();
+    // userdata->foo = 2;
+    //
+    // callMyCallbackHandler(
+    //      ForwardToServer<&Server::MyCallbackHandler>,
+    //      userdata.release());
+    //
+    // void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
+    struct CallbackUserdata {
+        Server* const server;
+        std::weak_ptr<bool> const serverIsAlive;
+
+        CallbackUserdata() = delete;
+        CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
+            : server(server), serverIsAlive(serverIsAlive) {
+        }
+    };
+
+    template <auto F>
+    struct ForwardToServerHelper {
+        template <typename _>
+        struct ExtractedTypes;
+
+        // An internal structure used to unpack the various types that compose the type of F
+        template <typename Return, typename Class, typename Userdata, typename... Args>
+        struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
+            using UntypedCallback = Return (*)(Args..., void*);
+            static Return Callback(Args... args, void* userdata) {
+                // Acquire the userdata, and cast it to UserdataT.
+                std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
+                if (data->serverIsAlive.expired()) {
+                    // Do nothing if the server has already been destroyed.
+                    return;
+                }
+                // Forward the arguments and the typed userdata to the Server:: member function.
+                (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
+            }
+        };
+
+        static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
+            return ExtractedTypes<decltype(F)>::Callback;
+        }
+    };
+
+    template <auto F>
+    constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
+
+    struct MapUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle buffer;
+        WGPUBuffer bufferObj;
+        uint64_t requestSerial;
+        uint64_t offset;
+        uint64_t size;
+        WGPUMapModeFlags mode;
+    };
+
+    struct ErrorScopeUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle device;
+        uint64_t requestSerial;
+    };
+
+    struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle shaderModule;
+        uint64_t requestSerial;
+    };
+
+    struct QueueWorkDoneUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle queue;
+        uint64_t requestSerial;
+    };
+
+    struct CreatePipelineAsyncUserData : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle device;
+        uint64_t requestSerial;
+        ObjectId pipelineObjectID;
+    };
+
+    struct RequestAdapterUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle instance;
+        uint64_t requestSerial;
+        ObjectId adapterObjectId;
+    };
+
+    struct RequestDeviceUserdata : CallbackUserdata {
+        using CallbackUserdata::CallbackUserdata;
+
+        ObjectHandle adapter;
+        uint64_t requestSerial;
+        ObjectId deviceObjectId;
+    };
+
+    class Server : public ServerBase {
+      public:
+        Server(const DawnProcTable& procs,
+               CommandSerializer* serializer,
+               MemoryTransferService* memoryTransferService);
+        ~Server() override;
+
+        // ChunkedCommandHandler implementation
+        const volatile char* HandleCommandsImpl(const volatile char* commands,
+                                                size_t size) override;
+
+        bool InjectTexture(WGPUTexture texture,
+                           uint32_t id,
+                           uint32_t generation,
+                           uint32_t deviceId,
+                           uint32_t deviceGeneration);
+
+        bool InjectSwapChain(WGPUSwapChain swapchain,
+                             uint32_t id,
+                             uint32_t generation,
+                             uint32_t deviceId,
+                             uint32_t deviceGeneration);
+
+        bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+        bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+        WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+
+        template <typename T,
+                  typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
+        std::unique_ptr<T> MakeUserdata() {
+            return std::unique_ptr<T>(new T(this, mIsAlive));
+        }
+
+      private:
+        template <typename Cmd>
+        void SerializeCommand(const Cmd& cmd) {
+            mSerializer.SerializeCommand(cmd);
+        }
+
+        template <typename Cmd, typename ExtraSizeSerializeFn>
+        void SerializeCommand(const Cmd& cmd,
+                              size_t extraSize,
+                              ExtraSizeSerializeFn&& SerializeExtraSize) {
+            mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
+        }
+
+        void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
+        void ClearDeviceCallbacks(WGPUDevice device);
+
+        // Error callbacks
+        void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
+        void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
+        void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
+        void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+                                   WGPUErrorType type,
+                                   const char* message);
+        void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
+        void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
+        void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+                                                  WGPUCreatePipelineAsyncStatus status,
+                                                  WGPUComputePipeline pipeline,
+                                                  const char* message);
+        void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+                                                 WGPUCreatePipelineAsyncStatus status,
+                                                 WGPURenderPipeline pipeline,
+                                                 const char* message);
+        void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
+                                              WGPUCompilationInfoRequestStatus status,
+                                              const WGPUCompilationInfo* info);
+        void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
+                                      WGPURequestAdapterStatus status,
+                                      WGPUAdapter adapter,
+                                      const char* message);
+        void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
+                                     WGPURequestDeviceStatus status,
+                                     WGPUDevice device,
+                                     const char* message);
+
+#include "dawn/wire/server/ServerPrototypes_autogen.inc"
+
+        WireDeserializeAllocator mAllocator;
+        ChunkedCommandSerializer mSerializer;
+        DawnProcTable mProcs;
+        std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+        MemoryTransferService* mMemoryTransferService = nullptr;
+
+        std::shared_ptr<bool> mIsAlive;
+    };
+
+    bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+    bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+
+    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+
+}  // namespace dawn::wire::server
+
+#endif  // DAWNWIRE_SERVER_SERVER_H_
diff --git a/src/dawn/wire/server/ServerAdapter.cpp b/src/dawn/wire/server/ServerAdapter.cpp
new file mode 100644
index 0000000..0518455
--- /dev/null
+++ b/src/dawn/wire/server/ServerAdapter.cpp
@@ -0,0 +1,110 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire::server {
+
+    bool Server::DoAdapterRequestDevice(ObjectId adapterId,
+                                        uint64_t requestSerial,
+                                        ObjectHandle deviceHandle,
+                                        const WGPUDeviceDescriptor* descriptor) {
+        auto* adapter = AdapterObjects().Get(adapterId);
+        if (adapter == nullptr) {
+            return false;
+        }
+
+        auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
+        if (resultData == nullptr) {
+            return false;
+        }
+
+        resultData->generation = deviceHandle.generation;
+
+        auto userdata = MakeUserdata<RequestDeviceUserdata>();
+        userdata->adapter = ObjectHandle{adapterId, adapter->generation};
+        userdata->requestSerial = requestSerial;
+        userdata->deviceObjectId = deviceHandle.id;
+
+        mProcs.adapterRequestDevice(adapter->handle, descriptor,
+                                    ForwardToServer<&Server::OnRequestDeviceCallback>,
+                                    userdata.release());
+        return true;
+    }
+
+    void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
+                                         WGPURequestDeviceStatus status,
+                                         WGPUDevice device,
+                                         const char* message) {
+        auto* deviceObject = DeviceObjects().Get(data->deviceObjectId, AllocationState::Reserved);
+        // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+        // they move from Reserved to Allocated, or if they are destroyed here.
+        ASSERT(deviceObject != nullptr);
+
+        ReturnAdapterRequestDeviceCallbackCmd cmd = {};
+        cmd.adapter = data->adapter;
+        cmd.requestSerial = data->requestSerial;
+        cmd.status = status;
+        cmd.message = message;
+
+        if (status != WGPURequestDeviceStatus_Success) {
+            // Free the ObjectId which will make it unusable.
+            DeviceObjects().Free(data->deviceObjectId);
+            ASSERT(device == nullptr);
+            SerializeCommand(cmd);
+            return;
+        }
+
+        std::vector<WGPUFeatureName> features;
+
+        size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
+        features.resize(featuresCount);
+        mProcs.deviceEnumerateFeatures(device, features.data());
+
+        // The client should only be able to request supported features, so all enumerated
+        // features that were enabled must also be supported by the wire.
+        // Note: We fail the callback here, instead of immediately upon receiving
+        // the request to preserve callback ordering.
+        for (WGPUFeatureName f : features) {
+            if (!IsFeatureSupported(f)) {
+                // Release the device.
+                mProcs.deviceRelease(device);
+                // Free the ObjectId which will make it unusable.
+                DeviceObjects().Free(data->deviceObjectId);
+
+                cmd.status = WGPURequestDeviceStatus_Error;
+                cmd.message = "Requested feature not supported.";
+                SerializeCommand(cmd);
+                return;
+            }
+        }
+
+        cmd.featuresCount = features.size();
+        cmd.features = features.data();
+
+        WGPUSupportedLimits limits = {};
+        mProcs.deviceGetLimits(device, &limits);
+        cmd.limits = &limits;
+
+        // Assign the handle and allocated status if the device is created successfully.
+        deviceObject->state = AllocationState::Allocated;
+        deviceObject->handle = device;
+        SetForwardingDeviceCallbacks(deviceObject);
+
+        SerializeCommand(cmd);
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerBuffer.cpp b/src/dawn/wire/server/ServerBuffer.cpp
new file mode 100644
index 0000000..44664da
--- /dev/null
+++ b/src/dawn/wire/server/ServerBuffer.cpp
@@ -0,0 +1,282 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/BufferConsumer_impl.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/server/Server.h"
+
+#include <memory>
+
+namespace dawn::wire::server {
+
+    bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
+        auto* buffer = BufferObjects().Get(cmd.selfId);
+        DAWN_ASSERT(buffer != nullptr);
+
+        if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
+            // This indicates the writeHandle is for mappedAtCreation only. Destroy on unmap
+            // writeHandle could have possibly been deleted if buffer is already destroyed so we
+            // don't assert it's non-null
+            buffer->writeHandle = nullptr;
+        }
+
+        buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+        return true;
+    }
+
+    bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
+        // Destroying a buffer does an implicit unmapping.
+        auto* buffer = BufferObjects().Get(cmd.selfId);
+        DAWN_ASSERT(buffer != nullptr);
+
+        // The buffer was destroyed. Clear the Read/WriteHandle.
+        buffer->readHandle = nullptr;
+        buffer->writeHandle = nullptr;
+        buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+        return true;
+    }
+
+    bool Server::DoBufferMapAsync(ObjectId bufferId,
+                                  uint64_t requestSerial,
+                                  WGPUMapModeFlags mode,
+                                  uint64_t offset64,
+                                  uint64_t size64) {
+        // These requests are just forwarded to the buffer, with userdata containing what the
+        // client will require in the return command.
+
+        // The null object isn't valid as `self`
+        if (bufferId == 0) {
+            return false;
+        }
+
+        auto* buffer = BufferObjects().Get(bufferId);
+        if (buffer == nullptr) {
+            return false;
+        }
+
+        std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
+        userdata->buffer = ObjectHandle{bufferId, buffer->generation};
+        userdata->bufferObj = buffer->handle;
+        userdata->requestSerial = requestSerial;
+        userdata->mode = mode;
+
+        // Make sure that the deserialized offset and size are no larger than
+        // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
+        // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
+        // client does the default size computation, we should always have a valid actual size here
+        // in server. All other invalid actual size can be caught by dawn native side validation.
+        if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
+            OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
+            return true;
+        }
+
+        size_t offset = static_cast<size_t>(offset64);
+        size_t size = static_cast<size_t>(size64);
+
+        userdata->offset = offset;
+        userdata->size = size;
+
+        mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
+                              ForwardToServer<&Server::OnBufferMapAsyncCallback>,
+                              userdata.release());
+
+        return true;
+    }
+
+    bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
+                                      const WGPUBufferDescriptor* descriptor,
+                                      ObjectHandle bufferResult,
+                                      uint64_t readHandleCreateInfoLength,
+                                      const uint8_t* readHandleCreateInfo,
+                                      uint64_t writeHandleCreateInfoLength,
+                                      const uint8_t* writeHandleCreateInfo) {
+        auto* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr) {
+            return false;
+        }
+
+        // Create and register the buffer object.
+        auto* resultData = BufferObjects().Allocate(bufferResult.id);
+        if (resultData == nullptr) {
+            return false;
+        }
+        resultData->generation = bufferResult.generation;
+        resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
+        resultData->deviceInfo = device->info.get();
+        resultData->usage = descriptor->usage;
+        resultData->mappedAtCreation = descriptor->mappedAtCreation;
+        if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
+            return false;
+        }
+
+        // isReadMode and isWriteMode could be true at the same time if usage contains
+        // WGPUMapMode_Read and buffer is mappedAtCreation
+        bool isReadMode = descriptor->usage & WGPUMapMode_Read;
+        bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
+
+        // This is the size of data deserialized from the command stream to create the read/write
+        // handle, which must be CPU-addressable.
+        if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+            writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+            readHandleCreateInfoLength >
+                std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
+            return false;
+        }
+
+        if (isWriteMode) {
+            MemoryTransferService::WriteHandle* writeHandle = nullptr;
+            // Deserialize metadata produced from the client to create a companion server handle.
+            if (!mMemoryTransferService->DeserializeWriteHandle(
+                    writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
+                    &writeHandle)) {
+                return false;
+            }
+            ASSERT(writeHandle != nullptr);
+            resultData->writeHandle.reset(writeHandle);
+            writeHandle->SetDataLength(descriptor->size);
+
+            if (descriptor->mappedAtCreation) {
+                void* mapping =
+                    mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
+                if (mapping == nullptr) {
+                    // A zero mapping is used to indicate an allocation error of an error buffer.
+                    // This is a valid case and isn't fatal. Remember the buffer is an error so as
+                    // to skip subsequent mapping operations.
+                    resultData->mapWriteState = BufferMapWriteState::MapError;
+                    return true;
+                }
+                ASSERT(mapping != nullptr);
+                writeHandle->SetTarget(mapping);
+
+                resultData->mapWriteState = BufferMapWriteState::Mapped;
+            }
+        }
+
+        if (isReadMode) {
+            MemoryTransferService::ReadHandle* readHandle = nullptr;
+            // Deserialize metadata produced from the client to create a companion server handle.
+            if (!mMemoryTransferService->DeserializeReadHandle(
+                    readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
+                    &readHandle)) {
+                return false;
+            }
+            ASSERT(readHandle != nullptr);
+
+            resultData->readHandle.reset(readHandle);
+        }
+
+        return true;
+    }
+
+    bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
+                                          uint64_t writeDataUpdateInfoLength,
+                                          const uint8_t* writeDataUpdateInfo,
+                                          uint64_t offset,
+                                          uint64_t size) {
+        // The null object isn't valid as `self`
+        if (bufferId == 0) {
+            return false;
+        }
+
+        if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
+            offset > std::numeric_limits<size_t>::max() ||
+            size > std::numeric_limits<size_t>::max()) {
+            return false;
+        }
+
+        auto* buffer = BufferObjects().Get(bufferId);
+        if (buffer == nullptr) {
+            return false;
+        }
+        switch (buffer->mapWriteState) {
+            case BufferMapWriteState::Unmapped:
+                return false;
+            case BufferMapWriteState::MapError:
+                // The buffer is mapped but there was an error allocating mapped data.
+                // Do not perform the memcpy.
+                return true;
+            case BufferMapWriteState::Mapped:
+                break;
+        }
+        if (!buffer->writeHandle) {
+            // This check is performed after the check for the MapError state. It is permissible
+            // to Unmap and attempt to update mapped data of an error buffer.
+            return false;
+        }
+
+        // Deserialize the flush info and flush updated data from the handle into the target
+        // of the handle. The target is set via WriteHandle::SetTarget.
+        return buffer->writeHandle->DeserializeDataUpdate(
+            writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
+            static_cast<size_t>(offset), static_cast<size_t>(size));
+    }
+
+    void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
+        // Skip sending the callback if the buffer has already been destroyed.
+        auto* bufferData = BufferObjects().Get(data->buffer.id);
+        if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
+            return;
+        }
+
+        bool isRead = data->mode & WGPUMapMode_Read;
+        bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
+
+        ReturnBufferMapAsyncCallbackCmd cmd;
+        cmd.buffer = data->buffer;
+        cmd.requestSerial = data->requestSerial;
+        cmd.status = status;
+        cmd.readDataUpdateInfoLength = 0;
+        cmd.readDataUpdateInfo = nullptr;
+
+        const void* readData = nullptr;
+        if (isSuccess) {
+            if (isRead) {
+                // Get the serialization size of the message to initialize ReadHandle data.
+                readData =
+                    mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
+                cmd.readDataUpdateInfoLength =
+                    bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
+            } else {
+                ASSERT(data->mode & WGPUMapMode_Write);
+                // The in-flight map request returned successfully.
+                bufferData->mapWriteState = BufferMapWriteState::Mapped;
+                // Set the target of the WriteHandle to the mapped buffer data.
+                // writeHandle Target always refers to the buffer base address.
+                // but we call getMappedRange exactly with the range of data that is potentially
+                // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
+                // subset of the buffer is actually mapped) in case the implementation does some
+                // range tracking.
+                bufferData->writeHandle->SetTarget(
+                    static_cast<uint8_t*>(
+                        mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size)) -
+                    data->offset);
+            }
+        }
+
+        SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+            if (isSuccess && isRead) {
+                char* readHandleBuffer;
+                WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
+                // The in-flight map request returned successfully.
+                bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
+                                                            readHandleBuffer);
+            }
+            return WireResult::Success;
+        });
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerDevice.cpp b/src/dawn/wire/server/ServerDevice.cpp
new file mode 100644
index 0000000..45fb6b8
--- /dev/null
+++ b/src/dawn/wire/server/ServerDevice.cpp
@@ -0,0 +1,200 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+    namespace {
+
+        template <ObjectType objectType, typename Pipeline>
+        void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
+                                                           WGPUCreatePipelineAsyncStatus status,
+                                                           Pipeline pipeline,
+                                                           CreatePipelineAsyncUserData* data) {
+            // May be null if the device was destroyed. Device destruction destroys child
+            // objects on the wire.
+            auto* pipelineObject =
+                knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
+            // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+            // they move from Reserved to Allocated, or if they are destroyed here.
+            ASSERT(pipelineObject != nullptr);
+
+            if (status == WGPUCreatePipelineAsyncStatus_Success) {
+                // Assign the handle and allocated status if the pipeline is created successfully.
+                pipelineObject->state = AllocationState::Allocated;
+                pipelineObject->handle = pipeline;
+
+                // This should be impossible to fail. It would require a command to be sent that
+                // creates a duplicate ObjectId, which would fail validation.
+                bool success = TrackDeviceChild(pipelineObject->deviceInfo, objectType,
+                                                data->pipelineObjectID);
+                ASSERT(success);
+            } else {
+                // Otherwise, free the ObjectId which will make it unusable.
+                knownObjects->Free(data->pipelineObjectID);
+                ASSERT(pipeline == nullptr);
+            }
+        }
+
+    }  // anonymous namespace
+
+    void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
+        ReturnDeviceUncapturedErrorCallbackCmd cmd;
+        cmd.device = device;
+        cmd.type = type;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+    void Server::OnDeviceLost(ObjectHandle device,
+                              WGPUDeviceLostReason reason,
+                              const char* message) {
+        ReturnDeviceLostCallbackCmd cmd;
+        cmd.device = device;
+        cmd.reason = reason;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+    void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
+        ReturnDeviceLoggingCallbackCmd cmd;
+        cmd.device = device;
+        cmd.type = type;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+    bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
+        auto* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr) {
+            return false;
+        }
+
+        auto userdata = MakeUserdata<ErrorScopeUserdata>();
+        userdata->requestSerial = requestSerial;
+        userdata->device = ObjectHandle{deviceId, device->generation};
+
+        mProcs.devicePopErrorScope(device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>,
+                                   userdata.release());
+        return true;
+    }
+
+    void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+                                       WGPUErrorType type,
+                                       const char* message) {
+        ReturnDevicePopErrorScopeCallbackCmd cmd;
+        cmd.device = userdata->device;
+        cmd.requestSerial = userdata->requestSerial;
+        cmd.type = type;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+    bool Server::DoDeviceCreateComputePipelineAsync(
+        ObjectId deviceId,
+        uint64_t requestSerial,
+        ObjectHandle pipelineObjectHandle,
+        const WGPUComputePipelineDescriptor* descriptor) {
+        auto* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr) {
+            return false;
+        }
+
+        auto* resultData =
+            ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+        if (resultData == nullptr) {
+            return false;
+        }
+
+        resultData->generation = pipelineObjectHandle.generation;
+        resultData->deviceInfo = device->info.get();
+
+        auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+        userdata->device = ObjectHandle{deviceId, device->generation};
+        userdata->requestSerial = requestSerial;
+        userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+        mProcs.deviceCreateComputePipelineAsync(
+            device->handle, descriptor,
+            ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>, userdata.release());
+        return true;
+    }
+
+    void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+                                                      WGPUCreatePipelineAsyncStatus status,
+                                                      WGPUComputePipeline pipeline,
+                                                      const char* message) {
+        HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
+            &ComputePipelineObjects(), status, pipeline, data);
+
+        ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
+        cmd.device = data->device;
+        cmd.status = status;
+        cmd.requestSerial = data->requestSerial;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+    bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
+                                                   uint64_t requestSerial,
+                                                   ObjectHandle pipelineObjectHandle,
+                                                   const WGPURenderPipelineDescriptor* descriptor) {
+        auto* device = DeviceObjects().Get(deviceId);
+        if (device == nullptr) {
+            return false;
+        }
+
+        auto* resultData =
+            RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+        if (resultData == nullptr) {
+            return false;
+        }
+
+        resultData->generation = pipelineObjectHandle.generation;
+        resultData->deviceInfo = device->info.get();
+
+        auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+        userdata->device = ObjectHandle{deviceId, device->generation};
+        userdata->requestSerial = requestSerial;
+        userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+        mProcs.deviceCreateRenderPipelineAsync(
+            device->handle, descriptor,
+            ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>, userdata.release());
+        return true;
+    }
+
+    void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+                                                     WGPUCreatePipelineAsyncStatus status,
+                                                     WGPURenderPipeline pipeline,
+                                                     const char* message) {
+        HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
+            &RenderPipelineObjects(), status, pipeline, data);
+
+        ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
+        cmd.device = data->device;
+        cmd.status = status;
+        cmd.requestSerial = data->requestSerial;
+        cmd.message = message;
+
+        SerializeCommand(cmd);
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp b/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
new file mode 100644
index 0000000..0e6b30a
--- /dev/null
+++ b/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
@@ -0,0 +1,94 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+#include <cstring>
+
+namespace dawn::wire::server {
+
+    class InlineMemoryTransferService : public MemoryTransferService {
+      public:
+        class ReadHandleImpl : public ReadHandle {
+          public:
+            ReadHandleImpl() {
+            }
+            ~ReadHandleImpl() override = default;
+
+            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+                return size;
+            }
+
+            void SerializeDataUpdate(const void* data,
+                                     size_t offset,
+                                     size_t size,
+                                     void* serializePointer) override {
+                if (size > 0) {
+                    ASSERT(data != nullptr);
+                    ASSERT(serializePointer != nullptr);
+                    memcpy(serializePointer, data, size);
+                }
+            }
+        };
+
+        class WriteHandleImpl : public WriteHandle {
+          public:
+            WriteHandleImpl() {
+            }
+            ~WriteHandleImpl() override = default;
+
+            bool DeserializeDataUpdate(const void* deserializePointer,
+                                       size_t deserializeSize,
+                                       size_t offset,
+                                       size_t size) override {
+                if (deserializeSize != size || mTargetData == nullptr ||
+                    deserializePointer == nullptr) {
+                    return false;
+                }
+                if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
+                    return false;
+                }
+                memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
+                return true;
+            }
+        };
+
+        InlineMemoryTransferService() {
+        }
+        ~InlineMemoryTransferService() override = default;
+
+        bool DeserializeReadHandle(const void* deserializePointer,
+                                   size_t deserializeSize,
+                                   ReadHandle** readHandle) override {
+            ASSERT(readHandle != nullptr);
+            *readHandle = new ReadHandleImpl();
+            return true;
+        }
+
+        bool DeserializeWriteHandle(const void* deserializePointer,
+                                    size_t deserializeSize,
+                                    WriteHandle** writeHandle) override {
+            ASSERT(writeHandle != nullptr);
+            *writeHandle = new WriteHandleImpl();
+            return true;
+        }
+    };
+
+    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+        return std::make_unique<InlineMemoryTransferService>();
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerInstance.cpp b/src/dawn/wire/server/ServerInstance.cpp
new file mode 100644
index 0000000..d39dade
--- /dev/null
+++ b/src/dawn/wire/server/ServerInstance.cpp
@@ -0,0 +1,100 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include "dawn/wire/SupportedFeatures.h"
+
+#include <algorithm>
+
+namespace dawn::wire::server {
+
+    bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
+                                          uint64_t requestSerial,
+                                          ObjectHandle adapterHandle,
+                                          const WGPURequestAdapterOptions* options) {
+        auto* instance = InstanceObjects().Get(instanceId);
+        if (instance == nullptr) {
+            return false;
+        }
+
+        auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
+        if (resultData == nullptr) {
+            return false;
+        }
+
+        resultData->generation = adapterHandle.generation;
+
+        auto userdata = MakeUserdata<RequestAdapterUserdata>();
+        userdata->instance = ObjectHandle{instanceId, instance->generation};
+        userdata->requestSerial = requestSerial;
+        userdata->adapterObjectId = adapterHandle.id;
+
+        mProcs.instanceRequestAdapter(instance->handle, options,
+                                      ForwardToServer<&Server::OnRequestAdapterCallback>,
+                                      userdata.release());
+        return true;
+    }
+
+    void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
+                                          WGPURequestAdapterStatus status,
+                                          WGPUAdapter adapter,
+                                          const char* message) {
+        auto* adapterObject =
+            AdapterObjects().Get(data->adapterObjectId, AllocationState::Reserved);
+        // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+        // they move from Reserved to Allocated, or if they are destroyed here.
+        ASSERT(adapterObject != nullptr);
+
+        ReturnInstanceRequestAdapterCallbackCmd cmd = {};
+        cmd.instance = data->instance;
+        cmd.requestSerial = data->requestSerial;
+        cmd.status = status;
+        cmd.message = message;
+
+        if (status != WGPURequestAdapterStatus_Success) {
+            // Free the ObjectId which will make it unusable.
+            AdapterObjects().Free(data->adapterObjectId);
+            ASSERT(adapter == nullptr);
+            SerializeCommand(cmd);
+            return;
+        }
+
+        WGPUAdapterProperties properties = {};
+        WGPUSupportedLimits limits = {};
+        std::vector<WGPUFeatureName> features;
+
+        // Assign the handle and allocated status if the adapter is created successfully.
+        adapterObject->state = AllocationState::Allocated;
+        adapterObject->handle = adapter;
+
+        size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
+        features.resize(featuresCount);
+        mProcs.adapterEnumerateFeatures(adapter, features.data());
+
+        // Hide features the wire cannot support.
+        auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
+
+        cmd.featuresCount = std::distance(features.begin(), it);
+        cmd.features = features.data();
+
+        mProcs.adapterGetProperties(adapter, &properties);
+        mProcs.adapterGetLimits(adapter, &limits);
+        cmd.properties = &properties;
+        cmd.limits = &limits;
+
+        SerializeCommand(cmd);
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp b/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
new file mode 100644
index 0000000..758c344
--- /dev/null
+++ b/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
@@ -0,0 +1,91 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/ServerMemoryTransferService_mock.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::wire::server {
+
+    MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+        : ReadHandle(), mService(service) {
+    }
+
+    MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+        mService->OnReadHandleDestroy(this);
+    }
+
+    size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
+                                                                                size_t size) {
+        return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
+    }
+
+    void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
+                                                                        size_t offset,
+                                                                        size_t size,
+                                                                        void* serializePointer) {
+        mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
+    }
+
+    MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+        : WriteHandle(), mService(service) {
+    }
+
+    MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+        mService->OnWriteHandleDestroy(this);
+    }
+
+    const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
+        return reinterpret_cast<const uint32_t*>(mTargetData);
+    }
+
+    bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
+        const void* deserializePointer,
+        size_t deserializeSize,
+        size_t offset,
+        size_t size) {
+        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+        return mService->OnWriteHandleDeserializeDataUpdate(
+            this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
+            size);
+    }
+
+    MockMemoryTransferService::MockMemoryTransferService() = default;
+    MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+    bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
+                                                          size_t deserializeSize,
+                                                          ReadHandle** readHandle) {
+        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+        return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+                                       deserializeSize, readHandle);
+    }
+
+    bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
+                                                           size_t deserializeSize,
+                                                           WriteHandle** writeHandle) {
+        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+        return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+                                        deserializeSize, writeHandle);
+    }
+
+    MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+        return new MockReadHandle(this);
+    }
+
+    MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+        return new MockWriteHandle(this);
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerMemoryTransferService_mock.h b/src/dawn/wire/server/ServerMemoryTransferService_mock.h
new file mode 100644
index 0000000..faea0ed
--- /dev/null
+++ b/src/dawn/wire/server/ServerMemoryTransferService_mock.h
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
+#define DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
+
+#include <gmock/gmock.h>
+
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+    class MockMemoryTransferService : public MemoryTransferService {
+      public:
+        class MockReadHandle : public ReadHandle {
+          public:
+            MockReadHandle(MockMemoryTransferService* service);
+            ~MockReadHandle() override;
+
+            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+            void SerializeDataUpdate(const void* data,
+                                     size_t offset,
+                                     size_t size,
+                                     void* serializePointer) override;
+
+          private:
+            MockMemoryTransferService* mService;
+        };
+
+        class MockWriteHandle : public WriteHandle {
+          public:
+            MockWriteHandle(MockMemoryTransferService* service);
+            ~MockWriteHandle() override;
+
+            bool DeserializeDataUpdate(const void* deserializePointer,
+                                       size_t deserializeSize,
+                                       size_t offset,
+                                       size_t size) override;
+
+            const uint32_t* GetData() const;
+
+          private:
+            MockMemoryTransferService* mService;
+        };
+
+        MockMemoryTransferService();
+        ~MockMemoryTransferService() override;
+
+        bool DeserializeReadHandle(const void* deserializePointer,
+                                   size_t deserializeSize,
+                                   ReadHandle** readHandle) override;
+
+        bool DeserializeWriteHandle(const void* deserializePointer,
+                                    size_t deserializeSize,
+                                    WriteHandle** writeHandle) override;
+
+        MockReadHandle* NewReadHandle();
+        MockWriteHandle* NewWriteHandle();
+
+        MOCK_METHOD(bool,
+                    OnDeserializeReadHandle,
+                    (const uint32_t* deserializePointer,
+                     size_t deserializeSize,
+                     ReadHandle** readHandle));
+
+        MOCK_METHOD(bool,
+                    OnDeserializeWriteHandle,
+                    (const uint32_t* deserializePointer,
+                     size_t deserializeSize,
+                     WriteHandle** writeHandle));
+
+        MOCK_METHOD(size_t,
+                    OnReadHandleSizeOfSerializeDataUpdate,
+                    (const ReadHandle* readHandle, size_t offset, size_t size));
+        MOCK_METHOD(void,
+                    OnReadHandleSerializeDataUpdate,
+                    (const ReadHandle* readHandle,
+                     const void* data,
+                     size_t offset,
+                     size_t size,
+                     void* serializePointer));
+        MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
+
+        MOCK_METHOD(bool,
+                    OnWriteHandleDeserializeDataUpdate,
+                    (const WriteHandle* writeHandle,
+                     const uint32_t* deserializePointer,
+                     size_t deserializeSize,
+                     size_t offset,
+                     size_t size));
+        MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+    };
+
+}  // namespace dawn::wire::server
+
+#endif  // DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/src/dawn/wire/server/ServerQueue.cpp b/src/dawn/wire/server/ServerQueue.cpp
new file mode 100644
index 0000000..68e1ea8
--- /dev/null
+++ b/src/dawn/wire/server/ServerQueue.cpp
@@ -0,0 +1,103 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+    void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
+        ReturnQueueWorkDoneCallbackCmd cmd;
+        cmd.queue = data->queue;
+        cmd.requestSerial = data->requestSerial;
+        cmd.status = status;
+
+        SerializeCommand(cmd);
+    }
+
+    bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
+                                            uint64_t signalValue,
+                                            uint64_t requestSerial) {
+        auto* queue = QueueObjects().Get(queueId);
+        if (queue == nullptr) {
+            return false;
+        }
+
+        auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
+        userdata->queue = ObjectHandle{queueId, queue->generation};
+        userdata->requestSerial = requestSerial;
+
+        mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
+                                        ForwardToServer<&Server::OnQueueWorkDone>,
+                                        userdata.release());
+        return true;
+    }
+
+    bool Server::DoQueueWriteBuffer(ObjectId queueId,
+                                    ObjectId bufferId,
+                                    uint64_t bufferOffset,
+                                    const uint8_t* data,
+                                    uint64_t size) {
+        // The null object isn't valid as `self` or `buffer` so we can combine the check with the
+        // check that the ID is valid.
+        auto* queue = QueueObjects().Get(queueId);
+        auto* buffer = BufferObjects().Get(bufferId);
+        if (queue == nullptr || buffer == nullptr) {
+            return false;
+        }
+
+        if (size > std::numeric_limits<size_t>::max()) {
+            auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+            if (device == nullptr) {
+                return false;
+            }
+            return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
+                                       WGPUErrorType_OutOfMemory,
+                                       "Data size too large for write texture.");
+        }
+
+        mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
+                                static_cast<size_t>(size));
+        return true;
+    }
+
+    bool Server::DoQueueWriteTexture(ObjectId queueId,
+                                     const WGPUImageCopyTexture* destination,
+                                     const uint8_t* data,
+                                     uint64_t dataSize,
+                                     const WGPUTextureDataLayout* dataLayout,
+                                     const WGPUExtent3D* writeSize) {
+        // The null object isn't valid as `self` so we can combine the check with the
+        // check that the ID is valid.
+        auto* queue = QueueObjects().Get(queueId);
+        if (queue == nullptr) {
+            return false;
+        }
+
+        if (dataSize > std::numeric_limits<size_t>::max()) {
+            auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+            if (device == nullptr) {
+                return false;
+            }
+            return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
+                                       WGPUErrorType_OutOfMemory,
+                                       "Data size too large for write texture.");
+        }
+
+        mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
+                                 dataLayout, writeSize);
+        return true;
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerShaderModule.cpp b/src/dawn/wire/server/ServerShaderModule.cpp
new file mode 100644
index 0000000..8785e0d
--- /dev/null
+++ b/src/dawn/wire/server/ServerShaderModule.cpp
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include <memory>
+
+namespace dawn::wire::server {
+
+    bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
+        auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
+        if (shaderModule == nullptr) {
+            return false;
+        }
+
+        auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
+        userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
+        userdata->requestSerial = requestSerial;
+
+        mProcs.shaderModuleGetCompilationInfo(
+            shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
+            userdata.release());
+        return true;
+    }
+
+    void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
+                                                  WGPUCompilationInfoRequestStatus status,
+                                                  const WGPUCompilationInfo* info) {
+        ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
+        cmd.shaderModule = data->shaderModule;
+        cmd.requestSerial = data->requestSerial;
+        cmd.status = status;
+        cmd.info = info;
+
+        SerializeCommand(cmd);
+    }
+
+}  // namespace dawn::wire::server
diff --git a/src/dawn_native/BUILD.gn b/src/dawn_native/BUILD.gn
new file mode 100644
index 0000000..e811642
--- /dev/null
+++ b/src/dawn_native/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_native") {
+  public_deps = [ "../dawn/native" ]
+}
+group("webgpu_dawn") {
+  public_deps = [ "../dawn/native:webgpu_dawn" ]
+}
diff --git a/src/dawn_platform/BUILD.gn b/src/dawn_platform/BUILD.gn
new file mode 100644
index 0000000..92df854
--- /dev/null
+++ b/src/dawn_platform/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_platform") {
+  public_deps = [ "../dawn/platform" ]
+}
diff --git a/src/dawn_wire/BUILD.gn b/src/dawn_wire/BUILD.gn
new file mode 100644
index 0000000..13a9a90
--- /dev/null
+++ b/src/dawn_wire/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_wire") {
+  public_deps = [ "../dawn/wire" ]
+}
+group("dawn_wire_headers") {
+  public_deps = [ "../dawn/wire:headers" ]
+}
diff --git a/src/fuzzers/dawn/BUILD.gn b/src/fuzzers/dawn/BUILD.gn
new file mode 100644
index 0000000..a2756c7
--- /dev/null
+++ b/src/fuzzers/dawn/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_fuzzers") {
+  public_deps = [ "../../dawn/fuzzers" ]
+  testonly = true
+}
diff --git a/src/include/README.md b/src/include/README.md
new file mode 100644
index 0000000..8111f62
--- /dev/null
+++ b/src/include/README.md
@@ -0,0 +1,4 @@
+# TODO(crbug.com/dawn/1275) - remove this directory
+
+This directory exists as a temporary include directory while migrating Chromium source to the new Dawn include layout.
+All headers in the subdirectories simply #include to the new location for the header.
diff --git a/src/include/dawn/EnumClassBitmasks.h b/src/include/dawn/EnumClassBitmasks.h
new file mode 100644
index 0000000..143e980
--- /dev/null
+++ b/src/include/dawn/EnumClassBitmasks.h
@@ -0,0 +1 @@
+#include <dawn/EnumClassBitmasks.h>
diff --git a/src/include/dawn/dawn_proc.h b/src/include/dawn/dawn_proc.h
new file mode 100644
index 0000000..f706d9f
--- /dev/null
+++ b/src/include/dawn/dawn_proc.h
@@ -0,0 +1 @@
+#include <dawn/dawn_proc.h>
diff --git a/src/include/dawn/dawn_thread_dispatch_proc.h b/src/include/dawn/dawn_thread_dispatch_proc.h
new file mode 100644
index 0000000..318acb13
--- /dev/null
+++ b/src/include/dawn/dawn_thread_dispatch_proc.h
@@ -0,0 +1 @@
+#include <dawn/dawn_thread_dispatch_proc.h>
diff --git a/src/include/dawn/dawn_wsi.h b/src/include/dawn/dawn_wsi.h
new file mode 100644
index 0000000..0ee9aab
--- /dev/null
+++ b/src/include/dawn/dawn_wsi.h
@@ -0,0 +1 @@
+#include <dawn/dawn_wsi.h>
diff --git a/src/include/dawn/webgpu.h b/src/include/dawn/webgpu.h
new file mode 100644
index 0000000..a410df1
--- /dev/null
+++ b/src/include/dawn/webgpu.h
@@ -0,0 +1 @@
+#include <dawn/webgpu.h>
diff --git a/src/include/dawn/webgpu_cpp.h b/src/include/dawn/webgpu_cpp.h
new file mode 100644
index 0000000..8904453
--- /dev/null
+++ b/src/include/dawn/webgpu_cpp.h
@@ -0,0 +1 @@
+#include "dawn/webgpu_cpp.h"
diff --git a/src/include/dawn_native/D3D12Backend.h b/src/include/dawn_native/D3D12Backend.h
new file mode 100644
index 0000000..ade0dd1
--- /dev/null
+++ b/src/include/dawn_native/D3D12Backend.h
@@ -0,0 +1 @@
+#include <dawn/native/D3D12Backend.h>
diff --git a/src/include/dawn_native/DawnNative.h b/src/include/dawn_native/DawnNative.h
new file mode 100644
index 0000000..637d511
--- /dev/null
+++ b/src/include/dawn_native/DawnNative.h
@@ -0,0 +1 @@
+#include <dawn/native/DawnNative.h>
diff --git a/src/include/dawn_native/MetalBackend.h b/src/include/dawn_native/MetalBackend.h
new file mode 100644
index 0000000..1cb8a89
--- /dev/null
+++ b/src/include/dawn_native/MetalBackend.h
@@ -0,0 +1 @@
+#include <dawn/native/MetalBackend.h>
diff --git a/src/include/dawn_native/NullBackend.h b/src/include/dawn_native/NullBackend.h
new file mode 100644
index 0000000..38e1134
--- /dev/null
+++ b/src/include/dawn_native/NullBackend.h
@@ -0,0 +1 @@
+#include <dawn/native/NullBackend.h>
diff --git a/src/include/dawn_native/OpenGLBackend.h b/src/include/dawn_native/OpenGLBackend.h
new file mode 100644
index 0000000..e7d7adf
--- /dev/null
+++ b/src/include/dawn_native/OpenGLBackend.h
@@ -0,0 +1 @@
+#include <dawn/native/OpenGLBackend.h>
diff --git a/src/include/dawn_native/VulkanBackend.h b/src/include/dawn_native/VulkanBackend.h
new file mode 100644
index 0000000..f183c03
--- /dev/null
+++ b/src/include/dawn_native/VulkanBackend.h
@@ -0,0 +1 @@
+#include <dawn/native/VulkanBackend.h>
diff --git a/src/include/dawn_native/dawn_native_export.h b/src/include/dawn_native/dawn_native_export.h
new file mode 100644
index 0000000..89f8287
--- /dev/null
+++ b/src/include/dawn_native/dawn_native_export.h
@@ -0,0 +1 @@
+#include <dawn/native/dawn_native_export.h>
diff --git a/src/include/dawn_platform/DawnPlatform.h b/src/include/dawn_platform/DawnPlatform.h
new file mode 100644
index 0000000..2b40383
--- /dev/null
+++ b/src/include/dawn_platform/DawnPlatform.h
@@ -0,0 +1 @@
+#include <dawn/platform/DawnPlatform.h>
diff --git a/src/include/dawn_wire/Wire.h b/src/include/dawn_wire/Wire.h
new file mode 100644
index 0000000..066e7ab
--- /dev/null
+++ b/src/include/dawn_wire/Wire.h
@@ -0,0 +1 @@
+#include <dawn/wire/Wire.h>
diff --git a/src/include/dawn_wire/WireClient.h b/src/include/dawn_wire/WireClient.h
new file mode 100644
index 0000000..0de7599
--- /dev/null
+++ b/src/include/dawn_wire/WireClient.h
@@ -0,0 +1 @@
+#include <dawn/wire/WireClient.h>
diff --git a/src/include/dawn_wire/WireServer.h b/src/include/dawn_wire/WireServer.h
new file mode 100644
index 0000000..be7030b
--- /dev/null
+++ b/src/include/dawn_wire/WireServer.h
@@ -0,0 +1 @@
+#include <dawn/wire/WireServer.h>
diff --git a/src/include/dawn_wire/dawn_wire_export.h b/src/include/dawn_wire/dawn_wire_export.h
new file mode 100644
index 0000000..36624f8
--- /dev/null
+++ b/src/include/dawn_wire/dawn_wire_export.h
@@ -0,0 +1 @@
+#include <dawn/wire/dawn_wire_export.h>
diff --git a/src/include/webgpu/webgpu.h b/src/include/webgpu/webgpu.h
new file mode 100644
index 0000000..4a29d37
--- /dev/null
+++ b/src/include/webgpu/webgpu.h
@@ -0,0 +1 @@
+#include "dawn/webgpu.h"
diff --git a/src/include/webgpu/webgpu_cpp.h b/src/include/webgpu/webgpu_cpp.h
new file mode 100644
index 0000000..5bbd869
--- /dev/null
+++ b/src/include/webgpu/webgpu_cpp.h
@@ -0,0 +1 @@
+#include <dawn/webgpu_cpp.h>
diff --git a/src/tint/fuzzers/BUILD.gn b/src/tint/fuzzers/BUILD.gn
index fd29315..3253e94 100644
--- a/src/tint/fuzzers/BUILD.gn
+++ b/src/tint/fuzzers/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2021 The Tint Authors
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn b/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
index 0db6c1d..4c63bab 100644
--- a/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
+++ b/src/tint/fuzzers/tint_ast_fuzzer/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2021 The Tint Authors
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/src/tint/fuzzers/tint_spirv_tools_fuzzer/BUILD.gn b/src/tint/fuzzers/tint_spirv_tools_fuzzer/BUILD.gn
new file mode 100644
index 0000000..a42c2d5
--- /dev/null
+++ b/src/tint/fuzzers/tint_spirv_tools_fuzzer/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../../scripts/dawn_overrides_with_defaults.gni")
+
+# Target aliases to ease merging Tint->Dawn
+
+group("tint_spirv_tools_fuzzer") {
+  deps = [ "${dawn_tint_dir}/src/tint/fuzzers/tint_spirv_tools_fuzzer:tint_spirv_tools_fuzzer" ]
+  testonly = true
+}
diff --git a/test/tint/BUILD.gn b/test/tint/BUILD.gn
index d2ca288..2839ae6 100644
--- a/test/tint/BUILD.gn
+++ b/test/tint/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2021 The Tint Authors
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/third_party/.clang-format b/third_party/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/third_party/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/third_party/CMakeLists.txt b/third_party/CMakeLists.txt
index bf1542c..7933b90 100644
--- a/third_party/CMakeLists.txt
+++ b/third_party/CMakeLists.txt
@@ -1,4 +1,4 @@
-# Copyright 2020 The Tint Authors.
+# Copyright 2022 The Dawn & Tint Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,6 +12,82 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Don't build testing in third_party dependencies
+set(BUILD_TESTING OFF)
+
+if (NOT TARGET SPIRV-Headers)
+    set(SPIRV_HEADERS_SKIP_EXAMPLES ON CACHE BOOL "" FORCE)
+    set(SPIRV_HEADERS_SKIP_INSTALL ON CACHE BOOL "" FORCE)
+
+    message(STATUS "Dawn: using SPIRV-Headers at ${DAWN_SPIRV_HEADERS_DIR}")
+    add_subdirectory(${DAWN_SPIRV_HEADERS_DIR} "${CMAKE_CURRENT_BINARY_DIR}/spirv-headers")
+endif()
+
+if(${TINT_BUILD_GLSL_WRITER})
+  if(${TINT_BUILD_SAMPLES})
+    add_subdirectory("${DAWN_THIRD_PARTY_DIR}/vulkan-deps/glslang/src" "${CMAKE_CURRENT_BINARY_DIR}/glslang" EXCLUDE_FROM_ALL)
+  endif()
+endif()
+
+if(${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
+  if (NOT IS_DIRECTORY "${SPIRV-Headers_SOURCE_DIR}")
+    set(SPIRV-Headers_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/spirv-headers CACHE STRING "Source directory for SPIR-V headers")
+  endif()
+endif()
+
+if (NOT TARGET SPIRV-Tools)
+    set(SPIRV_SKIP_TESTS ON CACHE BOOL "" FORCE)
+    set(SPIRV_SKIP_EXECUTABLES ON CACHE BOOL "" FORCE)
+    set(SKIP_SPIRV_TOOLS_INSTALL ON CACHE BOOL "" FORCE)
+
+    if(${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
+      set(SPIRV_SKIP_TESTS ON CACHE BOOL "Controls whether SPIR-V tests are run" FORCE)
+      set(SPIRV_WERROR OFF CACHE BOOL OFF FORCE)
+      if (${TINT_BUILD_SPIRV_TOOLS_FUZZER})
+        set(SPIRV_BUILD_FUZZER ON CACHE BOOL "Controls whether spirv-fuzz is built" FORCE)
+      endif()
+    endif()
+
+    message(STATUS "Dawn: using SPIRV-Tools at ${DAWN_SPIRV_TOOLS_DIR}")
+    add_subdirectory(${DAWN_SPIRV_TOOLS_DIR} "${CMAKE_CURRENT_BINARY_DIR}/spirv-tools" EXCLUDE_FROM_ALL)
+endif()
+
+if (NOT TARGET glfw)
+    set(GLFW_BUILD_DOCS OFF CACHE BOOL "" FORCE)
+    set(GLFW_BUILD_TESTS OFF CACHE BOOL "" FORCE)
+    set(GLFW_BUILD_EXAMPLES OFF CACHE BOOL "" FORCE)
+
+    message(STATUS "Dawn: using GLFW at ${DAWN_GLFW_DIR}")
+    add_subdirectory(${DAWN_GLFW_DIR} "${CMAKE_CURRENT_BINARY_DIR}/glfw")
+endif()
+
+if (NOT TARGET libabsl)
+    message(STATUS "Dawn: using Abseil at ${DAWN_ABSEIL_DIR}")
+    add_subdirectory(${DAWN_ABSEIL_DIR} "${CMAKE_CURRENT_BINARY_DIR}/abseil")
+endif()
+
+if (NOT TARGET Vulkan-Headers)
+    message(STATUS "Dawn: using Vulkan-Headers at ${DAWN_VULKAN_HEADERS_DIR}")
+    add_subdirectory(${DAWN_VULKAN_HEADERS_DIR} "${CMAKE_CURRENT_BINARY_DIR}/vulkan-headers")
+endif()
+
+# Header-only library for khrplatform.h
+add_library(dawn_khronos_platform INTERFACE)
+target_sources(dawn_khronos_platform INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/khronos/KHR/khrplatform.h")
+target_include_directories(dawn_khronos_platform INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/khronos")
+
+# Header-only library for Vulkan headers
+add_library(dawn_vulkan_headers INTERFACE)
+target_sources(dawn_vulkan_headers INTERFACE
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vk_icd.h"
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vk_layer.h"
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vk_platform.h"
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vk_sdk_platform.h"
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vulkan.h"
+    "${CMAKE_CURRENT_SOURCE_DIR}/khronos/vulkan/vulkan_core.h"
+)
+target_include_directories(dawn_vulkan_headers INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/khronos")
+
 if (${TINT_BUILD_BENCHMARKS})
   set(BENCHMARK_ENABLE_TESTING FALSE CACHE BOOL FALSE FORCE)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/benchmark EXCLUDE_FROM_ALL)
@@ -28,26 +104,3 @@
   set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Controls whether a protobuf static runtime is built" FORCE)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/protobuf/cmake)
 endif()
-
-if(${TINT_BUILD_SPV_READER} OR ${TINT_BUILD_SPV_WRITER})
-  if (NOT IS_DIRECTORY "${SPIRV-Headers_SOURCE_DIR}")
-    set(SPIRV-Headers_SOURCE_DIR "${TINT_THIRD_PARTY_DIR}/vulkan-deps/spirv-headers/src" CACHE STRING "Source directory for SPIR-V headers")
-  endif()
-
-  if (NOT TARGET SPIRV-Tools)
-    set(SPIRV_SKIP_TESTS ON CACHE BOOL "Controls whether SPIR-V tests are run" FORCE)
-    set(SPIRV_WERROR OFF CACHE BOOL OFF FORCE)
-    if (${TINT_BUILD_SPIRV_TOOLS_FUZZER})
-      set(SPIRV_BUILD_FUZZER ON CACHE BOOL "Controls whether spirv-fuzz is built" FORCE)
-    endif()
-    set(SPIRV-Headers_SOURCE_DIR "${TINT_THIRD_PARTY_DIR}/vulkan-deps/spirv-headers/src")
-    add_subdirectory("${TINT_THIRD_PARTY_DIR}/vulkan-deps/spirv-tools/src" "${CMAKE_BINARY_DIR}/third_party/spirv-tools" EXCLUDE_FROM_ALL)
-  endif()
-endif()
-
-if(${TINT_BUILD_GLSL_WRITER})
-  set(SPIRV-Headers_SOURCE_DIR "${TINT_THIRD_PARTY_DIR}/vulkan-deps/glslang/src")
-  if(${TINT_BUILD_SAMPLES})
-    add_subdirectory("${TINT_THIRD_PARTY_DIR}/vulkan-deps/glslang/src" "${CMAKE_BINARY_DIR}/third_party/glslang" EXCLUDE_FROM_ALL)
-  endif()
-endif()
diff --git a/third_party/gn/abseil-cpp/BUILD.gn b/third_party/gn/abseil-cpp/BUILD.gn
new file mode 100644
index 0000000..6b4017e
--- /dev/null
+++ b/third_party/gn/abseil-cpp/BUILD.gn
@@ -0,0 +1,170 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_features.gni")
+
+config("absl_config") {
+  if (dawn_standalone && is_clang) {
+    cflags = [
+      # Allow the use of enable_if()
+      "-Wno-gcc-compat",
+    ]
+  }
+
+  include_dirs = [ "${dawn_abseil_dir}" ]
+}
+
+template("absl_source_set") {
+  source_set(target_name) {
+    forward_variables_from(invoker, "*")
+
+    if (!defined(public_configs)) {
+      public_configs = []
+    }
+    public_configs += [ ":absl_config" ]
+  }
+}
+
+#
+# absl/base
+#
+
+absl_source_set("log_severity") {
+  sources = [ "${dawn_abseil_dir}/absl/base/log_severity.cc" ]
+  public = [ "${dawn_abseil_dir}/absl/base/log_severity.h" ]
+}
+
+absl_source_set("raw_logging_internal") {
+  sources = [ "${dawn_abseil_dir}/absl/base/internal/raw_logging.cc" ]
+  public = [ "${dawn_abseil_dir}/absl/base/internal/raw_logging.h" ]
+  public_deps = [ ":log_severity" ]
+  visibility = [ ":*" ]
+}
+
+absl_source_set("throw_delegate") {
+  sources = [ "${dawn_abseil_dir}/absl/base/internal/throw_delegate.cc" ]
+  public = [ "${dawn_abseil_dir}/absl/base/internal/throw_delegate.h" ]
+  public_deps = [ ":raw_logging_internal" ]
+  visibility = [ ":*" ]
+}
+
+#
+# absl/numeric
+#
+
+absl_source_set("int128") {
+  sources = [
+    "${dawn_abseil_dir}/absl/numeric/int128.cc",
+    "${dawn_abseil_dir}/absl/numeric/int128_have_intrinsic.inc",
+    "${dawn_abseil_dir}/absl/numeric/int128_no_intrinsic.inc",
+  ]
+  public = [ "${dawn_abseil_dir}/absl/numeric/int128.h" ]
+}
+
+#
+# absl/strings
+#
+
+absl_source_set("strings") {
+  sources = [
+    "${dawn_abseil_dir}/absl/strings/ascii.cc",
+    "${dawn_abseil_dir}/absl/strings/charconv.cc",
+    "${dawn_abseil_dir}/absl/strings/escaping.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/charconv_bigint.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/charconv_bigint.h",
+    "${dawn_abseil_dir}/absl/strings/internal/charconv_parse.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/charconv_parse.h",
+    "${dawn_abseil_dir}/absl/strings/internal/memutil.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/memutil.h",
+    "${dawn_abseil_dir}/absl/strings/internal/stl_type_traits.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_join_internal.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_split_internal.h",
+    "${dawn_abseil_dir}/absl/strings/match.cc",
+    "${dawn_abseil_dir}/absl/strings/numbers.cc",
+    "${dawn_abseil_dir}/absl/strings/str_cat.cc",
+    "${dawn_abseil_dir}/absl/strings/str_replace.cc",
+    "${dawn_abseil_dir}/absl/strings/str_split.cc",
+    "${dawn_abseil_dir}/absl/strings/string_view.cc",
+    "${dawn_abseil_dir}/absl/strings/substitute.cc",
+  ]
+  public = [
+    "${dawn_abseil_dir}/absl/strings/ascii.h",
+    "${dawn_abseil_dir}/absl/strings/charconv.h",
+    "${dawn_abseil_dir}/absl/strings/escaping.h",
+    "${dawn_abseil_dir}/absl/strings/internal/string_constant.h",
+    "${dawn_abseil_dir}/absl/strings/match.h",
+    "${dawn_abseil_dir}/absl/strings/numbers.h",
+    "${dawn_abseil_dir}/absl/strings/str_cat.h",
+    "${dawn_abseil_dir}/absl/strings/str_join.h",
+    "${dawn_abseil_dir}/absl/strings/str_replace.h",
+    "${dawn_abseil_dir}/absl/strings/str_split.h",
+    "${dawn_abseil_dir}/absl/strings/string_view.h",
+    "${dawn_abseil_dir}/absl/strings/strip.h",
+    "${dawn_abseil_dir}/absl/strings/substitute.h",
+  ]
+  deps = [
+    ":int128",
+    ":raw_logging_internal",
+    ":strings_internal",
+    ":throw_delegate",
+  ]
+}
+
+absl_source_set("strings_internal") {
+  sources = [
+    "${dawn_abseil_dir}/absl/strings/internal/escaping.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/ostringstream.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/utf8.cc",
+  ]
+  public = [
+    "${dawn_abseil_dir}/absl/strings/internal/char_map.h",
+    "${dawn_abseil_dir}/absl/strings/internal/escaping.h",
+    "${dawn_abseil_dir}/absl/strings/internal/ostringstream.h",
+    "${dawn_abseil_dir}/absl/strings/internal/resize_uninitialized.h",
+    "${dawn_abseil_dir}/absl/strings/internal/utf8.h",
+  ]
+  deps = [ ":raw_logging_internal" ]
+}
+
+absl_source_set("str_format") {
+  public = [ "${dawn_abseil_dir}/absl/strings/str_format.h" ]
+  deps = [ ":str_format_internal" ]
+}
+
+absl_source_set("str_format_internal") {
+  sources = [
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/arg.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/bind.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/extension.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/float_conversion.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/output.cc",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/parser.cc",
+  ]
+  public = [
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/arg.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/bind.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/checker.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/extension.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/float_conversion.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/output.h",
+    "${dawn_abseil_dir}/absl/strings/internal/str_format/parser.h",
+  ]
+  visibility = [ ":*" ]
+  deps = [
+    ":int128",
+    ":strings",
+  ]
+}
diff --git a/third_party/gn/glfw/BUILD.gn b/third_party/gn/glfw/BUILD.gn
new file mode 100644
index 0000000..b592b4d
--- /dev/null
+++ b/third_party/gn/glfw/BUILD.gn
@@ -0,0 +1,154 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_features.gni")
+
+# Only expose GLFW targets on platforms where GLFW is supported: otherwise they
+# might get discovered by GN when another target in this file is referenced,
+# and GLFW will be built as part of "all" builds, causing compilation failures.
+
+assert(dawn_supports_glfw_for_windowing)
+glfw_dir = dawn_glfw_dir
+
+config("glfw_public") {
+  include_dirs = [ "${glfw_dir}/include" ]
+
+  if (is_win) {
+    defines = [ "_GLFW_WIN32" ]
+  }
+
+  if (is_mac) {
+    defines = [ "_GLFW_COCOA" ]
+  }
+
+  if (is_linux) {
+    # ANGLE builds only libEGL.so, so tell GLFW to load that instead of
+    # the default libEGL.so.1.
+    defines = [
+      "_GLFW_X11",
+      "_GLFW_EGL_LIBRARY=\"libEGL.so\"",
+    ]
+  }
+}
+
+static_library("glfw") {
+  public_configs = [ ":glfw_public" ]
+
+  if (dawn_has_build) {
+    configs -= [ "//build/config/compiler:chromium_code" ]
+    configs += [ "//build/config/compiler:no_chromium_code" ]
+  }
+
+  if (is_win && !is_clang) {
+    # nonstandard extension, function/data pointer conversion in expression
+    cflags_c = [ "/wd4152" ]
+  } else {
+    cflags_c = [
+      "-Wno-sign-compare",
+      "-Wno-missing-field-initializers",
+    ]
+  }
+
+  sources = [
+    "${glfw_dir}/include/GLFW/glfw3.h",
+    "${glfw_dir}/include/GLFW/glfw3native.h",
+    "${glfw_dir}/src/context.c",
+    "${glfw_dir}/src/egl_context.c",
+    "${glfw_dir}/src/egl_context.h",
+    "${glfw_dir}/src/init.c",
+    "${glfw_dir}/src/input.c",
+    "${glfw_dir}/src/internal.h",
+    "${glfw_dir}/src/monitor.c",
+    "${glfw_dir}/src/osmesa_context.c",
+    "${glfw_dir}/src/osmesa_context.h",
+    "${glfw_dir}/src/vulkan.c",
+    "${glfw_dir}/src/window.c",
+  ]
+  libs = []
+
+  if (is_win) {
+    sources += [
+      "${glfw_dir}/src/wgl_context.c",
+      "${glfw_dir}/src/wgl_context.h",
+      "${glfw_dir}/src/win32_init.c",
+      "${glfw_dir}/src/win32_joystick.c",
+      "${glfw_dir}/src/win32_joystick.h",
+      "${glfw_dir}/src/win32_monitor.c",
+      "${glfw_dir}/src/win32_platform.h",
+      "${glfw_dir}/src/win32_thread.c",
+      "${glfw_dir}/src/win32_time.c",
+      "${glfw_dir}/src/win32_window.c",
+    ]
+  }
+
+  if (is_linux || is_mac) {
+    sources += [
+      "${glfw_dir}/src/posix_thread.c",
+      "${glfw_dir}/src/posix_thread.h",
+    ]
+  }
+
+  if (is_linux) {
+    sources += [
+      "${glfw_dir}/src/glx_context.c",
+      "${glfw_dir}/src/glx_context.h",
+      "${glfw_dir}/src/linux_joystick.c",
+      "${glfw_dir}/src/linux_joystick.h",
+      "${glfw_dir}/src/posix_time.c",
+      "${glfw_dir}/src/posix_time.h",
+      "${glfw_dir}/src/x11_init.c",
+      "${glfw_dir}/src/x11_monitor.c",
+      "${glfw_dir}/src/x11_platform.h",
+      "${glfw_dir}/src/x11_window.c",
+      "${glfw_dir}/src/xkb_unicode.c",
+      "${glfw_dir}/src/xkb_unicode.h",
+    ]
+
+    libs += [
+      "rt",
+      "dl",
+      "X11",
+      "Xcursor",
+      "Xinerama",
+      "Xrandr",
+    ]
+  }
+
+  if (is_mac) {
+    sources += [
+      "${glfw_dir}/src/cocoa_init.m",
+      "${glfw_dir}/src/cocoa_joystick.h",
+      "${glfw_dir}/src/cocoa_joystick.m",
+      "${glfw_dir}/src/cocoa_monitor.m",
+      "${glfw_dir}/src/cocoa_platform.h",
+      "${glfw_dir}/src/cocoa_time.c",
+      "${glfw_dir}/src/cocoa_window.m",
+      "${glfw_dir}/src/nsgl_context.h",
+      "${glfw_dir}/src/nsgl_context.m",
+    ]
+    frameworks = [
+      "Cocoa.framework",
+      "IOKit.framework",
+      "CoreFoundation.framework",
+      "CoreVideo.framework",
+    ]
+    cflags_objc = [
+      "-Wno-sign-compare",
+      "-Wno-unguarded-availability",
+      "-Wno-objc-multiple-method-names",
+    ]
+  }
+}
diff --git a/third_party/gn/webgpu-cts/BUILD.gn b/third_party/gn/webgpu-cts/BUILD.gn
new file mode 100644
index 0000000..7023941
--- /dev/null
+++ b/third_party/gn/webgpu-cts/BUILD.gn
@@ -0,0 +1,101 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: This file is intentionally not used by any other BUILD.gn in Dawn.
+# Instead, Chromium depends directly on this file to build the WebGPU CTS.
+# Scripts called from this file assume Dawn is checked out inside Chromium.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+group("webgpu-cts") {
+  public_deps = [
+    ":compile_src",
+    ":copy_resources",
+    ":verify_gen_ts_dep_list",
+  ]
+}
+
+list_from_ts_sources_txt = read_file("ts_sources.txt", "list lines")
+
+ts_source_inputs = [ "../../webgpu-cts/tsconfig.json" ]
+foreach(file, list_from_ts_sources_txt) {
+  ts_source_inputs += [ "../../webgpu-cts/$file" ]
+}
+
+js_outputs = []
+foreach(ts_file, list_from_ts_sources_txt) {
+  js_file = string_replace(ts_file, ".ts", ".js")
+  js_node_file = string_replace(js_file, "src/", "src-node/")
+
+  js_outputs += [ "$target_gen_dir/../../webgpu-cts/$js_file" ]
+
+  if (js_node_file != "src-node/common/runtime/wpt.js" &&
+      js_node_file != "src-node/common/runtime/standalone.js" &&
+      js_node_file != "src-node/common/runtime/helper/test_worker.js" &&
+      js_node_file !=
+      "src-node/webgpu/web_platform/worker/worker_launcher.js") {
+    js_outputs += [ "$target_gen_dir/../../webgpu-cts/$js_node_file" ]
+  }
+}
+
+action("compile_src") {
+  script = "${dawn_root}/webgpu-cts/scripts/compile_src.py"
+
+  inputs = [
+             "//third_party/node/node_modules/typescript/lib/tsc.js",
+             "//third_party/node/node.py",
+             "${dawn_root}/webgpu-cts/scripts/tsc_ignore_errors.py",
+           ] + ts_source_inputs
+
+  outputs = js_outputs
+  data = js_outputs
+
+  args = [ rebase_path("$target_gen_dir/../../webgpu-cts", root_build_dir) ]
+}
+
+list_from_resource_files_txt = read_file("resource_files.txt", "list lines")
+resource_file_inputs = []
+foreach(file, list_from_resource_files_txt) {
+  resource_file_inputs += [ "$file" ]
+}
+
+copy("copy_resources") {
+  sources = []
+  data = []
+  foreach(resource_file, resource_file_inputs) {
+    sources += [ "../../webgpu-cts/src/resources/$resource_file" ]
+
+    # Copy into resources/, instead of src/resources/, because compile_src
+    # wipes src/ before running.
+    data += [ "$target_gen_dir/../../webgpu-cts/resources/$resource_file" ]
+  }
+
+  outputs =
+      [ "$target_gen_dir/../../webgpu-cts/resources/{{source_file_part}}" ]
+}
+
+action("verify_gen_ts_dep_list") {
+  script = "${dawn_root}/webgpu-cts/scripts/gen_ts_dep_lists.py"
+  inputs = [
+    # TODO(kainino): Make sure this gets retriggered when the CTS dep changes.
+    "resource_files.txt",
+    "ts_sources.txt",
+  ]
+  outputs = [ "$target_out_dir/run_$target_name.stamp" ]
+  args = [
+    "--check",
+    "--stamp",
+    rebase_path(outputs[0], root_build_dir),
+  ]
+}
diff --git a/third_party/gn/webgpu-cts/README.chromium b/third_party/gn/webgpu-cts/README.chromium
new file mode 100644
index 0000000..cc4328c
--- /dev/null
+++ b/third_party/gn/webgpu-cts/README.chromium
@@ -0,0 +1,14 @@
+Name: WebGPU Conformance Test Suite
+Short Name: webgpu-cts
+Version: unknown
+Revision: latest
+URL: https://gpuweb.github.io/cts/
+SOURCE CODE: git clone https://github.com/gpuweb/cts.git
+Security Critical: no
+License: BSD-3-Clause
+License File: NOT_SHIPPED
+
+Description:
+(This README is for ../../webgpu-cts/. This directory is part of dawn.)
+The WebGPU conformance test suite. Not used directly by Dawn, only transitively
+by Chromium. There, it is run under Chromium's GPU testing infrastructure.
diff --git a/third_party/gn/webgpu-cts/resource_files.txt b/third_party/gn/webgpu-cts/resource_files.txt
new file mode 100644
index 0000000..812c137
--- /dev/null
+++ b/third_party/gn/webgpu-cts/resource_files.txt
@@ -0,0 +1,6 @@
+Di-3d.png
+README.md
+red-green.bt601.vp9.webm
+red-green.mp4
+red-green.theora.ogv
+red-green.webmvp8.webm
diff --git a/third_party/gn/webgpu-cts/ts_sources.txt b/third_party/gn/webgpu-cts/ts_sources.txt
new file mode 100644
index 0000000..51927cf
--- /dev/null
+++ b/third_party/gn/webgpu-cts/ts_sources.txt
@@ -0,0 +1,330 @@
+src/common/internal/version.ts
+src/common/internal/stack.ts
+src/common/internal/logging/log_message.ts
+src/common/internal/logging/result.ts
+src/common/internal/logging/logger.ts
+src/common/util/timeout.ts
+src/common/util/util.ts
+src/common/internal/logging/test_case_recorder.ts
+src/common/util/types.ts
+src/common/runtime/helper/options.ts
+src/common/internal/query/encode_selectively.ts
+src/common/internal/query/json_param_value.ts
+src/common/internal/query/separators.ts
+src/common/internal/query/validQueryPart.ts
+src/common/internal/query/parseQuery.ts
+src/common/internal/query/stringify_params.ts
+src/common/internal/query/query.ts
+src/common/internal/query/compare.ts
+src/common/internal/params_utils.ts
+src/common/framework/fixture.ts
+src/common/framework/params_builder.ts
+src/common/framework/resources.ts
+src/common/internal/test_group.ts
+src/common/framework/test_group.ts
+src/common/internal/test_suite_listing.ts
+src/common/internal/util.ts
+src/common/internal/tree.ts
+src/common/internal/file_loader.ts
+src/common/util/navigator_gpu.ts
+src/common/runtime/helper/sys.ts
+src/common/runtime/cmdline.ts
+src/common/runtime/server.ts
+src/common/runtime/helper/test_worker.ts
+src/common/runtime/standalone.ts
+src/common/runtime/wpt.ts
+src/common/runtime/helper/test_worker-worker.ts
+src/common/tools/checklist.ts
+src/common/tools/crawl.ts
+src/common/tools/dev_server.ts
+src/common/tools/gen_listings.ts
+src/common/tools/gen_wpt_cts_html.ts
+src/common/tools/version.ts
+src/common/util/collect_garbage.ts
+src/common/util/colors.ts
+src/common/util/data_tables.ts
+src/common/util/preprocessor.ts
+src/unittests/unit_test.ts
+src/demo/a.spec.ts
+src/demo/json.spec.ts
+src/demo/a/b.spec.ts
+src/demo/a/b/c.spec.ts
+src/demo/a/b/d.spec.ts
+src/demo/file_depth_2/in_single_child_dir/r.spec.ts
+src/stress/listing.ts
+src/webgpu/constants.ts
+src/stress/adapter/device_allocation.spec.ts
+src/webgpu/util/constants.ts
+src/webgpu/util/conversion.ts
+src/webgpu/util/math.ts
+src/webgpu/util/unions.ts
+src/webgpu/util/texture/base.ts
+src/webgpu/util/texture/layout.ts
+src/webgpu/capability_info.ts
+src/webgpu/util/buffer.ts
+src/webgpu/util/pretty_diff_tables.ts
+src/webgpu/util/check_contents.ts
+src/webgpu/util/command_buffer_maker.ts
+src/webgpu/util/device_pool.ts
+src/webgpu/util/texture/texel_data.ts
+src/webgpu/gpu_test.ts
+src/stress/compute/compute_pass.spec.ts
+src/stress/device/bind_group_allocation.spec.ts
+src/stress/device/bind_group_layout_allocation.spec.ts
+src/stress/device/buffer_allocation.spec.ts
+src/stress/device/command_encoder_allocation.spec.ts
+src/stress/device/compute_pipeline_allocation.spec.ts
+src/stress/device/pipeline_layout_allocation.spec.ts
+src/stress/device/query_set_allocation.spec.ts
+src/stress/device/render_bundle_allocation.spec.ts
+src/stress/device/render_pipeline_allocation.spec.ts
+src/stress/device/sampler_allocation.spec.ts
+src/stress/device/shader_module_allocation.spec.ts
+src/stress/device/texture_allocation.spec.ts
+src/stress/memory/churn.spec.ts
+src/webgpu/util/memory.ts
+src/stress/memory/oom.spec.ts
+src/stress/queries/occlusion.spec.ts
+src/stress/queries/pipeline_statistics.spec.ts
+src/stress/queries/resolve.spec.ts
+src/stress/queries/timestamps.spec.ts
+src/stress/queue/submit.spec.ts
+src/stress/render/render_pass.spec.ts
+src/stress/render/vertex_buffers.spec.ts
+src/stress/shaders/entry_points.spec.ts
+src/stress/shaders/non_halting.spec.ts
+src/stress/shaders/slow.spec.ts
+src/stress/texture/large.spec.ts
+src/unittests/test_group_test.ts
+src/unittests/async_expectations.spec.ts
+src/unittests/basic.spec.ts
+src/unittests/check_contents.spec.ts
+src/unittests/conversion.spec.ts
+src/unittests/getStackTrace.spec.ts
+src/unittests/listing.ts
+src/unittests/loaders_and_trees.spec.ts
+src/unittests/logger.spec.ts
+src/unittests/maths.spec.ts
+src/unittests/params_builder_and_utils.spec.ts
+src/unittests/params_builder_toplevel.spec.ts
+src/unittests/preprocessor.spec.ts
+src/unittests/query_compare.spec.ts
+src/unittests/query_string.spec.ts
+src/unittests/test_group.spec.ts
+src/unittests/test_query.spec.ts
+src/webgpu/examples.spec.ts
+src/webgpu/listing.ts
+src/webgpu/api/operation/labels.spec.ts
+src/webgpu/api/operation/onSubmittedWorkDone.spec.ts
+src/webgpu/api/operation/uncapturederror.spec.ts
+src/webgpu/api/operation/adapter/requestDevice.spec.ts
+src/webgpu/api/operation/adapter/requestDevice_limits.spec.ts
+src/webgpu/api/operation/buffers/mapping_test.ts
+src/webgpu/api/operation/buffers/map.spec.ts
+src/webgpu/api/operation/buffers/map_ArrayBuffer.spec.ts
+src/webgpu/api/operation/buffers/map_detach.spec.ts
+src/webgpu/api/operation/buffers/map_oom.spec.ts
+src/webgpu/api/operation/buffers/threading.spec.ts
+src/webgpu/api/operation/command_buffer/basic.spec.ts
+src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts
+src/webgpu/api/operation/command_buffer/copyBufferToBuffer.spec.ts
+src/webgpu/api/operation/command_buffer/copyTextureToTexture.spec.ts
+src/webgpu/api/operation/command_buffer/image_copy.spec.ts
+src/webgpu/api/operation/command_buffer/programmable/programmable_state_test.ts
+src/webgpu/api/operation/command_buffer/programmable/state_tracking.spec.ts
+src/webgpu/api/operation/command_buffer/render/dynamic_state.spec.ts
+src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts
+src/webgpu/api/operation/compute/basic.spec.ts
+src/webgpu/api/operation/compute_pipeline/entry_point_name.spec.ts
+src/webgpu/api/operation/device/lost.spec.ts
+src/webgpu/api/operation/memory_sync/buffer/buffer_sync_test.ts
+src/webgpu/api/operation/memory_sync/buffer/rw_and_wr.spec.ts
+src/webgpu/api/operation/memory_sync/buffer/ww.spec.ts
+src/webgpu/api/operation/memory_sync/texture/texture_sync_test.ts
+src/webgpu/api/operation/memory_sync/texture/same_subresource.spec.ts
+src/webgpu/api/operation/pipeline/default_layout.spec.ts
+src/webgpu/api/operation/queue/writeBuffer.spec.ts
+src/webgpu/api/operation/render_pass/clear_value.spec.ts
+src/webgpu/api/operation/render_pass/resolve.spec.ts
+src/webgpu/api/operation/render_pass/storeOp.spec.ts
+src/webgpu/api/operation/render_pass/storeop2.spec.ts
+src/webgpu/api/operation/render_pipeline/alpha_to_coverage.spec.ts
+src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts
+src/webgpu/api/operation/render_pipeline/entry_point_name.spec.ts
+src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts
+src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts
+src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts
+src/webgpu/api/operation/render_pipeline/vertex_only_render_pipeline.spec.ts
+src/webgpu/api/operation/rendering/basic.spec.ts
+src/webgpu/api/operation/rendering/blending.spec.ts
+src/webgpu/api/operation/rendering/depth.spec.ts
+src/webgpu/api/operation/rendering/depth_clip_clamp.spec.ts
+src/webgpu/api/operation/rendering/draw.spec.ts
+src/webgpu/api/operation/rendering/indirect_draw.spec.ts
+src/webgpu/api/operation/rendering/robust_access_index.spec.ts
+src/webgpu/api/operation/resource_init/buffer.spec.ts
+src/webgpu/util/texture/subresource.ts
+src/webgpu/api/operation/resource_init/check_texture/by_copy.ts
+src/webgpu/api/operation/resource_init/check_texture/by_ds_test.ts
+src/webgpu/api/operation/resource_init/check_texture/by_sampling.ts
+src/webgpu/api/operation/resource_init/texture_zero.spec.ts
+src/webgpu/api/operation/sampling/anisotropy.spec.ts
+src/webgpu/api/operation/sampling/filter_mode.spec.ts
+src/webgpu/api/operation/sampling/lod_clamp.spec.ts
+src/webgpu/api/operation/shader_module/compilation_info.spec.ts
+src/webgpu/api/operation/texture_view/read.spec.ts
+src/webgpu/api/operation/texture_view/write.spec.ts
+src/webgpu/api/operation/vertex_state/correctness.spec.ts
+src/webgpu/api/operation/vertex_state/index_format.spec.ts
+src/webgpu/api/validation/validation_test.ts
+src/webgpu/api/validation/attachment_compatibility.spec.ts
+src/webgpu/api/validation/createBindGroup.spec.ts
+src/webgpu/api/validation/createBindGroupLayout.spec.ts
+src/webgpu/api/validation/createComputePipeline.spec.ts
+src/webgpu/api/validation/createPipelineLayout.spec.ts
+src/webgpu/api/validation/createRenderPipeline.spec.ts
+src/webgpu/api/validation/createSampler.spec.ts
+src/webgpu/api/validation/createTexture.spec.ts
+src/webgpu/api/validation/createView.spec.ts
+src/webgpu/api/validation/create_pipeline.spec.ts
+src/webgpu/api/validation/error_scope.spec.ts
+src/webgpu/api/validation/layout_shader_compat.spec.ts
+src/webgpu/api/validation/render_pass_descriptor.spec.ts
+src/webgpu/api/validation/vertex_state.spec.ts
+src/webgpu/api/validation/buffer/create.spec.ts
+src/webgpu/api/validation/buffer/destroy.spec.ts
+src/webgpu/api/validation/buffer/mapping.spec.ts
+src/webgpu/api/validation/buffer/threading.spec.ts
+src/webgpu/api/validation/capability_checks/features/depth_clip_control.spec.ts
+src/webgpu/api/validation/capability_checks/features/query_types.spec.ts
+src/webgpu/api/validation/capability_checks/features/texture_formats.spec.ts
+src/webgpu/api/validation/encoding/beginRenderPass.spec.ts
+src/webgpu/api/validation/encoding/createRenderBundleEncoder.spec.ts
+src/webgpu/api/validation/encoding/encoder_state.spec.ts
+src/webgpu/api/validation/encoding/render_bundle.spec.ts
+src/webgpu/api/validation/encoding/cmds/buffer_texture_copies.spec.ts
+src/webgpu/api/validation/encoding/cmds/clearBuffer.spec.ts
+src/webgpu/api/validation/encoding/cmds/compute_pass.spec.ts
+src/webgpu/api/validation/encoding/cmds/copyBufferToBuffer.spec.ts
+src/webgpu/api/validation/encoding/cmds/copyTextureToTexture.spec.ts
+src/webgpu/api/validation/encoding/cmds/debug.spec.ts
+src/webgpu/api/validation/encoding/cmds/index_access.spec.ts
+src/webgpu/api/validation/encoding/cmds/render_pass.spec.ts
+src/webgpu/api/validation/encoding/cmds/setBindGroup.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/dynamic_state.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/render.ts
+src/webgpu/api/validation/encoding/cmds/render/indirect_draw.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/setIndexBuffer.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/setVertexBuffer.spec.ts
+src/webgpu/api/validation/encoding/cmds/render/state_tracking.spec.ts
+src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
+src/webgpu/api/validation/encoding/queries/common.ts
+src/webgpu/api/validation/encoding/queries/begin_end.spec.ts
+src/webgpu/api/validation/encoding/queries/general.spec.ts
+src/webgpu/api/validation/encoding/queries/pipeline_statistics.spec.ts
+src/webgpu/api/validation/encoding/queries/resolveQuerySet.spec.ts
+src/webgpu/api/validation/image_copy/image_copy.ts
+src/webgpu/api/validation/image_copy/buffer_related.spec.ts
+src/webgpu/api/validation/image_copy/layout_related.spec.ts
+src/webgpu/api/validation/image_copy/texture_related.spec.ts
+src/webgpu/api/validation/initialization/requestDevice.spec.ts
+src/webgpu/api/validation/query_set/create.spec.ts
+src/webgpu/api/validation/query_set/destroy.spec.ts
+src/webgpu/api/validation/queue/buffer_mapped.spec.ts
+src/webgpu/api/validation/queue/submit.spec.ts
+src/webgpu/api/validation/queue/writeBuffer.spec.ts
+src/webgpu/util/create_elements.ts
+src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts
+src/webgpu/api/validation/queue/destroyed/buffer.spec.ts
+src/webgpu/api/validation/queue/destroyed/query_set.spec.ts
+src/webgpu/api/validation/render_pass/resolve.spec.ts
+src/webgpu/api/validation/render_pass/storeOp.spec.ts
+src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts
+src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts
+src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts
+src/webgpu/api/validation/texture/destroy.spec.ts
+src/webgpu/idl/exposed.html.ts
+src/webgpu/idl/idl_test.ts
+src/webgpu/idl/constants/flags.spec.ts
+src/webgpu/shader/types.ts
+src/webgpu/shader/values.ts
+src/webgpu/shader/execution/robust_access.spec.ts
+src/webgpu/shader/execution/robust_access_vertex.spec.ts
+src/webgpu/shader/execution/zero_init.spec.ts
+src/webgpu/util/compare.ts
+src/webgpu/shader/execution/expression/expression.ts
+src/webgpu/shader/execution/expression/binary/binary.ts
+src/webgpu/shader/execution/expression/binary/bitwise.spec.ts
+src/webgpu/shader/execution/expression/binary/f32_arithmetic.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/builtin.ts
+src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/all.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/any.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/float_built_functions.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/log.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/logical_built_in_functions.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/max.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/min.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/select.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts
+src/webgpu/shader/execution/expression/call/builtin/value_testing_built_in_functions.spec.ts
+src/webgpu/shader/execution/expression/unary/unary.ts
+src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts
+src/webgpu/shader/execution/memory_model/memory_model_setup.ts
+src/webgpu/shader/execution/memory_model/atomicity.spec.ts
+src/webgpu/shader/execution/memory_model/barrier.spec.ts
+src/webgpu/shader/execution/memory_model/coherence.spec.ts
+src/webgpu/shader/execution/memory_model/weak.spec.ts
+src/webgpu/shader/execution/sampling/gradients_in_varying_loop.spec.ts
+src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts
+src/webgpu/shader/execution/shader_io/shared_structs.spec.ts
+src/webgpu/shader/validation/shader_validation_test.ts
+src/webgpu/shader/validation/variable_and_const.spec.ts
+src/webgpu/shader/validation/shader_io/util.ts
+src/webgpu/shader/validation/shader_io/builtins.spec.ts
+src/webgpu/shader/validation/shader_io/generic.spec.ts
+src/webgpu/shader/validation/shader_io/interpolate.spec.ts
+src/webgpu/shader/validation/shader_io/invariant.spec.ts
+src/webgpu/shader/validation/shader_io/locations.spec.ts
+src/webgpu/shader/validation/wgsl/basic.spec.ts
+src/webgpu/util/color_space_conversion.ts
+src/webgpu/util/copy_to_texture.ts
+src/webgpu/util/texture/texel_data.spec.ts
+src/webgpu/web_platform/util.ts
+src/webgpu/web_platform/canvas/configure.spec.ts
+src/webgpu/web_platform/canvas/context_creation.spec.ts
+src/webgpu/web_platform/canvas/getCurrentTexture.spec.ts
+src/webgpu/web_platform/canvas/getPreferredFormat.spec.ts
+src/webgpu/web_platform/canvas/readbackFromWebGPUCanvas.spec.ts
+src/webgpu/web_platform/copyToTexture/ImageBitmap.spec.ts
+src/webgpu/web_platform/copyToTexture/canvas.spec.ts
+src/webgpu/web_platform/copyToTexture/video.spec.ts
+src/webgpu/web_platform/external_texture/video.spec.ts
+src/webgpu/web_platform/reftests/gpu_ref_test.ts
+src/webgpu/web_platform/reftests/canvas_clear.html.ts
+src/webgpu/web_platform/reftests/canvas_complex.html.ts
+src/webgpu/web_platform/reftests/canvas_composite_alpha.html.ts
+src/webgpu/web_platform/reftests/canvas_size_different_with_back_buffer_size.html.ts
+src/webgpu/web_platform/worker/worker.spec.ts
+src/webgpu/web_platform/worker/worker.ts
+src/webgpu/web_platform/worker/worker_launcher.ts
diff --git a/third_party/khronos/BUILD.gn b/third_party/khronos/BUILD.gn
new file mode 100644
index 0000000..e93b89a
--- /dev/null
+++ b/third_party/khronos/BUILD.gn
@@ -0,0 +1,26 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Empty targets to add the include dirs and list the sources of Khronos
+# headers for header inclusion check.
+
+config("khronos_headers_public") {
+  include_dirs = [ "." ]
+}
+
+source_set("khronos_platform") {
+  sources = [ "KHR/khrplatform.h" ]
+
+  public_configs = [ ":khronos_headers_public" ]
+}
diff --git a/third_party/khronos/KHR/khrplatform.h b/third_party/khronos/KHR/khrplatform.h
new file mode 100644
index 0000000..5b55ea2
--- /dev/null
+++ b/third_party/khronos/KHR/khrplatform.h
@@ -0,0 +1,290 @@
+#ifndef __khrplatform_h_
+#define __khrplatform_h_
+
+/*
+** Copyright (c) 2008-2018 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Khronos platform-specific types and definitions.
+ *
+ * The master copy of khrplatform.h is maintained in the Khronos EGL
+ * Registry repository at https://github.com/KhronosGroup/EGL-Registry
+ * The last semantic modification to khrplatform.h was at commit ID:
+ *      67a3e0864c2d75ea5287b9f3d2eb74a745936692
+ *
+ * Adopters may modify this file to suit their platform. Adopters are
+ * encouraged to submit platform specific modifications to the Khronos
+ * group so that they can be included in future versions of this file.
+ * Please submit changes by filing pull requests or issues on
+ * the EGL Registry repository linked above.
+ *
+ *
+ * See the Implementer's Guidelines for information about where this file
+ * should be located on your system and for more details of its use:
+ *    http://www.khronos.org/registry/implementers_guide.pdf
+ *
+ * This file should be included as
+ *        #include <KHR/khrplatform.h>
+ * by Khronos client API header files that use its types and defines.
+ *
+ * The types in khrplatform.h should only be used to define API-specific types.
+ *
+ * Types defined in khrplatform.h:
+ *    khronos_int8_t              signed   8  bit
+ *    khronos_uint8_t             unsigned 8  bit
+ *    khronos_int16_t             signed   16 bit
+ *    khronos_uint16_t            unsigned 16 bit
+ *    khronos_int32_t             signed   32 bit
+ *    khronos_uint32_t            unsigned 32 bit
+ *    khronos_int64_t             signed   64 bit
+ *    khronos_uint64_t            unsigned 64 bit
+ *    khronos_intptr_t            signed   same number of bits as a pointer
+ *    khronos_uintptr_t           unsigned same number of bits as a pointer
+ *    khronos_ssize_t             signed   size
+ *    khronos_usize_t             unsigned size
+ *    khronos_float_t             signed   32 bit floating point
+ *    khronos_time_ns_t           unsigned 64 bit time in nanoseconds
+ *    khronos_utime_nanoseconds_t unsigned time interval or absolute time in
+ *                                         nanoseconds
+ *    khronos_stime_nanoseconds_t signed time interval in nanoseconds
+ *    khronos_boolean_enum_t      enumerated boolean type. This should
+ *      only be used as a base type when a client API's boolean type is
+ *      an enum. Client APIs which use an integer or other type for
+ *      booleans cannot use this as the base type for their boolean.
+ *
+ * Tokens defined in khrplatform.h:
+ *
+ *    KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
+ *
+ *    KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
+ *    KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
+ *
+ * Calling convention macros defined in this file:
+ *    KHRONOS_APICALL
+ *    KHRONOS_APIENTRY
+ *    KHRONOS_APIATTRIBUTES
+ *
+ * These may be used in function prototypes as:
+ *
+ *      KHRONOS_APICALL void KHRONOS_APIENTRY funcname(
+ *                                  int arg1,
+ *                                  int arg2) KHRONOS_APIATTRIBUTES;
+ */
+
+#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC)
+#   define KHRONOS_STATIC 1
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APICALL
+ *-------------------------------------------------------------------------
+ * This precedes the return type of the function in the function prototype.
+ */
+#if defined(KHRONOS_STATIC)
+    /* If the preprocessor constant KHRONOS_STATIC is defined, make the
+     * header compatible with static linking. */
+#   define KHRONOS_APICALL
+#elif defined(_WIN32)
+#   define KHRONOS_APICALL __declspec(dllimport)
+#elif defined (__SYMBIAN32__)
+#   define KHRONOS_APICALL IMPORT_C
+#elif defined(__ANDROID__)
+#   define KHRONOS_APICALL __attribute__((visibility("default")))
+#else
+#   define KHRONOS_APICALL
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIENTRY
+ *-------------------------------------------------------------------------
+ * This follows the return type of the function  and precedes the function
+ * name in the function prototype.
+ */
+#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(KHRONOS_STATIC)
+    /* Win32 but not WinCE */
+#   define KHRONOS_APIENTRY __stdcall
+#else
+#   define KHRONOS_APIENTRY
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIATTRIBUTES
+ *-------------------------------------------------------------------------
+ * This follows the closing parenthesis of the function prototype arguments.
+ */
+#if defined (__ARMCC_2__)
+#define KHRONOS_APIATTRIBUTES __softfp
+#else
+#define KHRONOS_APIATTRIBUTES
+#endif
+
+/*-------------------------------------------------------------------------
+ * basic type definitions
+ *-----------------------------------------------------------------------*/
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
+
+
+/*
+ * Using <stdint.h>
+ */
+#include <stdint.h>
+typedef int32_t                 khronos_int32_t;
+typedef uint32_t                khronos_uint32_t;
+typedef int64_t                 khronos_int64_t;
+typedef uint64_t                khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64   1
+#define KHRONOS_SUPPORT_FLOAT   1
+
+#elif defined(__VMS ) || defined(__sgi)
+
+/*
+ * Using <inttypes.h>
+ */
+#include <inttypes.h>
+typedef int32_t                 khronos_int32_t;
+typedef uint32_t                khronos_uint32_t;
+typedef int64_t                 khronos_int64_t;
+typedef uint64_t                khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64   1
+#define KHRONOS_SUPPORT_FLOAT   1
+
+#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
+
+/*
+ * Win32
+ */
+typedef __int32                 khronos_int32_t;
+typedef unsigned __int32        khronos_uint32_t;
+typedef __int64                 khronos_int64_t;
+typedef unsigned __int64        khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64   1
+#define KHRONOS_SUPPORT_FLOAT   1
+
+#elif defined(__sun__) || defined(__digital__)
+
+/*
+ * Sun or Digital
+ */
+typedef int                     khronos_int32_t;
+typedef unsigned int            khronos_uint32_t;
+#if defined(__arch64__) || defined(_LP64)
+typedef long int                khronos_int64_t;
+typedef unsigned long int       khronos_uint64_t;
+#else
+typedef long long int           khronos_int64_t;
+typedef unsigned long long int  khronos_uint64_t;
+#endif /* __arch64__ */
+#define KHRONOS_SUPPORT_INT64   1
+#define KHRONOS_SUPPORT_FLOAT   1
+
+#elif 0
+
+/*
+ * Hypothetical platform with no float or int64 support
+ */
+typedef int                     khronos_int32_t;
+typedef unsigned int            khronos_uint32_t;
+#define KHRONOS_SUPPORT_INT64   0
+#define KHRONOS_SUPPORT_FLOAT   0
+
+#else
+
+/*
+ * Generic fallback
+ */
+#include <stdint.h>
+typedef int32_t                 khronos_int32_t;
+typedef uint32_t                khronos_uint32_t;
+typedef int64_t                 khronos_int64_t;
+typedef uint64_t                khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64   1
+#define KHRONOS_SUPPORT_FLOAT   1
+
+#endif
+
+
+/*
+ * Types that are (so far) the same on all platforms
+ */
+typedef signed   char          khronos_int8_t;
+typedef unsigned char          khronos_uint8_t;
+typedef signed   short int     khronos_int16_t;
+typedef unsigned short int     khronos_uint16_t;
+
+/*
+ * Types that differ between LLP64 and LP64 architectures - in LLP64,
+ * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears
+ * to be the only LLP64 architecture in current use.
+ */
+#ifdef _WIN64
+typedef signed   long long int khronos_intptr_t;
+typedef unsigned long long int khronos_uintptr_t;
+typedef signed   long long int khronos_ssize_t;
+typedef unsigned long long int khronos_usize_t;
+#else
+typedef signed   long  int     khronos_intptr_t;
+typedef unsigned long  int     khronos_uintptr_t;
+typedef signed   long  int     khronos_ssize_t;
+typedef unsigned long  int     khronos_usize_t;
+#endif
+
+#if KHRONOS_SUPPORT_FLOAT
+/*
+ * Float type
+ */
+typedef          float         khronos_float_t;
+#endif
+
+#if KHRONOS_SUPPORT_INT64
+/* Time types
+ *
+ * These types can be used to represent a time interval in nanoseconds or
+ * an absolute Unadjusted System Time.  Unadjusted System Time is the number
+ * of nanoseconds since some arbitrary system event (e.g. since the last
+ * time the system booted).  The Unadjusted System Time is an unsigned
+ * 64 bit value that wraps back to 0 every 584 years.  Time intervals
+ * may be either signed or unsigned.
+ */
+typedef khronos_uint64_t       khronos_utime_nanoseconds_t;
+typedef khronos_int64_t        khronos_stime_nanoseconds_t;
+#endif
+
+/*
+ * Dummy value used to pad enum types to 32 bits.
+ */
+#ifndef KHRONOS_MAX_ENUM
+#define KHRONOS_MAX_ENUM 0x7FFFFFFF
+#endif
+
+/*
+ * Enumerated boolean type
+ *
+ * Values other than zero should be considered to be true.  Therefore
+ * comparisons should not be made against KHRONOS_TRUE.
+ */
+typedef enum {
+    KHRONOS_FALSE = 0,
+    KHRONOS_TRUE  = 1,
+    KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM
+} khronos_boolean_enum_t;
+
+#endif /* __khrplatform_h_ */
diff --git a/third_party/khronos/gl.xml b/third_party/khronos/gl.xml
new file mode 100644
index 0000000..ce4ba10
--- /dev/null
+++ b/third_party/khronos/gl.xml
@@ -0,0 +1,50579 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<registry>
+    <comment>
+Copyright (c) 2013-2018 The Khronos Group Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+------------------------------------------------------------------------
+
+This file, gl.xml, is the OpenGL and OpenGL API Registry. The canonical
+version of the registry, together with documentation, schema, and Python
+generator scripts used to generate C header files for OpenGL and OpenGL ES,
+can always be found in the Khronos Registry at
+        https://github.com/KhronosGroup/OpenGL-Registry
+    </comment>
+
+    <!-- SECTION: GL type definitions. -->
+    <types>
+            <!-- These are dependencies GL types require to be declared legally -->
+        <type name="khrplatform">#include &lt;KHR/khrplatform.h&gt;</type>
+            <!-- These are actual GL types -->
+        <type>typedef unsigned int <name>GLenum</name>;</type>
+        <type>typedef unsigned char <name>GLboolean</name>;</type>
+        <type>typedef unsigned int <name>GLbitfield</name>;</type>
+        <type comment="Not an actual GL type, though used in headers in the past">typedef void <name>GLvoid</name>;</type>
+        <type requires="khrplatform">typedef khronos_int8_t <name>GLbyte</name>;</type>
+        <type requires="khrplatform">typedef khronos_uint8_t <name>GLubyte</name>;</type>
+        <type requires="khrplatform">typedef khronos_int16_t <name>GLshort</name>;</type>
+        <type requires="khrplatform">typedef khronos_uint16_t <name>GLushort</name>;</type>
+        <type>typedef int <name>GLint</name>;</type>
+        <type>typedef unsigned int <name>GLuint</name>;</type>
+        <type requires="khrplatform">typedef khronos_int32_t <name>GLclampx</name>;</type>
+        <type>typedef int <name>GLsizei</name>;</type>
+        <type requires="khrplatform">typedef khronos_float_t <name>GLfloat</name>;</type>
+        <type requires="khrplatform">typedef khronos_float_t <name>GLclampf</name>;</type>
+        <type>typedef double <name>GLdouble</name>;</type>
+        <type>typedef double <name>GLclampd</name>;</type>
+        <type>typedef void *<name>GLeglClientBufferEXT</name>;</type>
+        <type>typedef void *<name>GLeglImageOES</name>;</type>
+        <type>typedef char <name>GLchar</name>;</type>
+        <type>typedef char <name>GLcharARB</name>;</type>
+        <type name="GLhandleARB">#ifdef __APPLE__
+typedef void *GLhandleARB;
+#else
+typedef unsigned int GLhandleARB;
+#endif</type>
+        <type requires="khrplatform">typedef khronos_uint16_t <name>GLhalf</name>;</type>
+        <type requires="khrplatform">typedef khronos_uint16_t <name>GLhalfARB</name>;</type>
+        <type requires="khrplatform">typedef khronos_int32_t <name>GLfixed</name>;</type>
+        <type requires="khrplatform">typedef khronos_intptr_t <name>GLintptr</name>;</type>
+        <type requires="khrplatform">typedef khronos_intptr_t <name>GLintptrARB</name>;</type>
+        <type requires="khrplatform">typedef khronos_ssize_t <name>GLsizeiptr</name>;</type>
+        <type requires="khrplatform">typedef khronos_ssize_t <name>GLsizeiptrARB</name>;</type>
+        <type requires="khrplatform">typedef khronos_int64_t <name>GLint64</name>;</type>
+        <type requires="khrplatform">typedef khronos_int64_t <name>GLint64EXT</name>;</type>
+        <type requires="khrplatform">typedef khronos_uint64_t <name>GLuint64</name>;</type>
+        <type requires="khrplatform">typedef khronos_uint64_t <name>GLuint64EXT</name>;</type>
+        <type>typedef struct __GLsync *<name>GLsync</name>;</type>
+        <type comment="compatible with OpenCL cl_context"><name>struct _cl_context</name>;</type>
+        <type comment="compatible with OpenCL cl_event"><name>struct _cl_event</name>;</type>
+        <type>typedef void (<apientry/> *<name>GLDEBUGPROC</name>)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);</type>
+        <type>typedef void (<apientry/> *<name>GLDEBUGPROCARB</name>)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);</type>
+        <type>typedef void (<apientry/> *<name>GLDEBUGPROCKHR</name>)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);</type>
+
+            <!-- Vendor extension types -->
+        <type>typedef void (<apientry/> *<name>GLDEBUGPROCAMD</name>)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam);</type>
+        <type>typedef unsigned short <name>GLhalfNV</name>;</type>
+        <type requires="GLintptr">typedef GLintptr <name>GLvdpauSurfaceNV</name>;</type>
+        <type>typedef void (<apientry/> *<name>GLVULKANPROCNV</name>)(void);</type>
+    </types>
+
+    <!-- SECTION: GL parameter class type definitions. -->
+
+    <groups>
+        <group name="TextureNormalModeEXT">
+            <enum name="PERTURB_EXT"/>
+        </group>
+
+        <group name="LightTexturePNameEXT">
+            <enum name="ATTENUATION_EXT"/>
+            <enum name="SHADOW_ATTENUATION_EXT"/>
+        </group>
+
+        <group name="VertexShaderCoordOutEXT">
+            <enum name="X_EXT"/>
+            <enum name="Y_EXT"/>
+            <enum name="Z_EXT"/>
+            <enum name="W_EXT"/>
+            <enum name="NEGATIVE_X_EXT"/>
+            <enum name="NEGATIVE_Y_EXT"/>
+            <enum name="NEGATIVE_Z_EXT"/>
+            <enum name="NEGATIVE_W_EXT"/>
+            <enum name="ZERO_EXT"/>
+            <enum name="ONE_EXT"/>
+            <enum name="NEGATIVE_ONE_EXT"/>
+        </group>
+
+        <group name="SamplePatternEXT">
+            <enum name="1PASS_EXT"/>
+            <enum name="2PASS_0_EXT"/>
+            <enum name="2PASS_1_EXT"/>
+            <enum name="4PASS_0_EXT"/>
+            <enum name="4PASS_1_EXT"/>
+            <enum name="4PASS_2_EXT"/>
+            <enum name="4PASS_3_EXT"/>
+        </group>
+
+        <group name="VertexShaderStorageTypeEXT">
+            <enum name="VARIANT_EXT"/>
+            <enum name="INVARIANT_EXT"/>
+            <enum name="LOCAL_CONSTANT_EXT"/>
+            <enum name="LOCAL_EXT"/>
+        </group>
+
+        <group name="VertexShaderParameterEXT">
+            <enum name="CURRENT_VERTEX_EXT"/>
+            <enum name="MVP_MATRIX_EXT"/>
+        </group>
+
+        <group name="LightTextureModeEXT">
+            <enum name="FRAGMENT_MATERIAL_EXT"/>
+            <enum name="FRAGMENT_NORMAL_EXT"/>
+            <enum name="FRAGMENT_DEPTH_EXT"/>
+            <enum name="FRAGMENT_COLOR_EXT"/>
+        </group>
+
+        <group name="VertexShaderOpEXT">
+            <enum name="OP_INDEX_EXT"/>
+            <enum name="OP_NEGATE_EXT"/>
+            <enum name="OP_DOT3_EXT"/>
+            <enum name="OP_DOT4_EXT"/>
+            <enum name="OP_MUL_EXT"/>
+            <enum name="OP_ADD_EXT"/>
+            <enum name="OP_MADD_EXT"/>
+            <enum name="OP_FRAC_EXT"/>
+            <enum name="OP_MAX_EXT"/>
+            <enum name="OP_MIN_EXT"/>
+            <enum name="OP_SET_GE_EXT"/>
+            <enum name="OP_SET_LT_EXT"/>
+            <enum name="OP_CLAMP_EXT"/>
+            <enum name="OP_FLOOR_EXT"/>
+            <enum name="OP_ROUND_EXT"/>
+            <enum name="OP_EXP_BASE_2_EXT"/>
+            <enum name="OP_LOG_BASE_2_EXT"/>
+            <enum name="OP_POWER_EXT"/>
+            <enum name="OP_RECIP_EXT"/>
+            <enum name="OP_RECIP_SQRT_EXT"/>
+            <enum name="OP_SUB_EXT"/>
+            <enum name="OP_CROSS_PRODUCT_EXT"/>
+            <enum name="OP_MULTIPLY_MATRIX_EXT"/>
+            <enum name="OP_MOV_EXT"/>
+        </group>
+
+        <group name="ProgramFormatARB">
+            <enum name="PROGRAM_FORMAT_ASCII_ARB"/>
+        </group>
+
+        <group name="PointParameterNameARB">
+            <enum name="GL_POINT_SIZE_MIN_EXT"/>
+            <enum name="GL_POINT_SIZE_MAX_EXT"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE_EXT"/>
+        </group>
+
+        <group name="VertexAttribPropertyARB">
+            <enum name="VERTEX_ATTRIB_ARRAY_INTEGER_EXT"/>
+        </group>
+
+        <group name="VertexAttribPointerPropertyARB">
+            <enum name="VERTEX_ATTRIB_ARRAY_POINTER_ARB"/>
+        </group>
+
+        <group name="ProgramStringPropertyARB">
+            <enum name="PROGRAM_STRING_ARB"/>
+        </group>
+
+        <group name="BufferPointerNameARB">
+            <enum name="BUFFER_MAP_POINTER_ARB"/>
+        </group>
+
+        <group name="BufferPNameARB">
+            <enum name="BUFFER_SIZE_ARB"/>
+            <enum name="BUFFER_USAGE_ARB"/>
+            <enum name="BUFFER_ACCESS_ARB"/>
+            <enum name="BUFFER_MAPPED_ARB"/>
+        </group>
+
+        <group name="ClampColorModeARB">
+            <enum name="FIXED_ONLY_ARB"/>
+            <enum name="FALSE"/>
+            <enum name="TRUE"/>
+        </group>
+
+        <group name="ClampColorTargetARB">
+            <enum name="CLAMP_VERTEX_COLOR_ARB"/>
+            <enum name="CLAMP_FRAGMENT_COLOR_ARB"/>
+            <enum name="CLAMP_READ_COLOR_ARB"/>
+        </group>
+
+        <group name="ProgramTargetARB">
+            <enum name="TEXT_FRAGMENT_SHADER_ATI"/>
+        </group>
+
+        <group name="VertexArrayPNameAPPLE">
+            <enum name="STORAGE_CLIENT_APPLE"/>
+            <enum name="STORAGE_CACHED_APPLE"/>
+            <enum name="STORAGE_SHARED_APPLE"/>
+        </group>
+
+        <group name="ObjectTypeAPPLE">
+            <enum name="DRAW_PIXELS_APPLE"/>
+            <enum name="FENCE_APPLE"/>
+        </group>
+
+        <group name="PreserveModeATI">
+            <enum name="PRESERVE_ATI"/>
+            <enum name="DISCARD_ATI"/>
+        </group>
+
+        <group name="TexBumpParameterATI">
+            <enum name="BUMP_ROT_MATRIX_ATI"/>
+        </group>
+
+        <group name="SwizzleOpATI">
+            <enum name="SWIZZLE_STR_ATI"/>
+            <enum name="SWIZZLE_STQ_ATI"/>
+            <enum name="SWIZZLE_STR_DR_ATI"/>
+            <enum name="SWIZZLE_STQ_DQ_ATI"/>
+        </group>
+
+        <group name="PNTrianglesPNameATI">
+            <enum name="PN_TRIANGLES_POINT_MODE_ATI"/>
+            <enum name="PN_TRIANGLES_NORMAL_MODE_ATI"/>
+            <enum name="PN_TRIANGLES_TESSELATION_LEVEL_ATI"/>
+        </group>
+
+        <group name="ArrayObjectUsageATI">
+            <enum name="STATIC_ATI"/>
+            <enum name="DYNAMIC_ATI"/>
+        </group>
+
+        <group name="GetTexBumpParameterATI">
+            <enum name="BUMP_ROT_MATRIX_ATI"/>
+            <enum name="BUMP_ROT_MATRIX_SIZE_ATI"/>
+            <enum name="BUMP_NUM_TEX_UNITS_ATI"/>
+            <enum name="BUMP_TEX_UNITS_ATI"/>
+        </group>
+
+        <group name="ArrayObjectPNameATI">
+            <enum name="OBJECT_BUFFER_SIZE_ATI"/>
+            <enum name="OBJECT_BUFFER_USAGE_ATI"/>
+        </group>
+
+        <group name="DrawBufferModeATI">
+            <enum name="COLOR_ATTACHMENT0_NV"/>
+            <enum name="COLOR_ATTACHMENT1_NV"/>
+            <enum name="COLOR_ATTACHMENT2_NV"/>
+            <enum name="COLOR_ATTACHMENT3_NV"/>
+            <enum name="COLOR_ATTACHMENT4_NV"/>
+            <enum name="COLOR_ATTACHMENT5_NV"/>
+            <enum name="COLOR_ATTACHMENT6_NV"/>
+            <enum name="COLOR_ATTACHMENT7_NV"/>
+            <enum name="COLOR_ATTACHMENT8_NV"/>
+            <enum name="COLOR_ATTACHMENT9_NV"/>
+            <enum name="COLOR_ATTACHMENT10_NV"/>
+            <enum name="COLOR_ATTACHMENT11_NV"/>
+            <enum name="COLOR_ATTACHMENT12_NV"/>
+            <enum name="COLOR_ATTACHMENT13_NV"/>
+            <enum name="COLOR_ATTACHMENT14_NV"/>
+            <enum name="COLOR_ATTACHMENT15_NV"/>
+        </group>
+
+        <group name="VertexStreamATI">
+            <enum name="VERTEX_STREAM0_ATI"/>
+            <enum name="VERTEX_STREAM1_ATI"/>
+            <enum name="VERTEX_STREAM2_ATI"/>
+            <enum name="VERTEX_STREAM3_ATI"/>
+            <enum name="VERTEX_STREAM4_ATI"/>
+            <enum name="VERTEX_STREAM5_ATI"/>
+            <enum name="VERTEX_STREAM6_ATI"/>
+            <enum name="VERTEX_STREAM7_ATI"/>
+        </group>
+
+        <group name="SpriteParameterNameSGIX">
+            <enum name="SPRITE_MODE_SGIX"/>
+        </group>
+
+        <group name="PixelTexGenModeSGIX">
+            <enum name="PIXEL_TEX_GEN_Q_CEILING_SGIX"/>
+            <enum name="PIXEL_TEX_GEN_Q_FLOOR_SGIX"/>
+            <enum name="PIXEL_TEX_GEN_Q_ROUND_SGIX"/>
+            <enum name="PIXEL_TEX_GEN_ALPHA_LS_SGIX"/>
+            <enum name="PIXEL_TEX_GEN_ALPHA_MS_SGIX"/>
+        </group>
+
+        <group name="IglooFunctionSelectSGIX">
+            <enum name="IGLOO_FULLSCREEN_SGIX"/>
+            <enum name="IGLOO_VIEWPORT_OFFSET_SGIX"/>
+            <enum name="IGLOO_SWAPTMESH_SGIX"/>
+            <enum name="IGLOO_COLORNORMAL_SGIX"/>
+            <enum name="IGLOO_IRISGL_MODE_SGIX"/>
+            <enum name="IGLOO_LMC_COLOR_SGIX"/>
+            <enum name="IGLOO_TMESHMODE_SGIX"/>
+        </group>
+
+        <group name="HintTargetPGI">
+            <enum name="VERTEX_DATA_HINT_PGI"/>
+            <enum name="VERTEX_CONSISTENT_HINT_PGI"/>
+            <enum name="MATERIAL_SIDE_HINT_PGI"/>
+            <enum name="MAX_VERTEX_HINT_PGI"/>
+        </group>
+
+        <group name="ImageTransformPNameHP">
+            <enum name="IMAGE_SCALE_X_HP"/>
+            <enum name="IMAGE_SCALE_Y_HP"/>
+            <enum name="IMAGE_TRANSLATE_X_HP"/>
+            <enum name="IMAGE_TRANSLATE_Y_HP"/>
+            <enum name="IMAGE_ROTATE_ANGLE_HP"/>
+            <enum name="IMAGE_ROTATE_ORIGIN_X_HP"/>
+            <enum name="IMAGE_ROTATE_ORIGIN_Y_HP"/>
+            <enum name="IMAGE_MAG_FILTER_HP"/>
+            <enum name="IMAGE_MIN_FILTER_HP"/>
+            <enum name="IMAGE_CUBIC_WEIGHT_HP"/>
+        </group>
+
+        <group name="ImageTransformTargetHP">
+            <enum name="IMAGE_TRANSFORM_2D_HP"/>
+        </group>
+
+        <group name="TextureFilterSGIS">
+            <enum name="FILTER4_SGIS"/>
+        </group>
+
+        <group name="OcclusionQueryParameterNameNV">
+            <enum name="PIXEL_COUNT_NV"/>
+            <enum name="PIXEL_COUNT_AVAILABLE_NV"/>
+        </group>
+
+        <group name="GetMultisamplePNameNV">
+            <enum name="SAMPLE_LOCATION_ARB"/>
+            <enum name="PROGRAMMABLE_SAMPLE_LOCATION_ARB"/>
+        </group>
+
+        <group name="MapParameterNV">
+            <enum name="MAP_TESSELLATION_NV"/>
+        </group>
+
+        <group name="MapAttribParameterNV">
+            <enum name="MAP_ATTRIB_U_ORDER_NV"/>
+            <enum name="MAP_ATTRIB_V_ORDER_NV"/>
+        </group>
+
+        <group name="FenceParameterNameNV">
+            <enum name="FENCE_STATUS_NV"/>
+            <enum name="FENCE_CONDITION_NV"/>
+        </group>
+
+        <group name="CombinerParameterNV">
+            <enum name="COMBINER_INPUT_NV"/>
+            <enum name="COMBINER_MAPPING_NV"/>
+            <enum name="COMBINER_COMPONENT_USAGE_NV"/>
+        </group>
+
+        <group name="CombinerBiasNV">
+            <enum name="NONE"/>
+            <enum name="BIAS_BY_NEGATIVE_ONE_HALF_NV"/>
+        </group>
+
+        <group name="CombinerScaleNV">
+            <enum name="NONE"/>
+            <enum name="SCALE_BY_TWO_NV"/>
+            <enum name="SCALE_BY_FOUR_NV"/>
+            <enum name="SCALE_BY_ONE_HALF_NV"/>
+        </group>
+
+        <group name="CombinerMappingNV">
+            <enum name="UNSIGNED_IDENTITY_NV"/>
+            <enum name="UNSIGNED_INVERT_NV"/>
+            <enum name="EXPAND_NORMAL_NV"/>
+            <enum name="EXPAND_NEGATE_NV"/>
+            <enum name="HALF_BIAS_NORMAL_NV"/>
+            <enum name="HALF_BIAS_NEGATE_NV"/>
+            <enum name="SIGNED_IDENTITY_NV"/>
+            <enum name="SIGNED_NEGATE_NV"/>
+        </group>
+
+        <group name="CombinerRegisterNV">
+            <enum name="DISCARD_NV"/>
+            <enum name="PRIMARY_COLOR_NV"/>
+            <enum name="SECONDARY_COLOR_NV"/>
+            <enum name="SPARE0_NV"/>
+            <enum name="SPARE1_NV"/>
+            <enum name="TEXTURE0_ARB"/>
+            <enum name="TEXTURE1_ARB"/>
+        </group>
+
+        <group name="CombinerVariableNV">
+            <enum name="VARIABLE_A_NV"/>
+            <enum name="VARIABLE_B_NV"/>
+            <enum name="VARIABLE_C_NV"/>
+            <enum name="VARIABLE_D_NV"/>
+            <enum name="VARIABLE_E_NV"/>
+            <enum name="VARIABLE_F_NV"/>
+            <enum name="VARIABLE_G_NV"/>
+        </group>
+
+        <group name="PixelDataRangeTargetNV">
+            <enum name="WRITE_PIXEL_DATA_RANGE_NV"/>
+            <enum name="READ_PIXEL_DATA_RANGE_NV"/>
+        </group>
+
+        <group name="EvalTargetNV">
+            <enum name="EVAL_2D_NV"/>
+            <enum name="EVAL_TRIANGULAR_2D_NV"/>
+        </group>
+
+        <group name="VertexAttribEnumNV">
+            <enum name="PROGRAM_PARAMETER_NV"/>
+        </group>
+
+        <group name="FenceConditionNV">
+            <enum name="ALL_COMPLETED_NV"/>
+        </group>
+
+        <group name="PathCoordType">
+            <enum name="CLOSE_PATH_NV"/>
+            <enum name="MOVE_TO_NV"/>
+            <enum name="RELATIVE_MOVE_TO_NV"/>
+            <enum name="LINE_TO_NV"/>
+            <enum name="RELATIVE_LINE_TO_NV"/>
+            <enum name="HORIZONTAL_LINE_TO_NV"/>
+            <enum name="RELATIVE_HORIZONTAL_LINE_TO_NV"/>
+            <enum name="VERTICAL_LINE_TO_NV"/>
+            <enum name="RELATIVE_VERTICAL_LINE_TO_NV"/>
+            <enum name="QUADRATIC_CURVE_TO_NV"/>
+            <enum name="RELATIVE_QUADRATIC_CURVE_TO_NV"/>
+            <enum name="CUBIC_CURVE_TO_NV"/>
+            <enum name="RELATIVE_CUBIC_CURVE_TO_NV"/>
+            <enum name="SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+            <enum name="RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+            <enum name="SMOOTH_CUBIC_CURVE_TO_NV"/>
+            <enum name="RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV"/>
+            <enum name="SMALL_CCW_ARC_TO_NV"/>
+            <enum name="RELATIVE_SMALL_CCW_ARC_TO_NV"/>
+            <enum name="SMALL_CW_ARC_TO_NV"/>
+            <enum name="RELATIVE_SMALL_CW_ARC_TO_NV"/>
+            <enum name="LARGE_CCW_ARC_TO_NV"/>
+            <enum name="RELATIVE_LARGE_CCW_ARC_TO_NV"/>
+            <enum name="LARGE_CW_ARC_TO_NV"/>
+            <enum name="RELATIVE_LARGE_CW_ARC_TO_NV"/>
+            <enum name="CONIC_CURVE_TO_NV"/>
+            <enum name="RELATIVE_CONIC_CURVE_TO_NV"/>
+            <enum name="ROUNDED_RECT_NV"/>
+            <enum name="RELATIVE_ROUNDED_RECT_NV"/>
+            <enum name="ROUNDED_RECT2_NV"/>
+            <enum name="RELATIVE_ROUNDED_RECT2_NV"/>
+            <enum name="ROUNDED_RECT4_NV"/>
+            <enum name="RELATIVE_ROUNDED_RECT4_NV"/>
+            <enum name="ROUNDED_RECT8_NV"/>
+            <enum name="RELATIVE_ROUNDED_RECT8_NV"/>
+            <enum name="RESTART_PATH_NV"/>
+            <enum name="DUP_FIRST_CUBIC_CURVE_TO_NV"/>
+            <enum name="DUP_LAST_CUBIC_CURVE_TO_NV"/>
+            <enum name="RECT_NV"/>
+            <enum name="RELATIVE_RECT_NV"/>
+            <enum name="CIRCULAR_CCW_ARC_TO_NV"/>
+            <enum name="CIRCULAR_CW_ARC_TO_NV"/>
+            <enum name="CIRCULAR_TANGENT_ARC_TO_NV"/>
+            <enum name="ARC_TO_NV"/>
+            <enum name="RELATIVE_ARC_TO_NV"/>
+        </group>
+
+        <group name="AccumOp">
+            <enum name="GL_ACCUM"/>
+            <enum name="GL_LOAD"/>
+            <enum name="GL_RETURN"/>
+            <enum name="GL_MULT"/>
+            <enum name="GL_ADD"/>
+        </group>
+
+        <group name="AttribMask">
+            <enum name="GL_ACCUM_BUFFER_BIT"/>
+            <enum name="GL_ALL_ATTRIB_BITS"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_CURRENT_BIT"/>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_ENABLE_BIT"/>
+            <enum name="GL_EVAL_BIT"/>
+            <enum name="GL_FOG_BIT"/>
+            <enum name="GL_HINT_BIT"/>
+            <enum name="GL_LIGHTING_BIT"/>
+            <enum name="GL_LINE_BIT"/>
+            <enum name="GL_LIST_BIT"/>
+            <enum name="GL_MULTISAMPLE_BIT"/>
+            <enum name="GL_MULTISAMPLE_BIT_3DFX"/>
+            <enum name="GL_MULTISAMPLE_BIT_ARB"/>
+            <enum name="GL_MULTISAMPLE_BIT_EXT"/>
+            <enum name="GL_PIXEL_MODE_BIT"/>
+            <enum name="GL_POINT_BIT"/>
+            <enum name="GL_POLYGON_BIT"/>
+            <enum name="GL_POLYGON_STIPPLE_BIT"/>
+            <enum name="GL_SCISSOR_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+            <enum name="GL_TEXTURE_BIT"/>
+            <enum name="GL_TRANSFORM_BIT"/>
+            <enum name="GL_VIEWPORT_BIT"/>
+        </group>
+
+        <group name="AlphaFunction">
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_NOTEQUAL"/>
+        </group>
+
+        <group name="BlendEquationModeEXT">
+            <enum name="GL_ALPHA_MAX_SGIX"/>
+            <enum name="GL_ALPHA_MIN_SGIX"/>
+            <enum name="GL_FUNC_ADD"/>
+            <enum name="GL_FUNC_ADD_EXT"/>
+            <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+            <enum name="GL_FUNC_REVERSE_SUBTRACT_EXT"/>
+            <enum name="GL_FUNC_SUBTRACT"/>
+            <enum name="GL_FUNC_SUBTRACT_EXT"/>
+            <enum name="GL_MAX"/>
+            <enum name="GL_MAX_EXT"/>
+            <enum name="GL_MIN"/>
+            <enum name="GL_MIN_EXT"/>
+        </group>
+
+        <group name="Boolean">
+            <enum name="GL_FALSE"/>
+            <enum name="GL_TRUE"/>
+        </group>
+
+        <group name="BufferBitQCOM">
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT7_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT6_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT5_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT4_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT3_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT2_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT1_QCOM"/>
+            <enum name="GL_MULTISAMPLE_BUFFER_BIT0_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT7_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT6_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT5_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT4_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT3_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT2_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT1_QCOM"/>
+            <enum name="GL_STENCIL_BUFFER_BIT0_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT7_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT6_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT5_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT4_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT3_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT2_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT1_QCOM"/>
+            <enum name="GL_DEPTH_BUFFER_BIT0_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT7_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT6_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT5_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT4_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT3_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT2_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT1_QCOM"/>
+            <enum name="GL_COLOR_BUFFER_BIT0_QCOM"/>
+        </group>
+
+        <group name="BufferTargetARB">
+          <enum name="GL_ARRAY_BUFFER"/>
+          <enum name="GL_ATOMIC_COUNTER_BUFFER" />
+          <enum name="GL_COPY_READ_BUFFER" />
+          <enum name="GL_COPY_WRITE_BUFFER" />
+          <enum name="GL_DISPATCH_INDIRECT_BUFFER" />
+          <enum name="GL_DRAW_INDIRECT_BUFFER" />
+          <enum name="GL_ELEMENT_ARRAY_BUFFER" />
+          <enum name="GL_PIXEL_PACK_BUFFER" />
+          <enum name="GL_PIXEL_UNPACK_BUFFER" />
+          <enum name="GL_QUERY_BUFFER" />
+          <enum name="GL_SHADER_STORAGE_BUFFER" />
+          <enum name="GL_TEXTURE_BUFFER" />
+          <enum name="GL_TRANSFORM_FEEDBACK_BUFFER" />
+          <enum name="GL_UNIFORM_BUFFER" />
+          <enum name="GL_PARAMETER_BUFFER" />
+        </group>
+
+        <group name="BufferUsageARB">
+          <enum name="GL_STREAM_DRAW"/>
+          <enum name="GL_STREAM_READ"/>
+          <enum name="GL_STREAM_COPY"/>
+          <enum name="GL_STATIC_DRAW"/>
+          <enum name="GL_STATIC_READ"/>
+          <enum name="GL_STATIC_COPY"/>
+          <enum name="GL_DYNAMIC_DRAW"/>
+          <enum name="GL_DYNAMIC_READ"/>
+          <enum name="GL_DYNAMIC_COPY"/>
+        </group>
+
+        <group name="BufferAccessARB">
+          <enum name="GL_READ_ONLY"/>
+          <enum name="GL_WRITE_ONLY"/>
+          <enum name="GL_READ_WRITE"/>
+        </group>
+
+        <group name="BufferStorageMask">
+            <enum name="GL_CLIENT_STORAGE_BIT"/>
+            <enum name="GL_CLIENT_STORAGE_BIT_EXT"/>
+            <enum name="GL_DYNAMIC_STORAGE_BIT"/>
+            <enum name="GL_DYNAMIC_STORAGE_BIT_EXT"/>
+            <enum name="GL_MAP_COHERENT_BIT"/>
+            <enum name="GL_MAP_COHERENT_BIT_EXT"/>
+            <enum name="GL_MAP_PERSISTENT_BIT"/>
+            <enum name="GL_MAP_PERSISTENT_BIT_EXT"/>
+            <enum name="GL_MAP_READ_BIT"/>
+            <enum name="GL_MAP_READ_BIT_EXT"/>
+            <enum name="GL_MAP_WRITE_BIT"/>
+            <enum name="GL_MAP_WRITE_BIT_EXT"/>
+            <enum name="GL_SPARSE_STORAGE_BIT_ARB"/>
+            <enum name="GL_LGPU_SEPARATE_STORAGE_BIT_NVX"/>
+            <enum name="GL_PER_GPU_STORAGE_BIT_NV"/>
+        </group>
+
+        <group name="ClearBufferMask">
+            <enum name="GL_ACCUM_BUFFER_BIT"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_COVERAGE_BUFFER_BIT_NV"/>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+        </group>
+
+        <group name="ClientAttribMask">
+            <enum name="GL_CLIENT_ALL_ATTRIB_BITS"/>
+            <enum name="GL_CLIENT_PIXEL_STORE_BIT"/>
+            <enum name="GL_CLIENT_VERTEX_ARRAY_BIT"/>
+        </group>
+
+        <group name="ClipPlaneName">
+            <enum name="GL_CLIP_DISTANCE0"/>
+            <enum name="GL_CLIP_DISTANCE1"/>
+            <enum name="GL_CLIP_DISTANCE2"/>
+            <enum name="GL_CLIP_DISTANCE3"/>
+            <enum name="GL_CLIP_DISTANCE4"/>
+            <enum name="GL_CLIP_DISTANCE5"/>
+            <enum name="GL_CLIP_DISTANCE6"/>
+            <enum name="GL_CLIP_DISTANCE7"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+        </group>
+
+        <group name="ColorMaterialFace">
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+        </group>
+
+        <group name="ColorMaterialParameter">
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_AMBIENT_AND_DIFFUSE"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_EMISSION"/>
+            <enum name="GL_SPECULAR"/>
+        </group>
+
+        <group name="ColorPointerType">
+            <enum name="GL_BYTE"/>
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+        </group>
+
+        <group name="ColorTableParameterPNameSGI">
+            <enum name="GL_COLOR_TABLE_BIAS"/>
+            <enum name="GL_COLOR_TABLE_BIAS_SGI"/>
+            <enum name="GL_COLOR_TABLE_SCALE"/>
+            <enum name="GL_COLOR_TABLE_SCALE_SGI"/>
+        </group>
+
+        <group name="ColorTableTargetSGI">
+            <enum name="GL_COLOR_TABLE"/>
+            <enum name="GL_COLOR_TABLE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE"/>
+            <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+            <enum name="GL_POST_CONVOLUTION_COLOR_TABLE"/>
+            <enum name="GL_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+            <enum name="GL_PROXY_COLOR_TABLE"/>
+            <enum name="GL_PROXY_COLOR_TABLE_SGI"/>
+            <enum name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE"/>
+            <enum name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+            <enum name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE"/>
+            <enum name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+            <enum name="GL_PROXY_TEXTURE_COLOR_TABLE_SGI"/>
+            <enum name="GL_TEXTURE_COLOR_TABLE_SGI"/>
+        </group>
+
+        <group name="ContextFlagMask">
+            <enum name="GL_CONTEXT_FLAG_DEBUG_BIT"/>
+            <enum name="GL_CONTEXT_FLAG_DEBUG_BIT_KHR"/>
+            <enum name="GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT"/>
+            <enum name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT"/>
+            <enum name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB"/>
+            <enum name="GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT"/>
+            <enum name="GL_CONTEXT_FLAG_NO_ERROR_BIT"/>
+            <enum name="GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR"/>
+        </group>
+
+        <group name="ContextProfileMask">
+            <enum name="GL_CONTEXT_COMPATIBILITY_PROFILE_BIT"/>
+            <enum name="GL_CONTEXT_CORE_PROFILE_BIT"/>
+        </group>
+
+        <group name="ConvolutionBorderModeEXT">
+            <enum name="GL_REDUCE"/>
+            <enum name="GL_REDUCE_EXT"/>
+        </group>
+
+        <group name="ConvolutionParameterEXT">
+            <enum name="GL_CONVOLUTION_BORDER_MODE"/>
+            <enum name="GL_CONVOLUTION_BORDER_MODE_EXT"/>
+            <enum name="GL_CONVOLUTION_FILTER_BIAS"/>
+            <enum name="GL_CONVOLUTION_FILTER_BIAS_EXT"/>
+            <enum name="GL_CONVOLUTION_FILTER_SCALE"/>
+            <enum name="GL_CONVOLUTION_FILTER_SCALE_EXT"/>
+        </group>
+
+        <group name="ConvolutionTargetEXT">
+            <enum name="GL_CONVOLUTION_1D"/>
+            <enum name="GL_CONVOLUTION_1D_EXT"/>
+            <enum name="GL_CONVOLUTION_2D"/>
+            <enum name="GL_CONVOLUTION_2D_EXT"/>
+        </group>
+
+        <group name="CullFaceMode">
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+        </group>
+
+        <group name="DataType" comment="See enums block below"/>
+
+        <group name="DepthFunction">
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_NOTEQUAL"/>
+        </group>
+
+        <group name="DrawBufferMode">
+            <enum name="GL_AUX0"/>
+            <enum name="GL_AUX1"/>
+            <enum name="GL_AUX2"/>
+            <enum name="GL_AUX3"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_BACK_LEFT"/>
+            <enum name="GL_BACK_RIGHT"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_FRONT_LEFT"/>
+            <enum name="GL_FRONT_RIGHT"/>
+            <enum name="GL_LEFT"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_NONE_OES"/>
+            <enum name="GL_RIGHT"/>
+        </group>
+
+        <group name="DrawElementsType">
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+        </group>
+
+        <group name="EnableCap">
+            <enum name="GL_ALPHA_TEST"/>
+            <enum name="GL_ASYNC_DRAW_PIXELS_SGIX"/>
+            <enum name="GL_ASYNC_HISTOGRAM_SGIX"/>
+            <enum name="GL_ASYNC_READ_PIXELS_SGIX"/>
+            <enum name="GL_ASYNC_TEX_IMAGE_SGIX"/>
+            <enum name="GL_AUTO_NORMAL"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_CALLIGRAPHIC_FRAGMENT_SGIX"/>
+            <enum name="GL_CLIP_DISTANCE"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+            <enum name="GL_COLOR_ARRAY"/>
+            <enum name="GL_COLOR_LOGIC_OP"/>
+            <enum name="GL_COLOR_MATERIAL"/>
+            <enum name="GL_COLOR_TABLE_SGI"/>
+            <enum name="GL_CONVOLUTION_1D_EXT"/>
+            <enum name="GL_CONVOLUTION_2D_EXT"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_DEBUG_OUTPUT"/>
+            <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS"/>
+            <enum name="GL_DEPTH_CLAMP"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_EDGE_FLAG_ARRAY"/>
+            <enum name="GL_FOG"/>
+            <enum name="GL_FOG_OFFSET_SGIX"/>
+            <enum name="GL_FRAGMENT_COLOR_MATERIAL_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT0_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT1_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT2_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT3_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT4_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT5_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT6_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT7_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHTING_SGIX"/>
+            <enum name="GL_FRAMEBUFFER_SRGB"/>
+            <enum name="GL_FRAMEZOOM_SGIX"/>
+            <enum name="GL_HISTOGRAM_EXT"/>
+            <enum name="GL_INDEX_ARRAY"/>
+            <enum name="GL_INDEX_LOGIC_OP"/>
+            <enum name="GL_INTERLACE_SGIX"/>
+            <enum name="GL_IR_INSTRUMENT1_SGIX"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+            <enum name="GL_LIGHTING"/>
+            <enum name="GL_LINE_SMOOTH"/>
+            <enum name="GL_LINE_STIPPLE"/>
+            <enum name="GL_MAP1_COLOR_4"/>
+            <enum name="GL_MAP1_INDEX"/>
+            <enum name="GL_MAP1_NORMAL"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP1_VERTEX_3"/>
+            <enum name="GL_MAP1_VERTEX_4"/>
+            <enum name="GL_MAP2_COLOR_4"/>
+            <enum name="GL_MAP2_INDEX"/>
+            <enum name="GL_MAP2_NORMAL"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP2_VERTEX_3"/>
+            <enum name="GL_MAP2_VERTEX_4"/>
+            <enum name="GL_MINMAX_EXT"/>
+            <enum name="GL_MULTISAMPLE"/>
+            <enum name="GL_MULTISAMPLE_SGIS"/>
+            <enum name="GL_NORMALIZE"/>
+            <enum name="GL_NORMAL_ARRAY"/>
+            <enum name="GL_PIXEL_TEXTURE_SGIS"/>
+            <enum name="GL_PIXEL_TEX_GEN_SGIX"/>
+            <enum name="GL_POINT_SMOOTH"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_POLYGON_OFFSET_LINE"/>
+            <enum name="GL_POLYGON_OFFSET_POINT"/>
+            <enum name="GL_POLYGON_SMOOTH"/>
+            <enum name="GL_POLYGON_STIPPLE"/>
+            <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+            <enum name="GL_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+            <enum name="GL_PRIMITIVE_RESTART"/>
+            <enum name="GL_PRIMITIVE_RESTART_FIXED_INDEX"/>
+            <enum name="GL_PROGRAM_POINT_SIZE"/>
+            <enum name="GL_RASTERIZER_DISCARD"/>
+            <enum name="GL_REFERENCE_PLANE_SGIX"/>
+            <enum name="GL_RESCALE_NORMAL_EXT"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_MASK_SGIS"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_ONE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_ONE_SGIS"/>
+            <enum name="GL_SAMPLE_COVERAGE"/>
+            <enum name="GL_SAMPLE_MASK"/>
+            <enum name="GL_SAMPLE_MASK_SGIS"/>
+            <enum name="GL_SAMPLE_SHADING"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_SEPARABLE_2D_EXT"/>
+            <enum name="GL_SHARED_TEXTURE_PALETTE_EXT"/>
+            <enum name="GL_SPRITE_SGIX"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_TEXTURE_1D"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_TEXTURE_3D_EXT"/>
+            <enum name="GL_TEXTURE_4D_SGIS"/>
+            <enum name="GL_TEXTURE_COLOR_TABLE_SGI"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+            <enum name="GL_TEXTURE_GEN_Q"/>
+            <enum name="GL_TEXTURE_GEN_R"/>
+            <enum name="GL_TEXTURE_GEN_S"/>
+            <enum name="GL_TEXTURE_GEN_T"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+        </group>
+
+        <group name="ErrorCode">
+            <enum name="GL_INVALID_ENUM"/>
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION_EXT"/>
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION_OES"/>
+            <enum name="GL_INVALID_OPERATION"/>
+            <enum name="GL_INVALID_VALUE"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_OUT_OF_MEMORY"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <enum name="GL_TABLE_TOO_LARGE"/>
+            <enum name="GL_TABLE_TOO_LARGE_EXT"/>
+            <enum name="GL_TEXTURE_TOO_LARGE_EXT"/>
+        </group>
+
+        <group name="ExternalHandleType">
+            <enum name="GL_HANDLE_TYPE_OPAQUE_FD_EXT"/>
+            <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_EXT"/>
+            <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT"/>
+            <enum name="GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT"/>
+            <enum name="GL_HANDLE_TYPE_D3D12_RESOURCE_EXT"/>
+            <enum name="GL_HANDLE_TYPE_D3D11_IMAGE_EXT"/>
+            <enum name="GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT"/>
+            <enum name="GL_HANDLE_TYPE_D3D12_FENCE_EXT"/>
+        </group>
+
+        <group name="FeedbackType">
+            <enum name="GL_2D"/>
+            <enum name="GL_3D"/>
+            <enum name="GL_3D_COLOR"/>
+            <enum name="GL_3D_COLOR_TEXTURE"/>
+            <enum name="GL_4D_COLOR_TEXTURE"/>
+        </group>
+
+        <group name="FeedBackToken">
+            <enum name="GL_BITMAP_TOKEN"/>
+            <enum name="GL_COPY_PIXEL_TOKEN"/>
+            <enum name="GL_DRAW_PIXEL_TOKEN"/>
+            <enum name="GL_LINE_RESET_TOKEN"/>
+            <enum name="GL_LINE_TOKEN"/>
+            <enum name="GL_PASS_THROUGH_TOKEN"/>
+            <enum name="GL_POINT_TOKEN"/>
+            <enum name="GL_POLYGON_TOKEN"/>
+        </group>
+
+        <group name="FfdMaskSGIX" comment="See enums section below. Was SGIXFfdMask"/>
+
+        <group name="FfdTargetSGIX">
+            <enum name="GL_GEOMETRY_DEFORMATION_SGIX"/>
+            <enum name="GL_TEXTURE_DEFORMATION_SGIX"/>
+        </group>
+
+        <group name="FogCoordinatePointerType">
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_DOUBLE"/>
+        </group>
+
+        <group name="FogMode">
+            <enum name="GL_EXP"/>
+            <enum name="GL_EXP2"/>
+            <enum name="GL_FOG_FUNC_SGIS"/>
+            <enum name="GL_LINEAR"/>
+        </group>
+
+        <group name="FogParameter">
+            <enum name="GL_FOG_COLOR"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_INDEX"/>
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_OFFSET_VALUE_SGIX"/>
+            <enum name="GL_FOG_START"/>
+        </group>
+
+        <group name="FogPointerTypeEXT">
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_DOUBLE"/>
+        </group>
+
+        <group name="FogPointerTypeIBM">
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_DOUBLE"/>
+        </group>
+
+        <group name="FragmentLightModelParameterSGIX">
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX"/>
+        </group>
+
+        <group name="FramebufferFetchNoncoherent">
+            <enum name="GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM"/>
+        </group>
+
+        <group name="FrontFaceDirection">
+            <enum name="GL_CCW"/>
+            <enum name="GL_CW"/>
+        </group>
+
+        <group name="GetColorTableParameterPNameSGI">
+            <enum name="GL_COLOR_TABLE_ALPHA_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_BIAS_SGI"/>
+            <enum name="GL_COLOR_TABLE_BLUE_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_FORMAT_SGI"/>
+            <enum name="GL_COLOR_TABLE_GREEN_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_INTENSITY_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_LUMINANCE_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_RED_SIZE_SGI"/>
+            <enum name="GL_COLOR_TABLE_SCALE_SGI"/>
+            <enum name="GL_COLOR_TABLE_WIDTH_SGI"/>
+            <enum name="GL_COLOR_TABLE_BIAS"/>
+            <enum name="GL_COLOR_TABLE_SCALE"/>
+            <enum name="GL_COLOR_TABLE_FORMAT"/>
+            <enum name="GL_COLOR_TABLE_WIDTH"/>
+            <enum name="GL_COLOR_TABLE_RED_SIZE"/>
+            <enum name="GL_COLOR_TABLE_GREEN_SIZE"/>
+            <enum name="GL_COLOR_TABLE_BLUE_SIZE"/>
+            <enum name="GL_COLOR_TABLE_ALPHA_SIZE"/>
+            <enum name="GL_COLOR_TABLE_LUMINANCE_SIZE"/>
+            <enum name="GL_COLOR_TABLE_INTENSITY_SIZE"/>
+        </group>
+
+        <group name="GetConvolutionParameter">
+            <enum name="GL_CONVOLUTION_BORDER_MODE_EXT"/>
+            <enum name="GL_CONVOLUTION_FILTER_BIAS_EXT"/>
+            <enum name="GL_CONVOLUTION_FILTER_SCALE_EXT"/>
+            <enum name="GL_CONVOLUTION_FORMAT_EXT"/>
+            <enum name="GL_CONVOLUTION_HEIGHT_EXT"/>
+            <enum name="GL_CONVOLUTION_WIDTH_EXT"/>
+            <enum name="GL_MAX_CONVOLUTION_HEIGHT_EXT"/>
+            <enum name="GL_MAX_CONVOLUTION_WIDTH_EXT"/>
+            <enum name="GL_CONVOLUTION_BORDER_MODE"/>
+            <enum name="GL_CONVOLUTION_BORDER_COLOR"/>
+            <enum name="GL_CONVOLUTION_FILTER_SCALE"/>
+            <enum name="GL_CONVOLUTION_FILTER_BIAS"/>
+            <enum name="GL_CONVOLUTION_FORMAT"/>
+            <enum name="GL_CONVOLUTION_WIDTH"/>
+            <enum name="GL_CONVOLUTION_HEIGHT"/>
+            <enum name="GL_MAX_CONVOLUTION_WIDTH"/>
+            <enum name="GL_MAX_CONVOLUTION_HEIGHT"/>
+        </group>
+
+        <group name="GetHistogramParameterPNameEXT">
+            <enum name="GL_HISTOGRAM_ALPHA_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_BLUE_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_FORMAT_EXT"/>
+            <enum name="GL_HISTOGRAM_GREEN_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_LUMINANCE_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_RED_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_SINK_EXT"/>
+            <enum name="GL_HISTOGRAM_WIDTH_EXT"/>
+            <enum name="GL_HISTOGRAM_WIDTH"/>
+            <enum name="GL_HISTOGRAM_FORMAT"/>
+            <enum name="GL_HISTOGRAM_RED_SIZE"/>
+            <enum name="GL_HISTOGRAM_GREEN_SIZE"/>
+            <enum name="GL_HISTOGRAM_BLUE_SIZE"/>
+            <enum name="GL_HISTOGRAM_ALPHA_SIZE"/>
+            <enum name="GL_HISTOGRAM_LUMINANCE_SIZE"/>
+            <enum name="GL_HISTOGRAM_SINK"/>
+            <enum name="GL_HISTOGRAM_ALPHA_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_BLUE_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_FORMAT_EXT"/>
+            <enum name="GL_HISTOGRAM_GREEN_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_LUMINANCE_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_RED_SIZE_EXT"/>
+            <enum name="GL_HISTOGRAM_SINK_EXT"/>
+            <enum name="GL_HISTOGRAM_WIDTH_EXT"/>
+        </group>
+
+        <group name="GetMapQuery">
+            <enum name="GL_COEFF"/>
+            <enum name="GL_DOMAIN"/>
+            <enum name="GL_ORDER"/>
+        </group>
+
+        <group name="GetMinmaxParameterPNameEXT">
+            <enum name="GL_MINMAX_FORMAT"/>
+            <enum name="GL_MINMAX_FORMAT_EXT"/>
+            <enum name="GL_MINMAX_SINK"/>
+            <enum name="GL_MINMAX_SINK_EXT"/>
+            <enum name="GL_MINMAX_FORMAT"/>
+            <enum name="GL_MINMAX_SINK"/>
+        </group>
+
+        <group name="GetPixelMap">
+            <enum name="GL_PIXEL_MAP_A_TO_A"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S"/>
+        </group>
+
+        <group name="GetPName">
+            <enum name="GL_ACCUM_ALPHA_BITS"/>
+            <enum name="GL_ACCUM_BLUE_BITS"/>
+            <enum name="GL_ACCUM_CLEAR_VALUE"/>
+            <enum name="GL_ACCUM_GREEN_BITS"/>
+            <enum name="GL_ACCUM_RED_BITS"/>
+            <enum name="GL_ACTIVE_TEXTURE"/>
+            <enum name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+            <enum name="GL_ALPHA_BIAS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_ALPHA_SCALE"/>
+            <enum name="GL_ALPHA_TEST"/>
+            <enum name="GL_ALPHA_TEST_FUNC"/>
+            <enum name="GL_ALPHA_TEST_FUNC_QCOM"/>
+            <enum name="GL_ALPHA_TEST_QCOM"/>
+            <enum name="GL_ALPHA_TEST_REF"/>
+            <enum name="GL_ALPHA_TEST_REF_QCOM"/>
+            <enum name="GL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_ASYNC_DRAW_PIXELS_SGIX"/>
+            <enum name="GL_ASYNC_HISTOGRAM_SGIX"/>
+            <enum name="GL_ASYNC_MARKER_SGIX"/>
+            <enum name="GL_ASYNC_READ_PIXELS_SGIX"/>
+            <enum name="GL_ASYNC_TEX_IMAGE_SGIX"/>
+            <enum name="GL_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_AUTO_NORMAL"/>
+            <enum name="GL_AUX_BUFFERS"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_BLEND_COLOR"/>
+            <enum name="GL_BLEND_COLOR_EXT"/>
+            <enum name="GL_BLEND_DST"/>
+            <enum name="GL_BLEND_DST_ALPHA"/>
+            <enum name="GL_BLEND_DST_RGB"/>
+            <enum name="GL_BLEND_EQUATION_ALPHA"/>
+            <enum name="GL_BLEND_EQUATION_EXT"/>
+            <enum name="GL_BLEND_EQUATION_RGB"/>
+            <enum name="GL_BLEND_SRC"/>
+            <enum name="GL_BLEND_SRC_ALPHA"/>
+            <enum name="GL_BLEND_SRC_RGB"/>
+            <enum name="GL_BLUE_BIAS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_BLUE_SCALE"/>
+            <enum name="GL_CALLIGRAPHIC_FRAGMENT_SGIX"/>
+            <enum name="GL_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+            <enum name="GL_COLOR_ARRAY"/>
+            <enum name="GL_COLOR_ARRAY_COUNT_EXT"/>
+            <enum name="GL_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_COLOR_CLEAR_VALUE"/>
+            <enum name="GL_COLOR_LOGIC_OP"/>
+            <enum name="GL_COLOR_MATERIAL"/>
+            <enum name="GL_COLOR_MATERIAL_FACE"/>
+            <enum name="GL_COLOR_MATERIAL_PARAMETER"/>
+            <enum name="GL_COLOR_MATRIX_SGI"/>
+            <enum name="GL_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+            <enum name="GL_COLOR_TABLE_SGI"/>
+            <enum name="GL_COLOR_WRITEMASK"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_CONTEXT_FLAGS"/>
+            <enum name="GL_CONVOLUTION_1D_EXT"/>
+            <enum name="GL_CONVOLUTION_2D_EXT"/>
+            <enum name="GL_CONVOLUTION_HINT_SGIX"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_CULL_FACE_MODE"/>
+            <enum name="GL_CURRENT_COLOR"/>
+            <enum name="GL_CURRENT_INDEX"/>
+            <enum name="GL_CURRENT_NORMAL"/>
+            <enum name="GL_CURRENT_PROGRAM"/>
+            <enum name="GL_CURRENT_RASTER_COLOR"/>
+            <enum name="GL_CURRENT_RASTER_DISTANCE"/>
+            <enum name="GL_CURRENT_RASTER_INDEX"/>
+            <enum name="GL_CURRENT_RASTER_POSITION"/>
+            <enum name="GL_CURRENT_RASTER_POSITION_VALID"/>
+            <enum name="GL_CURRENT_RASTER_TEXTURE_COORDS"/>
+            <enum name="GL_CURRENT_TEXTURE_COORDS"/>
+            <enum name="GL_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_DEFORMATIONS_MASK_SGIX"/>
+            <enum name="GL_DEPTH_BIAS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_DEPTH_CLEAR_VALUE"/>
+            <enum name="GL_DEPTH_FUNC"/>
+            <enum name="GL_DEPTH_RANGE"/>
+            <enum name="GL_DEPTH_SCALE"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_DEPTH_WRITEMASK"/>
+            <enum name="GL_DETAIL_TEXTURE_2D_BINDING_SGIS"/>
+            <enum name="GL_DEVICE_LUID_EXT"/>
+            <enum name="GL_DEVICE_NODE_MASK_EXT"/>
+            <enum name="GL_DEVICE_UUID_EXT"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER_BINDING"/>
+            <enum name="GL_DISTANCE_ATTENUATION_SGIS"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_DOUBLEBUFFER"/>
+            <enum name="GL_DRAW_BUFFER"/>
+            <enum name="GL_DRAW_BUFFER_EXT"/>
+            <enum name="GL_DRAW_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_DRIVER_UUID_EXT"/>
+            <enum name="GL_EDGE_FLAG"/>
+            <enum name="GL_EDGE_FLAG_ARRAY"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_COUNT_EXT"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_STRIDE"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_FEEDBACK_BUFFER_TYPE"/>
+            <enum name="GL_FOG"/>
+            <enum name="GL_FOG_COLOR"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_FUNC_POINTS_SGIS"/>
+            <enum name="GL_FOG_HINT"/>
+            <enum name="GL_FOG_INDEX"/>
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_OFFSET_SGIX"/>
+            <enum name="GL_FOG_OFFSET_VALUE_SGIX"/>
+            <enum name="GL_FOG_START"/>
+            <enum name="GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX"/>
+            <enum name="GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX"/>
+            <enum name="GL_FRAGMENT_COLOR_MATERIAL_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT0_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHTING_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT"/>
+            <enum name="GL_FRAMEZOOM_FACTOR_SGIX"/>
+            <enum name="GL_FRAMEZOOM_SGIX"/>
+            <enum name="GL_FRONT_FACE"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT_SGIS"/>
+            <enum name="GL_GREEN_BIAS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_GREEN_SCALE"/>
+            <enum name="GL_HISTOGRAM_EXT"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+            <enum name="GL_INDEX_ARRAY"/>
+            <enum name="GL_INDEX_ARRAY_COUNT_EXT"/>
+            <enum name="GL_INDEX_ARRAY_STRIDE"/>
+            <enum name="GL_INDEX_ARRAY_TYPE"/>
+            <enum name="GL_INDEX_BITS"/>
+            <enum name="GL_INDEX_CLEAR_VALUE"/>
+            <enum name="GL_INDEX_LOGIC_OP"/>
+            <enum name="GL_INDEX_MODE"/>
+            <enum name="GL_INDEX_OFFSET"/>
+            <enum name="GL_INDEX_SHIFT"/>
+            <enum name="GL_INDEX_WRITEMASK"/>
+            <enum name="GL_INSTRUMENT_MEASUREMENTS_SGIX"/>
+            <enum name="GL_INTERLACE_SGIX"/>
+            <enum name="GL_IR_INSTRUMENT1_SGIX"/>
+            <enum name="GL_LAYER_PROVOKING_VERTEX"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+            <enum name="GL_LIGHTING"/>
+            <enum name="GL_LIGHT_ENV_MODE_SGIX"/>
+            <enum name="GL_LIGHT_MODEL_AMBIENT"/>
+            <enum name="GL_LIGHT_MODEL_COLOR_CONTROL"/>
+            <enum name="GL_LIGHT_MODEL_LOCAL_VIEWER"/>
+            <enum name="GL_LIGHT_MODEL_TWO_SIDE"/>
+            <enum name="GL_LINE_SMOOTH"/>
+            <enum name="GL_LINE_SMOOTH_HINT"/>
+            <enum name="GL_LINE_STIPPLE"/>
+            <enum name="GL_LINE_STIPPLE_PATTERN"/>
+            <enum name="GL_LINE_STIPPLE_REPEAT"/>
+            <enum name="GL_LINE_WIDTH"/>
+            <enum name="GL_LINE_WIDTH_GRANULARITY"/>
+            <enum name="GL_LINE_WIDTH_RANGE"/>
+            <enum name="GL_LIST_BASE"/>
+            <enum name="GL_LIST_INDEX"/>
+            <enum name="GL_LIST_MODE"/>
+            <enum name="GL_LOGIC_OP"/>
+            <enum name="GL_LOGIC_OP_MODE"/>
+            <enum name="GL_MAJOR_VERSION"/>
+            <enum name="GL_MAP1_COLOR_4"/>
+            <enum name="GL_MAP1_GRID_DOMAIN"/>
+            <enum name="GL_MAP1_GRID_SEGMENTS"/>
+            <enum name="GL_MAP1_INDEX"/>
+            <enum name="GL_MAP1_NORMAL"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP1_VERTEX_3"/>
+            <enum name="GL_MAP1_VERTEX_4"/>
+            <enum name="GL_MAP2_COLOR_4"/>
+            <enum name="GL_MAP2_GRID_DOMAIN"/>
+            <enum name="GL_MAP2_GRID_SEGMENTS"/>
+            <enum name="GL_MAP2_INDEX"/>
+            <enum name="GL_MAP2_NORMAL"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP2_VERTEX_3"/>
+            <enum name="GL_MAP2_VERTEX_4"/>
+            <enum name="GL_MAP_COLOR"/>
+            <enum name="GL_MAP_STENCIL"/>
+            <enum name="GL_MATRIX_MODE"/>
+            <enum name="GL_MAX_3D_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_3D_TEXTURE_SIZE_EXT"/>
+            <enum name="GL_MAX_4D_TEXTURE_SIZE_SGIS"/>
+            <enum name="GL_MAX_ACTIVE_LIGHTS_SGIX"/>
+            <enum name="GL_MAX_ARRAY_TEXTURE_LAYERS"/>
+            <enum name="GL_MAX_ASYNC_DRAW_PIXELS_SGIX"/>
+            <enum name="GL_MAX_ASYNC_HISTOGRAM_SGIX"/>
+            <enum name="GL_MAX_ASYNC_READ_PIXELS_SGIX"/>
+            <enum name="GL_MAX_ASYNC_TEX_IMAGE_SGIX"/>
+            <enum name="GL_MAX_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_MAX_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_MAX_CLIPMAP_DEPTH_SGIX"/>
+            <enum name="GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+            <enum name="GL_MAX_CLIP_DISTANCES"/>
+            <enum name="GL_MAX_CLIP_PLANES"/>
+            <enum name="GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+            <enum name="GL_MAX_COLOR_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_COMBINED_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMBINED_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_COUNT"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+            <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_MAX_DEPTH_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_DRAW_BUFFERS"/>
+            <enum name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS"/>
+            <enum name="GL_MAX_ELEMENTS_INDICES"/>
+            <enum name="GL_MAX_ELEMENTS_VERTICES"/>
+            <enum name="GL_MAX_ELEMENT_INDEX"/>
+            <enum name="GL_MAX_EVAL_ORDER"/>
+            <enum name="GL_MAX_FOG_FUNC_POINTS_SGIS"/>
+            <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_FRAGMENT_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_FRAGMENT_LIGHTS_SGIX"/>
+            <enum name="GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_HEIGHT"/>
+            <enum name="GL_MAX_FRAMEBUFFER_LAYERS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_SAMPLES"/>
+            <enum name="GL_MAX_FRAMEBUFFER_WIDTH"/>
+            <enum name="GL_MAX_FRAMEZOOM_FACTOR_SGIX"/>
+            <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_GEOMETRY_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_INTEGER_SAMPLES"/>
+            <enum name="GL_MAX_LABEL_LENGTH"/>
+            <enum name="GL_MAX_LIGHTS"/>
+            <enum name="GL_MAX_LIST_NESTING"/>
+            <enum name="GL_MAX_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_MAX_NAME_STACK_DEPTH"/>
+            <enum name="GL_MAX_PIXEL_MAP_TABLE"/>
+            <enum name="GL_MAX_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_MAX_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_MAX_RECTANGLE_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_RENDERBUFFER_SIZE"/>
+            <enum name="GL_MAX_SAMPLE_MASK_WORDS"/>
+            <enum name="GL_MAX_SERVER_WAIT_TIMEOUT"/>
+            <enum name="GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_TEXTURE_BUFFER_SIZE"/>
+            <enum name="GL_MAX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TEXTURE_LOD_BIAS"/>
+            <enum name="GL_MAX_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MAX_UNIFORM_BLOCK_SIZE"/>
+            <enum name="GL_MAX_UNIFORM_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_UNIFORM_LOCATIONS"/>
+            <enum name="GL_MAX_VARYING_COMPONENTS"/>
+            <enum name="GL_MAX_VARYING_FLOATS"/>
+            <enum name="GL_MAX_VARYING_VECTORS"/>
+            <enum name="GL_MAX_VERTEX_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_VERTEX_ATTRIBS"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_BINDINGS"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+            <enum name="GL_MAX_VERTEX_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+            <enum name="GL_MAX_VIEWPORTS"/>
+            <enum name="GL_MAX_VIEWPORT_DIMS"/>
+            <enum name="GL_MINMAX_EXT"/>
+            <enum name="GL_MINOR_VERSION"/>
+            <enum name="GL_MIN_MAP_BUFFER_ALIGNMENT"/>
+            <enum name="GL_MIN_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_MODELVIEW0_MATRIX_EXT"/>
+            <enum name="GL_MODELVIEW0_STACK_DEPTH_EXT"/>
+            <enum name="GL_MODELVIEW_MATRIX"/>
+            <enum name="GL_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_MULTISAMPLE_SGIS"/>
+            <enum name="GL_NAME_STACK_DEPTH"/>
+            <enum name="GL_NORMALIZE"/>
+            <enum name="GL_NORMAL_ARRAY"/>
+            <enum name="GL_NORMAL_ARRAY_COUNT_EXT"/>
+            <enum name="GL_NORMAL_ARRAY_STRIDE"/>
+            <enum name="GL_NORMAL_ARRAY_TYPE"/>
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_NUM_DEVICE_UUIDS_EXT"/>
+            <enum name="GL_NUM_EXTENSIONS"/>
+            <enum name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_NUM_SHADER_BINARY_FORMATS"/>
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_PACK_CMYK_HINT_EXT"/>
+            <enum name="GL_PACK_IMAGE_DEPTH_SGIS"/>
+            <enum name="GL_PACK_IMAGE_HEIGHT"/>
+            <enum name="GL_PACK_IMAGE_HEIGHT_EXT"/>
+            <enum name="GL_PACK_LSB_FIRST"/>
+            <enum name="GL_PACK_RESAMPLE_SGIX"/>
+            <enum name="GL_PACK_ROW_LENGTH"/>
+            <enum name="GL_PACK_SKIP_IMAGES"/>
+            <enum name="GL_PACK_SKIP_IMAGES_EXT"/>
+            <enum name="GL_PACK_SKIP_PIXELS"/>
+            <enum name="GL_PACK_SKIP_ROWS"/>
+            <enum name="GL_PACK_SKIP_VOLUMES_SGIS"/>
+            <enum name="GL_PACK_SUBSAMPLE_RATE_SGIX"/>
+            <enum name="GL_PACK_SWAP_BYTES"/>
+            <enum name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+            <enum name="GL_PIXEL_MAP_A_TO_A_SIZE"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S_SIZE"/>
+            <enum name="GL_PIXEL_PACK_BUFFER_BINDING"/>
+            <enum name="GL_PIXEL_TEXTURE_SGIS"/>
+            <enum name="GL_PIXEL_TEX_GEN_MODE_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_SGIX"/>
+            <enum name="GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_CACHE_INCREMENT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_CACHE_SIZE_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_DEPTH_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_HEIGHT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_WIDTH_SGIX"/>
+            <enum name="GL_PIXEL_TILE_HEIGHT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_WIDTH_SGIX"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE_SGIS"/>
+            <enum name="GL_POINT_SIZE"/>
+            <enum name="GL_POINT_SIZE_GRANULARITY"/>
+            <enum name="GL_POINT_SIZE_MAX_SGIS"/>
+            <enum name="GL_POINT_SIZE_MIN_SGIS"/>
+            <enum name="GL_POINT_SIZE_RANGE"/>
+            <enum name="GL_POINT_SMOOTH"/>
+            <enum name="GL_POINT_SMOOTH_HINT"/>
+            <enum name="GL_POLYGON_MODE"/>
+            <enum name="GL_POLYGON_OFFSET_BIAS_EXT"/>
+            <enum name="GL_POLYGON_OFFSET_FACTOR"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_POLYGON_OFFSET_LINE"/>
+            <enum name="GL_POLYGON_OFFSET_POINT"/>
+            <enum name="GL_POLYGON_OFFSET_UNITS"/>
+            <enum name="GL_POLYGON_SMOOTH"/>
+            <enum name="GL_POLYGON_SMOOTH_HINT"/>
+            <enum name="GL_POLYGON_STIPPLE"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_SCALE_SGI"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_RED_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_RED_SCALE_EXT"/>
+            <enum name="GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX"/>
+            <enum name="GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX"/>
+            <enum name="GL_PRIMITIVE_RESTART_INDEX"/>
+            <enum name="GL_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_PROGRAM_PIPELINE_BINDING"/>
+            <enum name="GL_PROGRAM_POINT_SIZE"/>
+            <enum name="GL_PROJECTION_MATRIX"/>
+            <enum name="GL_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_PROVOKING_VERTEX"/>
+            <enum name="GL_READ_BUFFER"/>
+            <enum name="GL_READ_BUFFER_EXT"/>
+            <enum name="GL_READ_BUFFER_NV"/>
+            <enum name="GL_READ_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RED_BIAS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_RED_SCALE"/>
+            <enum name="GL_REFERENCE_PLANE_EQUATION_SGIX"/>
+            <enum name="GL_REFERENCE_PLANE_SGIX"/>
+            <enum name="GL_RENDERBUFFER_BINDING"/>
+            <enum name="GL_RENDER_MODE"/>
+            <enum name="GL_RESCALE_NORMAL_EXT"/>
+            <enum name="GL_RGBA_MODE"/>
+            <enum name="GL_SAMPLER_BINDING"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLES_SGIS"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_MASK_SGIS"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_ONE_SGIS"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_SAMPLE_BUFFERS_SGIS"/>
+            <enum name="GL_SAMPLE_COVERAGE_INVERT"/>
+            <enum name="GL_SAMPLE_COVERAGE_VALUE"/>
+            <enum name="GL_SAMPLE_MASK_INVERT_SGIS"/>
+            <enum name="GL_SAMPLE_MASK_SGIS"/>
+            <enum name="GL_SAMPLE_MASK_VALUE_SGIS"/>
+            <enum name="GL_SAMPLE_PATTERN_SGIS"/>
+            <enum name="GL_SCISSOR_BOX"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_SELECTION_BUFFER_SIZE"/>
+            <enum name="GL_SEPARABLE_2D_EXT"/>
+            <enum name="GL_SHADER_COMPILER"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_BINDING"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_SIZE"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_START"/>
+            <enum name="GL_SHADE_MODEL"/>
+            <enum name="GL_SHARED_TEXTURE_PALETTE_EXT"/>
+            <enum name="GL_SMOOTH_LINE_WIDTH_GRANULARITY"/>
+            <enum name="GL_SMOOTH_LINE_WIDTH_RANGE"/>
+            <enum name="GL_SMOOTH_POINT_SIZE_GRANULARITY"/>
+            <enum name="GL_SMOOTH_POINT_SIZE_RANGE"/>
+            <enum name="GL_SPRITE_AXIS_SGIX"/>
+            <enum name="GL_SPRITE_MODE_SGIX"/>
+            <enum name="GL_SPRITE_SGIX"/>
+            <enum name="GL_SPRITE_TRANSLATION_SGIX"/>
+            <enum name="GL_STENCIL_BACK_FAIL"/>
+            <enum name="GL_STENCIL_BACK_FUNC"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_BACK_REF"/>
+            <enum name="GL_STENCIL_BACK_VALUE_MASK"/>
+            <enum name="GL_STENCIL_BACK_WRITEMASK"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_STENCIL_CLEAR_VALUE"/>
+            <enum name="GL_STENCIL_FAIL"/>
+            <enum name="GL_STENCIL_FUNC"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_REF"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_STENCIL_VALUE_MASK"/>
+            <enum name="GL_STENCIL_WRITEMASK"/>
+            <enum name="GL_STEREO"/>
+            <enum name="GL_SUBPIXEL_BITS"/>
+            <enum name="GL_TEXTURE_1D"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_TEXTURE_3D_BINDING_EXT"/>
+            <enum name="GL_TEXTURE_3D_EXT"/>
+            <enum name="GL_TEXTURE_4D_BINDING_SGIS"/>
+            <enum name="GL_TEXTURE_4D_SGIS"/>
+            <enum name="GL_TEXTURE_BINDING_1D"/>
+            <enum name="GL_TEXTURE_BINDING_1D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_3D"/>
+            <enum name="GL_TEXTURE_BINDING_BUFFER"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_BINDING_RECTANGLE"/>
+            <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_TEXTURE_COLOR_TABLE_SGI"/>
+            <enum name="GL_TEXTURE_COMPRESSION_HINT"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_COUNT_EXT"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_SIZE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_TYPE"/>
+            <enum name="GL_TEXTURE_GEN_Q"/>
+            <enum name="GL_TEXTURE_GEN_R"/>
+            <enum name="GL_TEXTURE_GEN_S"/>
+            <enum name="GL_TEXTURE_GEN_T"/>
+            <enum name="GL_TEXTURE_MATRIX"/>
+            <enum name="GL_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_TIMESTAMP"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START"/>
+            <enum name="GL_UNIFORM_BUFFER_BINDING"/>
+            <enum name="GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_UNIFORM_BUFFER_SIZE"/>
+            <enum name="GL_UNIFORM_BUFFER_START"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_UNPACK_CMYK_HINT_EXT"/>
+            <enum name="GL_UNPACK_IMAGE_DEPTH_SGIS"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT_EXT"/>
+            <enum name="GL_UNPACK_LSB_FIRST"/>
+            <enum name="GL_UNPACK_RESAMPLE_SGIX"/>
+            <enum name="GL_UNPACK_ROW_LENGTH"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES_EXT"/>
+            <enum name="GL_UNPACK_SKIP_PIXELS"/>
+            <enum name="GL_UNPACK_SKIP_ROWS"/>
+            <enum name="GL_UNPACK_SKIP_VOLUMES_SGIS"/>
+            <enum name="GL_UNPACK_SUBSAMPLE_RATE_SGIX"/>
+            <enum name="GL_UNPACK_SWAP_BYTES"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_VERTEX_ARRAY_BINDING"/>
+            <enum name="GL_VERTEX_ARRAY_COUNT_EXT"/>
+            <enum name="GL_VERTEX_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_BINDING_DIVISOR"/>
+            <enum name="GL_VERTEX_BINDING_OFFSET"/>
+            <enum name="GL_VERTEX_BINDING_STRIDE"/>
+            <enum name="GL_VERTEX_PRECLIP_HINT_SGIX"/>
+            <enum name="GL_VERTEX_PRECLIP_SGIX"/>
+            <enum name="GL_VIEWPORT"/>
+            <enum name="GL_VIEWPORT_BOUNDS_RANGE"/>
+            <enum name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX"/>
+            <enum name="GL_VIEWPORT_SUBPIXEL_BITS"/>
+            <enum name="GL_ZOOM_X"/>
+            <enum name="GL_ZOOM_Y"/>
+        </group>
+
+        <group name="GetPointervPName">
+            <enum name="GL_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_COLOR_ARRAY_POINTER_EXT"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_POINTER"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_POINTER_EXT"/>
+            <enum name="GL_FEEDBACK_BUFFER_POINTER"/>
+            <enum name="GL_INDEX_ARRAY_POINTER"/>
+            <enum name="GL_INDEX_ARRAY_POINTER_EXT"/>
+            <enum name="GL_INSTRUMENT_BUFFER_POINTER_SGIX"/>
+            <enum name="GL_NORMAL_ARRAY_POINTER"/>
+            <enum name="GL_NORMAL_ARRAY_POINTER_EXT"/>
+            <enum name="GL_SELECTION_BUFFER_POINTER"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_POINTER"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_POINTER_EXT"/>
+            <enum name="GL_VERTEX_ARRAY_POINTER"/>
+            <enum name="GL_VERTEX_ARRAY_POINTER_EXT"/>
+            <enum name="GL_DEBUG_CALLBACK_FUNCTION"/>
+            <enum name="GL_DEBUG_CALLBACK_USER_PARAM"/>
+        </group>
+
+        <group name="GetTextureParameter">
+            <enum name="GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS"/>
+            <enum name="GL_DETAIL_TEXTURE_LEVEL_SGIS"/>
+            <enum name="GL_DETAIL_TEXTURE_MODE_SGIS"/>
+            <enum name="GL_DUAL_TEXTURE_SELECT_SGIS"/>
+            <enum name="GL_GENERATE_MIPMAP_SGIS"/>
+            <enum name="GL_POST_TEXTURE_FILTER_BIAS_SGIX"/>
+            <enum name="GL_POST_TEXTURE_FILTER_SCALE_SGIX"/>
+            <enum name="GL_QUAD_TEXTURE_SELECT_SGIS"/>
+            <enum name="GL_SHADOW_AMBIENT_SGIX"/>
+            <enum name="GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS"/>
+            <enum name="GL_TEXTURE_4DSIZE_SGIS"/>
+            <enum name="GL_TEXTURE_ALPHA_SIZE"/>
+            <enum name="GL_TEXTURE_BASE_LEVEL_SGIS"/>
+            <enum name="GL_TEXTURE_BLUE_SIZE"/>
+            <enum name="GL_TEXTURE_BORDER"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR_NV"/>
+            <enum name="GL_TEXTURE_CLIPMAP_CENTER_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_DEPTH_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_FRAME_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_OFFSET_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+            <enum name="GL_TEXTURE_COMPARE_OPERATOR_SGIX"/>
+            <enum name="GL_TEXTURE_COMPARE_SGIX"/>
+            <enum name="GL_TEXTURE_COMPONENTS"/>
+            <enum name="GL_TEXTURE_DEPTH_EXT"/>
+            <enum name="GL_TEXTURE_FILTER4_SIZE_SGIS"/>
+            <enum name="GL_TEXTURE_GEQUAL_R_SGIX"/>
+            <enum name="GL_TEXTURE_GREEN_SIZE"/>
+            <enum name="GL_TEXTURE_HEIGHT"/>
+            <enum name="GL_TEXTURE_INTENSITY_SIZE"/>
+            <enum name="GL_TEXTURE_INTERNAL_FORMAT"/>
+            <enum name="GL_TEXTURE_LEQUAL_R_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_R_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_S_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_T_SGIX"/>
+            <enum name="GL_TEXTURE_LUMINANCE_SIZE"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_R_SGIX"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_S_SGIX"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_T_SGIX"/>
+            <enum name="GL_TEXTURE_MAX_LEVEL_SGIS"/>
+            <enum name="GL_TEXTURE_MAX_LOD_SGIS"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_MIN_LOD_SGIS"/>
+            <enum name="GL_TEXTURE_PRIORITY"/>
+            <enum name="GL_TEXTURE_RED_SIZE"/>
+            <enum name="GL_TEXTURE_RESIDENT"/>
+            <enum name="GL_TEXTURE_WIDTH"/>
+            <enum name="GL_TEXTURE_WRAP_Q_SGIS"/>
+            <enum name="GL_TEXTURE_WRAP_R_EXT"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+        </group>
+
+        <group name="HintMode">
+            <enum name="GL_DONT_CARE"/>
+            <enum name="GL_FASTEST"/>
+            <enum name="GL_NICEST"/>
+        </group>
+
+        <group name="HintTarget">
+            <enum name="GL_ALLOW_DRAW_FRG_HINT_PGI"/>
+            <enum name="GL_ALLOW_DRAW_MEM_HINT_PGI"/>
+            <enum name="GL_ALLOW_DRAW_OBJ_HINT_PGI"/>
+            <enum name="GL_ALLOW_DRAW_WIN_HINT_PGI"/>
+            <enum name="GL_ALWAYS_FAST_HINT_PGI"/>
+            <enum name="GL_ALWAYS_SOFT_HINT_PGI"/>
+            <enum name="GL_BACK_NORMALS_HINT_PGI"/>
+            <enum name="GL_BINNING_CONTROL_HINT_QCOM"/>
+            <enum name="GL_CLIP_FAR_HINT_PGI"/>
+            <enum name="GL_CLIP_NEAR_HINT_PGI"/>
+            <enum name="GL_CLIP_VOLUME_CLIPPING_HINT_EXT"/>
+            <enum name="GL_CONSERVE_MEMORY_HINT_PGI"/>
+            <enum name="GL_CONVOLUTION_HINT_SGIX"/>
+            <enum name="GL_FOG_HINT"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES"/>
+            <enum name="GL_FULL_STIPPLE_HINT_PGI"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT_SGIS"/>
+            <enum name="GL_LINE_QUALITY_HINT_SGIX"/>
+            <enum name="GL_LINE_SMOOTH_HINT"/>
+            <enum name="GL_MATERIAL_SIDE_HINT_PGI"/>
+            <enum name="GL_MAX_VERTEX_HINT_PGI"/>
+            <enum name="GL_MULTISAMPLE_FILTER_HINT_NV"/>
+            <enum name="GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI"/>
+            <enum name="GL_NATIVE_GRAPHICS_END_HINT_PGI"/>
+            <enum name="GL_PACK_CMYK_HINT_EXT"/>
+            <enum name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+            <enum name="GL_PHONG_HINT_WIN"/>
+            <enum name="GL_POINT_SMOOTH_HINT"/>
+            <enum name="GL_POLYGON_SMOOTH_HINT"/>
+            <enum name="GL_PREFER_DOUBLEBUFFER_HINT_PGI"/>
+            <enum name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+            <enum name="GL_RECLAIM_MEMORY_HINT_PGI"/>
+            <enum name="GL_SCALEBIAS_HINT_SGIX"/>
+            <enum name="GL_STRICT_DEPTHFUNC_HINT_PGI"/>
+            <enum name="GL_STRICT_LIGHTING_HINT_PGI"/>
+            <enum name="GL_STRICT_SCISSOR_HINT_PGI"/>
+            <enum name="GL_TEXTURE_COMPRESSION_HINT"/>
+            <enum name="GL_TEXTURE_COMPRESSION_HINT_ARB"/>
+            <enum name="GL_TEXTURE_MULTI_BUFFER_HINT_SGIX"/>
+            <enum name="GL_TEXTURE_STORAGE_HINT_APPLE"/>
+            <enum name="GL_TRANSFORM_HINT_APPLE"/>
+            <enum name="GL_UNPACK_CMYK_HINT_EXT"/>
+            <enum name="GL_VERTEX_ARRAY_STORAGE_HINT_APPLE"/>
+            <enum name="GL_VERTEX_CONSISTENT_HINT_PGI"/>
+            <enum name="GL_VERTEX_DATA_HINT_PGI"/>
+            <enum name="GL_VERTEX_PRECLIP_HINT_SGIX"/>
+            <enum name="GL_VERTEX_PRECLIP_SGIX"/>
+            <enum name="GL_WIDE_LINE_HINT_PGI"/>
+        </group>
+
+        <group name="HistogramTargetEXT">
+            <enum name="GL_HISTOGRAM"/>
+            <enum name="GL_HISTOGRAM_EXT"/>
+            <enum name="GL_PROXY_HISTOGRAM"/>
+            <enum name="GL_PROXY_HISTOGRAM_EXT"/>
+        </group>
+
+        <group name="IndexPointerType">
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+        </group>
+
+        <group name="InterleavedArrayFormat">
+            <enum name="GL_C3F_V3F"/>
+            <enum name="GL_C4F_N3F_V3F"/>
+            <enum name="GL_C4UB_V2F"/>
+            <enum name="GL_C4UB_V3F"/>
+            <enum name="GL_N3F_V3F"/>
+            <enum name="GL_T2F_C3F_V3F"/>
+            <enum name="GL_T2F_C4F_N3F_V3F"/>
+            <enum name="GL_T2F_C4UB_V3F"/>
+            <enum name="GL_T2F_N3F_V3F"/>
+            <enum name="GL_T2F_V3F"/>
+            <enum name="GL_T4F_C4F_N3F_V4F"/>
+            <enum name="GL_T4F_V4F"/>
+            <enum name="GL_V2F"/>
+            <enum name="GL_V3F"/>
+        </group>
+
+        <group name="LightEnvModeSGIX">
+            <enum name="GL_ADD"/>
+            <enum name="GL_MODULATE"/>
+            <enum name="GL_REPLACE"/>
+        </group>
+
+        <group name="LightEnvParameterSGIX">
+            <enum name="GL_LIGHT_ENV_MODE_SGIX"/>
+        </group>
+
+        <group name="LightModelColorControl">
+            <enum name="GL_SEPARATE_SPECULAR_COLOR"/>
+            <enum name="GL_SEPARATE_SPECULAR_COLOR_EXT"/>
+            <enum name="GL_SINGLE_COLOR"/>
+            <enum name="GL_SINGLE_COLOR_EXT"/>
+        </group>
+
+        <group name="LightModelParameter">
+            <enum name="GL_LIGHT_MODEL_AMBIENT"/>
+            <enum name="GL_LIGHT_MODEL_COLOR_CONTROL"/>
+            <enum name="GL_LIGHT_MODEL_COLOR_CONTROL_EXT"/>
+            <enum name="GL_LIGHT_MODEL_LOCAL_VIEWER"/>
+            <enum name="GL_LIGHT_MODEL_TWO_SIDE"/>
+        </group>
+
+        <group name="LightName">
+            <enum name="GL_FRAGMENT_LIGHT0_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT1_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT2_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT3_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT4_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT5_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT6_SGIX"/>
+            <enum name="GL_FRAGMENT_LIGHT7_SGIX"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+        </group>
+
+        <group name="LightParameter">
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_CONSTANT_ATTENUATION"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_LINEAR_ATTENUATION"/>
+            <enum name="GL_POSITION"/>
+            <enum name="GL_QUADRATIC_ATTENUATION"/>
+            <enum name="GL_SPECULAR"/>
+            <enum name="GL_SPOT_CUTOFF"/>
+            <enum name="GL_SPOT_DIRECTION"/>
+            <enum name="GL_SPOT_EXPONENT"/>
+        </group>
+
+        <group name="ListMode">
+            <enum name="GL_COMPILE"/>
+            <enum name="GL_COMPILE_AND_EXECUTE"/>
+        </group>
+
+        <group name="ListNameType">
+            <enum name="GL_2_BYTES"/>
+            <enum name="GL_3_BYTES"/>
+            <enum name="GL_4_BYTES"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+        </group>
+
+        <group name="ListParameterName">
+            <enum name="GL_LIST_PRIORITY_SGIX"/>
+        </group>
+
+        <group name="LogicOp">
+            <enum name="GL_AND"/>
+            <enum name="GL_AND_INVERTED"/>
+            <enum name="GL_AND_REVERSE"/>
+            <enum name="GL_CLEAR"/>
+            <enum name="GL_COPY"/>
+            <enum name="GL_COPY_INVERTED"/>
+            <enum name="GL_EQUIV"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_NAND"/>
+            <enum name="GL_NOOP"/>
+            <enum name="GL_NOR"/>
+            <enum name="GL_OR"/>
+            <enum name="GL_OR_INVERTED"/>
+            <enum name="GL_OR_REVERSE"/>
+            <enum name="GL_SET"/>
+            <enum name="GL_XOR"/>
+        </group>
+
+        <group name="MapBufferAccessMask">
+            <enum name="GL_MAP_COHERENT_BIT"/>
+            <enum name="GL_MAP_COHERENT_BIT_EXT"/>
+            <enum name="GL_MAP_FLUSH_EXPLICIT_BIT"/>
+            <enum name="GL_MAP_FLUSH_EXPLICIT_BIT_EXT"/>
+            <enum name="GL_MAP_INVALIDATE_BUFFER_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_BUFFER_BIT_EXT"/>
+            <enum name="GL_MAP_INVALIDATE_RANGE_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_RANGE_BIT_EXT"/>
+            <enum name="GL_MAP_PERSISTENT_BIT"/>
+            <enum name="GL_MAP_PERSISTENT_BIT_EXT"/>
+            <enum name="GL_MAP_READ_BIT"/>
+            <enum name="GL_MAP_READ_BIT_EXT"/>
+            <enum name="GL_MAP_UNSYNCHRONIZED_BIT"/>
+            <enum name="GL_MAP_UNSYNCHRONIZED_BIT_EXT"/>
+            <enum name="GL_MAP_WRITE_BIT"/>
+            <enum name="GL_MAP_WRITE_BIT_EXT"/>
+        </group>
+
+        <group name="MapTarget">
+            <enum name="GL_GEOMETRY_DEFORMATION_SGIX"/>
+            <enum name="GL_MAP1_COLOR_4"/>
+            <enum name="GL_MAP1_INDEX"/>
+            <enum name="GL_MAP1_NORMAL"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP1_VERTEX_3"/>
+            <enum name="GL_MAP1_VERTEX_4"/>
+            <enum name="GL_MAP2_COLOR_4"/>
+            <enum name="GL_MAP2_INDEX"/>
+            <enum name="GL_MAP2_NORMAL"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP2_VERTEX_3"/>
+            <enum name="GL_MAP2_VERTEX_4"/>
+            <enum name="GL_TEXTURE_DEFORMATION_SGIX"/>
+        </group>
+
+        <group name="MapTextureFormatINTEL">
+            <enum name="GL_LAYOUT_DEFAULT_INTEL"/>
+            <enum name="GL_LAYOUT_LINEAR_CPU_CACHED_INTEL"/>
+            <enum name="GL_LAYOUT_LINEAR_INTEL"/>
+        </group>
+
+        <group name="MaterialFace">
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+        </group>
+
+        <group name="MaterialParameter">
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_AMBIENT_AND_DIFFUSE"/>
+            <enum name="GL_COLOR_INDEXES"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_EMISSION"/>
+            <enum name="GL_SHININESS"/>
+            <enum name="GL_SPECULAR"/>
+        </group>
+
+        <group name="MatrixMode">
+            <enum name="GL_MODELVIEW"/>
+            <enum name="GL_MODELVIEW0_EXT"/>
+            <enum name="GL_PROJECTION"/>
+            <enum name="GL_TEXTURE"/>
+        </group>
+
+        <group name="MemoryBarrierMask">
+            <enum name="GL_ALL_BARRIER_BITS"/>
+            <enum name="GL_ALL_BARRIER_BITS_EXT"/>
+            <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT"/>
+            <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT_EXT"/>
+            <enum name="GL_BUFFER_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_BUFFER_UPDATE_BARRIER_BIT_EXT"/>
+            <enum name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT"/>
+            <enum name="GL_COMMAND_BARRIER_BIT"/>
+            <enum name="GL_COMMAND_BARRIER_BIT_EXT"/>
+            <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT_EXT"/>
+            <enum name="GL_FRAMEBUFFER_BARRIER_BIT"/>
+            <enum name="GL_FRAMEBUFFER_BARRIER_BIT_EXT"/>
+            <enum name="GL_PIXEL_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_PIXEL_BUFFER_BARRIER_BIT_EXT"/>
+            <enum name="GL_QUERY_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV"/>
+            <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT"/>
+            <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT_EXT"/>
+            <enum name="GL_SHADER_STORAGE_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_FETCH_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_FETCH_BARRIER_BIT_EXT"/>
+            <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT_EXT"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT_EXT"/>
+            <enum name="GL_UNIFORM_BARRIER_BIT"/>
+            <enum name="GL_UNIFORM_BARRIER_BIT_EXT"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT_EXT"/>
+        </group>
+
+        <group name="MemoryObjectParameterName">
+            <enum name="GL_DEDICATED_MEMORY_OBJECT_EXT"/>
+            <enum name="GL_PROTECTED_MEMORY_OBJECT_EXT"/>
+        </group>
+
+        <group name="MeshMode1">
+            <enum name="GL_LINE"/>
+            <enum name="GL_POINT"/>
+        </group>
+
+        <group name="MeshMode2">
+            <enum name="GL_FILL"/>
+            <enum name="GL_LINE"/>
+            <enum name="GL_POINT"/>
+        </group>
+
+        <group name="MinmaxTargetEXT">
+            <enum name="GL_MINMAX"/>
+            <enum name="GL_MINMAX_EXT"/>
+        </group>
+
+        <group name="NormalPointerType">
+            <enum name="GL_BYTE"/>
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+        </group>
+
+        <group name="PixelCopyType">
+            <enum name="GL_COLOR"/>
+            <enum name="GL_COLOR_EXT"/>
+            <enum name="GL_DEPTH"/>
+            <enum name="GL_DEPTH_EXT"/>
+            <enum name="GL_STENCIL"/>
+            <enum name="GL_STENCIL_EXT"/>
+        </group>
+
+        <group name="PixelFormat">
+            <enum name="GL_ABGR_EXT"/>
+            <enum name="GL_ALPHA"/>
+            <enum name="GL_BGR"/>
+            <enum name="GL_BGR_INTEGER"/>
+            <enum name="GL_BGRA"/>
+            <enum name="GL_BGRA_INTEGER"/>
+            <enum name="GL_BLUE"/>
+            <enum name="GL_BLUE_INTEGER"/>
+            <enum name="GL_CMYKA_EXT"/>
+            <enum name="GL_CMYK_EXT"/>
+            <enum name="GL_COLOR_INDEX"/>
+            <enum name="GL_DEPTH_COMPONENT"/>
+            <enum name="GL_DEPTH_STENCIL"/>
+            <enum name="GL_GREEN"/>
+            <enum name="GL_GREEN_INTEGER"/>
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_RED"/>
+            <enum name="GL_RED_EXT"/>
+            <enum name="GL_RED_INTEGER"/>
+            <enum name="GL_RG"/>
+            <enum name="GL_RG_INTEGER"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGB_INTEGER"/>
+            <enum name="GL_RGBA"/>
+            <enum name="GL_RGBA_INTEGER"/>
+            <enum name="GL_STENCIL_INDEX"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_YCRCB_422_SGIX"/>
+            <enum name="GL_YCRCB_444_SGIX"/>
+        </group>
+
+        <group name="InternalFormat" comment="Was PixelInternalFormat">
+            <!-- Compatibility -->
+            <enum name="GL_ALPHA12"/>
+            <enum name="GL_ALPHA16"/>
+            <!-- <enum name="GL_ALPHA16_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_ALPHA4"/>
+            <enum name="GL_ALPHA8"/>
+            <!-- <enum name="GL_ALPHA_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_DUAL_ALPHA12_SGIS"/>
+            <enum name="GL_DUAL_ALPHA16_SGIS"/>
+            <enum name="GL_DUAL_ALPHA4_SGIS"/>
+            <enum name="GL_DUAL_ALPHA8_SGIS"/>
+            <enum name="GL_DUAL_INTENSITY12_SGIS"/>
+            <enum name="GL_DUAL_INTENSITY16_SGIS"/>
+            <enum name="GL_DUAL_INTENSITY4_SGIS"/>
+            <enum name="GL_DUAL_INTENSITY8_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE12_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE16_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE4_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE8_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE_ALPHA4_SGIS"/>
+            <enum name="GL_DUAL_LUMINANCE_ALPHA8_SGIS"/>
+            <enum name="GL_INTENSITY"/>
+            <enum name="GL_INTENSITY12"/>
+            <enum name="GL_INTENSITY16"/>
+            <!-- <enum name="GL_INTENSITY16_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_INTENSITY4"/>
+            <enum name="GL_INTENSITY8"/>
+            <!-- <enum name="GL_INTENSITY_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_LUMINANCE12"/>
+            <enum name="GL_LUMINANCE12_ALPHA12"/>
+            <enum name="GL_LUMINANCE12_ALPHA4"/>
+            <enum name="GL_LUMINANCE16"/>
+            <enum name="GL_LUMINANCE16_ALPHA16"/>
+            <!-- <enum name="GL_LUMINANCE16_ALPHA8_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- <enum name="GL_LUMINANCE16_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_LUMINANCE4"/>
+            <enum name="GL_LUMINANCE4_ALPHA4"/>
+            <enum name="GL_LUMINANCE6_ALPHA2"/>
+            <enum name="GL_LUMINANCE8"/>
+            <enum name="GL_LUMINANCE8_ALPHA8"/>
+            <!-- <enum name="GL_LUMINANCE_ALPHA_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- <enum name="GL_LUMINANCE_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <enum name="GL_QUAD_ALPHA4_SGIS"/>
+            <enum name="GL_QUAD_ALPHA8_SGIS"/>
+            <enum name="GL_QUAD_INTENSITY4_SGIS"/>
+            <enum name="GL_QUAD_INTENSITY8_SGIS"/>
+            <enum name="GL_QUAD_LUMINANCE4_SGIS"/>
+            <enum name="GL_QUAD_LUMINANCE8_SGIS"/>
+            <!-- <enum name="GL_R5_G6_B5_A8_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- <enum name="GL_R5_G6_B5_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- <enum name="GL_RGBA_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- <enum name="GL_RGB_ICC_SGIX" comment="Incomplete extension SGIX_icc_texture"/> -->
+            <!-- Base internal format: GL_RED -->
+            <enum name="GL_RED"/>
+            <enum name="GL_RED_EXT"/>
+            <enum name="GL_R8"/>
+            <enum name="GL_R8_EXT"/>
+            <enum name="GL_R8_SNORM"/>
+            <enum name="GL_R16"/>
+            <enum name="GL_R16_EXT"/>
+            <enum name="GL_R16_SNORM"/>
+            <enum name="GL_R16_SNORM_EXT"/>
+            <!-- <enum name="GL_R32" comment="cut & paste error?"/> -->
+            <!-- <enum name="GL_R32_EXT" comment="cut & paste error?"/> -->
+            <enum name="GL_R16F"/>
+            <enum name="GL_R16F_EXT"/>
+            <enum name="GL_R32F"/>
+            <enum name="GL_R32F_EXT"/>
+            <enum name="GL_R8I"/>
+            <enum name="GL_R16I"/>
+            <enum name="GL_R32I"/>
+            <enum name="GL_R8UI"/>
+            <enum name="GL_R16UI"/>
+            <enum name="GL_R32UI"/>
+            <!-- Base internal format: GL_RG -->
+            <enum name="GL_RG"/>
+            <enum name="GL_RG8"/>
+            <enum name="GL_RG8_EXT"/>
+            <enum name="GL_RG8_SNORM"/>
+            <enum name="GL_RG16"/>
+            <enum name="GL_RG16_EXT"/>
+            <enum name="GL_RG16_SNORM"/>
+            <enum name="GL_RG16_SNORM_EXT"/>
+            <enum name="GL_RG16F"/>
+            <enum name="GL_RG16F_EXT"/>
+            <enum name="GL_RG32F"/>
+            <enum name="GL_RG32F_EXT"/>
+            <enum name="GL_RG8I"/>
+            <enum name="GL_RG16I"/>
+            <enum name="GL_RG32I"/>
+            <enum name="GL_RG8UI"/>
+            <enum name="GL_RG16UI"/>
+            <enum name="GL_RG32UI"/>
+            <!-- Base internal format: GL_RGB -->
+            <enum name="GL_RGB"/>
+            <!-- <enum name="GL_RGB2" comment="Never actually added to core"/> -->
+            <enum name="GL_RGB2_EXT"/>
+            <enum name="GL_RGB4"/>
+            <enum name="GL_RGB4_EXT"/>
+            <enum name="GL_RGB5"/>
+            <enum name="GL_RGB5_EXT"/>
+            <enum name="GL_RGB8"/>
+            <enum name="GL_RGB8_EXT"/>
+            <enum name="GL_RGB8_OES"/>
+            <enum name="GL_RGB8_SNORM"/>
+            <enum name="GL_RGB10"/>
+            <enum name="GL_RGB10_EXT"/>
+            <enum name="GL_RGB12"/>
+            <enum name="GL_RGB12_EXT"/>
+            <enum name="GL_RGB16"/>
+            <enum name="GL_RGB16_EXT"/>
+            <enum name="GL_RGB16F"/>
+            <enum name="GL_RGB16F_ARB"/>
+            <enum name="GL_RGB16F_EXT"/>
+            <enum name="GL_RGB16_SNORM"/>
+            <enum name="GL_RGB16_SNORM_EXT"/>
+            <enum name="GL_RGB8I"/>
+            <enum name="GL_RGB16I"/>
+            <enum name="GL_RGB32I"/>
+            <enum name="GL_RGB8UI"/>
+            <enum name="GL_RGB16UI"/>
+            <enum name="GL_RGB32UI"/>
+            <enum name="GL_SRGB"/>
+            <enum name="GL_SRGB_EXT"/>
+            <enum name="GL_SRGB_ALPHA"/>
+            <enum name="GL_SRGB_ALPHA_EXT"/>
+            <enum name="GL_SRGB8"/>
+            <enum name="GL_SRGB8_EXT"/>
+            <enum name="GL_SRGB8_NV"/>
+            <enum name="GL_SRGB8_ALPHA8"/>
+            <enum name="GL_SRGB8_ALPHA8_EXT"/>
+            <enum name="GL_R3_G3_B2"/>
+            <enum name="GL_R11F_G11F_B10F"/>
+            <enum name="GL_R11F_G11F_B10F_APPLE"/>
+            <enum name="GL_R11F_G11F_B10F_EXT"/>
+            <enum name="GL_RGB9_E5"/>
+            <enum name="GL_RGB9_E5_APPLE"/>
+            <enum name="GL_RGB9_E5_EXT"/>
+            <!-- Base internal format: GL_RGBA -->
+            <enum name="GL_RGBA"/>
+            <enum name="GL_RGBA4"/>
+            <enum name="GL_RGBA4_EXT"/>
+            <enum name="GL_RGBA4_OES"/>
+            <enum name="GL_RGB5_A1"/>
+            <enum name="GL_RGB5_A1_EXT"/>
+            <enum name="GL_RGB5_A1_OES"/>
+            <enum name="GL_RGBA8"/>
+            <enum name="GL_RGBA8_EXT"/>
+            <enum name="GL_RGBA8_OES"/>
+            <enum name="GL_RGBA8_SNORM"/>
+            <enum name="GL_RGB10_A2"/>
+            <enum name="GL_RGB10_A2_EXT"/>
+            <enum name="GL_RGBA12"/>
+            <enum name="GL_RGBA12_EXT"/>
+            <enum name="GL_RGBA16"/>
+            <enum name="GL_RGBA16_EXT"/>
+            <enum name="GL_RGBA16F"/>
+            <enum name="GL_RGBA16F_ARB"/>
+            <enum name="GL_RGBA16F_EXT"/>
+            <enum name="GL_RGBA32F"/>
+            <enum name="GL_RGBA32F_ARB"/>
+            <enum name="GL_RGBA32F_EXT"/>
+            <enum name="GL_RGBA8I"/>
+            <enum name="GL_RGBA16I"/>
+            <enum name="GL_RGBA32I"/>
+            <enum name="GL_RGBA8UI"/>
+            <enum name="GL_RGBA16UI"/>
+            <enum name="GL_RGBA32UI"/>
+            <enum name="GL_RGB10_A2UI"/>
+            <!-- Base internal format: GL_DEPTH_COMPONENT -->
+            <enum name="GL_DEPTH_COMPONENT"/>
+            <enum name="GL_DEPTH_COMPONENT16"/>
+            <enum name="GL_DEPTH_COMPONENT16_ARB"/>
+            <enum name="GL_DEPTH_COMPONENT16_OES"/>
+            <enum name="GL_DEPTH_COMPONENT16_SGIX"/>
+            <enum name="GL_DEPTH_COMPONENT24_ARB"/>
+            <enum name="GL_DEPTH_COMPONENT24_OES"/>
+            <enum name="GL_DEPTH_COMPONENT24_SGIX"/>
+            <enum name="GL_DEPTH_COMPONENT32_ARB"/>
+            <enum name="GL_DEPTH_COMPONENT32_OES"/>
+            <enum name="GL_DEPTH_COMPONENT32_SGIX"/>
+            <enum name="GL_DEPTH_COMPONENT32F"/>
+            <enum name="GL_DEPTH_COMPONENT32F_NV"/>
+            <!-- Base internal format: GL_DEPTH_STENCIL -->
+            <enum name="GL_DEPTH_STENCIL"/>
+            <enum name="GL_DEPTH_STENCIL_EXT"/>
+            <enum name="GL_DEPTH_STENCIL_MESA"/>
+            <enum name="GL_DEPTH_STENCIL_NV"/>
+            <enum name="GL_DEPTH_STENCIL_OES"/>
+            <enum name="GL_DEPTH24_STENCIL8"/>
+            <enum name="GL_DEPTH24_STENCIL8_EXT"/>
+            <enum name="GL_DEPTH24_STENCIL8_OES"/>
+            <enum name="GL_DEPTH32F_STENCIL8"/>
+            <enum name="GL_DEPTH32F_STENCIL8_NV"/>
+            <!-- Compressed base internal formats -->
+            <enum name="GL_COMPRESSED_RED"/>
+            <enum name="GL_COMPRESSED_RG"/>
+            <enum name="GL_COMPRESSED_RGB"/>
+            <enum name="GL_COMPRESSED_RGBA"/>
+            <enum name="GL_COMPRESSED_SRGB"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA"/>
+            <enum name="GL_COMPRESSED_RED_RGTC1"/>
+            <enum name="GL_COMPRESSED_RED_RGTC1_EXT"/>
+            <enum name="GL_COMPRESSED_SIGNED_RED_RGTC1"/>
+            <enum name="GL_COMPRESSED_SIGNED_RED_RGTC1_EXT"/>
+            <enum name="GL_COMPRESSED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_RG_RGTC2"/>
+            <enum name="GL_COMPRESSED_SIGNED_RG_RGTC2"/>
+            <enum name="GL_COMPRESSED_RGBA_BPTC_UNORM"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM"/>
+            <enum name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT"/>
+            <enum name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT"/>
+            <enum name="GL_COMPRESSED_RGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_RGBA8_ETC2_EAC"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"/>
+            <enum name="GL_COMPRESSED_RG11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_RG11_EAC"/>
+            <enum name="GL_COMPRESSED_RGB_S3TC_DXT1_EXT"/>
+            <enum name="GL_COMPRESSED_SRGB_S3TC_DXT1_EXT"/>
+            <enum name="GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT"/>
+            <enum name="GL_COMPRESSED_RGBA_S3TC_DXT3_EXT"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT"/>
+            <enum name="GL_COMPRESSED_RGBA_S3TC_DXT5_EXT"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT"/>
+        </group>
+
+        <group name="PixelMap">
+            <enum name="GL_PIXEL_MAP_A_TO_A"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S"/>
+        </group>
+
+        <group name="PixelStoreParameter">
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_PACK_IMAGE_DEPTH_SGIS"/>
+            <enum name="GL_PACK_IMAGE_HEIGHT"/>
+            <enum name="GL_PACK_IMAGE_HEIGHT_EXT"/>
+            <enum name="GL_PACK_LSB_FIRST"/>
+            <enum name="GL_PACK_RESAMPLE_OML"/>
+            <enum name="GL_PACK_RESAMPLE_SGIX"/>
+            <enum name="GL_PACK_ROW_LENGTH"/>
+            <enum name="GL_PACK_SKIP_IMAGES"/>
+            <enum name="GL_PACK_SKIP_IMAGES_EXT"/>
+            <enum name="GL_PACK_SKIP_PIXELS"/>
+            <enum name="GL_PACK_SKIP_ROWS"/>
+            <enum name="GL_PACK_SKIP_VOLUMES_SGIS"/>
+            <enum name="GL_PACK_SUBSAMPLE_RATE_SGIX"/>
+            <enum name="GL_PACK_SWAP_BYTES"/>
+            <enum name="GL_PIXEL_TILE_CACHE_SIZE_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_DEPTH_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_HEIGHT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_GRID_WIDTH_SGIX"/>
+            <enum name="GL_PIXEL_TILE_HEIGHT_SGIX"/>
+            <enum name="GL_PIXEL_TILE_WIDTH_SGIX"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_UNPACK_IMAGE_DEPTH_SGIS"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT_EXT"/>
+            <enum name="GL_UNPACK_LSB_FIRST"/>
+            <enum name="GL_UNPACK_RESAMPLE_OML"/>
+            <enum name="GL_UNPACK_RESAMPLE_SGIX"/>
+            <enum name="GL_UNPACK_ROW_LENGTH"/>
+            <enum name="GL_UNPACK_ROW_LENGTH_EXT"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES_EXT"/>
+            <enum name="GL_UNPACK_SKIP_PIXELS"/>
+            <enum name="GL_UNPACK_SKIP_PIXELS_EXT"/>
+            <enum name="GL_UNPACK_SKIP_ROWS"/>
+            <enum name="GL_UNPACK_SKIP_ROWS_EXT"/>
+            <enum name="GL_UNPACK_SKIP_VOLUMES_SGIS"/>
+            <enum name="GL_UNPACK_SUBSAMPLE_RATE_SGIX"/>
+            <enum name="GL_UNPACK_SWAP_BYTES"/>
+        </group>
+
+        <group name="PixelStoreResampleMode">
+            <enum name="GL_RESAMPLE_DECIMATE_SGIX"/>
+            <enum name="GL_RESAMPLE_REPLICATE_SGIX"/>
+            <enum name="GL_RESAMPLE_ZERO_FILL_SGIX"/>
+        </group>
+
+        <group name="PixelStoreSubsampleRate">
+            <enum name="GL_PIXEL_SUBSAMPLE_2424_SGIX"/>
+            <enum name="GL_PIXEL_SUBSAMPLE_4242_SGIX"/>
+            <enum name="GL_PIXEL_SUBSAMPLE_4444_SGIX"/>
+        </group>
+
+        <group name="PixelTexGenMode">
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGBA"/>
+        </group>
+
+        <group name="PixelTexGenParameterNameSGIS">
+            <enum name="GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS"/>
+            <enum name="GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS"/>
+        </group>
+
+        <group name="PixelTransferParameter">
+            <enum name="GL_ALPHA_BIAS"/>
+            <enum name="GL_ALPHA_SCALE"/>
+            <enum name="GL_BLUE_BIAS"/>
+            <enum name="GL_BLUE_SCALE"/>
+            <enum name="GL_DEPTH_BIAS"/>
+            <enum name="GL_DEPTH_SCALE"/>
+            <enum name="GL_GREEN_BIAS"/>
+            <enum name="GL_GREEN_SCALE"/>
+            <enum name="GL_INDEX_OFFSET"/>
+            <enum name="GL_INDEX_SHIFT"/>
+            <enum name="GL_MAP_COLOR"/>
+            <enum name="GL_MAP_STENCIL"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_BIAS"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_SCALE"/>
+            <enum name="GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_BIAS"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_SCALE"/>
+            <enum name="GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_BIAS"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_SCALE"/>
+            <enum name="GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_BIAS"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_BIAS_SGI"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_SCALE"/>
+            <enum name="GL_POST_COLOR_MATRIX_RED_SCALE_SGI"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_BIAS"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_SCALE"/>
+            <enum name="GL_POST_CONVOLUTION_ALPHA_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_BIAS"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_SCALE"/>
+            <enum name="GL_POST_CONVOLUTION_BLUE_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_BIAS"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_SCALE"/>
+            <enum name="GL_POST_CONVOLUTION_GREEN_SCALE_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_RED_BIAS"/>
+            <enum name="GL_POST_CONVOLUTION_RED_BIAS_EXT"/>
+            <enum name="GL_POST_CONVOLUTION_RED_SCALE"/>
+            <enum name="GL_POST_CONVOLUTION_RED_SCALE_EXT"/>
+            <enum name="GL_RED_BIAS"/>
+            <enum name="GL_RED_SCALE"/>
+        </group>
+
+        <group name="PixelType">
+            <enum name="GL_BITMAP"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE_3_3_2"/>
+            <enum name="GL_UNSIGNED_BYTE_3_3_2_EXT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_UNSIGNED_INT_10_10_10_2"/>
+            <enum name="GL_UNSIGNED_INT_10_10_10_2_EXT"/>
+            <enum name="GL_UNSIGNED_INT_8_8_8_8"/>
+            <enum name="GL_UNSIGNED_INT_8_8_8_8_EXT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4_EXT"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1_EXT"/>
+        </group>
+
+        <group name="PointParameterNameSGIS">
+            <enum name="GL_DISTANCE_ATTENUATION_EXT"/>
+            <enum name="GL_DISTANCE_ATTENUATION_SGIS"/>
+            <enum name="GL_POINT_DISTANCE_ATTENUATION"/>
+            <enum name="GL_POINT_DISTANCE_ATTENUATION_ARB"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE_ARB"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE_EXT"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE_SGIS"/>
+            <enum name="GL_POINT_SIZE_MAX"/>
+            <enum name="GL_POINT_SIZE_MAX_ARB"/>
+            <enum name="GL_POINT_SIZE_MAX_EXT"/>
+            <enum name="GL_POINT_SIZE_MAX_SGIS"/>
+            <enum name="GL_POINT_SIZE_MIN"/>
+            <enum name="GL_POINT_SIZE_MIN_ARB"/>
+            <enum name="GL_POINT_SIZE_MIN_EXT"/>
+            <enum name="GL_POINT_SIZE_MIN_SGIS"/>
+        </group>
+
+        <group name="PolygonMode">
+            <enum name="GL_FILL"/>
+            <enum name="GL_LINE"/>
+            <enum name="GL_POINT"/>
+        </group>
+
+        <group name="PrimitiveType">
+            <enum name="GL_LINES"/>
+            <enum name="GL_LINES_ADJACENCY"/>
+            <enum name="GL_LINES_ADJACENCY_ARB"/>
+            <enum name="GL_LINES_ADJACENCY_EXT"/>
+            <enum name="GL_LINE_LOOP"/>
+            <enum name="GL_LINE_STRIP"/>
+            <enum name="GL_LINE_STRIP_ADJACENCY"/>
+            <enum name="GL_LINE_STRIP_ADJACENCY_ARB"/>
+            <enum name="GL_LINE_STRIP_ADJACENCY_EXT"/>
+            <enum name="GL_PATCHES"/>
+            <enum name="GL_PATCHES_EXT"/>
+            <enum name="GL_POINTS"/>
+            <enum name="GL_POLYGON"/>
+            <enum name="GL_QUADS"/>
+            <enum name="GL_QUADS_EXT"/>
+            <enum name="GL_QUAD_STRIP"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_TRIANGLES_ADJACENCY"/>
+            <enum name="GL_TRIANGLES_ADJACENCY_ARB"/>
+            <enum name="GL_TRIANGLES_ADJACENCY_EXT"/>
+            <enum name="GL_TRIANGLE_FAN"/>
+            <enum name="GL_TRIANGLE_STRIP"/>
+            <enum name="GL_TRIANGLE_STRIP_ADJACENCY"/>
+            <enum name="GL_TRIANGLE_STRIP_ADJACENCY_ARB"/>
+            <enum name="GL_TRIANGLE_STRIP_ADJACENCY_EXT"/>
+        </group>
+
+        <group name="OcclusionQueryEventMaskAMD">
+            <enum name="GL_QUERY_DEPTH_PASS_EVENT_BIT_AMD"/>
+            <enum name="GL_QUERY_DEPTH_FAIL_EVENT_BIT_AMD"/>
+            <enum name="GL_QUERY_STENCIL_FAIL_EVENT_BIT_AMD"/>
+            <enum name="GL_QUERY_DEPTH_BOUNDS_FAIL_EVENT_BIT_AMD"/>
+            <enum name="GL_QUERY_ALL_EVENT_BITS_AMD"/>
+        </group>
+
+        <group name="ReadBufferMode">
+            <enum name="GL_AUX0"/>
+            <enum name="GL_AUX1"/>
+            <enum name="GL_AUX2"/>
+            <enum name="GL_AUX3"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_BACK_LEFT"/>
+            <enum name="GL_BACK_RIGHT"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_FRONT_LEFT"/>
+            <enum name="GL_FRONT_RIGHT"/>
+            <enum name="GL_LEFT"/>
+            <enum name="GL_RIGHT"/>
+        </group>
+
+        <group name="RenderingMode">
+            <enum name="GL_FEEDBACK"/>
+            <enum name="GL_RENDER"/>
+            <enum name="GL_SELECT"/>
+        </group>
+
+        <group name="SamplePatternSGIS">
+            <enum name="GL_1PASS_EXT"/>
+            <enum name="GL_1PASS_SGIS"/>
+            <enum name="GL_2PASS_0_EXT"/>
+            <enum name="GL_2PASS_0_SGIS"/>
+            <enum name="GL_2PASS_1_EXT"/>
+            <enum name="GL_2PASS_1_SGIS"/>
+            <enum name="GL_4PASS_0_EXT"/>
+            <enum name="GL_4PASS_0_SGIS"/>
+            <enum name="GL_4PASS_1_EXT"/>
+            <enum name="GL_4PASS_1_SGIS"/>
+            <enum name="GL_4PASS_2_EXT"/>
+            <enum name="GL_4PASS_2_SGIS"/>
+            <enum name="GL_4PASS_3_EXT"/>
+            <enum name="GL_4PASS_3_SGIS"/>
+        </group>
+
+        <group name="SemaphoreParameterName">
+            <enum name="GL_D3D12_FENCE_VALUE_EXT"/>
+        </group>
+
+        <group name="SeparableTargetEXT">
+            <enum name="GL_SEPARABLE_2D"/>
+            <enum name="GL_SEPARABLE_2D_EXT"/>
+        </group>
+
+        <group name="ShadingModel">
+            <enum name="GL_FLAT"/>
+            <enum name="GL_SMOOTH"/>
+        </group>
+
+        <group name="StencilFaceDirection">
+             <enum name="GL_FRONT"/>
+             <enum name="GL_BACK"/>
+             <enum name="GL_FRONT_AND_BACK"/>
+        </group>
+
+        <group name="StencilFunction">
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_NOTEQUAL"/>
+        </group>
+
+        <group name="StencilOp">
+            <enum name="GL_DECR"/>
+            <enum name="GL_DECR_WRAP"/>
+            <enum name="GL_INCR"/>
+            <enum name="GL_INCR_WRAP"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_KEEP"/>
+            <enum name="GL_REPLACE"/>
+            <enum name="GL_ZERO"/>
+        </group>
+
+        <group name="StringName">
+            <enum name="GL_EXTENSIONS"/>
+            <enum name="GL_RENDERER"/>
+            <enum name="GL_VENDOR"/>
+            <enum name="GL_VERSION"/>
+            <enum name="GL_SHADING_LANGUAGE_VERSION"/>
+        </group>
+
+        <group name="SyncObjectMask">
+            <enum name="GL_SYNC_FLUSH_COMMANDS_BIT"/>
+            <enum name="GL_SYNC_FLUSH_COMMANDS_BIT_APPLE"/>
+        </group>
+
+        <group name="TexCoordPointerType">
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+        </group>
+
+        <group name="TextureCoordName">
+            <enum name="GL_S"/>
+            <enum name="GL_T"/>
+            <enum name="GL_R"/>
+            <enum name="GL_Q"/>
+        </group>
+
+        <group name="TextureEnvMode">
+            <enum name="GL_ADD"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_DECAL"/>
+            <enum name="GL_MODULATE"/>
+            <enum name="GL_REPLACE_EXT"/>
+            <enum name="GL_TEXTURE_ENV_BIAS_SGIX"/>
+        </group>
+
+        <group name="TextureEnvParameter">
+            <enum name="GL_TEXTURE_ENV_COLOR"/>
+            <enum name="GL_TEXTURE_ENV_MODE"/>
+        </group>
+
+        <group name="TextureEnvTarget">
+            <enum name="GL_TEXTURE_ENV"/>
+        </group>
+
+        <group name="TextureFilterFuncSGIS">
+            <enum name="GL_FILTER4_SGIS"/>
+        </group>
+
+        <group name="TextureGenMode">
+            <enum name="GL_EYE_DISTANCE_TO_LINE_SGIS"/>
+            <enum name="GL_EYE_DISTANCE_TO_POINT_SGIS"/>
+            <enum name="GL_EYE_LINEAR"/>
+            <enum name="GL_OBJECT_DISTANCE_TO_LINE_SGIS"/>
+            <enum name="GL_OBJECT_DISTANCE_TO_POINT_SGIS"/>
+            <enum name="GL_OBJECT_LINEAR"/>
+            <enum name="GL_SPHERE_MAP"/>
+        </group>
+
+        <group name="TextureGenParameter">
+            <enum name="GL_EYE_LINE_SGIS"/>
+            <enum name="GL_EYE_PLANE"/>
+            <enum name="GL_EYE_POINT_SGIS"/>
+            <enum name="GL_OBJECT_LINE_SGIS"/>
+            <enum name="GL_OBJECT_PLANE"/>
+            <enum name="GL_OBJECT_POINT_SGIS"/>
+            <enum name="GL_TEXTURE_GEN_MODE"/>
+        </group>
+
+        <group name="TextureMagFilter">
+            <enum name="GL_FILTER4_SGIS"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_LINEAR_DETAIL_ALPHA_SGIS"/>
+            <enum name="GL_LINEAR_DETAIL_COLOR_SGIS"/>
+            <enum name="GL_LINEAR_DETAIL_SGIS"/>
+            <enum name="GL_LINEAR_SHARPEN_ALPHA_SGIS"/>
+            <enum name="GL_LINEAR_SHARPEN_COLOR_SGIS"/>
+            <enum name="GL_LINEAR_SHARPEN_SGIS"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_CEILING_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_ROUND_SGIX"/>
+        </group>
+
+        <group name="TextureMinFilter">
+            <enum name="GL_FILTER4_SGIS"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_LINEAR_CLIPMAP_LINEAR_SGIX"/>
+            <enum name="GL_LINEAR_CLIPMAP_NEAREST_SGIX"/>
+            <enum name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <enum name="GL_LINEAR_MIPMAP_NEAREST"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_NEAREST_CLIPMAP_LINEAR_SGIX"/>
+            <enum name="GL_NEAREST_CLIPMAP_NEAREST_SGIX"/>
+            <enum name="GL_NEAREST_MIPMAP_LINEAR"/>
+            <enum name="GL_NEAREST_MIPMAP_NEAREST"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_CEILING_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX"/>
+            <enum name="GL_PIXEL_TEX_GEN_Q_ROUND_SGIX"/>
+        </group>
+
+        <group name="TextureParameterName">
+            <enum name="GL_DETAIL_TEXTURE_LEVEL_SGIS"/>
+            <enum name="GL_DETAIL_TEXTURE_MODE_SGIS"/>
+            <enum name="GL_DUAL_TEXTURE_SELECT_SGIS"/>
+            <enum name="GL_GENERATE_MIPMAP"/>
+            <enum name="GL_GENERATE_MIPMAP_SGIS"/>
+            <enum name="GL_POST_TEXTURE_FILTER_BIAS_SGIX"/>
+            <enum name="GL_POST_TEXTURE_FILTER_SCALE_SGIX"/>
+            <enum name="GL_QUAD_TEXTURE_SELECT_SGIS"/>
+            <enum name="GL_SHADOW_AMBIENT_SGIX"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR"/>
+            <enum name="GL_TEXTURE_CLIPMAP_CENTER_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_DEPTH_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_FRAME_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_OFFSET_SGIX"/>
+            <enum name="GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+            <enum name="GL_TEXTURE_COMPARE_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_R_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_S_SGIX"/>
+            <enum name="GL_TEXTURE_LOD_BIAS_T_SGIX"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_R_SGIX"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_S_SGIX"/>
+            <enum name="GL_TEXTURE_MAX_CLAMP_T_SGIX"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_PRIORITY"/>
+            <enum name="GL_TEXTURE_PRIORITY_EXT"/>
+            <enum name="GL_TEXTURE_WRAP_Q_SGIS"/>
+            <enum name="GL_TEXTURE_WRAP_R"/>
+            <enum name="GL_TEXTURE_WRAP_R_EXT"/>
+            <enum name="GL_TEXTURE_WRAP_R_OES"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_TEXTURE_BASE_LEVEL"/>
+            <enum name="GL_TEXTURE_COMPARE_MODE"/>
+            <enum name="GL_TEXTURE_COMPARE_FUNC"/>
+            <enum name="GL_TEXTURE_LOD_BIAS"/>
+            <enum name="GL_TEXTURE_MIN_LOD"/>
+            <enum name="GL_TEXTURE_MAX_LOD"/>
+            <enum name="GL_TEXTURE_MAX_LEVEL"/>
+            <enum name="GL_TEXTURE_SWIZZLE_R"/>
+            <enum name="GL_TEXTURE_SWIZZLE_G"/>
+            <enum name="GL_TEXTURE_SWIZZLE_B"/>
+            <enum name="GL_TEXTURE_SWIZZLE_A"/>
+            <enum name="GL_TEXTURE_SWIZZLE_RGBA"/>
+            <enum name="GL_TEXTURE_TILING_EXT"/>
+            <enum name="GL_DEPTH_STENCIL_TEXTURE_MODE"/>
+            <enum name="GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS"/>
+            <enum name="GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS"/>
+            <enum name="GL_TEXTURE_4DSIZE_SGIS"/>
+            <enum name="GL_TEXTURE_ALPHA_SIZE"/>
+            <enum name="GL_TEXTURE_BASE_LEVEL_SGIS"/>
+            <enum name="GL_TEXTURE_BLUE_SIZE"/>
+            <enum name="GL_TEXTURE_BORDER"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR_NV"/>
+            <enum name="GL_TEXTURE_COMPARE_OPERATOR_SGIX"/>
+            <enum name="GL_TEXTURE_COMPONENTS"/>
+            <enum name="GL_TEXTURE_DEPTH_EXT"/>
+            <enum name="GL_TEXTURE_FILTER4_SIZE_SGIS"/>
+            <enum name="GL_TEXTURE_GEQUAL_R_SGIX"/>
+            <enum name="GL_TEXTURE_GREEN_SIZE"/>
+            <enum name="GL_TEXTURE_HEIGHT"/>
+            <enum name="GL_TEXTURE_INTENSITY_SIZE"/>
+            <enum name="GL_TEXTURE_INTERNAL_FORMAT"/>
+            <enum name="GL_TEXTURE_LEQUAL_R_SGIX"/>
+            <enum name="GL_TEXTURE_LUMINANCE_SIZE"/>
+            <enum name="GL_TEXTURE_MAX_LEVEL_SGIS"/>
+            <enum name="GL_TEXTURE_MAX_LOD_SGIS"/>
+            <enum name="GL_TEXTURE_MIN_LOD_SGIS"/>
+            <enum name="GL_TEXTURE_RED_SIZE"/>
+            <enum name="GL_TEXTURE_RESIDENT"/>
+            <enum name="GL_TEXTURE_WIDTH"/>
+        </group>
+
+        <group name="TextureStorageMaskAMD">
+            <enum name="GL_TEXTURE_STORAGE_SPARSE_BIT_AMD"/>
+        </group>
+
+        <group name="TextureTarget">
+            <enum name="GL_DETAIL_TEXTURE_2D_SGIS"/>
+            <enum name="GL_PROXY_TEXTURE_1D"/>
+            <enum name="GL_PROXY_TEXTURE_1D_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_1D_ARRAY_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_1D_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_2D"/>
+            <enum name="GL_PROXY_TEXTURE_2D_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_2D_ARRAY_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_2D_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE"/>
+            <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_3D"/>
+            <enum name="GL_PROXY_TEXTURE_3D_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_4D_SGIS"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARB"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP_EXT"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+            <enum name="GL_PROXY_TEXTURE_RECTANGLE"/>
+            <enum name="GL_PROXY_TEXTURE_RECTANGLE_ARB"/>
+            <enum name="GL_PROXY_TEXTURE_RECTANGLE_NV"/>
+            <enum name="GL_TEXTURE_1D"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_TEXTURE_3D"/>
+            <enum name="GL_TEXTURE_3D_EXT"/>
+            <enum name="GL_TEXTURE_3D_OES"/>
+            <enum name="GL_TEXTURE_4D_SGIS"/>
+            <enum name="GL_TEXTURE_RECTANGLE"/>
+            <enum name="GL_TEXTURE_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_EXT"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_OES"/>
+            <enum name="GL_TEXTURE_1D_ARRAY"/>
+            <enum name="GL_TEXTURE_2D_ARRAY"/>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+        </group>
+
+        <group name="TextureWrapMode">
+            <enum name="GL_CLAMP"/>
+            <enum name="GL_CLAMP_TO_BORDER"/>
+            <enum name="GL_CLAMP_TO_BORDER_ARB"/>
+            <enum name="GL_CLAMP_TO_BORDER_NV"/>
+            <enum name="GL_CLAMP_TO_BORDER_SGIS"/>
+            <enum name="GL_CLAMP_TO_EDGE"/>
+            <enum name="GL_CLAMP_TO_EDGE_SGIS"/>
+            <enum name="GL_REPEAT"/>
+        </group>
+
+        <group name="UseProgramStageMask">
+            <enum name="GL_VERTEX_SHADER_BIT"/>
+            <enum name="GL_VERTEX_SHADER_BIT_EXT"/>
+            <enum name="GL_FRAGMENT_SHADER_BIT"/>
+            <enum name="GL_FRAGMENT_SHADER_BIT_EXT"/>
+            <enum name="GL_GEOMETRY_SHADER_BIT"/>
+            <enum name="GL_GEOMETRY_SHADER_BIT_EXT"/>
+            <enum name="GL_GEOMETRY_SHADER_BIT_OES"/>
+            <enum name="GL_TESS_CONTROL_SHADER_BIT"/>
+            <enum name="GL_TESS_CONTROL_SHADER_BIT_EXT"/>
+            <enum name="GL_TESS_CONTROL_SHADER_BIT_OES"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_BIT"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_BIT_EXT"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_BIT_OES"/>
+            <enum name="GL_COMPUTE_SHADER_BIT"/>
+            <enum name="GL_MESH_SHADER_BIT_NV"/>
+            <enum name="GL_TASK_SHADER_BIT_NV"/>
+            <enum name="GL_ALL_SHADER_BITS"/>
+            <enum name="GL_ALL_SHADER_BITS_EXT"/>
+        </group>
+
+        <group name="VertexPointerType">
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_SHORT"/>
+        </group>
+
+        <group name="FramebufferAttachment">
+            <enum name="GL_MAX_COLOR_ATTACHMENTS"/>
+            <enum name="GL_MAX_COLOR_ATTACHMENTS_EXT"/>
+            <enum name="GL_MAX_COLOR_ATTACHMENTS_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT0"/>
+            <enum name="GL_COLOR_ATTACHMENT0_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT0_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT0_OES"/>
+            <enum name="GL_COLOR_ATTACHMENT1"/>
+            <enum name="GL_COLOR_ATTACHMENT1_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT1_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT2"/>
+            <enum name="GL_COLOR_ATTACHMENT2_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT2_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT3"/>
+            <enum name="GL_COLOR_ATTACHMENT3_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT3_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT4"/>
+            <enum name="GL_COLOR_ATTACHMENT4_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT4_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT5"/>
+            <enum name="GL_COLOR_ATTACHMENT5_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT5_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT6"/>
+            <enum name="GL_COLOR_ATTACHMENT6_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT6_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT7"/>
+            <enum name="GL_COLOR_ATTACHMENT7_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT7_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT8"/>
+            <enum name="GL_COLOR_ATTACHMENT8_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT8_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT9"/>
+            <enum name="GL_COLOR_ATTACHMENT9_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT9_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT10"/>
+            <enum name="GL_COLOR_ATTACHMENT10_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT10_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT11"/>
+            <enum name="GL_COLOR_ATTACHMENT11_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT11_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT12"/>
+            <enum name="GL_COLOR_ATTACHMENT12_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT12_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT13"/>
+            <enum name="GL_COLOR_ATTACHMENT13_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT13_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT14"/>
+            <enum name="GL_COLOR_ATTACHMENT14_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT14_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT15"/>
+            <enum name="GL_COLOR_ATTACHMENT15_EXT"/>
+            <enum name="GL_COLOR_ATTACHMENT15_NV"/>
+            <enum name="GL_COLOR_ATTACHMENT16"/>
+            <enum name="GL_COLOR_ATTACHMENT17"/>
+            <enum name="GL_COLOR_ATTACHMENT18"/>
+            <enum name="GL_COLOR_ATTACHMENT19"/>
+            <enum name="GL_COLOR_ATTACHMENT20"/>
+            <enum name="GL_COLOR_ATTACHMENT21"/>
+            <enum name="GL_COLOR_ATTACHMENT22"/>
+            <enum name="GL_COLOR_ATTACHMENT23"/>
+            <enum name="GL_COLOR_ATTACHMENT24"/>
+            <enum name="GL_COLOR_ATTACHMENT25"/>
+            <enum name="GL_COLOR_ATTACHMENT26"/>
+            <enum name="GL_COLOR_ATTACHMENT27"/>
+            <enum name="GL_COLOR_ATTACHMENT28"/>
+            <enum name="GL_COLOR_ATTACHMENT29"/>
+            <enum name="GL_COLOR_ATTACHMENT30"/>
+            <enum name="GL_COLOR_ATTACHMENT31"/>
+            <enum name="GL_DEPTH_ATTACHMENT"/>
+            <enum name="GL_DEPTH_STENCIL_ATTACHMENT"/>
+            <enum name="GL_DEPTH_ATTACHMENT_EXT"/>
+            <enum name="GL_DEPTH_ATTACHMENT_OES"/>
+        </group>
+
+        <group name="RenderbufferTarget">
+            <enum name="GL_RENDERBUFFER" />
+        </group>
+
+        <group name="FramebufferTarget">
+            <enum name="GL_FRAMEBUFFER" />
+            <enum name="GL_DRAW_FRAMEBUFFER" />
+            <enum name="GL_READ_FRAMEBUFFER" />
+        </group>
+
+        <group name="TextureUnit">
+            <enum name="GL_TEXTURE0"/>
+            <enum name="GL_TEXTURE1"/>
+            <enum name="GL_TEXTURE2"/>
+            <enum name="GL_TEXTURE3"/>
+            <enum name="GL_TEXTURE4"/>
+            <enum name="GL_TEXTURE5"/>
+            <enum name="GL_TEXTURE6"/>
+            <enum name="GL_TEXTURE7"/>
+            <enum name="GL_TEXTURE8"/>
+            <enum name="GL_TEXTURE9"/>
+            <enum name="GL_TEXTURE10"/>
+            <enum name="GL_TEXTURE11"/>
+            <enum name="GL_TEXTURE12"/>
+            <enum name="GL_TEXTURE13"/>
+            <enum name="GL_TEXTURE14"/>
+            <enum name="GL_TEXTURE15"/>
+            <enum name="GL_TEXTURE16"/>
+            <enum name="GL_TEXTURE17"/>
+            <enum name="GL_TEXTURE18"/>
+            <enum name="GL_TEXTURE19"/>
+            <enum name="GL_TEXTURE20"/>
+            <enum name="GL_TEXTURE21"/>
+            <enum name="GL_TEXTURE22"/>
+            <enum name="GL_TEXTURE23"/>
+            <enum name="GL_TEXTURE24"/>
+            <enum name="GL_TEXTURE25"/>
+            <enum name="GL_TEXTURE26"/>
+            <enum name="GL_TEXTURE27"/>
+            <enum name="GL_TEXTURE28"/>
+            <enum name="GL_TEXTURE29"/>
+            <enum name="GL_TEXTURE30"/>
+            <enum name="GL_TEXTURE31"/>
+        </group>
+
+        <group name="ConditionalRenderMode">
+            <enum name="GL_QUERY_WAIT"/>
+            <enum name="GL_QUERY_NO_WAIT"/>
+            <enum name="GL_QUERY_BY_REGION_WAIT"/>
+            <enum name="GL_QUERY_BY_REGION_NO_WAIT"/>
+            <enum name="GL_QUERY_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_NO_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_BY_REGION_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_BY_REGION_NO_WAIT_INVERTED"/>
+        </group>
+
+        <group name="FragmentOpATI">
+            <enum name="GL_MOV_ATI"/>
+            <enum name="GL_ADD_ATI"/>
+            <enum name="GL_MUL_ATI"/>
+            <enum name="GL_SUB_ATI"/>
+            <enum name="GL_DOT3_ATI"/>
+            <enum name="GL_DOT4_ATI"/>
+            <enum name="GL_MAD_ATI"/>
+            <enum name="GL_LERP_ATI"/>
+            <enum name="GL_CND_ATI"/>
+            <enum name="GL_CND0_ATI"/>
+            <enum name="GL_DOT2_ADD_ATI"/>
+        </group>
+
+        <group name="FramebufferStatus">
+            <enum name="GL_FRAMEBUFFER_COMPLETE"/>
+            <enum name="GL_FRAMEBUFFER_UNDEFINED"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER"/>
+            <enum name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS"/>
+        </group>
+
+        <group name="GraphicsResetStatus">
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_GUILTY_CONTEXT_RESET"/>
+            <enum name="GL_INNOCENT_CONTEXT_RESET"/>
+            <enum name="GL_UNKNOWN_CONTEXT_RESET"/>
+        </group>
+
+        <group name="SyncStatus">
+            <enum name="GL_ALREADY_SIGNALED"/>
+            <enum name="GL_TIMEOUT_EXPIRED"/>
+            <enum name="GL_CONDITION_SATISFIED"/>
+            <enum name="GL_WAIT_FAILED"/>
+        </group>
+
+        <group name="QueryTarget">
+            <enum name="GL_SAMPLES_PASSED"/>
+            <enum name="GL_ANY_SAMPLES_PASSED"/>
+            <enum name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE"/>
+            <enum name="GL_PRIMITIVES_GENERATED"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"/>
+            <enum name="GL_TIME_ELAPSED"/>
+        </group>
+
+        <group name="QueryCounterTarget">
+            <enum name="GL_TIMESTAMP"/>
+        </group>
+
+        <group name="ConvolutionTarget">
+            <enum name="GL_CONVOLUTION_1D"/>
+            <enum name="GL_CONVOLUTION_2D"/>
+        </group>
+
+        <group name="PathFillMode">
+            <enum name="GL_INVERT"/>
+            <enum name="GL_COUNT_UP_NV"/>
+            <enum name="GL_COUNT_DOWN_NV"/>
+            <enum name="GL_PATH_FILL_MODE_NV"/>
+        </group>
+
+        <group name="ColorTableTarget">
+            <enum name="GL_COLOR_TABLE"/>
+            <enum name="GL_POST_CONVOLUTION_COLOR_TABLE"/>
+            <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE"/>
+        </group>
+
+        <group name="VertexBufferObjectParameter">
+            <enum name="GL_BUFFER_ACCESS"/>
+            <enum name="GL_BUFFER_ACCESS_FLAGS"/>
+            <enum name="GL_BUFFER_IMMUTABLE_STORAGE"/>
+            <enum name="GL_BUFFER_MAPPED"/>
+            <enum name="GL_BUFFER_MAP_LENGTH"/>
+            <enum name="GL_BUFFER_MAP_OFFSET"/>
+            <enum name="GL_BUFFER_SIZE"/>
+            <enum name="GL_BUFFER_STORAGE_FLAGS"/>
+            <enum name="GL_BUFFER_USAGE"/>
+        </group>
+
+        <group name="RenderbufferParameterName">
+            <enum name="GL_RENDERBUFFER_WIDTH"/>
+            <enum name="GL_RENDERBUFFER_HEIGHT"/>
+            <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+            <enum name="GL_RENDERBUFFER_SAMPLES"/>
+            <enum name="GL_RENDERBUFFER_RED_SIZE"/>
+            <enum name="GL_RENDERBUFFER_GREEN_SIZE"/>
+            <enum name="GL_RENDERBUFFER_BLUE_SIZE"/>
+            <enum name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+            <enum name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+            <enum name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+        </group>
+
+        <group name="VertexBufferObjectUsage">
+            <enum name="GL_STREAM_DRAW"/>
+            <enum name="GL_STREAM_READ"/>
+            <enum name="GL_STREAM_COPY"/>
+            <enum name="GL_STATIC_DRAW"/>
+            <enum name="GL_STATIC_READ"/>
+            <enum name="GL_STATIC_COPY"/>
+            <enum name="GL_DYNAMIC_DRAW"/>
+            <enum name="GL_DYNAMIC_READ"/>
+            <enum name="GL_DYNAMIC_COPY"/>
+        </group>
+
+        <group name="FramebufferParameterName">
+            <enum name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+        </group>
+
+        <group name="ProgramParameterPName">
+            <enum name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+            <enum name="GL_PROGRAM_SEPARABLE"/>
+        </group>
+
+        <group name="BlendingFactor">
+            <enum name="GL_ZERO"/>
+            <enum name="GL_ONE"/>
+            <enum name="GL_SRC_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+            <enum name="GL_DST_COLOR"/>
+            <enum name="GL_ONE_MINUS_DST_COLOR"/>
+            <enum name="GL_SRC_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+            <enum name="GL_DST_ALPHA"/>
+            <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+            <enum name="GL_CONSTANT_COLOR"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+            <enum name="GL_CONSTANT_ALPHA"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+            <enum name="GL_SRC_ALPHA_SATURATE"/>
+            <enum name="GL_SRC1_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC1_COLOR"/>
+            <enum name="GL_SRC1_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC1_ALPHA"/>
+        </group>
+
+        <group name="BindTransformFeedbackTarget">
+            <enum name="GL_TRANSFORM_FEEDBACK"/>
+        </group>
+
+        <group name="BlitFramebufferFilter">
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_LINEAR"/>
+        </group>
+
+        <group name="BufferStorageTarget">
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+            <enum name="GL_COPY_READ_BUFFER"/>
+            <enum name="GL_COPY_WRITE_BUFFER"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER"/>
+            <enum name="GL_DRAW_INDIRECT_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_PIXEL_PACK_BUFFER"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER"/>
+            <enum name="GL_QUERY_BUFFER"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_UNIFORM_BUFFER"/>
+        </group>
+
+        <group name="CheckFramebufferStatusTarget">
+            <enum name="GL_DRAW_FRAMEBUFFER"/>
+            <enum name="GL_READ_FRAMEBUFFER"/>
+            <enum name="GL_FRAMEBUFFER"/>
+        </group>
+
+        <group name="Buffer">
+            <enum name="GL_COLOR"/>
+            <enum name="GL_DEPTH"/>
+            <enum name="GL_STENCIL"/>
+        </group>
+
+        <group name="ClipControlOrigin">
+            <enum name="GL_LOWER_LEFT"/>
+            <enum name="GL_UPPER_LEFT"/>
+        </group>
+
+        <group name="ClipControlDepth">
+            <enum name="GL_NEGATIVE_ONE_TO_ONE"/>
+            <enum name="GL_ZERO_TO_ONE"/>
+        </group>
+
+        <group name="CopyBufferSubDataTarget">
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+            <enum name="GL_COPY_READ_BUFFER"/>
+            <enum name="GL_COPY_WRITE_BUFFER"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER"/>
+            <enum name="GL_DRAW_INDIRECT_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_PIXEL_PACK_BUFFER"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER"/>
+            <enum name="GL_QUERY_BUFFER"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_UNIFORM_BUFFER"/>
+        </group>
+
+        <group name="ShaderType">
+            <enum name="GL_COMPUTE_SHADER"/>
+            <enum name="GL_VERTEX_SHADER"/>
+            <enum name="GL_TESS_CONTROL_SHADER"/>
+            <enum name="GL_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_GEOMETRY_SHADER"/>
+            <enum name="GL_FRAGMENT_SHADER"/>
+            <enum name="GL_FRAGMENT_SHADER_ARB"/>
+            <enum name="GL_VERTEX_SHADER_ARB"/>
+        </group>
+
+        <group name="DebugSource">
+            <enum name="GL_DEBUG_SOURCE_API"/>
+            <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM"/>
+            <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER"/>
+            <enum name="GL_DEBUG_SOURCE_THIRD_PARTY"/>
+            <enum name="GL_DEBUG_SOURCE_APPLICATION"/>
+            <enum name="GL_DEBUG_SOURCE_OTHER"/>
+            <enum name="GL_DONT_CARE"/>
+        </group>
+
+        <group name="DebugType">
+            <enum name="GL_DEBUG_TYPE_ERROR"/>
+            <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_PORTABILITY"/>
+            <enum name="GL_DEBUG_TYPE_PERFORMANCE"/>
+            <enum name="GL_DEBUG_TYPE_MARKER"/>
+            <enum name="GL_DEBUG_TYPE_PUSH_GROUP"/>
+            <enum name="GL_DEBUG_TYPE_POP_GROUP"/>
+            <enum name="GL_DEBUG_TYPE_OTHER"/>
+            <enum name="GL_DONT_CARE"/>
+        </group>
+
+        <group name="DebugSeverity">
+            <enum name="GL_DEBUG_SEVERITY_LOW"/>
+            <enum name="GL_DEBUG_SEVERITY_MEDIUM"/>
+            <enum name="GL_DEBUG_SEVERITY_HIGH"/>
+            <enum name="GL_DEBUG_SEVERITY_NOTIFICATION"/>
+            <enum name="GL_DONT_CARE"/>
+        </group>
+
+        <group name="SyncCondition">
+            <enum name="GL_SYNC_GPU_COMMANDS_COMPLETE"/>
+        </group>
+
+        <group name="FogPName">
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_START"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_INDEX"/>
+            <enum name="GL_FOG_COORD_SRC"/>
+        </group>
+
+        <group name="AtomicCounterBufferPName">
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_BINDING"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER"/>
+        </group>
+
+        <group name="UniformBlockPName">
+            <enum name="GL_UNIFORM_BLOCK_BINDING"/>
+            <enum name="GL_UNIFORM_BLOCK_DATA_SIZE"/>
+            <enum name="GL_UNIFORM_BLOCK_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER"/>
+        </group>
+
+        <group name="UniformPName">
+            <enum name="GL_UNIFORM_TYPE"/>
+            <enum name="GL_UNIFORM_SIZE"/>
+            <enum name="GL_UNIFORM_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_INDEX"/>
+            <enum name="GL_UNIFORM_OFFSET"/>
+            <enum name="GL_UNIFORM_ARRAY_STRIDE"/>
+            <enum name="GL_UNIFORM_MATRIX_STRIDE"/>
+            <enum name="GL_UNIFORM_IS_ROW_MAJOR"/>
+            <enum name="GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX"/>
+        </group>
+
+        <group name="SamplerParameterName">
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_TEXTURE_WRAP_R"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR"/>
+            <enum name="GL_TEXTURE_MIN_LOD"/>
+            <enum name="GL_TEXTURE_MAX_LOD"/>
+            <enum name="GL_TEXTURE_COMPARE_MODE"/>
+            <enum name="GL_TEXTURE_COMPARE_FUNC"/>
+        </group>
+
+        <group name="VertexProvokingMode">
+            <enum name="GL_FIRST_VERTEX_CONVENTION"/>
+            <enum name="GL_LAST_VERTEX_CONVENTION"/>
+        </group>
+
+        <group name="PatchParameterName">
+            <enum name="GL_PATCH_VERTICES"/>
+            <enum name="GL_PATCH_DEFAULT_OUTER_LEVEL"/>
+            <enum name="GL_PATCH_DEFAULT_INNER_LEVEL"/>
+        </group>
+
+        <group name="ObjectIdentifier">
+            <enum name="GL_BUFFER"/>
+            <enum name="GL_SHADER"/>
+            <enum name="GL_PROGRAM"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_QUERY"/>
+            <enum name="GL_PROGRAM_PIPELINE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK"/>
+            <enum name="GL_SAMPLER"/>
+            <enum name="GL_TEXTURE"/>
+            <enum name="GL_RENDERBUFFER"/>
+            <enum name="GL_FRAMEBUFFER"/>
+        </group>
+
+        <group name="ColorBuffer">
+            <enum name="GL_NONE"/>
+            <enum name="GL_FRONT_LEFT"/>
+            <enum name="GL_FRONT_RIGHT"/>
+            <enum name="GL_BACK_LEFT"/>
+            <enum name="GL_BACK_RIGHT"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_LEFT"/>
+            <enum name="GL_RIGHT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_COLOR_ATTACHMENT0"/>
+            <enum name="GL_COLOR_ATTACHMENT1"/>
+            <enum name="GL_COLOR_ATTACHMENT2"/>
+            <enum name="GL_COLOR_ATTACHMENT3"/>
+            <enum name="GL_COLOR_ATTACHMENT4"/>
+            <enum name="GL_COLOR_ATTACHMENT5"/>
+            <enum name="GL_COLOR_ATTACHMENT6"/>
+            <enum name="GL_COLOR_ATTACHMENT7"/>
+            <enum name="GL_COLOR_ATTACHMENT8"/>
+            <enum name="GL_COLOR_ATTACHMENT9"/>
+            <enum name="GL_COLOR_ATTACHMENT10"/>
+            <enum name="GL_COLOR_ATTACHMENT11"/>
+            <enum name="GL_COLOR_ATTACHMENT12"/>
+            <enum name="GL_COLOR_ATTACHMENT13"/>
+            <enum name="GL_COLOR_ATTACHMENT14"/>
+            <enum name="GL_COLOR_ATTACHMENT15"/>
+            <enum name="GL_COLOR_ATTACHMENT16"/>
+            <enum name="GL_COLOR_ATTACHMENT17"/>
+            <enum name="GL_COLOR_ATTACHMENT18"/>
+            <enum name="GL_COLOR_ATTACHMENT19"/>
+            <enum name="GL_COLOR_ATTACHMENT20"/>
+            <enum name="GL_COLOR_ATTACHMENT21"/>
+            <enum name="GL_COLOR_ATTACHMENT22"/>
+            <enum name="GL_COLOR_ATTACHMENT23"/>
+            <enum name="GL_COLOR_ATTACHMENT24"/>
+            <enum name="GL_COLOR_ATTACHMENT25"/>
+            <enum name="GL_COLOR_ATTACHMENT26"/>
+            <enum name="GL_COLOR_ATTACHMENT27"/>
+            <enum name="GL_COLOR_ATTACHMENT28"/>
+            <enum name="GL_COLOR_ATTACHMENT29"/>
+            <enum name="GL_COLOR_ATTACHMENT30"/>
+            <enum name="GL_COLOR_ATTACHMENT31"/>
+        </group>
+
+        <group name="MapQuery">
+            <enum name="GL_COEFF"/>
+            <enum name="GL_ORDER"/>
+            <enum name="GL_DOMAIN"/>
+        </group>
+
+        <group name="VertexArrayPName">
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_LONG"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR"/>
+            <enum name="GL_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+        </group>
+
+        <group name="TransformFeedbackPName">
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PAUSED"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_ACTIVE"/>
+        </group>
+
+        <group name="SyncParameterName">
+            <enum name="GL_OBJECT_TYPE"/>
+            <enum name="GL_SYNC_STATUS"/>
+            <enum name="GL_SYNC_CONDITION"/>
+            <enum name="GL_SYNC_FLAGS"/>
+        </group>
+
+        <group name="ShaderParameterName">
+            <enum name="GL_SHADER_TYPE"/>
+            <enum name="GL_DELETE_STATUS"/>
+            <enum name="GL_COMPILE_STATUS"/>
+            <enum name="GL_INFO_LOG_LENGTH"/>
+            <enum name="GL_SHADER_SOURCE_LENGTH"/>
+        </group>
+
+        <group name="QueryObjectParameterName">
+            <enum name="GL_QUERY_RESULT_AVAILABLE"/>
+            <enum name="GL_QUERY_RESULT"/>
+            <enum name="GL_QUERY_RESULT_NO_WAIT"/>
+            <enum name="GL_QUERY_TARGET"/>
+        </group>
+
+        <group name="QueryParameterName">
+            <enum name="GL_CURRENT_QUERY"/>
+            <enum name="GL_QUERY_COUNTER_BITS"/>
+        </group>
+
+        <group name="ProgramStagePName">
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORMS"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS"/>
+            <enum name="GL_ACTIVE_SUBROUTINES"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_MAX_LENGTH"/>
+        </group>
+
+        <group name="PipelineParameterName">
+            <enum name="GL_ACTIVE_PROGRAM"/>
+            <enum name="GL_VERTEX_SHADER"/>
+            <enum name="GL_TESS_CONTROL_SHADER"/>
+            <enum name="GL_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_GEOMETRY_SHADER"/>
+            <enum name="GL_FRAGMENT_SHADER"/>
+            <enum name="GL_INFO_LOG_LENGTH"/>
+        </group>
+
+        <group name="ProgramInterface">
+            <enum name="GL_UNIFORM"/>
+            <enum name="GL_UNIFORM_BLOCK"/>
+            <enum name="GL_PROGRAM_INPUT"/>
+            <enum name="GL_PROGRAM_OUTPUT"/>
+            <enum name="GL_VERTEX_SUBROUTINE"/>
+            <enum name="GL_TESS_CONTROL_SUBROUTINE"/>
+            <enum name="GL_TESS_EVALUATION_SUBROUTINE"/>
+            <enum name="GL_GEOMETRY_SUBROUTINE"/>
+            <enum name="GL_FRAGMENT_SUBROUTINE"/>
+            <enum name="GL_COMPUTE_SUBROUTINE"/>
+            <enum name="GL_VERTEX_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TESS_CONTROL_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TESS_EVALUATION_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_GEOMETRY_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_FRAGMENT_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_COMPUTE_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_BUFFER_VARIABLE"/>
+            <enum name="GL_SHADER_STORAGE_BLOCK"/>
+        </group>
+
+        <group name="VertexAttribEnum">
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR"/>
+            <enum name="GL_CURRENT_VERTEX_ATTRIB"/>
+        </group>
+
+        <group name="VertexAttribType">
+            <enum name="GL_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_FIXED"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_HALF_FLOAT"/>
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_INT_2_10_10_10_REV"/>
+            <enum name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+            <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+        </group>
+
+        <group name="AttributeType">
+            <enum name="GL_FLOAT_VEC2"/>
+            <enum name="GL_FLOAT_VEC2_ARB"/>
+            <enum name="GL_FLOAT_VEC3"/>
+            <enum name="GL_FLOAT_VEC3_ARB"/>
+            <enum name="GL_FLOAT_VEC4"/>
+            <enum name="GL_FLOAT_VEC4_ARB"/>
+            <enum name="GL_INT_VEC2"/>
+            <enum name="GL_INT_VEC2_ARB"/>
+            <enum name="GL_INT_VEC3"/>
+            <enum name="GL_INT_VEC3_ARB"/>
+            <enum name="GL_INT_VEC4"/>
+            <enum name="GL_INT_VEC4_ARB"/>
+            <enum name="GL_BOOL"/>
+            <enum name="GL_BOOL_ARB"/>
+            <enum name="GL_BOOL_VEC2"/>
+            <enum name="GL_BOOL_VEC2_ARB"/>
+            <enum name="GL_BOOL_VEC3"/>
+            <enum name="GL_BOOL_VEC3_ARB"/>
+            <enum name="GL_BOOL_VEC4"/>
+            <enum name="GL_BOOL_VEC4_ARB"/>
+            <enum name="GL_FLOAT_MAT2"/>
+            <enum name="GL_FLOAT_MAT2_ARB"/>
+            <enum name="GL_FLOAT_MAT3"/>
+            <enum name="GL_FLOAT_MAT3_ARB"/>
+            <enum name="GL_FLOAT_MAT4"/>
+            <enum name="GL_FLOAT_MAT4_ARB"/>
+            <enum name="GL_SAMPLER_1D"/>
+            <enum name="GL_SAMPLER_1D_ARB"/>
+            <enum name="GL_SAMPLER_2D"/>
+            <enum name="GL_SAMPLER_2D_ARB"/>
+            <enum name="GL_SAMPLER_3D"/>
+            <enum name="GL_SAMPLER_3D_ARB"/>
+            <enum name="GL_SAMPLER_3D_OES"/>
+            <enum name="GL_SAMPLER_CUBE"/>
+            <enum name="GL_SAMPLER_CUBE_ARB"/>
+            <enum name="GL_SAMPLER_1D_SHADOW"/>
+            <enum name="GL_SAMPLER_1D_SHADOW_ARB"/>
+            <enum name="GL_SAMPLER_2D_SHADOW"/>
+            <enum name="GL_SAMPLER_2D_SHADOW_ARB"/>
+            <enum name="GL_SAMPLER_2D_SHADOW_EXT"/>
+            <enum name="GL_SAMPLER_2D_RECT"/>
+            <enum name="GL_SAMPLER_2D_RECT_ARB"/>
+            <enum name="GL_SAMPLER_2D_RECT_SHADOW"/>
+            <enum name="GL_SAMPLER_2D_RECT_SHADOW_ARB"/>
+            <enum name="GL_FLOAT_MAT2x3"/>
+            <enum name="GL_FLOAT_MAT2x3_NV"/>
+            <enum name="GL_FLOAT_MAT2x4"/>
+            <enum name="GL_FLOAT_MAT2x4_NV"/>
+            <enum name="GL_FLOAT_MAT3x2"/>
+            <enum name="GL_FLOAT_MAT3x2_NV"/>
+            <enum name="GL_FLOAT_MAT3x4"/>
+            <enum name="GL_FLOAT_MAT3x4_NV"/>
+            <enum name="GL_FLOAT_MAT4x2"/>
+            <enum name="GL_FLOAT_MAT4x2_NV"/>
+            <enum name="GL_FLOAT_MAT4x3"/>
+            <enum name="GL_FLOAT_MAT4x3_NV"/>
+        </group>
+
+        <group name="InternalFormatPName">
+            <enum name="GL_NUM_SAMPLE_COUNTS"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_INTERNALFORMAT_SUPPORTED"/>
+            <enum name="GL_INTERNALFORMAT_PREFERRED"/>
+            <enum name="GL_INTERNALFORMAT_RED_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_GREEN_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_BLUE_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_ALPHA_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_DEPTH_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_STENCIL_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_SHARED_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_RED_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_GREEN_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_BLUE_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_ALPHA_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_DEPTH_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_STENCIL_TYPE"/>
+            <enum name="GL_MAX_WIDTH"/>
+            <enum name="GL_MAX_HEIGHT"/>
+            <enum name="GL_MAX_DEPTH"/>
+            <enum name="GL_MAX_LAYERS"/>
+            <enum name="GL_COLOR_COMPONENTS"/>
+            <enum name="GL_COLOR_RENDERABLE"/>
+            <enum name="GL_DEPTH_RENDERABLE"/>
+            <enum name="GL_STENCIL_RENDERABLE"/>
+            <enum name="GL_FRAMEBUFFER_RENDERABLE"/>
+            <enum name="GL_FRAMEBUFFER_RENDERABLE_LAYERED"/>
+            <enum name="GL_FRAMEBUFFER_BLEND"/>
+            <enum name="GL_READ_PIXELS"/>
+            <enum name="GL_READ_PIXELS_FORMAT"/>
+            <enum name="GL_READ_PIXELS_TYPE"/>
+            <enum name="GL_TEXTURE_IMAGE_FORMAT"/>
+            <enum name="GL_TEXTURE_IMAGE_TYPE"/>
+            <enum name="GL_GET_TEXTURE_IMAGE_FORMAT"/>
+            <enum name="GL_GET_TEXTURE_IMAGE_TYPE"/>
+            <enum name="GL_MIPMAP"/>
+            <enum name="GL_GENERATE_MIPMAP"/>
+            <enum name="GL_AUTO_GENERATE_MIPMAP"/>
+            <enum name="GL_COLOR_ENCODING"/>
+            <enum name="GL_SRGB_READ"/>
+            <enum name="GL_SRGB_WRITE"/>
+            <enum name="GL_FILTER"/>
+            <enum name="GL_VERTEX_TEXTURE"/>
+            <enum name="GL_TESS_CONTROL_TEXTURE"/>
+            <enum name="GL_TESS_EVALUATION_TEXTURE"/>
+            <enum name="GL_GEOMETRY_TEXTURE"/>
+            <enum name="GL_FRAGMENT_TEXTURE"/>
+            <enum name="GL_COMPUTE_TEXTURE"/>
+            <enum name="GL_TEXTURE_SHADOW"/>
+            <enum name="GL_TEXTURE_GATHER"/>
+            <enum name="GL_TEXTURE_GATHER_SHADOW"/>
+            <enum name="GL_SHADER_IMAGE_LOAD"/>
+            <enum name="GL_SHADER_IMAGE_STORE"/>
+            <enum name="GL_SHADER_IMAGE_ATOMIC"/>
+            <enum name="GL_IMAGE_TEXEL_SIZE"/>
+            <enum name="GL_IMAGE_COMPATIBILITY_CLASS"/>
+            <enum name="GL_IMAGE_PIXEL_FORMAT"/>
+            <enum name="GL_IMAGE_PIXEL_TYPE"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE"/>
+            <enum name="GL_TEXTURE_COMPRESSED"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_WIDTH"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_SIZE"/>
+            <enum name="GL_CLEAR_BUFFER"/>
+            <enum name="GL_TEXTURE_VIEW"/>
+            <enum name="GL_VIEW_COMPATIBILITY_CLASS"/>
+            <enum name="GL_CLEAR_TEXTURE"/>
+        </group>
+
+        <group name="FramebufferAttachmentParameterName">
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+        </group>
+
+        <group name="ProgramInterfacePName">
+            <enum name="GL_ACTIVE_RESOURCES"/>
+            <enum name="GL_MAX_NAME_LENGTH"/>
+            <enum name="GL_MAX_NUM_ACTIVE_VARIABLES"/>
+            <enum name="GL_MAX_NUM_COMPATIBLE_SUBROUTINES"/>
+        </group>
+
+        <group name="PrecisionType">
+            <enum name="GL_LOW_FLOAT"/>
+            <enum name="GL_MEDIUM_FLOAT"/>
+            <enum name="GL_HIGH_FLOAT"/>
+            <enum name="GL_LOW_INT"/>
+            <enum name="GL_MEDIUM_INT"/>
+            <enum name="GL_HIGH_INT"/>
+        </group>
+
+        <group name="VertexAttribPointerType">
+            <enum name="GL_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_HALF_FLOAT"/>
+            <enum name="GL_FIXED"/>
+            <enum name="GL_INT_2_10_10_10_REV"/>
+            <enum name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+            <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+        </group>
+
+        <group name="SubroutineParameterName">
+            <enum name="GL_NUM_COMPATIBLE_SUBROUTINES"/>
+            <enum name="GL_COMPATIBLE_SUBROUTINES"/>
+            <enum name="GL_UNIFORM_SIZE"/>
+            <enum name="GL_UNIFORM_NAME_LENGTH"/>
+        </group>
+
+        <group name="GetFramebufferParameter">
+            <enum name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+            <enum name="GL_DOUBLEBUFFER"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_STEREO"/>
+        </group>
+
+        <group name="PathStringFormat">
+            <enum name="GL_PATH_FORMAT_SVG_NV" />
+            <enum name="GL_PATH_FORMAT_PS_NV" />
+        </group>
+
+        <group name="PathFontTarget">
+            <enum name="GL_STANDARD_FONT_NAME_NV" />
+            <enum name="GL_SYSTEM_FONT_NAME_NV" />
+            <enum name="GL_FILE_NAME_NV" />
+        </group>
+
+        <group name="PathHandleMissingGlyphs">
+            <enum name="GL_SKIP_MISSING_GLYPH_NV" />
+            <enum name="GL_USE_MISSING_GLYPH_NV" />
+        </group>
+
+        <group name="PathParameter">
+            <enum name="GL_PATH_STROKE_WIDTH_NV" />
+            <enum name="GL_PATH_INITIAL_END_CAP_NV" />
+            <enum name="GL_PATH_TERMINAL_END_CAP_NV" />
+            <enum name="GL_PATH_JOIN_STYLE_NV" />
+            <enum name="GL_PATH_MITER_LIMIT_NV" />
+            <enum name="GL_PATH_INITIAL_DASH_CAP_NV" />
+            <enum name="GL_PATH_TERMINAL_DASH_CAP_NV" />
+            <enum name="GL_PATH_DASH_OFFSET_NV" />
+            <enum name="GL_PATH_CLIENT_LENGTH_NV" />
+            <enum name="GL_PATH_DASH_OFFSET_RESET_NV" />
+            <enum name="GL_PATH_FILL_MODE_NV" />
+            <enum name="GL_PATH_FILL_MASK_NV" />
+            <enum name="GL_PATH_FILL_COVER_MODE_NV" />
+            <enum name="GL_PATH_STROKE_COVER_MODE_NV" />
+            <enum name="GL_PATH_STROKE_MASK_NV" />
+            <!-- <enum name="GL_PATH_STROKE_BOUND_NV" comment="Removed from extension"/> -->
+            <enum name="GL_PATH_END_CAPS_NV" />
+            <enum name="GL_PATH_DASH_CAPS_NV" />
+            <enum name="GL_PATH_COMMAND_COUNT_NV" />
+            <enum name="GL_PATH_COORD_COUNT_NV" />
+            <enum name="GL_PATH_DASH_ARRAY_COUNT_NV" />
+            <enum name="GL_PATH_COMPUTED_LENGTH_NV" />
+            <enum name="GL_PATH_OBJECT_BOUNDING_BOX_NV" />
+            <enum name="GL_PATH_FILL_BOUNDING_BOX_NV" />
+            <enum name="GL_PATH_STROKE_BOUNDING_BOX_NV" />
+        </group>
+
+        <group name="PathColor">
+            <enum name="GL_PRIMARY_COLOR" />
+            <enum name="GL_PRIMARY_COLOR_NV" />
+            <enum name="GL_SECONDARY_COLOR_NV" />
+        </group>
+
+        <group name="PathGenMode">
+            <enum name="GL_NONE" />
+            <enum name="GL_EYE_LINEAR" />
+            <enum name="GL_OBJECT_LINEAR" />
+            <enum name="GL_PATH_OBJECT_BOUNDING_BOX_NV" />
+            <enum name="GL_CONSTANT" />
+        </group>
+
+        <group name="TextureLayout">
+            <enum name="GL_LAYOUT_GENERAL_EXT"/>
+            <enum name="GL_LAYOUT_COLOR_ATTACHMENT_EXT"/>
+            <enum name="GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT"/>
+            <enum name="GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT"/>
+            <enum name="GL_LAYOUT_SHADER_READ_ONLY_EXT"/>
+            <enum name="GL_LAYOUT_TRANSFER_SRC_EXT"/>
+            <enum name="GL_LAYOUT_TRANSFER_DST_EXT"/>
+            <enum name="GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT"/>
+            <enum name="GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT"/>
+        </group>
+
+        <group name="PathTransformType">
+            <enum name="GL_NONE" />
+            <enum name="GL_TRANSLATE_X_NV" />
+            <enum name="GL_TRANSLATE_Y_NV" />
+            <enum name="GL_TRANSLATE_2D_NV" />
+            <enum name="GL_TRANSLATE_3D_NV" />
+            <enum name="GL_AFFINE_2D_NV" />
+            <enum name="GL_AFFINE_3D_NV" />
+            <enum name="GL_TRANSPOSE_AFFINE_2D_NV" />
+            <enum name="GL_TRANSPOSE_AFFINE_3D_NV" />
+        </group>
+
+        <group name="PathElementType">
+            <enum name="GL_UTF8_NV" />
+            <enum name="GL_UTF16_NV" />
+        </group>
+
+        <group name="PathCoverMode">
+            <enum name="GL_CONVEX_HULL_NV" />
+            <enum name="GL_BOUNDING_BOX_NV" />
+            <enum name="GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV" />
+            <enum name="GL_PATH_FILL_COVER_MODE_NV" />
+        </group>
+
+        <group name="PathFontStyle">
+            <enum name="GL_NONE" />
+            <enum name="GL_BOLD_BIT_NV" />
+            <enum name="GL_ITALIC_BIT_NV" />
+        </group>
+
+        <group name="PathMetricMask">
+            <enum name="GL_GLYPH_WIDTH_BIT_NV" />
+            <enum name="GL_GLYPH_HEIGHT_BIT_NV" />
+            <enum name="GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV" />
+            <enum name="GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV" />
+            <enum name="GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV" />
+            <enum name="GL_GLYPH_VERTICAL_BEARING_X_BIT_NV" />
+            <enum name="GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV" />
+            <enum name="GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV" />
+            <enum name="GL_GLYPH_HAS_KERNING_BIT_NV" />
+            <enum name="GL_FONT_X_MIN_BOUNDS_BIT_NV" />
+            <enum name="GL_FONT_Y_MIN_BOUNDS_BIT_NV" />
+            <enum name="GL_FONT_X_MAX_BOUNDS_BIT_NV" />
+            <enum name="GL_FONT_Y_MAX_BOUNDS_BIT_NV" />
+            <enum name="GL_FONT_UNITS_PER_EM_BIT_NV" />
+            <enum name="GL_FONT_ASCENDER_BIT_NV" />
+            <enum name="GL_FONT_DESCENDER_BIT_NV" />
+            <enum name="GL_FONT_HEIGHT_BIT_NV" />
+            <enum name="GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV" />
+            <enum name="GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV" />
+            <enum name="GL_FONT_UNDERLINE_POSITION_BIT_NV" />
+            <enum name="GL_FONT_UNDERLINE_THICKNESS_BIT_NV" />
+            <enum name="GL_FONT_HAS_KERNING_BIT_NV" />
+            <enum name="GL_FONT_NUM_GLYPH_INDICES_BIT_NV" />
+        </group>
+
+        <group name="PathListMode">
+            <enum name="GL_ACCUM_ADJACENT_PAIRS_NV" />
+            <enum name="GL_ADJACENT_PAIRS_NV" />
+            <enum name="GL_FIRST_TO_REST_NV" />
+        </group>
+
+        <group name="ProgramPropertyARB">
+            <enum name="GL_DELETE_STATUS" />
+            <enum name="GL_LINK_STATUS" />
+            <enum name="GL_VALIDATE_STATUS" />
+            <enum name="GL_INFO_LOG_LENGTH" />
+            <enum name="GL_ATTACHED_SHADERS" />
+            <enum name="GL_ACTIVE_ATOMIC_COUNTER_BUFFERS" />
+            <enum name="GL_ACTIVE_ATTRIBUTES" />
+            <enum name="GL_ACTIVE_ATTRIBUTE_MAX_LENGTH" />
+            <enum name="GL_ACTIVE_UNIFORMS" />
+            <enum name="GL_ACTIVE_UNIFORM_BLOCKS" />
+            <enum name="GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH" />
+            <enum name="GL_ACTIVE_UNIFORM_MAX_LENGTH" />
+            <enum name="GL_COMPUTE_WORK_GROUP_SIZE" />
+            <enum name="GL_PROGRAM_BINARY_LENGTH" />
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE" />
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYINGS" />
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH" />
+            <enum name="GL_GEOMETRY_VERTICES_OUT" />
+            <enum name="GL_GEOMETRY_INPUT_TYPE" />
+            <enum name="GL_GEOMETRY_OUTPUT_TYPE" />
+        </group>
+    </groups>
+
+    <!-- SECTION: GL enumerant (token) definitions. -->
+
+    <!-- Bitmasks each have their own namespace, although bits are
+         sometimes reused for other purposes -->
+
+    <enums namespace="GL" group="AttribMask" type="bitmask">
+        <enum value="0x00000001" name="GL_CURRENT_BIT"/>
+        <enum value="0x00000002" name="GL_POINT_BIT"/>
+        <enum value="0x00000004" name="GL_LINE_BIT"/>
+        <enum value="0x00000008" name="GL_POLYGON_BIT"/>
+        <enum value="0x00000010" name="GL_POLYGON_STIPPLE_BIT"/>
+        <enum value="0x00000020" name="GL_PIXEL_MODE_BIT"/>
+        <enum value="0x00000040" name="GL_LIGHTING_BIT"/>
+        <enum value="0x00000080" name="GL_FOG_BIT"/>
+        <enum value="0x00000100" name="GL_DEPTH_BUFFER_BIT"/>
+        <enum value="0x00000200" name="GL_ACCUM_BUFFER_BIT"/>
+        <enum value="0x00000400" name="GL_STENCIL_BUFFER_BIT"/>
+        <enum value="0x00000800" name="GL_VIEWPORT_BIT"/>
+        <enum value="0x00001000" name="GL_TRANSFORM_BIT"/>
+        <enum value="0x00002000" name="GL_ENABLE_BIT"/>
+        <enum value="0x00004000" name="GL_COLOR_BUFFER_BIT"/>
+        <enum value="0x00008000" name="GL_HINT_BIT"/>
+        <enum value="0x00010000" name="GL_EVAL_BIT"/>
+        <enum value="0x00020000" name="GL_LIST_BIT"/>
+        <enum value="0x00040000" name="GL_TEXTURE_BIT"/>
+        <enum value="0x00080000" name="GL_SCISSOR_BIT"/>
+        <enum value="0x20000000" name="GL_MULTISAMPLE_BIT"/>
+        <enum value="0x20000000" name="GL_MULTISAMPLE_BIT_ARB"/>
+        <enum value="0x20000000" name="GL_MULTISAMPLE_BIT_EXT"/>
+        <enum value="0x20000000" name="GL_MULTISAMPLE_BIT_3DFX"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_ATTRIB_BITS" comment="Guaranteed to mark all attribute groups at once"/>
+    </enums>
+
+    <enums namespace="GL" group="BufferStorageMask" type="bitmask" comment="GL_MAP_{COHERENT,PERSISTENT,READ,WRITE}_{BIT,BIT_EXT} also lie in this namespace">
+        <enum value="0x0100" name="GL_DYNAMIC_STORAGE_BIT"/>
+        <enum value="0x0100" name="GL_DYNAMIC_STORAGE_BIT_EXT"/>
+        <enum value="0x0200" name="GL_CLIENT_STORAGE_BIT"/>
+        <enum value="0x0200" name="GL_CLIENT_STORAGE_BIT_EXT"/>
+        <enum value="0x0400" name="GL_SPARSE_STORAGE_BIT_ARB"/>
+        <enum value="0x0800" name="GL_LGPU_SEPARATE_STORAGE_BIT_NVX"/>
+        <enum value="0x0800" name="GL_PER_GPU_STORAGE_BIT_NV"/>
+            <unused start="0x1000" end="0x1000" comment="Reserved for NVIDIA"/>
+        <enum value="0x2000" name="GL_EXTERNAL_STORAGE_BIT_NVX"/>
+            <!-- Also used: 0x000000ff for bits reused from MapBufferAccessMask below -->
+    </enums>
+
+    <enums namespace="GL" group="ClearBufferMask" type="bitmask" comment="GL_{DEPTH,ACCUM,STENCIL,COLOR}_BUFFER_BIT also lie in this namespace">
+        <enum value="0x00008000" name="GL_COVERAGE_BUFFER_BIT_NV" comment="Collides with AttribMask bit GL_HINT_BIT. OK since this token is for OpenGL ES 2, which doesn't have attribute groups."/>
+            <!-- Also used: 0x00004700 for bits reused from AttribMask above -->
+    </enums>
+
+    <enums namespace="GL" group="ClientAttribMask" type="bitmask">
+        <enum value="0x00000001" name="GL_CLIENT_PIXEL_STORE_BIT"/>
+        <enum value="0x00000002" name="GL_CLIENT_VERTEX_ARRAY_BIT"/>
+        <enum value="0xFFFFFFFF" name="GL_CLIENT_ALL_ATTRIB_BITS"/>
+    </enums>
+
+    <enums namespace="GL" group="ContextFlagMask" type="bitmask" comment="Should be shared with WGL/GLX, but aren't since the FORWARD_COMPATIBLE and DEBUG values are swapped vs. WGL/GLX.">
+        <enum value="0x00000001" name="GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT"/>
+        <enum value="0x00000002" name="GL_CONTEXT_FLAG_DEBUG_BIT"/>
+        <enum value="0x00000002" name="GL_CONTEXT_FLAG_DEBUG_BIT_KHR"/>
+        <enum value="0x00000004" name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT"/>
+        <enum value="0x00000004" name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB"/>
+        <enum value="0x00000008" name="GL_CONTEXT_FLAG_NO_ERROR_BIT"/>
+        <enum value="0x00000008" name="GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR" alias="GL_CONTEXT_FLAG_NO_ERROR_BIT"/>
+        <enum value="0x00000010" name="GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT"/>
+    </enums>
+
+    <enums namespace="GL" group="ContextProfileMask" type="bitmask">
+        <enum value="0x00000001" name="GL_CONTEXT_CORE_PROFILE_BIT"/>
+        <enum value="0x00000002" name="GL_CONTEXT_COMPATIBILITY_PROFILE_BIT"/>
+    </enums>
+
+    <enums namespace="GL" group="MapBufferAccessMask" type="bitmask">
+        <enum value="0x0001" name="GL_MAP_READ_BIT"/>
+        <enum value="0x0001" name="GL_MAP_READ_BIT_EXT"/>
+        <enum value="0x0002" name="GL_MAP_WRITE_BIT"/>
+        <enum value="0x0002" name="GL_MAP_WRITE_BIT_EXT"/>
+        <enum value="0x0004" name="GL_MAP_INVALIDATE_RANGE_BIT"/>
+        <enum value="0x0004" name="GL_MAP_INVALIDATE_RANGE_BIT_EXT"/>
+        <enum value="0x0008" name="GL_MAP_INVALIDATE_BUFFER_BIT"/>
+        <enum value="0x0008" name="GL_MAP_INVALIDATE_BUFFER_BIT_EXT"/>
+        <enum value="0x0010" name="GL_MAP_FLUSH_EXPLICIT_BIT"/>
+        <enum value="0x0010" name="GL_MAP_FLUSH_EXPLICIT_BIT_EXT"/>
+        <enum value="0x0020" name="GL_MAP_UNSYNCHRONIZED_BIT"/>
+        <enum value="0x0020" name="GL_MAP_UNSYNCHRONIZED_BIT_EXT"/>
+        <enum value="0x0040" name="GL_MAP_PERSISTENT_BIT"/>
+        <enum value="0x0040" name="GL_MAP_PERSISTENT_BIT_EXT"/>
+        <enum value="0x0080" name="GL_MAP_COHERENT_BIT"/>
+        <enum value="0x0080" name="GL_MAP_COHERENT_BIT_EXT"/>
+    </enums>
+
+    <enums namespace="GL" group="MemoryBarrierMask" type="bitmask">
+        <enum value="0x00000001" name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT"/>
+        <enum value="0x00000001" name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT_EXT"/>
+        <enum value="0x00000002" name="GL_ELEMENT_ARRAY_BARRIER_BIT"/>
+        <enum value="0x00000002" name="GL_ELEMENT_ARRAY_BARRIER_BIT_EXT"/>
+        <enum value="0x00000004" name="GL_UNIFORM_BARRIER_BIT"/>
+        <enum value="0x00000004" name="GL_UNIFORM_BARRIER_BIT_EXT"/>
+        <enum value="0x00000008" name="GL_TEXTURE_FETCH_BARRIER_BIT"/>
+        <enum value="0x00000008" name="GL_TEXTURE_FETCH_BARRIER_BIT_EXT"/>
+        <enum value="0x00000010" name="GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV"/>
+        <enum value="0x00000020" name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT"/>
+        <enum value="0x00000020" name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT_EXT"/>
+        <enum value="0x00000040" name="GL_COMMAND_BARRIER_BIT"/>
+        <enum value="0x00000040" name="GL_COMMAND_BARRIER_BIT_EXT"/>
+        <enum value="0x00000080" name="GL_PIXEL_BUFFER_BARRIER_BIT"/>
+        <enum value="0x00000080" name="GL_PIXEL_BUFFER_BARRIER_BIT_EXT"/>
+        <enum value="0x00000100" name="GL_TEXTURE_UPDATE_BARRIER_BIT"/>
+        <enum value="0x00000100" name="GL_TEXTURE_UPDATE_BARRIER_BIT_EXT"/>
+        <enum value="0x00000200" name="GL_BUFFER_UPDATE_BARRIER_BIT"/>
+        <enum value="0x00000200" name="GL_BUFFER_UPDATE_BARRIER_BIT_EXT"/>
+        <enum value="0x00000400" name="GL_FRAMEBUFFER_BARRIER_BIT"/>
+        <enum value="0x00000400" name="GL_FRAMEBUFFER_BARRIER_BIT_EXT"/>
+        <enum value="0x00000800" name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT"/>
+        <enum value="0x00000800" name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT_EXT"/>
+        <enum value="0x00001000" name="GL_ATOMIC_COUNTER_BARRIER_BIT"/>
+        <enum value="0x00001000" name="GL_ATOMIC_COUNTER_BARRIER_BIT_EXT"/>
+        <enum value="0x00002000" name="GL_SHADER_STORAGE_BARRIER_BIT"/>
+        <enum value="0x00004000" name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT"/>
+        <enum value="0x00004000" name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT"/>
+        <enum value="0x00008000" name="GL_QUERY_BUFFER_BARRIER_BIT"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_BARRIER_BITS"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_BARRIER_BITS_EXT"/>
+    </enums>
+
+    <enums namespace="GL" group="OcclusionQueryEventMaskAMD" type="bitmask">
+        <enum value="0x00000001" name="GL_QUERY_DEPTH_PASS_EVENT_BIT_AMD"/>
+        <enum value="0x00000002" name="GL_QUERY_DEPTH_FAIL_EVENT_BIT_AMD"/>
+        <enum value="0x00000004" name="GL_QUERY_STENCIL_FAIL_EVENT_BIT_AMD"/>
+        <enum value="0x00000008" name="GL_QUERY_DEPTH_BOUNDS_FAIL_EVENT_BIT_AMD"/>
+        <enum value="0xFFFFFFFF" name="GL_QUERY_ALL_EVENT_BITS_AMD"/>
+    </enums>
+
+    <enums namespace="GL" group="SyncObjectMask" type="bitmask">
+        <enum value="0x00000001" name="GL_SYNC_FLUSH_COMMANDS_BIT"/>
+        <enum value="0x00000001" name="GL_SYNC_FLUSH_COMMANDS_BIT_APPLE"/>
+    </enums>
+
+    <enums namespace="GL" group="UseProgramStageMask" type="bitmask">
+        <enum value="0x00000001" name="GL_VERTEX_SHADER_BIT"/>
+        <enum value="0x00000001" name="GL_VERTEX_SHADER_BIT_EXT"/>
+        <enum value="0x00000002" name="GL_FRAGMENT_SHADER_BIT"/>
+        <enum value="0x00000002" name="GL_FRAGMENT_SHADER_BIT_EXT"/>
+        <enum value="0x00000004" name="GL_GEOMETRY_SHADER_BIT"/>
+        <enum value="0x00000004" name="GL_GEOMETRY_SHADER_BIT_EXT"/>
+        <enum value="0x00000004" name="GL_GEOMETRY_SHADER_BIT_OES"/>
+        <enum value="0x00000008" name="GL_TESS_CONTROL_SHADER_BIT"/>
+        <enum value="0x00000008" name="GL_TESS_CONTROL_SHADER_BIT_EXT"/>
+        <enum value="0x00000008" name="GL_TESS_CONTROL_SHADER_BIT_OES"/>
+        <enum value="0x00000010" name="GL_TESS_EVALUATION_SHADER_BIT"/>
+        <enum value="0x00000010" name="GL_TESS_EVALUATION_SHADER_BIT_EXT"/>
+        <enum value="0x00000010" name="GL_TESS_EVALUATION_SHADER_BIT_OES"/>
+        <enum value="0x00000020" name="GL_COMPUTE_SHADER_BIT"/>
+        <enum value="0x00000040" name="GL_MESH_SHADER_BIT_NV"/>
+        <enum value="0x00000080" name="GL_TASK_SHADER_BIT_NV"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_SHADER_BITS"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_SHADER_BITS_EXT"/>
+    </enums>
+
+    <!-- Bitmasks defined by vendor extensions -->
+
+    <enums namespace="GL" group="TextureStorageMaskAMD" type="bitmask">
+        <enum value="0x00000001" name="GL_TEXTURE_STORAGE_SPARSE_BIT_AMD"/>
+    </enums>
+
+    <enums namespace="GL" group="FragmentShaderDestMaskATI" type="bitmask">
+        <enum value="0x00000001" name="GL_RED_BIT_ATI"/>
+        <enum value="0x00000002" name="GL_GREEN_BIT_ATI"/>
+        <enum value="0x00000004" name="GL_BLUE_BIT_ATI"/>
+    </enums>
+
+    <enums namespace="GL" group="FragmentShaderDestModMaskATI" type="bitmask">
+        <enum value="0x00000001" name="GL_2X_BIT_ATI"/>
+        <enum value="0x00000002" name="GL_4X_BIT_ATI"/>
+        <enum value="0x00000004" name="GL_8X_BIT_ATI"/>
+        <enum value="0x00000008" name="GL_HALF_BIT_ATI"/>
+        <enum value="0x00000010" name="GL_QUARTER_BIT_ATI"/>
+        <enum value="0x00000020" name="GL_EIGHTH_BIT_ATI"/>
+        <enum value="0x00000040" name="GL_SATURATE_BIT_ATI"/>
+    </enums>
+
+    <enums namespace="GL" group="FragmentShaderColorModMaskATI" type="bitmask">
+            <!-- Also used: 0x00000001 for GL_2X_BIT_ATI reused from FragmentShaderDestModMaskAT above -->
+        <enum value="0x00000002" name="GL_COMP_BIT_ATI"/>
+        <enum value="0x00000004" name="GL_NEGATE_BIT_ATI"/>
+        <enum value="0x00000008" name="GL_BIAS_BIT_ATI"/>
+    </enums>
+
+    <enums namespace="GL" group="TraceMaskMESA" type="bitmask">
+        <enum value="0x0001" name="GL_TRACE_OPERATIONS_BIT_MESA"/>
+        <enum value="0x0002" name="GL_TRACE_PRIMITIVES_BIT_MESA"/>
+        <enum value="0x0004" name="GL_TRACE_ARRAYS_BIT_MESA"/>
+        <enum value="0x0008" name="GL_TRACE_TEXTURES_BIT_MESA"/>
+        <enum value="0x0010" name="GL_TRACE_PIXELS_BIT_MESA"/>
+        <enum value="0x0020" name="GL_TRACE_ERRORS_BIT_MESA"/>
+        <enum value="0xFFFF" name="GL_TRACE_ALL_BITS_MESA"/>
+    </enums>
+
+    <enums namespace="GL" group="PathRenderingMaskNV" type="bitmask">
+        <enum value="0x01" name="GL_BOLD_BIT_NV"/>
+        <enum value="0x02" name="GL_ITALIC_BIT_NV"/>
+        <enum value="0x01" name="GL_GLYPH_WIDTH_BIT_NV"/>
+        <enum value="0x02" name="GL_GLYPH_HEIGHT_BIT_NV"/>
+        <enum value="0x04" name="GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV"/>
+        <enum value="0x08" name="GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV"/>
+        <enum value="0x10" name="GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV"/>
+        <enum value="0x20" name="GL_GLYPH_VERTICAL_BEARING_X_BIT_NV"/>
+        <enum value="0x40" name="GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV"/>
+        <enum value="0x80" name="GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV"/>
+        <enum value="0x100" name="GL_GLYPH_HAS_KERNING_BIT_NV"/>
+        <enum value="0x00010000" name="GL_FONT_X_MIN_BOUNDS_BIT_NV"/>
+        <enum value="0x00020000" name="GL_FONT_Y_MIN_BOUNDS_BIT_NV"/>
+        <enum value="0x00040000" name="GL_FONT_X_MAX_BOUNDS_BIT_NV"/>
+        <enum value="0x00080000" name="GL_FONT_Y_MAX_BOUNDS_BIT_NV"/>
+        <enum value="0x00100000" name="GL_FONT_UNITS_PER_EM_BIT_NV"/>
+        <enum value="0x00200000" name="GL_FONT_ASCENDER_BIT_NV"/>
+        <enum value="0x00400000" name="GL_FONT_DESCENDER_BIT_NV"/>
+        <enum value="0x00800000" name="GL_FONT_HEIGHT_BIT_NV"/>
+        <enum value="0x01000000" name="GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV"/>
+        <enum value="0x02000000" name="GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV"/>
+        <enum value="0x04000000" name="GL_FONT_UNDERLINE_POSITION_BIT_NV"/>
+        <enum value="0x08000000" name="GL_FONT_UNDERLINE_THICKNESS_BIT_NV"/>
+        <enum value="0x10000000" name="GL_FONT_HAS_KERNING_BIT_NV"/>
+        <enum value="0x20000000" name="GL_FONT_NUM_GLYPH_INDICES_BIT_NV"/>
+    </enums>
+
+    <enums namespace="GL" group="PerformanceQueryCapsMaskINTEL" type="bitmask">
+        <enum value="0x00000000" name="GL_PERFQUERY_SINGLE_CONTEXT_INTEL"/>
+        <enum value="0x00000001" name="GL_PERFQUERY_GLOBAL_CONTEXT_INTEL"/>
+    </enums>
+
+    <enums namespace="GL" group="VertexHintsMaskPGI" type="bitmask">
+        <enum value="0x00000004" name="GL_VERTEX23_BIT_PGI"/>
+        <enum value="0x00000008" name="GL_VERTEX4_BIT_PGI"/>
+        <enum value="0x00010000" name="GL_COLOR3_BIT_PGI"/>
+        <enum value="0x00020000" name="GL_COLOR4_BIT_PGI"/>
+        <enum value="0x00040000" name="GL_EDGEFLAG_BIT_PGI"/>
+        <enum value="0x00080000" name="GL_INDEX_BIT_PGI"/>
+        <enum value="0x00100000" name="GL_MAT_AMBIENT_BIT_PGI"/>
+        <enum value="0x00200000" name="GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI"/>
+        <enum value="0x00400000" name="GL_MAT_DIFFUSE_BIT_PGI"/>
+        <enum value="0x00800000" name="GL_MAT_EMISSION_BIT_PGI"/>
+        <enum value="0x01000000" name="GL_MAT_COLOR_INDEXES_BIT_PGI"/>
+        <enum value="0x02000000" name="GL_MAT_SHININESS_BIT_PGI"/>
+        <enum value="0x04000000" name="GL_MAT_SPECULAR_BIT_PGI"/>
+        <enum value="0x08000000" name="GL_NORMAL_BIT_PGI"/>
+        <enum value="0x10000000" name="GL_TEXCOORD1_BIT_PGI"/>
+        <enum value="0x20000000" name="GL_TEXCOORD2_BIT_PGI"/>
+        <enum value="0x40000000" name="GL_TEXCOORD3_BIT_PGI"/>
+        <enum value="0x80000000" name="GL_TEXCOORD4_BIT_PGI"/>
+    </enums>
+
+    <enums namespace="GL" group="BufferBitQCOM" type="bitmask">
+        <enum value="0x00000001" name="GL_COLOR_BUFFER_BIT0_QCOM"/>
+        <enum value="0x00000002" name="GL_COLOR_BUFFER_BIT1_QCOM"/>
+        <enum value="0x00000004" name="GL_COLOR_BUFFER_BIT2_QCOM"/>
+        <enum value="0x00000008" name="GL_COLOR_BUFFER_BIT3_QCOM"/>
+        <enum value="0x00000010" name="GL_COLOR_BUFFER_BIT4_QCOM"/>
+        <enum value="0x00000020" name="GL_COLOR_BUFFER_BIT5_QCOM"/>
+        <enum value="0x00000040" name="GL_COLOR_BUFFER_BIT6_QCOM"/>
+        <enum value="0x00000080" name="GL_COLOR_BUFFER_BIT7_QCOM"/>
+        <enum value="0x00000100" name="GL_DEPTH_BUFFER_BIT0_QCOM"/>
+        <enum value="0x00000200" name="GL_DEPTH_BUFFER_BIT1_QCOM"/>
+        <enum value="0x00000400" name="GL_DEPTH_BUFFER_BIT2_QCOM"/>
+        <enum value="0x00000800" name="GL_DEPTH_BUFFER_BIT3_QCOM"/>
+        <enum value="0x00001000" name="GL_DEPTH_BUFFER_BIT4_QCOM"/>
+        <enum value="0x00002000" name="GL_DEPTH_BUFFER_BIT5_QCOM"/>
+        <enum value="0x00004000" name="GL_DEPTH_BUFFER_BIT6_QCOM"/>
+        <enum value="0x00008000" name="GL_DEPTH_BUFFER_BIT7_QCOM"/>
+        <enum value="0x00010000" name="GL_STENCIL_BUFFER_BIT0_QCOM"/>
+        <enum value="0x00020000" name="GL_STENCIL_BUFFER_BIT1_QCOM"/>
+        <enum value="0x00040000" name="GL_STENCIL_BUFFER_BIT2_QCOM"/>
+        <enum value="0x00080000" name="GL_STENCIL_BUFFER_BIT3_QCOM"/>
+        <enum value="0x00100000" name="GL_STENCIL_BUFFER_BIT4_QCOM"/>
+        <enum value="0x00200000" name="GL_STENCIL_BUFFER_BIT5_QCOM"/>
+        <enum value="0x00400000" name="GL_STENCIL_BUFFER_BIT6_QCOM"/>
+        <enum value="0x00800000" name="GL_STENCIL_BUFFER_BIT7_QCOM"/>
+        <enum value="0x01000000" name="GL_MULTISAMPLE_BUFFER_BIT0_QCOM"/>
+        <enum value="0x02000000" name="GL_MULTISAMPLE_BUFFER_BIT1_QCOM"/>
+        <enum value="0x04000000" name="GL_MULTISAMPLE_BUFFER_BIT2_QCOM"/>
+        <enum value="0x08000000" name="GL_MULTISAMPLE_BUFFER_BIT3_QCOM"/>
+        <enum value="0x10000000" name="GL_MULTISAMPLE_BUFFER_BIT4_QCOM"/>
+        <enum value="0x20000000" name="GL_MULTISAMPLE_BUFFER_BIT5_QCOM"/>
+        <enum value="0x40000000" name="GL_MULTISAMPLE_BUFFER_BIT6_QCOM"/>
+        <enum value="0x80000000" name="GL_MULTISAMPLE_BUFFER_BIT7_QCOM"/>
+    </enums>
+
+    <enums namespace="GL" group="FoveationConfigBitQCOM" type="bitmask">
+        <enum value="0x00000001" name="GL_FOVEATION_ENABLE_BIT_QCOM"/>
+        <enum value="0x00000002" name="GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM"/>
+        <enum value="0x00000004" name="GL_FOVEATION_SUBSAMPLED_LAYOUT_METHOD_BIT_QCOM"/>
+    </enums>
+
+    <enums namespace="GL" group="FfdMaskSGIX" type="bitmask">
+        <enum value="0x00000001" name="GL_TEXTURE_DEFORMATION_BIT_SGIX"/>
+        <enum value="0x00000002" name="GL_GEOMETRY_DEFORMATION_BIT_SGIX"/>
+    </enums>
+
+
+    <!-- Non-bitmask enums with their own namespace. Generally small numbers
+         used for indexed access. -->
+
+    <enums namespace="GL" group="CommandOpcodesNV" vendor="NV" comment="For NV_command_list.">
+        <enum value="0x0000" name="GL_TERMINATE_SEQUENCE_COMMAND_NV"/>
+        <enum value="0x0001" name="GL_NOP_COMMAND_NV"/>
+        <enum value="0x0002" name="GL_DRAW_ELEMENTS_COMMAND_NV"/>
+        <enum value="0x0003" name="GL_DRAW_ARRAYS_COMMAND_NV"/>
+        <enum value="0x0004" name="GL_DRAW_ELEMENTS_STRIP_COMMAND_NV"/>
+        <enum value="0x0005" name="GL_DRAW_ARRAYS_STRIP_COMMAND_NV"/>
+        <enum value="0x0006" name="GL_DRAW_ELEMENTS_INSTANCED_COMMAND_NV"/>
+        <enum value="0x0007" name="GL_DRAW_ARRAYS_INSTANCED_COMMAND_NV"/>
+        <enum value="0x0008" name="GL_ELEMENT_ADDRESS_COMMAND_NV"/>
+        <enum value="0x0009" name="GL_ATTRIBUTE_ADDRESS_COMMAND_NV"/>
+        <enum value="0x000A" name="GL_UNIFORM_ADDRESS_COMMAND_NV"/>
+        <enum value="0x000B" name="GL_BLEND_COLOR_COMMAND_NV"/>
+        <enum value="0x000C" name="GL_STENCIL_REF_COMMAND_NV"/>
+        <enum value="0x000D" name="GL_LINE_WIDTH_COMMAND_NV"/>
+        <enum value="0x000E" name="GL_POLYGON_OFFSET_COMMAND_NV"/>
+        <enum value="0x000F" name="GL_ALPHA_REF_COMMAND_NV"/>
+        <enum value="0x0010" name="GL_VIEWPORT_COMMAND_NV"/>
+        <enum value="0x0011" name="GL_SCISSOR_COMMAND_NV"/>
+        <enum value="0x0012" name="GL_FRONT_FACE_COMMAND_NV"/>
+    </enums>
+
+    <enums namespace="GL" group="MapTextureFormatINTEL" vendor="INTEL" comment="Texture memory layouts for INTEL_map_texture">
+        <enum value="0" name="GL_LAYOUT_DEFAULT_INTEL"/>
+        <enum value="1" name="GL_LAYOUT_LINEAR_INTEL"/>
+        <enum value="2" name="GL_LAYOUT_LINEAR_CPU_CACHED_INTEL"/>
+    </enums>
+
+    <enums namespace="GL" group="PathRenderingTokenNV" vendor="NV">
+        <enum value="0x00" name="GL_CLOSE_PATH_NV"/>
+        <enum value="0x02" name="GL_MOVE_TO_NV"/>
+        <enum value="0x03" name="GL_RELATIVE_MOVE_TO_NV"/>
+        <enum value="0x04" name="GL_LINE_TO_NV"/>
+        <enum value="0x05" name="GL_RELATIVE_LINE_TO_NV"/>
+        <enum value="0x06" name="GL_HORIZONTAL_LINE_TO_NV"/>
+        <enum value="0x07" name="GL_RELATIVE_HORIZONTAL_LINE_TO_NV"/>
+        <enum value="0x08" name="GL_VERTICAL_LINE_TO_NV"/>
+        <enum value="0x09" name="GL_RELATIVE_VERTICAL_LINE_TO_NV"/>
+        <enum value="0x0A" name="GL_QUADRATIC_CURVE_TO_NV"/>
+        <enum value="0x0B" name="GL_RELATIVE_QUADRATIC_CURVE_TO_NV"/>
+        <enum value="0x0C" name="GL_CUBIC_CURVE_TO_NV"/>
+        <enum value="0x0D" name="GL_RELATIVE_CUBIC_CURVE_TO_NV"/>
+        <enum value="0x0E" name="GL_SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+        <enum value="0x0F" name="GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+        <enum value="0x10" name="GL_SMOOTH_CUBIC_CURVE_TO_NV"/>
+        <enum value="0x11" name="GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV"/>
+        <enum value="0x12" name="GL_SMALL_CCW_ARC_TO_NV"/>
+        <enum value="0x13" name="GL_RELATIVE_SMALL_CCW_ARC_TO_NV"/>
+        <enum value="0x14" name="GL_SMALL_CW_ARC_TO_NV"/>
+        <enum value="0x15" name="GL_RELATIVE_SMALL_CW_ARC_TO_NV"/>
+        <enum value="0x16" name="GL_LARGE_CCW_ARC_TO_NV"/>
+        <enum value="0x17" name="GL_RELATIVE_LARGE_CCW_ARC_TO_NV"/>
+        <enum value="0x18" name="GL_LARGE_CW_ARC_TO_NV"/>
+        <enum value="0x19" name="GL_RELATIVE_LARGE_CW_ARC_TO_NV"/>
+        <enum value="0x1A" name="GL_CONIC_CURVE_TO_NV"/>
+        <enum value="0x1B" name="GL_RELATIVE_CONIC_CURVE_TO_NV"/>
+            <unused start="0x1C" end="0xBF" comment="Unused for PathRenderingTokenNV"/>
+        <enum value="0xC0" name="GL_SHARED_EDGE_NV"/>
+            <unused start="0xC1" end="0xE7" comment="Unused for PathRenderingTokenNV"/>
+        <enum value="0xE8" name="GL_ROUNDED_RECT_NV"/>
+        <enum value="0xE9" name="GL_RELATIVE_ROUNDED_RECT_NV"/>
+        <enum value="0xEA" name="GL_ROUNDED_RECT2_NV"/>
+        <enum value="0xEB" name="GL_RELATIVE_ROUNDED_RECT2_NV"/>
+        <enum value="0xEC" name="GL_ROUNDED_RECT4_NV"/>
+        <enum value="0xED" name="GL_RELATIVE_ROUNDED_RECT4_NV"/>
+        <enum value="0xEE" name="GL_ROUNDED_RECT8_NV"/>
+        <enum value="0xEF" name="GL_RELATIVE_ROUNDED_RECT8_NV"/>
+        <enum value="0xF0" name="GL_RESTART_PATH_NV"/>
+        <enum value="0xF2" name="GL_DUP_FIRST_CUBIC_CURVE_TO_NV"/>
+        <enum value="0xF4" name="GL_DUP_LAST_CUBIC_CURVE_TO_NV"/>
+        <enum value="0xF6" name="GL_RECT_NV"/>
+        <enum value="0xF7" name="GL_RELATIVE_RECT_NV"/>
+        <enum value="0xF8" name="GL_CIRCULAR_CCW_ARC_TO_NV"/>
+        <enum value="0xFA" name="GL_CIRCULAR_CW_ARC_TO_NV"/>
+        <enum value="0xFC" name="GL_CIRCULAR_TANGENT_ARC_TO_NV"/>
+        <enum value="0xFE" name="GL_ARC_TO_NV"/>
+        <enum value="0xFF" name="GL_RELATIVE_ARC_TO_NV"/>
+    </enums>
+
+    <enums namespace="GL" group="TransformFeedbackTokenNV" vendor="NV" comment="For NV_transform_feedback. No clue why small negative values are used">
+        <enum value="-2" name="GL_NEXT_BUFFER_NV"/>
+        <enum value="-3" name="GL_SKIP_COMPONENTS4_NV"/>
+        <enum value="-4" name="GL_SKIP_COMPONENTS3_NV"/>
+        <enum value="-5" name="GL_SKIP_COMPONENTS2_NV"/>
+        <enum value="-6" name="GL_SKIP_COMPONENTS1_NV"/>
+    </enums>
+
+    <enums namespace="GL" group="TriangleListSUN" vendor="SUN">
+        <enum value="0x0001" name="GL_RESTART_SUN"/>
+        <enum value="0x0002" name="GL_REPLACE_MIDDLE_SUN"/>
+        <enum value="0x0003" name="GL_REPLACE_OLDEST_SUN"/>
+    </enums>
+
+    <!-- The default ("API") enum namespace starts here. While some
+         assigned values may overlap, and different parts of the
+         namespace are reserved for different purposes, it is a single
+         namespace. The "class" attribute indicates some of the reserved
+         purposes but is by no means complete (and cannot be, since many
+         tokens are reused for different purposes in different
+         extensions and API versions). -->
+
+    <enums namespace="GL" group="SpecialNumbers" vendor="ARB" comment="Tokens whose numeric value is intrinsically meaningful">
+        <enum value="0" name="GL_FALSE"/>
+        <enum value="0" name="GL_NO_ERROR"/>
+        <enum value="0" name="GL_ZERO"/>
+        <enum value="0" name="GL_NONE"/>
+        <enum value="0" name="GL_NONE_OES"/>
+        <enum value="1" name="GL_TRUE"/>
+        <enum value="1" name="GL_ONE"/>
+        <enum value="0xFFFFFFFF" name="GL_INVALID_INDEX" type="u" comment="Tagged as uint"/>
+        <enum value="0xFFFFFFFF" name="GL_ALL_PIXELS_AMD"/>
+        <enum value="0xFFFFFFFFFFFFFFFF" name="GL_TIMEOUT_IGNORED" type="ull" comment="Tagged as uint64"/>
+        <enum value="0xFFFFFFFFFFFFFFFF" name="GL_TIMEOUT_IGNORED_APPLE" type="ull" comment="Tagged as uint64"/>
+        <enum value="1" name="GL_VERSION_ES_CL_1_0" comment="Not an API enum. API definition macro for ES 1.0/1.1 headers"/>
+        <enum value="1" name="GL_VERSION_ES_CM_1_1" comment="Not an API enum. API definition macro for ES 1.0/1.1 headers"/>
+        <enum value="1" name="GL_VERSION_ES_CL_1_1" comment="Not an API enum. API definition macro for ES 1.0/1.1 headers"/>
+        <enum value="16" name="GL_UUID_SIZE_EXT"/>
+        <enum value="8" name="GL_LUID_SIZE_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x0000" end="0x7FFF" vendor="ARB" comment="Mostly OpenGL 1.0/1.1 enum assignments. Unused ranges should generally remain unused.">
+        <enum value="0x0000" name="GL_POINTS"/>
+        <enum value="0x0001" name="GL_LINES"/>
+        <enum value="0x0002" name="GL_LINE_LOOP"/>
+        <enum value="0x0003" name="GL_LINE_STRIP"/>
+        <enum value="0x0004" name="GL_TRIANGLES"/>
+        <enum value="0x0005" name="GL_TRIANGLE_STRIP"/>
+        <enum value="0x0006" name="GL_TRIANGLE_FAN"/>
+        <enum value="0x0007" name="GL_QUADS"/>
+        <enum value="0x0007" name="GL_QUADS_EXT"/>
+        <enum value="0x0007" name="GL_QUADS_OES"/>
+        <enum value="0x0008" name="GL_QUAD_STRIP"/>
+        <enum value="0x0009" name="GL_POLYGON"/>
+        <enum value="0x000A" name="GL_LINES_ADJACENCY"/>
+        <enum value="0x000A" name="GL_LINES_ADJACENCY_ARB"/>
+        <enum value="0x000A" name="GL_LINES_ADJACENCY_EXT"/>
+        <enum value="0x000A" name="GL_LINES_ADJACENCY_OES"/>
+        <enum value="0x000B" name="GL_LINE_STRIP_ADJACENCY"/>
+        <enum value="0x000B" name="GL_LINE_STRIP_ADJACENCY_ARB"/>
+        <enum value="0x000B" name="GL_LINE_STRIP_ADJACENCY_EXT"/>
+        <enum value="0x000B" name="GL_LINE_STRIP_ADJACENCY_OES"/>
+        <enum value="0x000C" name="GL_TRIANGLES_ADJACENCY"/>
+        <enum value="0x000C" name="GL_TRIANGLES_ADJACENCY_ARB"/>
+        <enum value="0x000C" name="GL_TRIANGLES_ADJACENCY_EXT"/>
+        <enum value="0x000C" name="GL_TRIANGLES_ADJACENCY_OES"/>
+        <enum value="0x000D" name="GL_TRIANGLE_STRIP_ADJACENCY"/>
+        <enum value="0x000D" name="GL_TRIANGLE_STRIP_ADJACENCY_ARB"/>
+        <enum value="0x000D" name="GL_TRIANGLE_STRIP_ADJACENCY_EXT"/>
+        <enum value="0x000D" name="GL_TRIANGLE_STRIP_ADJACENCY_OES"/>
+        <enum value="0x000E" name="GL_PATCHES"/>
+        <enum value="0x000E" name="GL_PATCHES_EXT"/>
+        <enum value="0x000E" name="GL_PATCHES_OES"/>
+            <unused start="0x000F" end="0x00FF" comment="Unused for PrimitiveType"/>
+        <enum value="0x0100" name="GL_ACCUM"/>
+        <enum value="0x0101" name="GL_LOAD"/>
+        <enum value="0x0102" name="GL_RETURN"/>
+        <enum value="0x0103" name="GL_MULT"/>
+        <enum value="0x0104" name="GL_ADD"/>
+            <unused start="0x0105" end="0x01FF" comment="Unused for AccumOp"/>
+        <enum value="0x0200" name="GL_NEVER"/>
+        <enum value="0x0201" name="GL_LESS"/>
+        <enum value="0x0202" name="GL_EQUAL"/>
+        <enum value="0x0203" name="GL_LEQUAL"/>
+        <enum value="0x0204" name="GL_GREATER"/>
+        <enum value="0x0205" name="GL_NOTEQUAL"/>
+        <enum value="0x0206" name="GL_GEQUAL"/>
+        <enum value="0x0207" name="GL_ALWAYS"/>
+            <unused start="0x0208" end="0x02FF" comment="Unused for AlphaFunction"/>
+        <enum value="0x0300" name="GL_SRC_COLOR"/>
+        <enum value="0x0301" name="GL_ONE_MINUS_SRC_COLOR"/>
+        <enum value="0x0302" name="GL_SRC_ALPHA"/>
+        <enum value="0x0303" name="GL_ONE_MINUS_SRC_ALPHA"/>
+        <enum value="0x0304" name="GL_DST_ALPHA"/>
+        <enum value="0x0305" name="GL_ONE_MINUS_DST_ALPHA"/>
+        <enum value="0x0306" name="GL_DST_COLOR"/>
+        <enum value="0x0307" name="GL_ONE_MINUS_DST_COLOR"/>
+        <enum value="0x0308" name="GL_SRC_ALPHA_SATURATE"/>
+        <enum value="0x0308" name="GL_SRC_ALPHA_SATURATE_EXT"/>
+            <unused start="0x0309" end="0x03FF" comment="Unused for BlendingFactor"/>
+        <enum value="0x0400" name="GL_FRONT_LEFT"/>
+        <enum value="0x0401" name="GL_FRONT_RIGHT"/>
+        <enum value="0x0402" name="GL_BACK_LEFT"/>
+        <enum value="0x0403" name="GL_BACK_RIGHT"/>
+        <enum value="0x0404" name="GL_FRONT"/>
+        <enum value="0x0405" name="GL_BACK"/>
+        <enum value="0x0406" name="GL_LEFT"/>
+        <enum value="0x0407" name="GL_RIGHT"/>
+        <enum value="0x0408" name="GL_FRONT_AND_BACK"/>
+        <enum value="0x0409" name="GL_AUX0"/>
+        <enum value="0x040A" name="GL_AUX1"/>
+        <enum value="0x040B" name="GL_AUX2"/>
+        <enum value="0x040C" name="GL_AUX3"/>
+            <unused start="0x040D" end="0x04FF" comment="Unused for DrawBufferMode"/>
+        <enum value="0x0500" name="GL_INVALID_ENUM"/>
+        <enum value="0x0501" name="GL_INVALID_VALUE"/>
+        <enum value="0x0502" name="GL_INVALID_OPERATION"/>
+        <enum value="0x0503" name="GL_STACK_OVERFLOW"/>
+        <enum value="0x0503" name="GL_STACK_OVERFLOW_KHR"/>
+        <enum value="0x0504" name="GL_STACK_UNDERFLOW"/>
+        <enum value="0x0504" name="GL_STACK_UNDERFLOW_KHR"/>
+        <enum value="0x0505" name="GL_OUT_OF_MEMORY"/>
+        <enum value="0x0506" name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+        <enum value="0x0506" name="GL_INVALID_FRAMEBUFFER_OPERATION_EXT"/>
+        <enum value="0x0506" name="GL_INVALID_FRAMEBUFFER_OPERATION_OES"/>
+        <enum value="0x0507" name="GL_CONTEXT_LOST"/>
+        <enum value="0x0507" name="GL_CONTEXT_LOST_KHR"/>
+            <unused start="0x0508" end="0x05FF" comment="Unused for ErrorCode"/>
+        <enum value="0x0600" name="GL_2D"/>
+        <enum value="0x0601" name="GL_3D"/>
+        <enum value="0x0602" name="GL_3D_COLOR"/>
+        <enum value="0x0603" name="GL_3D_COLOR_TEXTURE"/>
+        <enum value="0x0604" name="GL_4D_COLOR_TEXTURE"/>
+            <unused start="0x0605" end="0x06FF" comment="Unused for FeedbackType"/>
+        <enum value="0x0700" name="GL_PASS_THROUGH_TOKEN"/>
+        <enum value="0x0701" name="GL_POINT_TOKEN"/>
+        <enum value="0x0702" name="GL_LINE_TOKEN"/>
+        <enum value="0x0703" name="GL_POLYGON_TOKEN"/>
+        <enum value="0x0704" name="GL_BITMAP_TOKEN"/>
+        <enum value="0x0705" name="GL_DRAW_PIXEL_TOKEN"/>
+        <enum value="0x0706" name="GL_COPY_PIXEL_TOKEN"/>
+        <enum value="0x0707" name="GL_LINE_RESET_TOKEN"/>
+            <unused start="0x0708" end="0x07FF" comment="Unused for FeedbackToken"/>
+        <enum value="0x0800" name="GL_EXP"/>
+        <enum value="0x0801" name="GL_EXP2"/>
+            <unused start="0x0802" end="0x08FF" comment="Unused for FogMode"/>
+        <enum value="0x0900" name="GL_CW"/>
+        <enum value="0x0901" name="GL_CCW"/>
+            <unused start="0x0902" end="0x09FF" comment="Unused for FrontFaceDirection"/>
+        <enum value="0x0A00" name="GL_COEFF"/>
+        <enum value="0x0A01" name="GL_ORDER"/>
+        <enum value="0x0A02" name="GL_DOMAIN"/>
+            <unused start="0x0A03" end="0x0AFF" comment="Unused for GetMapQuery"/>
+        <enum value="0x0B00" name="GL_CURRENT_COLOR"/>
+        <enum value="0x0B01" name="GL_CURRENT_INDEX"/>
+        <enum value="0x0B02" name="GL_CURRENT_NORMAL"/>
+        <enum value="0x0B03" name="GL_CURRENT_TEXTURE_COORDS"/>
+        <enum value="0x0B04" name="GL_CURRENT_RASTER_COLOR"/>
+        <enum value="0x0B05" name="GL_CURRENT_RASTER_INDEX"/>
+        <enum value="0x0B06" name="GL_CURRENT_RASTER_TEXTURE_COORDS"/>
+        <enum value="0x0B07" name="GL_CURRENT_RASTER_POSITION"/>
+        <enum value="0x0B08" name="GL_CURRENT_RASTER_POSITION_VALID"/>
+        <enum value="0x0B09" name="GL_CURRENT_RASTER_DISTANCE"/>
+
+        <enum value="0x0B10" name="GL_POINT_SMOOTH"/>
+        <enum value="0x0B11" name="GL_POINT_SIZE"/>
+        <enum value="0x0B12" name="GL_POINT_SIZE_RANGE"/>
+        <enum value="0x0B12" name="GL_SMOOTH_POINT_SIZE_RANGE" alias="GL_POINT_SIZE_RANGE"/>
+        <enum value="0x0B13" name="GL_POINT_SIZE_GRANULARITY"/>
+        <enum value="0x0B13" name="GL_SMOOTH_POINT_SIZE_GRANULARITY" alias="GL_POINT_SIZE_GRANULARITY"/>
+
+        <enum value="0x0B20" name="GL_LINE_SMOOTH"/>
+        <enum value="0x0B21" name="GL_LINE_WIDTH"/>
+        <enum value="0x0B22" name="GL_LINE_WIDTH_RANGE"/>
+        <enum value="0x0B22" name="GL_SMOOTH_LINE_WIDTH_RANGE" alias="GL_LINE_WIDTH_RANGE"/>
+        <enum value="0x0B23" name="GL_LINE_WIDTH_GRANULARITY"/>
+        <enum value="0x0B23" name="GL_SMOOTH_LINE_WIDTH_GRANULARITY" alias="GL_LINE_WIDTH_GRANULARITY"/>
+        <enum value="0x0B24" name="GL_LINE_STIPPLE"/>
+        <enum value="0x0B25" name="GL_LINE_STIPPLE_PATTERN"/>
+        <enum value="0x0B26" name="GL_LINE_STIPPLE_REPEAT"/>
+
+        <enum value="0x0B30" name="GL_LIST_MODE"/>
+        <enum value="0x0B31" name="GL_MAX_LIST_NESTING"/>
+        <enum value="0x0B32" name="GL_LIST_BASE"/>
+        <enum value="0x0B33" name="GL_LIST_INDEX"/>
+
+        <enum value="0x0B40" name="GL_POLYGON_MODE"/>
+        <enum value="0x0B40" name="GL_POLYGON_MODE_NV"/>
+        <enum value="0x0B41" name="GL_POLYGON_SMOOTH"/>
+        <enum value="0x0B42" name="GL_POLYGON_STIPPLE"/>
+        <enum value="0x0B43" name="GL_EDGE_FLAG"/>
+        <enum value="0x0B44" name="GL_CULL_FACE"/>
+        <enum value="0x0B45" name="GL_CULL_FACE_MODE"/>
+        <enum value="0x0B46" name="GL_FRONT_FACE"/>
+
+        <enum value="0x0B50" name="GL_LIGHTING"/>
+        <enum value="0x0B51" name="GL_LIGHT_MODEL_LOCAL_VIEWER"/>
+        <enum value="0x0B52" name="GL_LIGHT_MODEL_TWO_SIDE"/>
+        <enum value="0x0B53" name="GL_LIGHT_MODEL_AMBIENT"/>
+        <enum value="0x0B54" name="GL_SHADE_MODEL"/>
+        <enum value="0x0B55" name="GL_COLOR_MATERIAL_FACE"/>
+        <enum value="0x0B56" name="GL_COLOR_MATERIAL_PARAMETER"/>
+        <enum value="0x0B57" name="GL_COLOR_MATERIAL"/>
+
+        <enum value="0x0B60" name="GL_FOG"/>
+        <enum value="0x0B61" name="GL_FOG_INDEX"/>
+        <enum value="0x0B62" name="GL_FOG_DENSITY"/>
+        <enum value="0x0B63" name="GL_FOG_START"/>
+        <enum value="0x0B64" name="GL_FOG_END"/>
+        <enum value="0x0B65" name="GL_FOG_MODE"/>
+        <enum value="0x0B66" name="GL_FOG_COLOR"/>
+
+        <enum value="0x0B70" name="GL_DEPTH_RANGE"/>
+        <enum value="0x0B71" name="GL_DEPTH_TEST"/>
+        <enum value="0x0B72" name="GL_DEPTH_WRITEMASK"/>
+        <enum value="0x0B73" name="GL_DEPTH_CLEAR_VALUE"/>
+        <enum value="0x0B74" name="GL_DEPTH_FUNC"/>
+
+        <enum value="0x0B80" name="GL_ACCUM_CLEAR_VALUE"/>
+
+        <enum value="0x0B90" name="GL_STENCIL_TEST"/>
+        <enum value="0x0B91" name="GL_STENCIL_CLEAR_VALUE"/>
+        <enum value="0x0B92" name="GL_STENCIL_FUNC"/>
+        <enum value="0x0B93" name="GL_STENCIL_VALUE_MASK"/>
+        <enum value="0x0B94" name="GL_STENCIL_FAIL"/>
+        <enum value="0x0B95" name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+        <enum value="0x0B96" name="GL_STENCIL_PASS_DEPTH_PASS"/>
+        <enum value="0x0B97" name="GL_STENCIL_REF"/>
+        <enum value="0x0B98" name="GL_STENCIL_WRITEMASK"/>
+
+        <enum value="0x0BA0" name="GL_MATRIX_MODE"/>
+        <enum value="0x0BA1" name="GL_NORMALIZE"/>
+        <enum value="0x0BA2" name="GL_VIEWPORT"/>
+        <enum value="0x0BA3" name="GL_MODELVIEW_STACK_DEPTH"/>
+        <enum value="0x0BA3" name="GL_MODELVIEW0_STACK_DEPTH_EXT"/>
+        <enum value="0x0BA3" name="GL_PATH_MODELVIEW_STACK_DEPTH_NV"/>
+        <enum value="0x0BA4" name="GL_PROJECTION_STACK_DEPTH"/>
+        <enum value="0x0BA4" name="GL_PATH_PROJECTION_STACK_DEPTH_NV"/>
+        <enum value="0x0BA5" name="GL_TEXTURE_STACK_DEPTH"/>
+        <enum value="0x0BA6" name="GL_MODELVIEW_MATRIX"/>
+        <enum value="0x0BA6" name="GL_MODELVIEW0_MATRIX_EXT"/>
+        <enum value="0x0BA6" name="GL_PATH_MODELVIEW_MATRIX_NV"/>
+        <enum value="0x0BA7" name="GL_PROJECTION_MATRIX"/>
+        <enum value="0x0BA7" name="GL_PATH_PROJECTION_MATRIX_NV"/>
+        <enum value="0x0BA8" name="GL_TEXTURE_MATRIX"/>
+
+        <enum value="0x0BB0" name="GL_ATTRIB_STACK_DEPTH"/>
+        <enum value="0x0BB1" name="GL_CLIENT_ATTRIB_STACK_DEPTH"/>
+
+        <enum value="0x0BC0" name="GL_ALPHA_TEST"/>
+        <enum value="0x0BC0" name="GL_ALPHA_TEST_QCOM"/>
+        <enum value="0x0BC1" name="GL_ALPHA_TEST_FUNC"/>
+        <enum value="0x0BC1" name="GL_ALPHA_TEST_FUNC_QCOM"/>
+        <enum value="0x0BC2" name="GL_ALPHA_TEST_REF"/>
+        <enum value="0x0BC2" name="GL_ALPHA_TEST_REF_QCOM"/>
+
+        <enum value="0x0BD0" name="GL_DITHER"/>
+
+        <enum value="0x0BE0" name="GL_BLEND_DST"/>
+        <enum value="0x0BE1" name="GL_BLEND_SRC"/>
+        <enum value="0x0BE2" name="GL_BLEND"/>
+
+        <enum value="0x0BF0" name="GL_LOGIC_OP_MODE"/>
+        <enum value="0x0BF1" name="GL_INDEX_LOGIC_OP"/>
+        <enum value="0x0BF1" name="GL_LOGIC_OP"/>
+        <enum value="0x0BF2" name="GL_COLOR_LOGIC_OP"/>
+
+        <enum value="0x0C00" name="GL_AUX_BUFFERS"/>
+        <enum value="0x0C01" name="GL_DRAW_BUFFER"/>
+        <enum value="0x0C01" name="GL_DRAW_BUFFER_EXT"/>
+        <enum value="0x0C02" name="GL_READ_BUFFER"/>
+        <enum value="0x0C02" name="GL_READ_BUFFER_EXT"/>
+        <enum value="0x0C02" name="GL_READ_BUFFER_NV"/>
+
+        <enum value="0x0C10" name="GL_SCISSOR_BOX"/>
+        <enum value="0x0C11" name="GL_SCISSOR_TEST"/>
+
+        <enum value="0x0C20" name="GL_INDEX_CLEAR_VALUE"/>
+        <enum value="0x0C21" name="GL_INDEX_WRITEMASK"/>
+        <enum value="0x0C22" name="GL_COLOR_CLEAR_VALUE"/>
+        <enum value="0x0C23" name="GL_COLOR_WRITEMASK"/>
+
+        <enum value="0x0C30" name="GL_INDEX_MODE"/>
+        <enum value="0x0C31" name="GL_RGBA_MODE"/>
+        <enum value="0x0C32" name="GL_DOUBLEBUFFER"/>
+        <enum value="0x0C33" name="GL_STEREO"/>
+
+        <enum value="0x0C40" name="GL_RENDER_MODE"/>
+
+        <enum value="0x0C50" name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+        <enum value="0x0C51" name="GL_POINT_SMOOTH_HINT"/>
+        <enum value="0x0C52" name="GL_LINE_SMOOTH_HINT"/>
+        <enum value="0x0C53" name="GL_POLYGON_SMOOTH_HINT"/>
+        <enum value="0x0C54" name="GL_FOG_HINT"/>
+
+        <enum value="0x0C60" name="GL_TEXTURE_GEN_S"/>
+        <enum value="0x0C61" name="GL_TEXTURE_GEN_T"/>
+        <enum value="0x0C62" name="GL_TEXTURE_GEN_R"/>
+        <enum value="0x0C63" name="GL_TEXTURE_GEN_Q"/>
+
+        <enum value="0x0C70" name="GL_PIXEL_MAP_I_TO_I"/>
+        <enum value="0x0C71" name="GL_PIXEL_MAP_S_TO_S"/>
+        <enum value="0x0C72" name="GL_PIXEL_MAP_I_TO_R"/>
+        <enum value="0x0C73" name="GL_PIXEL_MAP_I_TO_G"/>
+        <enum value="0x0C74" name="GL_PIXEL_MAP_I_TO_B"/>
+        <enum value="0x0C75" name="GL_PIXEL_MAP_I_TO_A"/>
+        <enum value="0x0C76" name="GL_PIXEL_MAP_R_TO_R"/>
+        <enum value="0x0C77" name="GL_PIXEL_MAP_G_TO_G"/>
+        <enum value="0x0C78" name="GL_PIXEL_MAP_B_TO_B"/>
+        <enum value="0x0C79" name="GL_PIXEL_MAP_A_TO_A"/>
+
+        <enum value="0x0CB0" name="GL_PIXEL_MAP_I_TO_I_SIZE"/>
+        <enum value="0x0CB1" name="GL_PIXEL_MAP_S_TO_S_SIZE"/>
+        <enum value="0x0CB2" name="GL_PIXEL_MAP_I_TO_R_SIZE"/>
+        <enum value="0x0CB3" name="GL_PIXEL_MAP_I_TO_G_SIZE"/>
+        <enum value="0x0CB4" name="GL_PIXEL_MAP_I_TO_B_SIZE"/>
+        <enum value="0x0CB5" name="GL_PIXEL_MAP_I_TO_A_SIZE"/>
+        <enum value="0x0CB6" name="GL_PIXEL_MAP_R_TO_R_SIZE"/>
+        <enum value="0x0CB7" name="GL_PIXEL_MAP_G_TO_G_SIZE"/>
+        <enum value="0x0CB8" name="GL_PIXEL_MAP_B_TO_B_SIZE"/>
+        <enum value="0x0CB9" name="GL_PIXEL_MAP_A_TO_A_SIZE"/>
+
+        <enum value="0x0CF0" name="GL_UNPACK_SWAP_BYTES"/>
+        <enum value="0x0CF1" name="GL_UNPACK_LSB_FIRST"/>
+        <enum value="0x0CF2" name="GL_UNPACK_ROW_LENGTH"/>
+        <enum value="0x0CF2" name="GL_UNPACK_ROW_LENGTH_EXT"/>
+        <enum value="0x0CF3" name="GL_UNPACK_SKIP_ROWS"/>
+        <enum value="0x0CF3" name="GL_UNPACK_SKIP_ROWS_EXT"/>
+        <enum value="0x0CF4" name="GL_UNPACK_SKIP_PIXELS"/>
+        <enum value="0x0CF4" name="GL_UNPACK_SKIP_PIXELS_EXT"/>
+        <enum value="0x0CF5" name="GL_UNPACK_ALIGNMENT"/>
+
+        <enum value="0x0D00" name="GL_PACK_SWAP_BYTES"/>
+        <enum value="0x0D01" name="GL_PACK_LSB_FIRST"/>
+        <enum value="0x0D02" name="GL_PACK_ROW_LENGTH"/>
+        <enum value="0x0D03" name="GL_PACK_SKIP_ROWS"/>
+        <enum value="0x0D04" name="GL_PACK_SKIP_PIXELS"/>
+        <enum value="0x0D05" name="GL_PACK_ALIGNMENT"/>
+
+        <enum value="0x0D10" name="GL_MAP_COLOR"/>
+        <enum value="0x0D11" name="GL_MAP_STENCIL"/>
+        <enum value="0x0D12" name="GL_INDEX_SHIFT"/>
+        <enum value="0x0D13" name="GL_INDEX_OFFSET"/>
+        <enum value="0x0D14" name="GL_RED_SCALE"/>
+        <enum value="0x0D15" name="GL_RED_BIAS"/>
+        <enum value="0x0D16" name="GL_ZOOM_X"/>
+        <enum value="0x0D17" name="GL_ZOOM_Y"/>
+        <enum value="0x0D18" name="GL_GREEN_SCALE"/>
+        <enum value="0x0D19" name="GL_GREEN_BIAS"/>
+        <enum value="0x0D1A" name="GL_BLUE_SCALE"/>
+        <enum value="0x0D1B" name="GL_BLUE_BIAS"/>
+        <enum value="0x0D1C" name="GL_ALPHA_SCALE"/>
+        <enum value="0x0D1D" name="GL_ALPHA_BIAS"/>
+        <enum value="0x0D1E" name="GL_DEPTH_SCALE"/>
+        <enum value="0x0D1F" name="GL_DEPTH_BIAS"/>
+
+        <enum value="0x0D30" name="GL_MAX_EVAL_ORDER"/>
+        <enum value="0x0D31" name="GL_MAX_LIGHTS"/>
+        <enum value="0x0D32" name="GL_MAX_CLIP_PLANES"/>
+        <enum value="0x0D32" name="GL_MAX_CLIP_PLANES_IMG"/>
+        <enum value="0x0D32" name="GL_MAX_CLIP_DISTANCES" alias="GL_MAX_CLIP_PLANES"/>
+        <enum value="0x0D32" name="GL_MAX_CLIP_DISTANCES_EXT" alias="GL_MAX_CLIP_PLANES"/>
+        <enum value="0x0D32" name="GL_MAX_CLIP_DISTANCES_APPLE"/>
+        <enum value="0x0D33" name="GL_MAX_TEXTURE_SIZE"/>
+        <enum value="0x0D34" name="GL_MAX_PIXEL_MAP_TABLE"/>
+        <enum value="0x0D35" name="GL_MAX_ATTRIB_STACK_DEPTH"/>
+        <enum value="0x0D36" name="GL_MAX_MODELVIEW_STACK_DEPTH"/>
+        <enum value="0x0D36" name="GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV"/>
+        <enum value="0x0D37" name="GL_MAX_NAME_STACK_DEPTH"/>
+        <enum value="0x0D38" name="GL_MAX_PROJECTION_STACK_DEPTH"/>
+        <enum value="0x0D38" name="GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV"/>
+        <enum value="0x0D39" name="GL_MAX_TEXTURE_STACK_DEPTH"/>
+        <enum value="0x0D3A" name="GL_MAX_VIEWPORT_DIMS"/>
+        <enum value="0x0D3B" name="GL_MAX_CLIENT_ATTRIB_STACK_DEPTH"/>
+
+        <enum value="0x0D50" name="GL_SUBPIXEL_BITS"/>
+        <enum value="0x0D51" name="GL_INDEX_BITS"/>
+        <enum value="0x0D52" name="GL_RED_BITS"/>
+        <enum value="0x0D53" name="GL_GREEN_BITS"/>
+        <enum value="0x0D54" name="GL_BLUE_BITS"/>
+        <enum value="0x0D55" name="GL_ALPHA_BITS"/>
+        <enum value="0x0D56" name="GL_DEPTH_BITS"/>
+        <enum value="0x0D57" name="GL_STENCIL_BITS"/>
+        <enum value="0x0D58" name="GL_ACCUM_RED_BITS"/>
+        <enum value="0x0D59" name="GL_ACCUM_GREEN_BITS"/>
+        <enum value="0x0D5A" name="GL_ACCUM_BLUE_BITS"/>
+        <enum value="0x0D5B" name="GL_ACCUM_ALPHA_BITS"/>
+
+        <enum value="0x0D70" name="GL_NAME_STACK_DEPTH"/>
+
+        <enum value="0x0D80" name="GL_AUTO_NORMAL"/>
+
+        <enum value="0x0D90" name="GL_MAP1_COLOR_4"/>
+        <enum value="0x0D91" name="GL_MAP1_INDEX"/>
+        <enum value="0x0D92" name="GL_MAP1_NORMAL"/>
+        <enum value="0x0D93" name="GL_MAP1_TEXTURE_COORD_1"/>
+        <enum value="0x0D94" name="GL_MAP1_TEXTURE_COORD_2"/>
+        <enum value="0x0D95" name="GL_MAP1_TEXTURE_COORD_3"/>
+        <enum value="0x0D96" name="GL_MAP1_TEXTURE_COORD_4"/>
+        <enum value="0x0D97" name="GL_MAP1_VERTEX_3"/>
+        <enum value="0x0D98" name="GL_MAP1_VERTEX_4"/>
+
+        <enum value="0x0DB0" name="GL_MAP2_COLOR_4"/>
+        <enum value="0x0DB1" name="GL_MAP2_INDEX"/>
+        <enum value="0x0DB2" name="GL_MAP2_NORMAL"/>
+        <enum value="0x0DB3" name="GL_MAP2_TEXTURE_COORD_1"/>
+        <enum value="0x0DB4" name="GL_MAP2_TEXTURE_COORD_2"/>
+        <enum value="0x0DB5" name="GL_MAP2_TEXTURE_COORD_3"/>
+        <enum value="0x0DB6" name="GL_MAP2_TEXTURE_COORD_4"/>
+        <enum value="0x0DB7" name="GL_MAP2_VERTEX_3"/>
+        <enum value="0x0DB8" name="GL_MAP2_VERTEX_4"/>
+
+        <enum value="0x0DD0" name="GL_MAP1_GRID_DOMAIN"/>
+        <enum value="0x0DD1" name="GL_MAP1_GRID_SEGMENTS"/>
+        <enum value="0x0DD2" name="GL_MAP2_GRID_DOMAIN"/>
+        <enum value="0x0DD3" name="GL_MAP2_GRID_SEGMENTS"/>
+
+        <enum value="0x0DE0" name="GL_TEXTURE_1D"/>
+        <enum value="0x0DE1" name="GL_TEXTURE_2D"/>
+
+        <enum value="0x0DF0" name="GL_FEEDBACK_BUFFER_POINTER"/>
+        <enum value="0x0DF1" name="GL_FEEDBACK_BUFFER_SIZE"/>
+        <enum value="0x0DF2" name="GL_FEEDBACK_BUFFER_TYPE"/>
+        <enum value="0x0DF3" name="GL_SELECTION_BUFFER_POINTER"/>
+        <enum value="0x0DF4" name="GL_SELECTION_BUFFER_SIZE"/>
+            <unused start="0x0DF5" end="0xFFFF" comment="Unused for GetPName"/>
+        <enum value="0x1000" name="GL_TEXTURE_WIDTH"/>
+        <enum value="0x1001" name="GL_TEXTURE_HEIGHT"/>
+        <enum value="0x1003" name="GL_TEXTURE_INTERNAL_FORMAT"/>
+        <enum value="0x1003" name="GL_TEXTURE_COMPONENTS"/>
+        <enum value="0x1004" name="GL_TEXTURE_BORDER_COLOR"/>
+        <enum value="0x1004" name="GL_TEXTURE_BORDER_COLOR_EXT"/>
+        <enum value="0x1004" name="GL_TEXTURE_BORDER_COLOR_NV"/>
+        <enum value="0x1004" name="GL_TEXTURE_BORDER_COLOR_OES"/>
+        <enum value="0x1005" name="GL_TEXTURE_BORDER"/>
+        <enum value="0x1006" name="GL_TEXTURE_TARGET"/>
+            <unused start="0x1007" end="0x10FF" comment="Unused for GetTextureParameter"/>
+        <enum value="0x1100" name="GL_DONT_CARE"/>
+        <enum value="0x1101" name="GL_FASTEST"/>
+        <enum value="0x1102" name="GL_NICEST"/>
+            <unused start="0x1103" end="0x11FF" comment="Unused for HintMode"/>
+        <enum value="0x1200" name="GL_AMBIENT"/>
+        <enum value="0x1201" name="GL_DIFFUSE"/>
+        <enum value="0x1202" name="GL_SPECULAR"/>
+        <enum value="0x1203" name="GL_POSITION"/>
+        <enum value="0x1204" name="GL_SPOT_DIRECTION"/>
+        <enum value="0x1205" name="GL_SPOT_EXPONENT"/>
+        <enum value="0x1206" name="GL_SPOT_CUTOFF"/>
+        <enum value="0x1207" name="GL_CONSTANT_ATTENUATION"/>
+        <enum value="0x1208" name="GL_LINEAR_ATTENUATION"/>
+        <enum value="0x1209" name="GL_QUADRATIC_ATTENUATION"/>
+            <unused start="0x1210" end="0x12FF" comment="Unused for LightParameter"/>
+        <enum value="0x1300" name="GL_COMPILE"/>
+        <enum value="0x1301" name="GL_COMPILE_AND_EXECUTE"/>
+            <unused start="0x1302" end="0x13FF" comment="Unused for ListMode"/>
+        <enum value="0x1400" name="GL_BYTE"/>
+        <enum value="0x1401" name="GL_UNSIGNED_BYTE"/>
+        <enum value="0x1402" name="GL_SHORT"/>
+        <enum value="0x1403" name="GL_UNSIGNED_SHORT"/>
+        <enum value="0x1404" name="GL_INT"/>
+        <enum value="0x1405" name="GL_UNSIGNED_INT"/>
+        <enum value="0x1406" name="GL_FLOAT"/>
+        <enum value="0x1407" name="GL_2_BYTES"/>
+        <enum value="0x1407" name="GL_2_BYTES_NV"/>
+        <enum value="0x1408" name="GL_3_BYTES"/>
+        <enum value="0x1408" name="GL_3_BYTES_NV"/>
+        <enum value="0x1409" name="GL_4_BYTES"/>
+        <enum value="0x1409" name="GL_4_BYTES_NV"/>
+        <enum value="0x140A" name="GL_DOUBLE"/>
+        <enum value="0x140A" name="GL_DOUBLE_EXT"/>
+        <enum value="0x140B" name="GL_HALF_FLOAT"/>
+        <enum value="0x140B" name="GL_HALF_FLOAT_ARB"/>
+        <enum value="0x140B" name="GL_HALF_FLOAT_NV"/>
+        <enum value="0x140B" name="GL_HALF_APPLE"/>
+        <enum value="0x140C" name="GL_FIXED"/>
+        <enum value="0x140C" name="GL_FIXED_OES"/>
+            <unused start="0x140D" comment="Leave gap to preserve even/odd int/uint token values"/>
+        <enum value="0x140E" name="GL_INT64_ARB"/>
+        <enum value="0x140E" name="GL_INT64_NV"/>
+        <enum value="0x140F" name="GL_UNSIGNED_INT64_ARB"/>
+        <enum value="0x140F" name="GL_UNSIGNED_INT64_NV"/>
+            <unused start="0x1410" end="0x14FF" comment="Unused for DataType"/>
+        <enum value="0x1500" name="GL_CLEAR"/>
+        <enum value="0x1501" name="GL_AND"/>
+        <enum value="0x1502" name="GL_AND_REVERSE"/>
+        <enum value="0x1503" name="GL_COPY"/>
+        <enum value="0x1504" name="GL_AND_INVERTED"/>
+        <enum value="0x1505" name="GL_NOOP"/>
+        <enum value="0x1506" name="GL_XOR"/>
+        <enum value="0x1506" name="GL_XOR_NV"/>
+        <enum value="0x1507" name="GL_OR"/>
+        <enum value="0x1508" name="GL_NOR"/>
+        <enum value="0x1509" name="GL_EQUIV"/>
+        <enum value="0x150A" name="GL_INVERT"/>
+        <enum value="0x150B" name="GL_OR_REVERSE"/>
+        <enum value="0x150C" name="GL_COPY_INVERTED"/>
+        <enum value="0x150D" name="GL_OR_INVERTED"/>
+        <enum value="0x150E" name="GL_NAND"/>
+        <enum value="0x150F" name="GL_SET"/>
+            <unused start="0x1510" end="0x15FF" comment="Unused for LogicOp"/>
+        <enum value="0x1600" name="GL_EMISSION"/>
+        <enum value="0x1601" name="GL_SHININESS"/>
+        <enum value="0x1602" name="GL_AMBIENT_AND_DIFFUSE"/>
+        <enum value="0x1603" name="GL_COLOR_INDEXES"/>
+            <unused start="0x1604" end="0x16FF" comment="Unused for MaterialParameter"/>
+        <enum value="0x1700" name="GL_MODELVIEW"/>
+        <enum value="0x1700" name="GL_MODELVIEW0_ARB"/>
+        <enum value="0x1700" name="GL_MODELVIEW0_EXT"/>
+        <enum value="0x1700" name="GL_PATH_MODELVIEW_NV"/>
+        <enum value="0x1701" name="GL_PROJECTION"/>
+        <enum value="0x1701" name="GL_PATH_PROJECTION_NV"/>
+        <enum value="0x1702" name="GL_TEXTURE"/>
+            <unused start="0x1703" end="0x17FF" comment="Unused for MatrixMode"/>
+        <enum value="0x1800" name="GL_COLOR"/>
+        <enum value="0x1800" name="GL_COLOR_EXT"/>
+        <enum value="0x1801" name="GL_DEPTH"/>
+        <enum value="0x1801" name="GL_DEPTH_EXT"/>
+        <enum value="0x1802" name="GL_STENCIL"/>
+        <enum value="0x1802" name="GL_STENCIL_EXT"/>
+            <unused start="0x1803" end="0x18FF" comment="Unused for PixelCopyType"/>
+        <enum value="0x1900" name="GL_COLOR_INDEX"/>
+        <enum value="0x1901" name="GL_STENCIL_INDEX"/>
+        <enum value="0x1901" name="GL_STENCIL_INDEX_OES"/>
+        <enum value="0x1902" name="GL_DEPTH_COMPONENT"/>
+        <enum value="0x1903" name="GL_RED"/>
+        <enum value="0x1903" name="GL_RED_EXT"/>
+        <enum value="0x1903" name="GL_RED_NV"/>
+        <enum value="0x1904" name="GL_GREEN"/>
+        <enum value="0x1904" name="GL_GREEN_NV"/>
+        <enum value="0x1905" name="GL_BLUE"/>
+        <enum value="0x1905" name="GL_BLUE_NV"/>
+        <enum value="0x1906" name="GL_ALPHA"/>
+        <enum value="0x1907" name="GL_RGB"/>
+        <enum value="0x1908" name="GL_RGBA"/>
+        <enum value="0x1909" name="GL_LUMINANCE"/>
+        <enum value="0x190A" name="GL_LUMINANCE_ALPHA"/>
+            <unused start="0x1910" end="0x19FF" comment="Unused for PixelFormat"/>
+        <enum value="0x1A00" name="GL_BITMAP"/>
+            <unused start="0x1A01" end="0x1AFF" comment="Unused for PixelType"/>
+        <enum value="0x1B00" name="GL_POINT"/>
+        <enum value="0x1B00" name="GL_POINT_NV"/>
+        <enum value="0x1B01" name="GL_LINE"/>
+        <enum value="0x1B01" name="GL_LINE_NV"/>
+        <enum value="0x1B02" name="GL_FILL"/>
+        <enum value="0x1B02" name="GL_FILL_NV"/>
+            <unused start="0x1B03" end="0x1BFF" comment="Unused for PolygonMode"/>
+        <enum value="0x1C00" name="GL_RENDER"/>
+        <enum value="0x1C01" name="GL_FEEDBACK"/>
+        <enum value="0x1C02" name="GL_SELECT"/>
+            <unused start="0x1C03" end="0x1CFF" comment="Unused for RenderingMode"/>
+        <enum value="0x1D00" name="GL_FLAT"/>
+        <enum value="0x1D01" name="GL_SMOOTH"/>
+            <unused start="0x1D02" end="0x1DFF" comment="Unused for ShadingModel"/>
+        <enum value="0x1E00" name="GL_KEEP"/>
+        <enum value="0x1E01" name="GL_REPLACE"/>
+        <enum value="0x1E02" name="GL_INCR"/>
+        <enum value="0x1E03" name="GL_DECR"/>
+            <unused start="0x1E04" end="0x1EFF" comment="Unused for StencilOp"/>
+        <enum value="0x1F00" name="GL_VENDOR"/>
+        <enum value="0x1F01" name="GL_RENDERER"/>
+        <enum value="0x1F02" name="GL_VERSION"/>
+        <enum value="0x1F03" name="GL_EXTENSIONS"/>
+            <unused start="0x1F04" end="0x1FFF" comment="Unused for StringName"/>
+        <enum value="0x2000" name="GL_S"/>
+        <enum value="0x2001" name="GL_T"/>
+        <enum value="0x2002" name="GL_R"/>
+        <enum value="0x2003" name="GL_Q"/>
+            <unused start="0x2004" end="0x20FF" comment="Unused for TextureCoordName"/>
+        <enum value="0x2100" name="GL_MODULATE"/>
+        <enum value="0x2101" name="GL_DECAL"/>
+            <unused start="0x2102" end="0x21FF" comment="Unused for TextureEnvMode"/>
+        <enum value="0x2200" name="GL_TEXTURE_ENV_MODE"/>
+        <enum value="0x2201" name="GL_TEXTURE_ENV_COLOR"/>
+            <unused start="0x2202" end="0x22FF" comment="Unused for TextureEnvParameter"/>
+        <enum value="0x2300" name="GL_TEXTURE_ENV"/>
+            <unused start="0x2301" end="0x23FF" comment="Unused for TextureEnvTarget"/>
+        <enum value="0x2400" name="GL_EYE_LINEAR"/>
+        <enum value="0x2400" name="GL_EYE_LINEAR_NV"/>
+        <enum value="0x2401" name="GL_OBJECT_LINEAR"/>
+        <enum value="0x2401" name="GL_OBJECT_LINEAR_NV"/>
+        <enum value="0x2402" name="GL_SPHERE_MAP"/>
+            <unused start="0x2403" end="0x24FF" comment="Unused for TextureGenMode"/>
+        <enum value="0x2500" name="GL_TEXTURE_GEN_MODE"/>
+        <enum value="0x2500" name="GL_TEXTURE_GEN_MODE_OES"/>
+        <enum value="0x2501" name="GL_OBJECT_PLANE"/>
+        <enum value="0x2502" name="GL_EYE_PLANE"/>
+            <unused start="0x2503" end="0x25FF" comment="Unused for TextureGenParameter"/>
+        <enum value="0x2600" name="GL_NEAREST"/>
+        <enum value="0x2601" name="GL_LINEAR"/>
+            <unused start="0x2602" end="0x26FF" comment="Unused for TextureMagFilter"/>
+        <enum value="0x2700" name="GL_NEAREST_MIPMAP_NEAREST"/>
+        <enum value="0x2701" name="GL_LINEAR_MIPMAP_NEAREST"/>
+        <enum value="0x2702" name="GL_NEAREST_MIPMAP_LINEAR"/>
+        <enum value="0x2703" name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <unused start="0x2704" end="0x27FF" comment="Unused for TextureMinFilter"/>
+        <enum value="0x2800" name="GL_TEXTURE_MAG_FILTER"/>
+        <enum value="0x2801" name="GL_TEXTURE_MIN_FILTER"/>
+        <enum value="0x2802" name="GL_TEXTURE_WRAP_S"/>
+        <enum value="0x2803" name="GL_TEXTURE_WRAP_T"/>
+            <unused start="0x2804" end="0x28FF" comment="Unused for TextureParameterName"/>
+        <enum value="0x2900" name="GL_CLAMP"/>
+        <enum value="0x2901" name="GL_REPEAT"/>
+            <unused start="0x2902" end="0x29FF" comment="Unused for TextureWrapMode"/>
+        <enum value="0x2A00" name="GL_POLYGON_OFFSET_UNITS"/>
+        <enum value="0x2A01" name="GL_POLYGON_OFFSET_POINT"/>
+        <enum value="0x2A01" name="GL_POLYGON_OFFSET_POINT_NV"/>
+        <enum value="0x2A02" name="GL_POLYGON_OFFSET_LINE"/>
+        <enum value="0x2A02" name="GL_POLYGON_OFFSET_LINE_NV"/>
+            <unused start="0x2A03" end="0x2A09" comment="Unused for PolygonOffset"/>
+        <enum value="0x2A10" name="GL_R3_G3_B2"/>
+            <unused start="0x2A11" end="0x2A1F" comment="Unused for InternalFormat"/>
+        <enum value="0x2A20" name="GL_V2F"/>
+        <enum value="0x2A21" name="GL_V3F"/>
+        <enum value="0x2A22" name="GL_C4UB_V2F"/>
+        <enum value="0x2A23" name="GL_C4UB_V3F"/>
+        <enum value="0x2A24" name="GL_C3F_V3F"/>
+        <enum value="0x2A25" name="GL_N3F_V3F"/>
+        <enum value="0x2A26" name="GL_C4F_N3F_V3F"/>
+        <enum value="0x2A27" name="GL_T2F_V3F"/>
+        <enum value="0x2A28" name="GL_T4F_V4F"/>
+        <enum value="0x2A29" name="GL_T2F_C4UB_V3F"/>
+        <enum value="0x2A2A" name="GL_T2F_C3F_V3F"/>
+        <enum value="0x2A2B" name="GL_T2F_N3F_V3F"/>
+        <enum value="0x2A2C" name="GL_T2F_C4F_N3F_V3F"/>
+        <enum value="0x2A2D" name="GL_T4F_C4F_N3F_V4F"/>
+            <unused start="0x2A2E" end="0x2FFF" comment="Unused for InterleavedArrayFormat"/>
+        <enum value="0x3000" name="GL_CLIP_PLANE0"/>
+        <enum value="0x3000" name="GL_CLIP_PLANE0_IMG"/>
+        <enum value="0x3000" name="GL_CLIP_DISTANCE0" alias="GL_CLIP_PLANE0"/>
+        <enum value="0x3000" name="GL_CLIP_DISTANCE0_EXT" alias="GL_CLIP_PLANE0"/>
+        <enum value="0x3000" name="GL_CLIP_DISTANCE0_APPLE"/>
+        <enum value="0x3001" name="GL_CLIP_PLANE1"/>
+        <enum value="0x3001" name="GL_CLIP_PLANE1_IMG"/>
+        <enum value="0x3001" name="GL_CLIP_DISTANCE1" alias="GL_CLIP_PLANE1"/>
+        <enum value="0x3001" name="GL_CLIP_DISTANCE1_EXT" alias="GL_CLIP_PLANE1"/>
+        <enum value="0x3001" name="GL_CLIP_DISTANCE1_APPLE"/>
+        <enum value="0x3002" name="GL_CLIP_PLANE2"/>
+        <enum value="0x3002" name="GL_CLIP_PLANE2_IMG"/>
+        <enum value="0x3002" name="GL_CLIP_DISTANCE2" alias="GL_CLIP_PLANE2"/>
+        <enum value="0x3002" name="GL_CLIP_DISTANCE2_EXT" alias="GL_CLIP_PLANE2"/>
+        <enum value="0x3002" name="GL_CLIP_DISTANCE2_APPLE"/>
+        <enum value="0x3003" name="GL_CLIP_PLANE3"/>
+        <enum value="0x3003" name="GL_CLIP_PLANE3_IMG"/>
+        <enum value="0x3003" name="GL_CLIP_DISTANCE3" alias="GL_CLIP_PLANE3"/>
+        <enum value="0x3003" name="GL_CLIP_DISTANCE3_EXT" alias="GL_CLIP_PLANE3"/>
+        <enum value="0x3003" name="GL_CLIP_DISTANCE3_APPLE"/>
+        <enum value="0x3004" name="GL_CLIP_PLANE4"/>
+        <enum value="0x3004" name="GL_CLIP_PLANE4_IMG"/>
+        <enum value="0x3004" name="GL_CLIP_DISTANCE4" alias="GL_CLIP_PLANE4"/>
+        <enum value="0x3004" name="GL_CLIP_DISTANCE4_EXT" alias="GL_CLIP_PLANE4"/>
+        <enum value="0x3004" name="GL_CLIP_DISTANCE4_APPLE"/>
+        <enum value="0x3005" name="GL_CLIP_PLANE5"/>
+        <enum value="0x3005" name="GL_CLIP_PLANE5_IMG"/>
+        <enum value="0x3005" name="GL_CLIP_DISTANCE5" alias="GL_CLIP_PLANE5"/>
+        <enum value="0x3005" name="GL_CLIP_DISTANCE5_EXT" alias="GL_CLIP_PLANE5"/>
+        <enum value="0x3005" name="GL_CLIP_DISTANCE5_APPLE"/>
+        <enum value="0x3006" name="GL_CLIP_DISTANCE6"/>
+        <enum value="0x3006" name="GL_CLIP_DISTANCE6_EXT" alias="GL_CLIP_DISTANCE6"/>
+        <enum value="0x3006" name="GL_CLIP_DISTANCE6_APPLE"/>
+        <enum value="0x3007" name="GL_CLIP_DISTANCE7"/>
+        <enum value="0x3007" name="GL_CLIP_DISTANCE7_EXT" alias="GL_CLIP_DISTANCE7"/>
+        <enum value="0x3007" name="GL_CLIP_DISTANCE7_APPLE"/>
+            <unused start="0x3008" end="0x3FFF" comment="Unused for ClipPlaneName"/>
+        <enum value="0x4000" name="GL_LIGHT0"/>
+        <enum value="0x4001" name="GL_LIGHT1"/>
+        <enum value="0x4002" name="GL_LIGHT2"/>
+        <enum value="0x4003" name="GL_LIGHT3"/>
+        <enum value="0x4004" name="GL_LIGHT4"/>
+        <enum value="0x4005" name="GL_LIGHT5"/>
+        <enum value="0x4006" name="GL_LIGHT6"/>
+        <enum value="0x4007" name="GL_LIGHT7"/>
+            <unused start="0x4008" end="0x4FFF" comment="Unused for LightName"/>
+            <unused start="0x5000" end="0x5FFF" comment="Unused. Do not use."/>
+            <unused start="0x6000" end="0x6FFF" comment="Experimental (internal/test only) range. DO NOT SHIP VALUES IN THIS RANGE."/>
+            <unused start="0x7000" end="0x7FFF" comment="Unused. Do not use."/>
+    </enums>
+
+    <enums namespace="GL" start="0x8000" end="0x80BF" vendor="ARB" comment="The primary GL enumerant space begins here. All modern enum allocations are in this range. These enums are mostly assigned the default class since it's a great deal of not very useful work to be more specific">
+        <enum value="0x8000" name="GL_ABGR_EXT"/>
+        <enum value="0x8001" name="GL_CONSTANT_COLOR"/>
+        <enum value="0x8001" name="GL_CONSTANT_COLOR_EXT"/>
+        <enum value="0x8002" name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+        <enum value="0x8002" name="GL_ONE_MINUS_CONSTANT_COLOR_EXT"/>
+        <enum value="0x8003" name="GL_CONSTANT_ALPHA"/>
+        <enum value="0x8003" name="GL_CONSTANT_ALPHA_EXT"/>
+        <enum value="0x8004" name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+        <enum value="0x8004" name="GL_ONE_MINUS_CONSTANT_ALPHA_EXT"/>
+        <enum value="0x8005" name="GL_BLEND_COLOR"/>
+        <enum value="0x8005" name="GL_BLEND_COLOR_EXT"/>
+        <enum value="0x8006" name="GL_FUNC_ADD"/>
+        <enum value="0x8006" name="GL_FUNC_ADD_EXT"/>
+        <enum value="0x8006" name="GL_FUNC_ADD_OES"/>
+        <enum value="0x8007" name="GL_MIN"/>
+        <enum value="0x8007" name="GL_MIN_EXT"/>
+        <enum value="0x8008" name="GL_MAX"/>
+        <enum value="0x8008" name="GL_MAX_EXT"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION_EXT"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION_OES"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION_RGB"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION_RGB_EXT"/>
+        <enum value="0x8009" name="GL_BLEND_EQUATION_RGB_OES"/>
+        <enum value="0x800A" name="GL_FUNC_SUBTRACT"/>
+        <enum value="0x800A" name="GL_FUNC_SUBTRACT_EXT"/>
+        <enum value="0x800A" name="GL_FUNC_SUBTRACT_OES"/>
+        <enum value="0x800B" name="GL_FUNC_REVERSE_SUBTRACT"/>
+        <enum value="0x800B" name="GL_FUNC_REVERSE_SUBTRACT_EXT"/>
+        <enum value="0x800B" name="GL_FUNC_REVERSE_SUBTRACT_OES"/>
+        <enum value="0x800C" name="GL_CMYK_EXT"/>
+        <enum value="0x800D" name="GL_CMYKA_EXT"/>
+        <enum value="0x800E" name="GL_PACK_CMYK_HINT_EXT"/>
+        <enum value="0x800F" name="GL_UNPACK_CMYK_HINT_EXT"/>
+        <enum value="0x8010" name="GL_CONVOLUTION_1D"/>
+        <enum value="0x8010" name="GL_CONVOLUTION_1D_EXT"/>
+        <enum value="0x8011" name="GL_CONVOLUTION_2D"/>
+        <enum value="0x8011" name="GL_CONVOLUTION_2D_EXT"/>
+        <enum value="0x8012" name="GL_SEPARABLE_2D"/>
+        <enum value="0x8012" name="GL_SEPARABLE_2D_EXT"/>
+        <enum value="0x8013" name="GL_CONVOLUTION_BORDER_MODE"/>
+        <enum value="0x8013" name="GL_CONVOLUTION_BORDER_MODE_EXT"/>
+        <enum value="0x8014" name="GL_CONVOLUTION_FILTER_SCALE"/>
+        <enum value="0x8014" name="GL_CONVOLUTION_FILTER_SCALE_EXT"/>
+        <enum value="0x8015" name="GL_CONVOLUTION_FILTER_BIAS"/>
+        <enum value="0x8015" name="GL_CONVOLUTION_FILTER_BIAS_EXT"/>
+        <enum value="0x8016" name="GL_REDUCE"/>
+        <enum value="0x8016" name="GL_REDUCE_EXT"/>
+        <enum value="0x8017" name="GL_CONVOLUTION_FORMAT"/>
+        <enum value="0x8017" name="GL_CONVOLUTION_FORMAT_EXT"/>
+        <enum value="0x8018" name="GL_CONVOLUTION_WIDTH"/>
+        <enum value="0x8018" name="GL_CONVOLUTION_WIDTH_EXT"/>
+        <enum value="0x8019" name="GL_CONVOLUTION_HEIGHT"/>
+        <enum value="0x8019" name="GL_CONVOLUTION_HEIGHT_EXT"/>
+        <enum value="0x801A" name="GL_MAX_CONVOLUTION_WIDTH"/>
+        <enum value="0x801A" name="GL_MAX_CONVOLUTION_WIDTH_EXT"/>
+        <enum value="0x801B" name="GL_MAX_CONVOLUTION_HEIGHT"/>
+        <enum value="0x801B" name="GL_MAX_CONVOLUTION_HEIGHT_EXT"/>
+        <enum value="0x801C" name="GL_POST_CONVOLUTION_RED_SCALE"/>
+        <enum value="0x801C" name="GL_POST_CONVOLUTION_RED_SCALE_EXT"/>
+        <enum value="0x801D" name="GL_POST_CONVOLUTION_GREEN_SCALE"/>
+        <enum value="0x801D" name="GL_POST_CONVOLUTION_GREEN_SCALE_EXT"/>
+        <enum value="0x801E" name="GL_POST_CONVOLUTION_BLUE_SCALE"/>
+        <enum value="0x801E" name="GL_POST_CONVOLUTION_BLUE_SCALE_EXT"/>
+        <enum value="0x801F" name="GL_POST_CONVOLUTION_ALPHA_SCALE"/>
+        <enum value="0x801F" name="GL_POST_CONVOLUTION_ALPHA_SCALE_EXT"/>
+        <enum value="0x8020" name="GL_POST_CONVOLUTION_RED_BIAS"/>
+        <enum value="0x8020" name="GL_POST_CONVOLUTION_RED_BIAS_EXT"/>
+        <enum value="0x8021" name="GL_POST_CONVOLUTION_GREEN_BIAS"/>
+        <enum value="0x8021" name="GL_POST_CONVOLUTION_GREEN_BIAS_EXT"/>
+        <enum value="0x8022" name="GL_POST_CONVOLUTION_BLUE_BIAS"/>
+        <enum value="0x8022" name="GL_POST_CONVOLUTION_BLUE_BIAS_EXT"/>
+        <enum value="0x8023" name="GL_POST_CONVOLUTION_ALPHA_BIAS"/>
+        <enum value="0x8023" name="GL_POST_CONVOLUTION_ALPHA_BIAS_EXT"/>
+        <enum value="0x8024" name="GL_HISTOGRAM"/>
+        <enum value="0x8024" name="GL_HISTOGRAM_EXT"/>
+        <enum value="0x8025" name="GL_PROXY_HISTOGRAM"/>
+        <enum value="0x8025" name="GL_PROXY_HISTOGRAM_EXT"/>
+        <enum value="0x8026" name="GL_HISTOGRAM_WIDTH"/>
+        <enum value="0x8026" name="GL_HISTOGRAM_WIDTH_EXT"/>
+        <enum value="0x8027" name="GL_HISTOGRAM_FORMAT"/>
+        <enum value="0x8027" name="GL_HISTOGRAM_FORMAT_EXT"/>
+        <enum value="0x8028" name="GL_HISTOGRAM_RED_SIZE"/>
+        <enum value="0x8028" name="GL_HISTOGRAM_RED_SIZE_EXT"/>
+        <enum value="0x8029" name="GL_HISTOGRAM_GREEN_SIZE"/>
+        <enum value="0x8029" name="GL_HISTOGRAM_GREEN_SIZE_EXT"/>
+        <enum value="0x802A" name="GL_HISTOGRAM_BLUE_SIZE"/>
+        <enum value="0x802A" name="GL_HISTOGRAM_BLUE_SIZE_EXT"/>
+        <enum value="0x802B" name="GL_HISTOGRAM_ALPHA_SIZE"/>
+        <enum value="0x802B" name="GL_HISTOGRAM_ALPHA_SIZE_EXT"/>
+        <enum value="0x802C" name="GL_HISTOGRAM_LUMINANCE_SIZE"/>
+        <enum value="0x802C" name="GL_HISTOGRAM_LUMINANCE_SIZE_EXT"/>
+        <enum value="0x802D" name="GL_HISTOGRAM_SINK"/>
+        <enum value="0x802D" name="GL_HISTOGRAM_SINK_EXT"/>
+        <enum value="0x802E" name="GL_MINMAX"/>
+        <enum value="0x802E" name="GL_MINMAX_EXT"/>
+        <enum value="0x802F" name="GL_MINMAX_FORMAT"/>
+        <enum value="0x802F" name="GL_MINMAX_FORMAT_EXT"/>
+        <enum value="0x8030" name="GL_MINMAX_SINK"/>
+        <enum value="0x8030" name="GL_MINMAX_SINK_EXT"/>
+        <enum value="0x8031" name="GL_TABLE_TOO_LARGE_EXT"/>
+        <enum value="0x8031" name="GL_TABLE_TOO_LARGE"/>
+        <enum value="0x8032" name="GL_UNSIGNED_BYTE_3_3_2"/>
+        <enum value="0x8032" name="GL_UNSIGNED_BYTE_3_3_2_EXT"/>
+        <enum value="0x8033" name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+        <enum value="0x8033" name="GL_UNSIGNED_SHORT_4_4_4_4_EXT"/>
+        <enum value="0x8034" name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+        <enum value="0x8034" name="GL_UNSIGNED_SHORT_5_5_5_1_EXT"/>
+        <enum value="0x8035" name="GL_UNSIGNED_INT_8_8_8_8"/>
+        <enum value="0x8035" name="GL_UNSIGNED_INT_8_8_8_8_EXT"/>
+        <enum value="0x8036" name="GL_UNSIGNED_INT_10_10_10_2"/>
+        <enum value="0x8036" name="GL_UNSIGNED_INT_10_10_10_2_EXT"/>
+        <enum value="0x8037" name="GL_POLYGON_OFFSET_EXT"/>
+        <enum value="0x8037" name="GL_POLYGON_OFFSET_FILL"/>
+        <enum value="0x8038" name="GL_POLYGON_OFFSET_FACTOR"/>
+        <enum value="0x8038" name="GL_POLYGON_OFFSET_FACTOR_EXT"/>
+        <enum value="0x8039" name="GL_POLYGON_OFFSET_BIAS_EXT"/>
+        <enum value="0x803A" name="GL_RESCALE_NORMAL"/>
+        <enum value="0x803A" name="GL_RESCALE_NORMAL_EXT"/>
+        <enum value="0x803B" name="GL_ALPHA4"/>
+        <enum value="0x803B" name="GL_ALPHA4_EXT"/>
+        <enum value="0x803C" name="GL_ALPHA8"/>
+        <enum value="0x803C" name="GL_ALPHA8_EXT"/>
+        <enum value="0x803C" name="GL_ALPHA8_OES"/>
+        <enum value="0x803D" name="GL_ALPHA12"/>
+        <enum value="0x803D" name="GL_ALPHA12_EXT"/>
+        <enum value="0x803E" name="GL_ALPHA16"/>
+        <enum value="0x803E" name="GL_ALPHA16_EXT"/>
+        <enum value="0x803F" name="GL_LUMINANCE4"/>
+        <enum value="0x803F" name="GL_LUMINANCE4_EXT"/>
+        <enum value="0x8040" name="GL_LUMINANCE8"/>
+        <enum value="0x8040" name="GL_LUMINANCE8_EXT"/>
+        <enum value="0x8040" name="GL_LUMINANCE8_OES"/>
+        <enum value="0x8041" name="GL_LUMINANCE12"/>
+        <enum value="0x8041" name="GL_LUMINANCE12_EXT"/>
+        <enum value="0x8042" name="GL_LUMINANCE16"/>
+        <enum value="0x8042" name="GL_LUMINANCE16_EXT"/>
+        <enum value="0x8043" name="GL_LUMINANCE4_ALPHA4"/>
+        <enum value="0x8043" name="GL_LUMINANCE4_ALPHA4_EXT"/>
+        <enum value="0x8043" name="GL_LUMINANCE4_ALPHA4_OES"/>
+        <enum value="0x8044" name="GL_LUMINANCE6_ALPHA2"/>
+        <enum value="0x8044" name="GL_LUMINANCE6_ALPHA2_EXT"/>
+        <enum value="0x8045" name="GL_LUMINANCE8_ALPHA8"/>
+        <enum value="0x8045" name="GL_LUMINANCE8_ALPHA8_EXT"/>
+        <enum value="0x8045" name="GL_LUMINANCE8_ALPHA8_OES"/>
+        <enum value="0x8046" name="GL_LUMINANCE12_ALPHA4"/>
+        <enum value="0x8046" name="GL_LUMINANCE12_ALPHA4_EXT"/>
+        <enum value="0x8047" name="GL_LUMINANCE12_ALPHA12"/>
+        <enum value="0x8047" name="GL_LUMINANCE12_ALPHA12_EXT"/>
+        <enum value="0x8048" name="GL_LUMINANCE16_ALPHA16"/>
+        <enum value="0x8048" name="GL_LUMINANCE16_ALPHA16_EXT"/>
+        <enum value="0x8049" name="GL_INTENSITY"/>
+        <enum value="0x8049" name="GL_INTENSITY_EXT"/>
+        <enum value="0x804A" name="GL_INTENSITY4"/>
+        <enum value="0x804A" name="GL_INTENSITY4_EXT"/>
+        <enum value="0x804B" name="GL_INTENSITY8"/>
+        <enum value="0x804B" name="GL_INTENSITY8_EXT"/>
+        <enum value="0x804C" name="GL_INTENSITY12"/>
+        <enum value="0x804C" name="GL_INTENSITY12_EXT"/>
+        <enum value="0x804D" name="GL_INTENSITY16"/>
+        <enum value="0x804D" name="GL_INTENSITY16_EXT"/>
+        <enum value="0x804E" name="GL_RGB2_EXT"/>
+        <enum value="0x804F" name="GL_RGB4"/>
+        <enum value="0x804F" name="GL_RGB4_EXT"/>
+        <enum value="0x8050" name="GL_RGB5"/>
+        <enum value="0x8050" name="GL_RGB5_EXT"/>
+        <enum value="0x8051" name="GL_RGB8"/>
+        <enum value="0x8051" name="GL_RGB8_EXT"/>
+        <enum value="0x8051" name="GL_RGB8_OES"/>
+        <enum value="0x8052" name="GL_RGB10"/>
+        <enum value="0x8052" name="GL_RGB10_EXT"/>
+        <enum value="0x8053" name="GL_RGB12"/>
+        <enum value="0x8053" name="GL_RGB12_EXT"/>
+        <enum value="0x8054" name="GL_RGB16"/>
+        <enum value="0x8054" name="GL_RGB16_EXT"/>
+        <enum value="0x8055" name="GL_RGBA2"/>
+        <enum value="0x8055" name="GL_RGBA2_EXT"/>
+        <enum value="0x8056" name="GL_RGBA4"/>
+        <enum value="0x8056" name="GL_RGBA4_EXT"/>
+        <enum value="0x8056" name="GL_RGBA4_OES"/>
+        <enum value="0x8057" name="GL_RGB5_A1"/>
+        <enum value="0x8057" name="GL_RGB5_A1_EXT"/>
+        <enum value="0x8057" name="GL_RGB5_A1_OES"/>
+        <enum value="0x8058" name="GL_RGBA8"/>
+        <enum value="0x8058" name="GL_RGBA8_EXT"/>
+        <enum value="0x8058" name="GL_RGBA8_OES"/>
+        <enum value="0x8059" name="GL_RGB10_A2"/>
+        <enum value="0x8059" name="GL_RGB10_A2_EXT"/>
+        <enum value="0x805A" name="GL_RGBA12"/>
+        <enum value="0x805A" name="GL_RGBA12_EXT"/>
+        <enum value="0x805B" name="GL_RGBA16"/>
+        <enum value="0x805B" name="GL_RGBA16_EXT"/>
+        <enum value="0x805C" name="GL_TEXTURE_RED_SIZE"/>
+        <enum value="0x805C" name="GL_TEXTURE_RED_SIZE_EXT"/>
+        <enum value="0x805D" name="GL_TEXTURE_GREEN_SIZE"/>
+        <enum value="0x805D" name="GL_TEXTURE_GREEN_SIZE_EXT"/>
+        <enum value="0x805E" name="GL_TEXTURE_BLUE_SIZE"/>
+        <enum value="0x805E" name="GL_TEXTURE_BLUE_SIZE_EXT"/>
+        <enum value="0x805F" name="GL_TEXTURE_ALPHA_SIZE"/>
+        <enum value="0x805F" name="GL_TEXTURE_ALPHA_SIZE_EXT"/>
+        <enum value="0x8060" name="GL_TEXTURE_LUMINANCE_SIZE"/>
+        <enum value="0x8060" name="GL_TEXTURE_LUMINANCE_SIZE_EXT"/>
+        <enum value="0x8061" name="GL_TEXTURE_INTENSITY_SIZE"/>
+        <enum value="0x8061" name="GL_TEXTURE_INTENSITY_SIZE_EXT"/>
+        <enum value="0x8062" name="GL_REPLACE_EXT"/>
+        <enum value="0x8063" name="GL_PROXY_TEXTURE_1D"/>
+        <enum value="0x8063" name="GL_PROXY_TEXTURE_1D_EXT"/>
+        <enum value="0x8064" name="GL_PROXY_TEXTURE_2D"/>
+        <enum value="0x8064" name="GL_PROXY_TEXTURE_2D_EXT"/>
+        <enum value="0x8065" name="GL_TEXTURE_TOO_LARGE_EXT"/>
+        <enum value="0x8066" name="GL_TEXTURE_PRIORITY"/>
+        <enum value="0x8066" name="GL_TEXTURE_PRIORITY_EXT"/>
+        <enum value="0x8067" name="GL_TEXTURE_RESIDENT"/>
+        <enum value="0x8067" name="GL_TEXTURE_RESIDENT_EXT"/>
+        <enum value="0x8068" name="GL_TEXTURE_1D_BINDING_EXT"/>
+        <enum value="0x8068" name="GL_TEXTURE_BINDING_1D"/>
+        <enum value="0x8069" name="GL_TEXTURE_2D_BINDING_EXT"/>
+        <enum value="0x8069" name="GL_TEXTURE_BINDING_2D"/>
+        <enum value="0x806A" name="GL_TEXTURE_3D_BINDING_EXT"/>
+        <enum value="0x806A" name="GL_TEXTURE_3D_BINDING_OES"/>
+        <enum value="0x806A" name="GL_TEXTURE_BINDING_3D"/>
+        <enum value="0x806A" name="GL_TEXTURE_BINDING_3D_OES"/>
+        <enum value="0x806B" name="GL_PACK_SKIP_IMAGES"/>
+        <enum value="0x806B" name="GL_PACK_SKIP_IMAGES_EXT"/>
+        <enum value="0x806C" name="GL_PACK_IMAGE_HEIGHT"/>
+        <enum value="0x806C" name="GL_PACK_IMAGE_HEIGHT_EXT"/>
+        <enum value="0x806D" name="GL_UNPACK_SKIP_IMAGES"/>
+        <enum value="0x806D" name="GL_UNPACK_SKIP_IMAGES_EXT"/>
+        <enum value="0x806E" name="GL_UNPACK_IMAGE_HEIGHT"/>
+        <enum value="0x806E" name="GL_UNPACK_IMAGE_HEIGHT_EXT"/>
+        <enum value="0x806F" name="GL_TEXTURE_3D"/>
+        <enum value="0x806F" name="GL_TEXTURE_3D_EXT"/>
+        <enum value="0x806F" name="GL_TEXTURE_3D_OES"/>
+        <enum value="0x8070" name="GL_PROXY_TEXTURE_3D"/>
+        <enum value="0x8070" name="GL_PROXY_TEXTURE_3D_EXT"/>
+        <enum value="0x8071" name="GL_TEXTURE_DEPTH"/>
+        <enum value="0x8071" name="GL_TEXTURE_DEPTH_EXT"/>
+        <enum value="0x8072" name="GL_TEXTURE_WRAP_R"/>
+        <enum value="0x8072" name="GL_TEXTURE_WRAP_R_EXT"/>
+        <enum value="0x8072" name="GL_TEXTURE_WRAP_R_OES"/>
+        <enum value="0x8073" name="GL_MAX_3D_TEXTURE_SIZE"/>
+        <enum value="0x8073" name="GL_MAX_3D_TEXTURE_SIZE_EXT"/>
+        <enum value="0x8073" name="GL_MAX_3D_TEXTURE_SIZE_OES"/>
+        <enum value="0x8074" name="GL_VERTEX_ARRAY"/>
+        <enum value="0x8074" name="GL_VERTEX_ARRAY_EXT"/>
+        <enum value="0x8074" name="GL_VERTEX_ARRAY_KHR"/>
+        <enum value="0x8075" name="GL_NORMAL_ARRAY"/>
+        <enum value="0x8075" name="GL_NORMAL_ARRAY_EXT"/>
+        <enum value="0x8076" name="GL_COLOR_ARRAY"/>
+        <enum value="0x8076" name="GL_COLOR_ARRAY_EXT"/>
+        <enum value="0x8077" name="GL_INDEX_ARRAY"/>
+        <enum value="0x8077" name="GL_INDEX_ARRAY_EXT"/>
+        <enum value="0x8078" name="GL_TEXTURE_COORD_ARRAY"/>
+        <enum value="0x8078" name="GL_TEXTURE_COORD_ARRAY_EXT"/>
+        <enum value="0x8079" name="GL_EDGE_FLAG_ARRAY"/>
+        <enum value="0x8079" name="GL_EDGE_FLAG_ARRAY_EXT"/>
+        <enum value="0x807A" name="GL_VERTEX_ARRAY_SIZE"/>
+        <enum value="0x807A" name="GL_VERTEX_ARRAY_SIZE_EXT"/>
+        <enum value="0x807B" name="GL_VERTEX_ARRAY_TYPE"/>
+        <enum value="0x807B" name="GL_VERTEX_ARRAY_TYPE_EXT"/>
+        <enum value="0x807C" name="GL_VERTEX_ARRAY_STRIDE"/>
+        <enum value="0x807C" name="GL_VERTEX_ARRAY_STRIDE_EXT"/>
+        <enum value="0x807D" name="GL_VERTEX_ARRAY_COUNT_EXT"/>
+        <enum value="0x807E" name="GL_NORMAL_ARRAY_TYPE"/>
+        <enum value="0x807E" name="GL_NORMAL_ARRAY_TYPE_EXT"/>
+        <enum value="0x807F" name="GL_NORMAL_ARRAY_STRIDE"/>
+        <enum value="0x807F" name="GL_NORMAL_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8080" name="GL_NORMAL_ARRAY_COUNT_EXT"/>
+        <enum value="0x8081" name="GL_COLOR_ARRAY_SIZE"/>
+        <enum value="0x8081" name="GL_COLOR_ARRAY_SIZE_EXT"/>
+        <enum value="0x8082" name="GL_COLOR_ARRAY_TYPE"/>
+        <enum value="0x8082" name="GL_COLOR_ARRAY_TYPE_EXT"/>
+        <enum value="0x8083" name="GL_COLOR_ARRAY_STRIDE"/>
+        <enum value="0x8083" name="GL_COLOR_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8084" name="GL_COLOR_ARRAY_COUNT_EXT"/>
+        <enum value="0x8085" name="GL_INDEX_ARRAY_TYPE"/>
+        <enum value="0x8085" name="GL_INDEX_ARRAY_TYPE_EXT"/>
+        <enum value="0x8086" name="GL_INDEX_ARRAY_STRIDE"/>
+        <enum value="0x8086" name="GL_INDEX_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8087" name="GL_INDEX_ARRAY_COUNT_EXT"/>
+        <enum value="0x8088" name="GL_TEXTURE_COORD_ARRAY_SIZE"/>
+        <enum value="0x8088" name="GL_TEXTURE_COORD_ARRAY_SIZE_EXT"/>
+        <enum value="0x8089" name="GL_TEXTURE_COORD_ARRAY_TYPE"/>
+        <enum value="0x8089" name="GL_TEXTURE_COORD_ARRAY_TYPE_EXT"/>
+        <enum value="0x808A" name="GL_TEXTURE_COORD_ARRAY_STRIDE"/>
+        <enum value="0x808A" name="GL_TEXTURE_COORD_ARRAY_STRIDE_EXT"/>
+        <enum value="0x808B" name="GL_TEXTURE_COORD_ARRAY_COUNT_EXT"/>
+        <enum value="0x808C" name="GL_EDGE_FLAG_ARRAY_STRIDE"/>
+        <enum value="0x808C" name="GL_EDGE_FLAG_ARRAY_STRIDE_EXT"/>
+        <enum value="0x808D" name="GL_EDGE_FLAG_ARRAY_COUNT_EXT"/>
+        <enum value="0x808E" name="GL_VERTEX_ARRAY_POINTER"/>
+        <enum value="0x808E" name="GL_VERTEX_ARRAY_POINTER_EXT"/>
+        <enum value="0x808F" name="GL_NORMAL_ARRAY_POINTER"/>
+        <enum value="0x808F" name="GL_NORMAL_ARRAY_POINTER_EXT"/>
+        <enum value="0x8090" name="GL_COLOR_ARRAY_POINTER"/>
+        <enum value="0x8090" name="GL_COLOR_ARRAY_POINTER_EXT"/>
+        <enum value="0x8091" name="GL_INDEX_ARRAY_POINTER"/>
+        <enum value="0x8091" name="GL_INDEX_ARRAY_POINTER_EXT"/>
+        <enum value="0x8092" name="GL_TEXTURE_COORD_ARRAY_POINTER"/>
+        <enum value="0x8092" name="GL_TEXTURE_COORD_ARRAY_POINTER_EXT"/>
+        <enum value="0x8093" name="GL_EDGE_FLAG_ARRAY_POINTER"/>
+        <enum value="0x8093" name="GL_EDGE_FLAG_ARRAY_POINTER_EXT"/>
+        <enum value="0x8094" name="GL_INTERLACE_SGIX"/>
+        <enum value="0x8095" name="GL_DETAIL_TEXTURE_2D_SGIS"/>
+        <enum value="0x8096" name="GL_DETAIL_TEXTURE_2D_BINDING_SGIS"/>
+        <enum value="0x8097" name="GL_LINEAR_DETAIL_SGIS"/>
+        <enum value="0x8098" name="GL_LINEAR_DETAIL_ALPHA_SGIS"/>
+        <enum value="0x8099" name="GL_LINEAR_DETAIL_COLOR_SGIS"/>
+        <enum value="0x809A" name="GL_DETAIL_TEXTURE_LEVEL_SGIS"/>
+        <enum value="0x809B" name="GL_DETAIL_TEXTURE_MODE_SGIS"/>
+        <enum value="0x809C" name="GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS"/>
+        <enum value="0x809D" name="GL_MULTISAMPLE"/>
+        <enum value="0x809D" name="GL_MULTISAMPLE_ARB"/>
+        <enum value="0x809D" name="GL_MULTISAMPLE_EXT"/>
+        <enum value="0x809D" name="GL_MULTISAMPLE_SGIS"/>
+        <enum value="0x809E" name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+        <enum value="0x809E" name="GL_SAMPLE_ALPHA_TO_COVERAGE_ARB"/>
+        <enum value="0x809E" name="GL_SAMPLE_ALPHA_TO_MASK_EXT"/>
+        <enum value="0x809E" name="GL_SAMPLE_ALPHA_TO_MASK_SGIS"/>
+        <enum value="0x809F" name="GL_SAMPLE_ALPHA_TO_ONE"/>
+        <enum value="0x809F" name="GL_SAMPLE_ALPHA_TO_ONE_ARB"/>
+        <enum value="0x809F" name="GL_SAMPLE_ALPHA_TO_ONE_EXT"/>
+        <enum value="0x809F" name="GL_SAMPLE_ALPHA_TO_ONE_SGIS"/>
+        <enum value="0x80A0" name="GL_SAMPLE_COVERAGE"/>
+        <enum value="0x80A0" name="GL_SAMPLE_COVERAGE_ARB"/>
+        <enum value="0x80A0" name="GL_SAMPLE_MASK_EXT"/>
+        <enum value="0x80A0" name="GL_SAMPLE_MASK_SGIS"/>
+        <enum value="0x80A1" name="GL_1PASS_EXT"/>
+        <enum value="0x80A1" name="GL_1PASS_SGIS"/>
+        <enum value="0x80A2" name="GL_2PASS_0_EXT"/>
+        <enum value="0x80A2" name="GL_2PASS_0_SGIS"/>
+        <enum value="0x80A3" name="GL_2PASS_1_EXT"/>
+        <enum value="0x80A3" name="GL_2PASS_1_SGIS"/>
+        <enum value="0x80A4" name="GL_4PASS_0_EXT"/>
+        <enum value="0x80A4" name="GL_4PASS_0_SGIS"/>
+        <enum value="0x80A5" name="GL_4PASS_1_EXT"/>
+        <enum value="0x80A5" name="GL_4PASS_1_SGIS"/>
+        <enum value="0x80A6" name="GL_4PASS_2_EXT"/>
+        <enum value="0x80A6" name="GL_4PASS_2_SGIS"/>
+        <enum value="0x80A7" name="GL_4PASS_3_EXT"/>
+        <enum value="0x80A7" name="GL_4PASS_3_SGIS"/>
+        <enum value="0x80A8" name="GL_SAMPLE_BUFFERS"/>
+        <enum value="0x80A8" name="GL_SAMPLE_BUFFERS_ARB"/>
+        <enum value="0x80A8" name="GL_SAMPLE_BUFFERS_EXT"/>
+        <enum value="0x80A8" name="GL_SAMPLE_BUFFERS_SGIS"/>
+        <enum value="0x80A9" name="GL_SAMPLES"/>
+        <enum value="0x80A9" name="GL_SAMPLES_ARB"/>
+        <enum value="0x80A9" name="GL_SAMPLES_EXT"/>
+        <enum value="0x80A9" name="GL_SAMPLES_SGIS"/>
+        <enum value="0x80AA" name="GL_SAMPLE_COVERAGE_VALUE"/>
+        <enum value="0x80AA" name="GL_SAMPLE_COVERAGE_VALUE_ARB"/>
+        <enum value="0x80AA" name="GL_SAMPLE_MASK_VALUE_EXT"/>
+        <enum value="0x80AA" name="GL_SAMPLE_MASK_VALUE_SGIS"/>
+        <enum value="0x80AB" name="GL_SAMPLE_COVERAGE_INVERT"/>
+        <enum value="0x80AB" name="GL_SAMPLE_COVERAGE_INVERT_ARB"/>
+        <enum value="0x80AB" name="GL_SAMPLE_MASK_INVERT_EXT"/>
+        <enum value="0x80AB" name="GL_SAMPLE_MASK_INVERT_SGIS"/>
+        <enum value="0x80AC" name="GL_SAMPLE_PATTERN_EXT"/>
+        <enum value="0x80AC" name="GL_SAMPLE_PATTERN_SGIS"/>
+        <enum value="0x80AD" name="GL_LINEAR_SHARPEN_SGIS"/>
+        <enum value="0x80AE" name="GL_LINEAR_SHARPEN_ALPHA_SGIS"/>
+        <enum value="0x80AF" name="GL_LINEAR_SHARPEN_COLOR_SGIS"/>
+        <enum value="0x80B0" name="GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS"/>
+        <enum value="0x80B1" name="GL_COLOR_MATRIX"/>
+        <enum value="0x80B1" name="GL_COLOR_MATRIX_SGI"/>
+        <enum value="0x80B2" name="GL_COLOR_MATRIX_STACK_DEPTH"/>
+        <enum value="0x80B2" name="GL_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+        <enum value="0x80B3" name="GL_MAX_COLOR_MATRIX_STACK_DEPTH"/>
+        <enum value="0x80B3" name="GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+        <enum value="0x80B4" name="GL_POST_COLOR_MATRIX_RED_SCALE"/>
+        <enum value="0x80B4" name="GL_POST_COLOR_MATRIX_RED_SCALE_SGI"/>
+        <enum value="0x80B5" name="GL_POST_COLOR_MATRIX_GREEN_SCALE"/>
+        <enum value="0x80B5" name="GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI"/>
+        <enum value="0x80B6" name="GL_POST_COLOR_MATRIX_BLUE_SCALE"/>
+        <enum value="0x80B6" name="GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI"/>
+        <enum value="0x80B7" name="GL_POST_COLOR_MATRIX_ALPHA_SCALE"/>
+        <enum value="0x80B7" name="GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI"/>
+        <enum value="0x80B8" name="GL_POST_COLOR_MATRIX_RED_BIAS"/>
+        <enum value="0x80B8" name="GL_POST_COLOR_MATRIX_RED_BIAS_SGI"/>
+        <enum value="0x80B9" name="GL_POST_COLOR_MATRIX_GREEN_BIAS"/>
+        <enum value="0x80B9" name="GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI"/>
+        <enum value="0x80BA" name="GL_POST_COLOR_MATRIX_BLUE_BIAS"/>
+        <enum value="0x80BA" name="GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI"/>
+        <enum value="0x80BB" name="GL_POST_COLOR_MATRIX_ALPHA_BIAS"/>
+        <enum value="0x80BB" name="GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI"/>
+        <enum value="0x80BC" name="GL_TEXTURE_COLOR_TABLE_SGI"/>
+        <enum value="0x80BD" name="GL_PROXY_TEXTURE_COLOR_TABLE_SGI"/>
+        <enum value="0x80BE" name="GL_TEXTURE_ENV_BIAS_SGIX"/>
+        <enum value="0x80BF" name="GL_SHADOW_AMBIENT_SGIX"/>
+        <enum value="0x80BF" name="GL_TEXTURE_COMPARE_FAIL_VALUE_ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x80C0" end="0x80CF" vendor="ZiiLabs">
+            <unused start="0x80C0" end="0x80C7" vendor="ZiiLabs"/>
+        <enum value="0x80C8" name="GL_BLEND_DST_RGB"/>
+        <enum value="0x80C8" name="GL_BLEND_DST_RGB_EXT"/>
+        <enum value="0x80C8" name="GL_BLEND_DST_RGB_OES"/>
+        <enum value="0x80C9" name="GL_BLEND_SRC_RGB"/>
+        <enum value="0x80C9" name="GL_BLEND_SRC_RGB_EXT"/>
+        <enum value="0x80C9" name="GL_BLEND_SRC_RGB_OES"/>
+        <enum value="0x80CA" name="GL_BLEND_DST_ALPHA"/>
+        <enum value="0x80CA" name="GL_BLEND_DST_ALPHA_EXT"/>
+        <enum value="0x80CA" name="GL_BLEND_DST_ALPHA_OES"/>
+        <enum value="0x80CB" name="GL_BLEND_SRC_ALPHA"/>
+        <enum value="0x80CB" name="GL_BLEND_SRC_ALPHA_EXT"/>
+        <enum value="0x80CB" name="GL_BLEND_SRC_ALPHA_OES"/>
+        <enum value="0x80CC" name="GL_422_EXT"/>
+        <enum value="0x80CD" name="GL_422_REV_EXT"/>
+        <enum value="0x80CE" name="GL_422_AVERAGE_EXT"/>
+        <enum value="0x80CF" name="GL_422_REV_AVERAGE_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x80D0" end="0x80DF" vendor="SGI">
+        <enum value="0x80D0" name="GL_COLOR_TABLE"/>
+        <enum value="0x80D0" name="GL_COLOR_TABLE_SGI"/>
+        <enum value="0x80D1" name="GL_POST_CONVOLUTION_COLOR_TABLE"/>
+        <enum value="0x80D1" name="GL_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+        <enum value="0x80D2" name="GL_POST_COLOR_MATRIX_COLOR_TABLE"/>
+        <enum value="0x80D2" name="GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+        <enum value="0x80D3" name="GL_PROXY_COLOR_TABLE"/>
+        <enum value="0x80D3" name="GL_PROXY_COLOR_TABLE_SGI"/>
+        <enum value="0x80D4" name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE"/>
+        <enum value="0x80D4" name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+        <enum value="0x80D5" name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE"/>
+        <enum value="0x80D5" name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+        <enum value="0x80D6" name="GL_COLOR_TABLE_SCALE"/>
+        <enum value="0x80D6" name="GL_COLOR_TABLE_SCALE_SGI"/>
+        <enum value="0x80D7" name="GL_COLOR_TABLE_BIAS"/>
+        <enum value="0x80D7" name="GL_COLOR_TABLE_BIAS_SGI"/>
+        <enum value="0x80D8" name="GL_COLOR_TABLE_FORMAT"/>
+        <enum value="0x80D8" name="GL_COLOR_TABLE_FORMAT_SGI"/>
+        <enum value="0x80D9" name="GL_COLOR_TABLE_WIDTH"/>
+        <enum value="0x80D9" name="GL_COLOR_TABLE_WIDTH_SGI"/>
+        <enum value="0x80DA" name="GL_COLOR_TABLE_RED_SIZE"/>
+        <enum value="0x80DA" name="GL_COLOR_TABLE_RED_SIZE_SGI"/>
+        <enum value="0x80DB" name="GL_COLOR_TABLE_GREEN_SIZE"/>
+        <enum value="0x80DB" name="GL_COLOR_TABLE_GREEN_SIZE_SGI"/>
+        <enum value="0x80DC" name="GL_COLOR_TABLE_BLUE_SIZE"/>
+        <enum value="0x80DC" name="GL_COLOR_TABLE_BLUE_SIZE_SGI"/>
+        <enum value="0x80DD" name="GL_COLOR_TABLE_ALPHA_SIZE"/>
+        <enum value="0x80DD" name="GL_COLOR_TABLE_ALPHA_SIZE_SGI"/>
+        <enum value="0x80DE" name="GL_COLOR_TABLE_LUMINANCE_SIZE"/>
+        <enum value="0x80DE" name="GL_COLOR_TABLE_LUMINANCE_SIZE_SGI"/>
+        <enum value="0x80DF" name="GL_COLOR_TABLE_INTENSITY_SIZE"/>
+        <enum value="0x80DF" name="GL_COLOR_TABLE_INTENSITY_SIZE_SGI"/>
+    </enums>
+
+    <enums namespace="GL" start="0x80E0" end="0x810F" vendor="MS">
+        <enum value="0x80E0" name="GL_BGR"/>
+        <enum value="0x80E0" name="GL_BGR_EXT"/>
+        <enum value="0x80E1" name="GL_BGRA"/>
+        <enum value="0x80E1" name="GL_BGRA_EXT"/>
+        <enum value="0x80E1" name="GL_BGRA_IMG"/>
+        <enum value="0x80E2" name="GL_COLOR_INDEX1_EXT"/>
+        <enum value="0x80E3" name="GL_COLOR_INDEX2_EXT"/>
+        <enum value="0x80E4" name="GL_COLOR_INDEX4_EXT"/>
+        <enum value="0x80E5" name="GL_COLOR_INDEX8_EXT"/>
+        <enum value="0x80E6" name="GL_COLOR_INDEX12_EXT"/>
+        <enum value="0x80E7" name="GL_COLOR_INDEX16_EXT"/>
+        <enum value="0x80E8" name="GL_MAX_ELEMENTS_VERTICES"/>
+        <enum value="0x80E8" name="GL_MAX_ELEMENTS_VERTICES_EXT"/>
+        <enum value="0x80E9" name="GL_MAX_ELEMENTS_INDICES"/>
+        <enum value="0x80E9" name="GL_MAX_ELEMENTS_INDICES_EXT"/>
+        <enum value="0x80EA" name="GL_PHONG_WIN"/>
+        <enum value="0x80EB" name="GL_PHONG_HINT_WIN"/>
+        <enum value="0x80EC" name="GL_FOG_SPECULAR_TEXTURE_WIN"/>
+        <enum value="0x80ED" name="GL_TEXTURE_INDEX_SIZE_EXT"/>
+        <enum value="0x80EE" name="GL_PARAMETER_BUFFER"/>
+        <enum value="0x80EE" name="GL_PARAMETER_BUFFER_ARB" alias="GL_PARAMETER_BUFFER"/>
+        <enum value="0x80EF" name="GL_PARAMETER_BUFFER_BINDING"/>
+        <enum value="0x80EF" name="GL_PARAMETER_BUFFER_BINDING_ARB" alias="GL_PARAMETER_BUFFER_BINDING"/>
+        <enum value="0x80F0" name="GL_CLIP_VOLUME_CLIPPING_HINT_EXT"/>
+            <unused start="0x80F1" end="0x810F" vendor="MS"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8110" end="0x814F" vendor="SGI">
+        <enum value="0x8110" name="GL_DUAL_ALPHA4_SGIS"/>
+        <enum value="0x8111" name="GL_DUAL_ALPHA8_SGIS"/>
+        <enum value="0x8112" name="GL_DUAL_ALPHA12_SGIS"/>
+        <enum value="0x8113" name="GL_DUAL_ALPHA16_SGIS"/>
+        <enum value="0x8114" name="GL_DUAL_LUMINANCE4_SGIS"/>
+        <enum value="0x8115" name="GL_DUAL_LUMINANCE8_SGIS"/>
+        <enum value="0x8116" name="GL_DUAL_LUMINANCE12_SGIS"/>
+        <enum value="0x8117" name="GL_DUAL_LUMINANCE16_SGIS"/>
+        <enum value="0x8118" name="GL_DUAL_INTENSITY4_SGIS"/>
+        <enum value="0x8119" name="GL_DUAL_INTENSITY8_SGIS"/>
+        <enum value="0x811A" name="GL_DUAL_INTENSITY12_SGIS"/>
+        <enum value="0x811B" name="GL_DUAL_INTENSITY16_SGIS"/>
+        <enum value="0x811C" name="GL_DUAL_LUMINANCE_ALPHA4_SGIS"/>
+        <enum value="0x811D" name="GL_DUAL_LUMINANCE_ALPHA8_SGIS"/>
+        <enum value="0x811E" name="GL_QUAD_ALPHA4_SGIS"/>
+        <enum value="0x811F" name="GL_QUAD_ALPHA8_SGIS"/>
+        <enum value="0x8120" name="GL_QUAD_LUMINANCE4_SGIS"/>
+        <enum value="0x8121" name="GL_QUAD_LUMINANCE8_SGIS"/>
+        <enum value="0x8122" name="GL_QUAD_INTENSITY4_SGIS"/>
+        <enum value="0x8123" name="GL_QUAD_INTENSITY8_SGIS"/>
+        <enum value="0x8124" name="GL_DUAL_TEXTURE_SELECT_SGIS"/>
+        <enum value="0x8125" name="GL_QUAD_TEXTURE_SELECT_SGIS"/>
+        <enum value="0x8126" name="GL_POINT_SIZE_MIN"/>
+        <enum value="0x8126" name="GL_POINT_SIZE_MIN_ARB"/>
+        <enum value="0x8126" name="GL_POINT_SIZE_MIN_EXT"/>
+        <enum value="0x8126" name="GL_POINT_SIZE_MIN_SGIS"/>
+        <enum value="0x8127" name="GL_POINT_SIZE_MAX"/>
+        <enum value="0x8127" name="GL_POINT_SIZE_MAX_ARB"/>
+        <enum value="0x8127" name="GL_POINT_SIZE_MAX_EXT"/>
+        <enum value="0x8127" name="GL_POINT_SIZE_MAX_SGIS"/>
+        <enum value="0x8128" name="GL_POINT_FADE_THRESHOLD_SIZE"/>
+        <enum value="0x8128" name="GL_POINT_FADE_THRESHOLD_SIZE_ARB"/>
+        <enum value="0x8128" name="GL_POINT_FADE_THRESHOLD_SIZE_EXT"/>
+        <enum value="0x8128" name="GL_POINT_FADE_THRESHOLD_SIZE_SGIS"/>
+        <enum value="0x8129" name="GL_DISTANCE_ATTENUATION_EXT"/>
+        <enum value="0x8129" name="GL_DISTANCE_ATTENUATION_SGIS"/>
+        <enum value="0x8129" name="GL_POINT_DISTANCE_ATTENUATION"/>
+        <enum value="0x8129" name="GL_POINT_DISTANCE_ATTENUATION_ARB"/>
+        <enum value="0x812A" name="GL_FOG_FUNC_SGIS"/>
+        <enum value="0x812B" name="GL_FOG_FUNC_POINTS_SGIS"/>
+        <enum value="0x812C" name="GL_MAX_FOG_FUNC_POINTS_SGIS"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER_ARB"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER_EXT"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER_NV"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER_SGIS"/>
+        <enum value="0x812D" name="GL_CLAMP_TO_BORDER_OES"/>
+        <enum value="0x812E" name="GL_TEXTURE_MULTI_BUFFER_HINT_SGIX"/>
+        <enum value="0x812F" name="GL_CLAMP_TO_EDGE"/>
+        <enum value="0x812F" name="GL_CLAMP_TO_EDGE_SGIS"/>
+        <enum value="0x8130" name="GL_PACK_SKIP_VOLUMES_SGIS"/>
+        <enum value="0x8131" name="GL_PACK_IMAGE_DEPTH_SGIS"/>
+        <enum value="0x8132" name="GL_UNPACK_SKIP_VOLUMES_SGIS"/>
+        <enum value="0x8133" name="GL_UNPACK_IMAGE_DEPTH_SGIS"/>
+        <enum value="0x8134" name="GL_TEXTURE_4D_SGIS"/>
+        <enum value="0x8135" name="GL_PROXY_TEXTURE_4D_SGIS"/>
+        <enum value="0x8136" name="GL_TEXTURE_4DSIZE_SGIS"/>
+        <enum value="0x8137" name="GL_TEXTURE_WRAP_Q_SGIS"/>
+        <enum value="0x8138" name="GL_MAX_4D_TEXTURE_SIZE_SGIS"/>
+        <enum value="0x8139" name="GL_PIXEL_TEX_GEN_SGIX"/>
+        <enum value="0x813A" name="GL_TEXTURE_MIN_LOD"/>
+        <enum value="0x813A" name="GL_TEXTURE_MIN_LOD_SGIS"/>
+        <enum value="0x813B" name="GL_TEXTURE_MAX_LOD"/>
+        <enum value="0x813B" name="GL_TEXTURE_MAX_LOD_SGIS"/>
+        <enum value="0x813C" name="GL_TEXTURE_BASE_LEVEL"/>
+        <enum value="0x813C" name="GL_TEXTURE_BASE_LEVEL_SGIS"/>
+        <enum value="0x813D" name="GL_TEXTURE_MAX_LEVEL"/>
+        <enum value="0x813D" name="GL_TEXTURE_MAX_LEVEL_APPLE"/>
+        <enum value="0x813D" name="GL_TEXTURE_MAX_LEVEL_SGIS"/>
+        <enum value="0x813E" name="GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX"/>
+        <enum value="0x813F" name="GL_PIXEL_TILE_CACHE_INCREMENT_SGIX"/>
+        <enum value="0x8140" name="GL_PIXEL_TILE_WIDTH_SGIX"/>
+        <enum value="0x8141" name="GL_PIXEL_TILE_HEIGHT_SGIX"/>
+        <enum value="0x8142" name="GL_PIXEL_TILE_GRID_WIDTH_SGIX"/>
+        <enum value="0x8143" name="GL_PIXEL_TILE_GRID_HEIGHT_SGIX"/>
+        <enum value="0x8144" name="GL_PIXEL_TILE_GRID_DEPTH_SGIX"/>
+        <enum value="0x8145" name="GL_PIXEL_TILE_CACHE_SIZE_SGIX"/>
+        <enum value="0x8146" name="GL_FILTER4_SGIS"/>
+        <enum value="0x8147" name="GL_TEXTURE_FILTER4_SIZE_SGIS"/>
+        <enum value="0x8148" name="GL_SPRITE_SGIX"/>
+        <enum value="0x8149" name="GL_SPRITE_MODE_SGIX"/>
+        <enum value="0x814A" name="GL_SPRITE_AXIS_SGIX"/>
+        <enum value="0x814B" name="GL_SPRITE_TRANSLATION_SGIX"/>
+        <enum value="0x814C" name="GL_SPRITE_AXIAL_SGIX"/>
+        <enum value="0x814D" name="GL_SPRITE_OBJECT_ALIGNED_SGIX"/>
+        <enum value="0x814E" name="GL_SPRITE_EYE_ALIGNED_SGIX"/>
+        <enum value="0x814F" name="GL_TEXTURE_4D_BINDING_SGIS"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8150" end="0x816F" vendor="HP">
+        <enum value="0x8150" name="GL_IGNORE_BORDER_HP"/>
+        <enum value="0x8151" name="GL_CONSTANT_BORDER"/>
+        <enum value="0x8151" name="GL_CONSTANT_BORDER_HP"/>
+            <unused start="0x8152" vendor="HP" comment="GL_WRAP_BORDER = 0x8152 was proposed, but not actually promoted to core"/>
+        <enum value="0x8153" name="GL_REPLICATE_BORDER"/>
+        <enum value="0x8153" name="GL_REPLICATE_BORDER_HP"/>
+        <enum value="0x8154" name="GL_CONVOLUTION_BORDER_COLOR"/>
+        <enum value="0x8154" name="GL_CONVOLUTION_BORDER_COLOR_HP"/>
+        <enum value="0x8155" name="GL_IMAGE_SCALE_X_HP"/>
+        <enum value="0x8156" name="GL_IMAGE_SCALE_Y_HP"/>
+        <enum value="0x8157" name="GL_IMAGE_TRANSLATE_X_HP"/>
+        <enum value="0x8158" name="GL_IMAGE_TRANSLATE_Y_HP"/>
+        <enum value="0x8159" name="GL_IMAGE_ROTATE_ANGLE_HP"/>
+        <enum value="0x815A" name="GL_IMAGE_ROTATE_ORIGIN_X_HP"/>
+        <enum value="0x815B" name="GL_IMAGE_ROTATE_ORIGIN_Y_HP"/>
+        <enum value="0x815C" name="GL_IMAGE_MAG_FILTER_HP"/>
+        <enum value="0x815D" name="GL_IMAGE_MIN_FILTER_HP"/>
+        <enum value="0x815E" name="GL_IMAGE_CUBIC_WEIGHT_HP"/>
+        <enum value="0x815F" name="GL_CUBIC_HP"/>
+        <enum value="0x8160" name="GL_AVERAGE_HP"/>
+        <enum value="0x8161" name="GL_IMAGE_TRANSFORM_2D_HP"/>
+        <enum value="0x8162" name="GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP"/>
+        <enum value="0x8163" name="GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP"/>
+            <unused start="0x8164" vendor="HP"/>
+        <enum value="0x8165" name="GL_OCCLUSION_TEST_HP"/>
+        <enum value="0x8166" name="GL_OCCLUSION_TEST_RESULT_HP"/>
+        <enum value="0x8167" name="GL_TEXTURE_LIGHTING_MODE_HP"/>
+        <enum value="0x8168" name="GL_TEXTURE_POST_SPECULAR_HP"/>
+        <enum value="0x8169" name="GL_TEXTURE_PRE_SPECULAR_HP"/>
+            <unused start="0x816A" end="0x816F" vendor="HP"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8170" end="0x81CF" vendor="SGI">
+        <enum value="0x8170" name="GL_LINEAR_CLIPMAP_LINEAR_SGIX"/>
+        <enum value="0x8171" name="GL_TEXTURE_CLIPMAP_CENTER_SGIX"/>
+        <enum value="0x8172" name="GL_TEXTURE_CLIPMAP_FRAME_SGIX"/>
+        <enum value="0x8173" name="GL_TEXTURE_CLIPMAP_OFFSET_SGIX"/>
+        <enum value="0x8174" name="GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+        <enum value="0x8175" name="GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX"/>
+        <enum value="0x8176" name="GL_TEXTURE_CLIPMAP_DEPTH_SGIX"/>
+        <enum value="0x8177" name="GL_MAX_CLIPMAP_DEPTH_SGIX"/>
+        <enum value="0x8178" name="GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+        <enum value="0x8179" name="GL_POST_TEXTURE_FILTER_BIAS_SGIX"/>
+        <enum value="0x817A" name="GL_POST_TEXTURE_FILTER_SCALE_SGIX"/>
+        <enum value="0x817B" name="GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX"/>
+        <enum value="0x817C" name="GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX"/>
+        <enum value="0x817D" name="GL_REFERENCE_PLANE_SGIX"/>
+        <enum value="0x817E" name="GL_REFERENCE_PLANE_EQUATION_SGIX"/>
+        <enum value="0x817F" name="GL_IR_INSTRUMENT1_SGIX"/>
+        <enum value="0x8180" name="GL_INSTRUMENT_BUFFER_POINTER_SGIX"/>
+        <enum value="0x8181" name="GL_INSTRUMENT_MEASUREMENTS_SGIX"/>
+        <enum value="0x8182" name="GL_LIST_PRIORITY_SGIX"/>
+        <enum value="0x8183" name="GL_CALLIGRAPHIC_FRAGMENT_SGIX"/>
+        <enum value="0x8184" name="GL_PIXEL_TEX_GEN_Q_CEILING_SGIX"/>
+        <enum value="0x8185" name="GL_PIXEL_TEX_GEN_Q_ROUND_SGIX"/>
+        <enum value="0x8186" name="GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX"/>
+        <enum value="0x8187" name="GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX"/>
+        <enum value="0x8188" name="GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX"/>
+        <enum value="0x8189" name="GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX"/>
+        <enum value="0x818A" name="GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX"/>
+        <enum value="0x818B" name="GL_FRAMEZOOM_SGIX"/>
+        <enum value="0x818C" name="GL_FRAMEZOOM_FACTOR_SGIX"/>
+        <enum value="0x818D" name="GL_MAX_FRAMEZOOM_FACTOR_SGIX"/>
+        <enum value="0x818E" name="GL_TEXTURE_LOD_BIAS_S_SGIX"/>
+        <enum value="0x818F" name="GL_TEXTURE_LOD_BIAS_T_SGIX"/>
+        <enum value="0x8190" name="GL_TEXTURE_LOD_BIAS_R_SGIX"/>
+        <enum value="0x8191" name="GL_GENERATE_MIPMAP"/>
+        <enum value="0x8191" name="GL_GENERATE_MIPMAP_SGIS"/>
+        <enum value="0x8192" name="GL_GENERATE_MIPMAP_HINT"/>
+        <enum value="0x8192" name="GL_GENERATE_MIPMAP_HINT_SGIS"/>
+            <unused start="0x8193" end="0x8193" comment="Incomplete extension SGIX_spotlight_cutoff"/>
+            <!-- <enum value="0x8193" name="GL_SPOT_CUTOFF_DELTA_SGIX"/> -->
+        <enum value="0x8194" name="GL_GEOMETRY_DEFORMATION_SGIX"/>
+        <enum value="0x8195" name="GL_TEXTURE_DEFORMATION_SGIX"/>
+        <enum value="0x8196" name="GL_DEFORMATIONS_MASK_SGIX"/>
+        <enum value="0x8197" name="GL_MAX_DEFORMATION_ORDER_SGIX"/>
+        <enum value="0x8198" name="GL_FOG_OFFSET_SGIX"/>
+        <enum value="0x8199" name="GL_FOG_OFFSET_VALUE_SGIX"/>
+        <enum value="0x819A" name="GL_TEXTURE_COMPARE_SGIX"/>
+        <enum value="0x819B" name="GL_TEXTURE_COMPARE_OPERATOR_SGIX"/>
+        <enum value="0x819C" name="GL_TEXTURE_LEQUAL_R_SGIX"/>
+        <enum value="0x819D" name="GL_TEXTURE_GEQUAL_R_SGIX"/>
+            <unused start="0x819E" end="0x81A4" comment="Private (internal) extension SGIX_igloo_interface"/>
+            <!-- <enum value="0x819E" name="GL_IGLOO_FULLSCREEN_SGIX"/> -->
+            <!-- <enum value="0x819F" name="GL_IGLOO_VIEWPORT_OFFSET_SGIX"/> -->
+            <!-- <enum value="0x81A0" name="GL_IGLOO_SWAPTMESH_SGIX"/> -->
+            <!-- <enum value="0x81A1" name="GL_IGLOO_COLORNORMAL_SGIX"/> -->
+            <!-- <enum value="0x81A2" name="GL_IGLOO_IRISGL_MODE_SGIX"/> -->
+            <!-- <enum value="0x81A3" name="GL_IGLOO_LMC_COLOR_SGIX"/> -->
+            <!-- <enum value="0x81A4" name="GL_IGLOO_TMESHMODE_SGIX"/> -->
+        <enum value="0x81A5" name="GL_DEPTH_COMPONENT16"/>
+        <enum value="0x81A5" name="GL_DEPTH_COMPONENT16_ARB"/>
+        <enum value="0x81A5" name="GL_DEPTH_COMPONENT16_OES"/>
+        <enum value="0x81A5" name="GL_DEPTH_COMPONENT16_SGIX"/>
+        <enum value="0x81A6" name="GL_DEPTH_COMPONENT24"/>
+        <enum value="0x81A6" name="GL_DEPTH_COMPONENT24_ARB"/>
+        <enum value="0x81A6" name="GL_DEPTH_COMPONENT24_OES"/>
+        <enum value="0x81A6" name="GL_DEPTH_COMPONENT24_SGIX"/>
+        <enum value="0x81A7" name="GL_DEPTH_COMPONENT32"/>
+        <enum value="0x81A7" name="GL_DEPTH_COMPONENT32_ARB"/>
+        <enum value="0x81A7" name="GL_DEPTH_COMPONENT32_OES"/>
+        <enum value="0x81A7" name="GL_DEPTH_COMPONENT32_SGIX"/>
+        <enum value="0x81A8" name="GL_ARRAY_ELEMENT_LOCK_FIRST_EXT"/>
+        <enum value="0x81A9" name="GL_ARRAY_ELEMENT_LOCK_COUNT_EXT"/>
+        <enum value="0x81AA" name="GL_CULL_VERTEX_EXT"/>
+        <enum value="0x81AB" name="GL_CULL_VERTEX_EYE_POSITION_EXT"/>
+        <enum value="0x81AC" name="GL_CULL_VERTEX_OBJECT_POSITION_EXT"/>
+        <enum value="0x81AD" name="GL_IUI_V2F_EXT"/>
+        <enum value="0x81AE" name="GL_IUI_V3F_EXT"/>
+        <enum value="0x81AF" name="GL_IUI_N3F_V2F_EXT"/>
+        <enum value="0x81B0" name="GL_IUI_N3F_V3F_EXT"/>
+        <enum value="0x81B1" name="GL_T2F_IUI_V2F_EXT"/>
+        <enum value="0x81B2" name="GL_T2F_IUI_V3F_EXT"/>
+        <enum value="0x81B3" name="GL_T2F_IUI_N3F_V2F_EXT"/>
+        <enum value="0x81B4" name="GL_T2F_IUI_N3F_V3F_EXT"/>
+        <enum value="0x81B5" name="GL_INDEX_TEST_EXT"/>
+        <enum value="0x81B6" name="GL_INDEX_TEST_FUNC_EXT"/>
+        <enum value="0x81B7" name="GL_INDEX_TEST_REF_EXT"/>
+        <enum value="0x81B8" name="GL_INDEX_MATERIAL_EXT"/>
+        <enum value="0x81B9" name="GL_INDEX_MATERIAL_PARAMETER_EXT"/>
+        <enum value="0x81BA" name="GL_INDEX_MATERIAL_FACE_EXT"/>
+        <enum value="0x81BB" name="GL_YCRCB_422_SGIX"/>
+        <enum value="0x81BC" name="GL_YCRCB_444_SGIX"/>
+            <unused start="0x81BD" end="0x81C3" comment="Incomplete extension SGI_complex_type"/>
+            <!-- <enum value="0x81BD" name="GL_COMPLEX_UNSIGNED_BYTE_SGI"/> -->
+            <!-- <enum value="0x81BE" name="GL_COMPLEX_BYTE_SGI"/> -->
+            <!-- <enum value="0x81BF" name="GL_COMPLEX_UNSIGNED_SHORT_SGI"/> -->
+            <!-- <enum value="0x81C0" name="GL_COMPLEX_SHORT_SGI"/> -->
+            <!-- <enum value="0x81C1" name="GL_COMPLEX_UNSIGNED_INT_SGI"/> -->
+            <!-- <enum value="0x81C2" name="GL_COMPLEX_INT_SGI"/> -->
+            <!-- <enum value="0x81C3" name="GL_COMPLEX_FLOAT_SGI"/> -->
+            <unused start="0x81C4" end="0x81CA" comment="Incomplete extension SGI_fft"/>
+            <!-- <enum value="0x81C4" name="GL_PIXEL_TRANSFORM_OPERATOR_SGI"/> -->
+            <!-- <enum value="0x81C5" name="GL_CONVOLUTION_SGI"/> -->
+            <!-- <enum value="0x81C6" name="GL_FFT_1D_SGI"/> -->
+            <!-- <enum value="0x81C7" name="GL_PIXEL_TRANSFORM_SGI"/> -->
+            <!-- <enum value="0x81C8" name="GL_MAX_FFT_WIDTH_SGI"/> -->
+            <!-- <enum value="0x81C9" name="GL_SORT_SGI"/> -->
+            <!-- <enum value="0x81CA" name="GL_TRANSPOSE_SGI"/> -->
+            <unused start="0x81CB" end="0x81CF" comment="Incomplete extension SGIX_nurbs_eval"/>
+            <!-- <enum value="0x81CB" name="GL_MAP1_VERTEX_3_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81CC" name="GL_MAP1_VERTEX_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81CD" name="GL_MAP1_INDEX_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81CE" name="GL_MAP1_COLOR_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81CF" name="GL_MAP1_NORMAL_NURBS_SGIX"/> -->
+    </enums>
+
+    <enums namespace="GL" start="0x81D0" end="0x81DF" vendor="SUN">
+            <unused start="0x81D0" end="0x81D1" vendor="SUN"/>
+            <unused start="0x81D2" end="0x81D3" comment="No extension spec SUNX_surface_hint"/>
+            <!-- <enum value="0x81D2" name="GL_SURFACE_SIZE_HINT_SUNX"/> -->
+            <!-- <enum value="0x81D3" name="GL_LARGE_SUNX"/> -->
+        <enum value="0x81D4" name="GL_WRAP_BORDER_SUN"/>
+        <enum value="0x81D5" name="GL_UNPACK_CONSTANT_DATA_SUNX"/>
+        <enum value="0x81D6" name="GL_TEXTURE_CONSTANT_DATA_SUNX"/>
+        <enum value="0x81D7" name="GL_TRIANGLE_LIST_SUN"/>
+        <enum value="0x81D8" name="GL_REPLACEMENT_CODE_SUN"/>
+        <enum value="0x81D9" name="GL_GLOBAL_ALPHA_SUN"/>
+        <enum value="0x81DA" name="GL_GLOBAL_ALPHA_FACTOR_SUN"/>
+            <unused start="0x81DB" end="0x81DF" vendor="SUN"/>
+    </enums>
+
+    <enums namespace="GL" start="0x81E0" end="0x81FF" vendor="SGI">
+            <unused start="0x81E0" end="0x81EE" comment="Incomplete extension SGIX_nurbs_eval"/>
+            <!-- <enum value="0x81E0" name="GL_MAP1_TEXTURE_COORD_1_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E1" name="GL_MAP1_TEXTURE_COORD_2_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E2" name="GL_MAP1_TEXTURE_COORD_3_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E3" name="GL_MAP1_TEXTURE_COORD_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E4" name="GL_MAP2_VERTEX_3_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E5" name="GL_MAP2_VERTEX_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E6" name="GL_MAP2_INDEX_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E7" name="GL_MAP2_COLOR_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E8" name="GL_MAP2_NORMAL_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81E9" name="GL_MAP2_TEXTURE_COORD_1_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81EA" name="GL_MAP2_TEXTURE_COORD_2_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81EB" name="GL_MAP2_TEXTURE_COORD_3_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81EC" name="GL_MAP2_TEXTURE_COORD_4_NURBS_SGIX"/> -->
+            <!-- <enum value="0x81ED" name="GL_NURBS_KNOT_COUNT_SGIX"/> -->
+            <!-- <enum value="0x81EE" name="GL_NURBS_KNOT_VECTOR_SGIX"/> -->
+        <enum value="0x81EF" name="GL_TEXTURE_COLOR_WRITEMASK_SGIS"/>
+        <enum value="0x81F0" name="GL_EYE_DISTANCE_TO_POINT_SGIS"/>
+        <enum value="0x81F1" name="GL_OBJECT_DISTANCE_TO_POINT_SGIS"/>
+        <enum value="0x81F2" name="GL_EYE_DISTANCE_TO_LINE_SGIS"/>
+        <enum value="0x81F3" name="GL_OBJECT_DISTANCE_TO_LINE_SGIS"/>
+        <enum value="0x81F4" name="GL_EYE_POINT_SGIS"/>
+        <enum value="0x81F5" name="GL_OBJECT_POINT_SGIS"/>
+        <enum value="0x81F6" name="GL_EYE_LINE_SGIS"/>
+        <enum value="0x81F7" name="GL_OBJECT_LINE_SGIS"/>
+        <enum value="0x81F8" name="GL_LIGHT_MODEL_COLOR_CONTROL"/>
+        <enum value="0x81F8" name="GL_LIGHT_MODEL_COLOR_CONTROL_EXT"/>
+        <enum value="0x81F9" name="GL_SINGLE_COLOR"/>
+        <enum value="0x81F9" name="GL_SINGLE_COLOR_EXT"/>
+        <enum value="0x81FA" name="GL_SEPARATE_SPECULAR_COLOR"/>
+        <enum value="0x81FA" name="GL_SEPARATE_SPECULAR_COLOR_EXT"/>
+        <enum value="0x81FB" name="GL_SHARED_TEXTURE_PALETTE_EXT"/>
+            <unused start="0x81FC" end="0x81FD" comment="Incomplete extension SGIX_fog_scale"/>
+            <!-- <enum value="0x81FC" name="GL_FOG_SCALE_SGIX"/> -->
+            <!-- <enum value="0x81FD" name="GL_FOG_SCALE_VALUE_SGIX"/> -->
+            <unused start="0x81FE" end="0x81FF" comment="Incomplete extension SGIX_fog_blend"/>
+            <!-- <enum value="0x81FE" name="GL_FOG_BLEND_ALPHA_SGIX"/> -->
+            <!-- <enum value="0x81FF" name="GL_FOG_BLEND_COLOR_SGIX"/> -->
+    </enums>
+
+    <enums namespace="GL" start="0x8200" end="0x820F" vendor="AMD" comment="Range released by MS 2002/9/16">
+        <enum value="0x8200" name="GL_TEXT_FRAGMENT_SHADER_ATI"/>
+            <unused start="0x8201" end="0x820F" vendor="AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8210" end="0x823F" vendor="ARB">
+        <enum value="0x8210" name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"/>
+        <enum value="0x8210" name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT"/>
+        <enum value="0x8211" name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"/>
+        <enum value="0x8211" name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT"/>
+        <enum value="0x8212" name="GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"/>
+        <enum value="0x8213" name="GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"/>
+        <enum value="0x8214" name="GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"/>
+        <enum value="0x8215" name="GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"/>
+        <enum value="0x8216" name="GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"/>
+        <enum value="0x8217" name="GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"/>
+        <enum value="0x8218" name="GL_FRAMEBUFFER_DEFAULT"/>
+        <enum value="0x8219" name="GL_FRAMEBUFFER_UNDEFINED"/>
+        <enum value="0x8219" name="GL_FRAMEBUFFER_UNDEFINED_OES"/>
+        <enum value="0x821A" name="GL_DEPTH_STENCIL_ATTACHMENT"/>
+        <enum value="0x821B" name="GL_MAJOR_VERSION"/>
+        <enum value="0x821C" name="GL_MINOR_VERSION"/>
+        <enum value="0x821D" name="GL_NUM_EXTENSIONS"/>
+        <enum value="0x821E" name="GL_CONTEXT_FLAGS"/>
+        <enum value="0x821F" name="GL_BUFFER_IMMUTABLE_STORAGE"/>
+        <enum value="0x821F" name="GL_BUFFER_IMMUTABLE_STORAGE_EXT"/>
+        <enum value="0x8220" name="GL_BUFFER_STORAGE_FLAGS"/>
+        <enum value="0x8220" name="GL_BUFFER_STORAGE_FLAGS_EXT"/>
+        <enum value="0x8221" name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED"/>
+        <enum value="0x8221" name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES"/>
+        <enum value="0x8222" name="GL_INDEX"/>
+            <unused start="0x8223" vendor="ARB" comment="GL_DEPTH_BUFFER = 0x8223 not actually used in the API"/>
+            <unused start="0x8224" vendor="ARB" comment="GL_STENCIL_BUFFER = 0x8224 not actually used in the API"/>
+        <enum value="0x8225" name="GL_COMPRESSED_RED"/>
+        <enum value="0x8226" name="GL_COMPRESSED_RG"/>
+        <enum value="0x8227" name="GL_RG"/>
+        <enum value="0x8227" name="GL_RG_EXT"/>
+        <enum value="0x8228" name="GL_RG_INTEGER"/>
+        <enum value="0x8229" name="GL_R8"/>
+        <enum value="0x8229" name="GL_R8_EXT"/>
+        <enum value="0x822A" name="GL_R16"/>
+        <enum value="0x822A" name="GL_R16_EXT"/>
+        <enum value="0x822B" name="GL_RG8"/>
+        <enum value="0x822B" name="GL_RG8_EXT"/>
+        <enum value="0x822C" name="GL_RG16"/>
+        <enum value="0x822C" name="GL_RG16_EXT"/>
+        <enum value="0x822D" name="GL_R16F"/>
+        <enum value="0x822D" name="GL_R16F_EXT"/>
+        <enum value="0x822E" name="GL_R32F"/>
+        <enum value="0x822E" name="GL_R32F_EXT"/>
+        <enum value="0x822F" name="GL_RG16F"/>
+        <enum value="0x822F" name="GL_RG16F_EXT"/>
+        <enum value="0x8230" name="GL_RG32F"/>
+        <enum value="0x8230" name="GL_RG32F_EXT"/>
+        <enum value="0x8231" name="GL_R8I"/>
+        <enum value="0x8232" name="GL_R8UI"/>
+        <enum value="0x8233" name="GL_R16I"/>
+        <enum value="0x8234" name="GL_R16UI"/>
+        <enum value="0x8235" name="GL_R32I"/>
+        <enum value="0x8236" name="GL_R32UI"/>
+        <enum value="0x8237" name="GL_RG8I"/>
+        <enum value="0x8238" name="GL_RG8UI"/>
+        <enum value="0x8239" name="GL_RG16I"/>
+        <enum value="0x823A" name="GL_RG16UI"/>
+        <enum value="0x823B" name="GL_RG32I"/>
+        <enum value="0x823C" name="GL_RG32UI"/>
+            <unused start="0x823D" end="0x823F" vendor="ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8240" end="0x82AF" vendor="ARB" comment="Range released by MS on 2002/9/16">
+        <enum value="0x8240" name="GL_SYNC_CL_EVENT_ARB"/>
+        <enum value="0x8241" name="GL_SYNC_CL_EVENT_COMPLETE_ARB"/>
+        <enum value="0x8242" name="GL_DEBUG_OUTPUT_SYNCHRONOUS"/>
+        <enum value="0x8242" name="GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB"/>
+        <enum value="0x8242" name="GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR"/>
+        <enum value="0x8243" name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH"/>
+        <enum value="0x8243" name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_ARB"/>
+        <enum value="0x8243" name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR"/>
+        <enum value="0x8244" name="GL_DEBUG_CALLBACK_FUNCTION"/>
+        <enum value="0x8244" name="GL_DEBUG_CALLBACK_FUNCTION_ARB"/>
+        <enum value="0x8244" name="GL_DEBUG_CALLBACK_FUNCTION_KHR"/>
+        <enum value="0x8245" name="GL_DEBUG_CALLBACK_USER_PARAM"/>
+        <enum value="0x8245" name="GL_DEBUG_CALLBACK_USER_PARAM_ARB"/>
+        <enum value="0x8245" name="GL_DEBUG_CALLBACK_USER_PARAM_KHR"/>
+        <enum value="0x8246" name="GL_DEBUG_SOURCE_API"/>
+        <enum value="0x8246" name="GL_DEBUG_SOURCE_API_ARB"/>
+        <enum value="0x8246" name="GL_DEBUG_SOURCE_API_KHR"/>
+        <enum value="0x8247" name="GL_DEBUG_SOURCE_WINDOW_SYSTEM"/>
+        <enum value="0x8247" name="GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB"/>
+        <enum value="0x8247" name="GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR"/>
+        <enum value="0x8248" name="GL_DEBUG_SOURCE_SHADER_COMPILER"/>
+        <enum value="0x8248" name="GL_DEBUG_SOURCE_SHADER_COMPILER_ARB"/>
+        <enum value="0x8248" name="GL_DEBUG_SOURCE_SHADER_COMPILER_KHR"/>
+        <enum value="0x8249" name="GL_DEBUG_SOURCE_THIRD_PARTY"/>
+        <enum value="0x8249" name="GL_DEBUG_SOURCE_THIRD_PARTY_ARB"/>
+        <enum value="0x8249" name="GL_DEBUG_SOURCE_THIRD_PARTY_KHR"/>
+        <enum value="0x824A" name="GL_DEBUG_SOURCE_APPLICATION"/>
+        <enum value="0x824A" name="GL_DEBUG_SOURCE_APPLICATION_ARB"/>
+        <enum value="0x824A" name="GL_DEBUG_SOURCE_APPLICATION_KHR"/>
+        <enum value="0x824B" name="GL_DEBUG_SOURCE_OTHER"/>
+        <enum value="0x824B" name="GL_DEBUG_SOURCE_OTHER_ARB"/>
+        <enum value="0x824B" name="GL_DEBUG_SOURCE_OTHER_KHR"/>
+        <enum value="0x824C" name="GL_DEBUG_TYPE_ERROR"/>
+        <enum value="0x824C" name="GL_DEBUG_TYPE_ERROR_ARB"/>
+        <enum value="0x824C" name="GL_DEBUG_TYPE_ERROR_KHR"/>
+        <enum value="0x824D" name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR"/>
+        <enum value="0x824D" name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB"/>
+        <enum value="0x824D" name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR"/>
+        <enum value="0x824E" name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR"/>
+        <enum value="0x824E" name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB"/>
+        <enum value="0x824E" name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR"/>
+        <enum value="0x824F" name="GL_DEBUG_TYPE_PORTABILITY"/>
+        <enum value="0x824F" name="GL_DEBUG_TYPE_PORTABILITY_ARB"/>
+        <enum value="0x824F" name="GL_DEBUG_TYPE_PORTABILITY_KHR"/>
+        <enum value="0x8250" name="GL_DEBUG_TYPE_PERFORMANCE"/>
+        <enum value="0x8250" name="GL_DEBUG_TYPE_PERFORMANCE_ARB"/>
+        <enum value="0x8250" name="GL_DEBUG_TYPE_PERFORMANCE_KHR"/>
+        <enum value="0x8251" name="GL_DEBUG_TYPE_OTHER"/>
+        <enum value="0x8251" name="GL_DEBUG_TYPE_OTHER_ARB"/>
+        <enum value="0x8251" name="GL_DEBUG_TYPE_OTHER_KHR"/>
+        <enum value="0x8252" name="GL_LOSE_CONTEXT_ON_RESET"/>
+        <enum value="0x8252" name="GL_LOSE_CONTEXT_ON_RESET_ARB"/>
+        <enum value="0x8252" name="GL_LOSE_CONTEXT_ON_RESET_EXT"/>
+        <enum value="0x8252" name="GL_LOSE_CONTEXT_ON_RESET_KHR"/>
+        <enum value="0x8253" name="GL_GUILTY_CONTEXT_RESET"/>
+        <enum value="0x8253" name="GL_GUILTY_CONTEXT_RESET_ARB"/>
+        <enum value="0x8253" name="GL_GUILTY_CONTEXT_RESET_EXT"/>
+        <enum value="0x8253" name="GL_GUILTY_CONTEXT_RESET_KHR"/>
+        <enum value="0x8254" name="GL_INNOCENT_CONTEXT_RESET"/>
+        <enum value="0x8254" name="GL_INNOCENT_CONTEXT_RESET_ARB"/>
+        <enum value="0x8254" name="GL_INNOCENT_CONTEXT_RESET_EXT"/>
+        <enum value="0x8254" name="GL_INNOCENT_CONTEXT_RESET_KHR"/>
+        <enum value="0x8255" name="GL_UNKNOWN_CONTEXT_RESET"/>
+        <enum value="0x8255" name="GL_UNKNOWN_CONTEXT_RESET_ARB"/>
+        <enum value="0x8255" name="GL_UNKNOWN_CONTEXT_RESET_EXT"/>
+        <enum value="0x8255" name="GL_UNKNOWN_CONTEXT_RESET_KHR"/>
+        <enum value="0x8256" name="GL_RESET_NOTIFICATION_STRATEGY"/>
+        <enum value="0x8256" name="GL_RESET_NOTIFICATION_STRATEGY_ARB"/>
+        <enum value="0x8256" name="GL_RESET_NOTIFICATION_STRATEGY_EXT"/>
+        <enum value="0x8256" name="GL_RESET_NOTIFICATION_STRATEGY_KHR"/>
+        <enum value="0x8257" name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+        <enum value="0x8258" name="GL_PROGRAM_SEPARABLE"/>
+        <enum value="0x8258" name="GL_PROGRAM_SEPARABLE_EXT"/>
+        <enum value="0x8259" name="GL_ACTIVE_PROGRAM"/>
+        <enum value="0x8259" api="gles2" name="GL_ACTIVE_PROGRAM_EXT" comment="For the OpenGL ES version of EXT_separate_shader_objects"/>
+        <enum value="0x825A" name="GL_PROGRAM_PIPELINE_BINDING"/>
+        <enum value="0x825A" name="GL_PROGRAM_PIPELINE_BINDING_EXT"/>
+        <enum value="0x825B" name="GL_MAX_VIEWPORTS"/>
+        <enum value="0x825B" name="GL_MAX_VIEWPORTS_NV"/>
+        <enum value="0x825B" name="GL_MAX_VIEWPORTS_OES"/>
+        <enum value="0x825C" name="GL_VIEWPORT_SUBPIXEL_BITS"/>
+        <enum value="0x825C" name="GL_VIEWPORT_SUBPIXEL_BITS_EXT"/>
+        <enum value="0x825C" name="GL_VIEWPORT_SUBPIXEL_BITS_NV"/>
+        <enum value="0x825C" name="GL_VIEWPORT_SUBPIXEL_BITS_OES"/>
+        <enum value="0x825D" name="GL_VIEWPORT_BOUNDS_RANGE"/>
+        <enum value="0x825D" name="GL_VIEWPORT_BOUNDS_RANGE_EXT"/>
+        <enum value="0x825D" name="GL_VIEWPORT_BOUNDS_RANGE_NV"/>
+        <enum value="0x825D" name="GL_VIEWPORT_BOUNDS_RANGE_OES"/>
+        <enum value="0x825E" name="GL_LAYER_PROVOKING_VERTEX"/>
+        <enum value="0x825E" name="GL_LAYER_PROVOKING_VERTEX_EXT"/>
+        <enum value="0x825E" name="GL_LAYER_PROVOKING_VERTEX_OES"/>
+        <enum value="0x825F" name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX"/>
+        <enum value="0x825F" name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX_EXT"/>
+        <enum value="0x825F" name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV"/>
+        <enum value="0x825F" name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX_OES"/>
+        <enum value="0x8260" name="GL_UNDEFINED_VERTEX"/>
+        <enum value="0x8260" name="GL_UNDEFINED_VERTEX_EXT"/>
+        <enum value="0x8260" name="GL_UNDEFINED_VERTEX_OES"/>
+        <enum value="0x8261" name="GL_NO_RESET_NOTIFICATION"/>
+        <enum value="0x8261" name="GL_NO_RESET_NOTIFICATION_ARB"/>
+        <enum value="0x8261" name="GL_NO_RESET_NOTIFICATION_EXT"/>
+        <enum value="0x8261" name="GL_NO_RESET_NOTIFICATION_KHR"/>
+        <enum value="0x8262" name="GL_MAX_COMPUTE_SHARED_MEMORY_SIZE"/>
+        <enum value="0x8263" name="GL_MAX_COMPUTE_UNIFORM_COMPONENTS"/>
+        <enum value="0x8264" name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x8265" name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+        <enum value="0x8266" name="GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS"/>
+        <enum value="0x8267" name="GL_COMPUTE_WORK_GROUP_SIZE"/>
+        <enum value="0x8268" name="GL_DEBUG_TYPE_MARKER"/>
+        <enum value="0x8268" name="GL_DEBUG_TYPE_MARKER_KHR"/>
+        <enum value="0x8269" name="GL_DEBUG_TYPE_PUSH_GROUP"/>
+        <enum value="0x8269" name="GL_DEBUG_TYPE_PUSH_GROUP_KHR"/>
+        <enum value="0x826A" name="GL_DEBUG_TYPE_POP_GROUP"/>
+        <enum value="0x826A" name="GL_DEBUG_TYPE_POP_GROUP_KHR"/>
+        <enum value="0x826B" name="GL_DEBUG_SEVERITY_NOTIFICATION"/>
+        <enum value="0x826B" name="GL_DEBUG_SEVERITY_NOTIFICATION_KHR"/>
+        <enum value="0x826C" name="GL_MAX_DEBUG_GROUP_STACK_DEPTH"/>
+        <enum value="0x826C" name="GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR"/>
+        <enum value="0x826D" name="GL_DEBUG_GROUP_STACK_DEPTH"/>
+        <enum value="0x826D" name="GL_DEBUG_GROUP_STACK_DEPTH_KHR"/>
+        <enum value="0x826E" name="GL_MAX_UNIFORM_LOCATIONS"/>
+        <enum value="0x826F" name="GL_INTERNALFORMAT_SUPPORTED"/>
+        <enum value="0x8270" name="GL_INTERNALFORMAT_PREFERRED"/>
+        <enum value="0x8271" name="GL_INTERNALFORMAT_RED_SIZE"/>
+        <enum value="0x8272" name="GL_INTERNALFORMAT_GREEN_SIZE"/>
+        <enum value="0x8273" name="GL_INTERNALFORMAT_BLUE_SIZE"/>
+        <enum value="0x8274" name="GL_INTERNALFORMAT_ALPHA_SIZE"/>
+        <enum value="0x8275" name="GL_INTERNALFORMAT_DEPTH_SIZE"/>
+        <enum value="0x8276" name="GL_INTERNALFORMAT_STENCIL_SIZE"/>
+        <enum value="0x8277" name="GL_INTERNALFORMAT_SHARED_SIZE"/>
+        <enum value="0x8278" name="GL_INTERNALFORMAT_RED_TYPE"/>
+        <enum value="0x8279" name="GL_INTERNALFORMAT_GREEN_TYPE"/>
+        <enum value="0x827A" name="GL_INTERNALFORMAT_BLUE_TYPE"/>
+        <enum value="0x827B" name="GL_INTERNALFORMAT_ALPHA_TYPE"/>
+        <enum value="0x827C" name="GL_INTERNALFORMAT_DEPTH_TYPE"/>
+        <enum value="0x827D" name="GL_INTERNALFORMAT_STENCIL_TYPE"/>
+        <enum value="0x827E" name="GL_MAX_WIDTH"/>
+        <enum value="0x827F" name="GL_MAX_HEIGHT"/>
+        <enum value="0x8280" name="GL_MAX_DEPTH"/>
+        <enum value="0x8281" name="GL_MAX_LAYERS"/>
+        <enum value="0x8282" name="GL_MAX_COMBINED_DIMENSIONS"/>
+        <enum value="0x8283" name="GL_COLOR_COMPONENTS"/>
+        <enum value="0x8284" name="GL_DEPTH_COMPONENTS"/>
+        <enum value="0x8285" name="GL_STENCIL_COMPONENTS"/>
+        <enum value="0x8286" name="GL_COLOR_RENDERABLE"/>
+        <enum value="0x8287" name="GL_DEPTH_RENDERABLE"/>
+        <enum value="0x8288" name="GL_STENCIL_RENDERABLE"/>
+        <enum value="0x8289" name="GL_FRAMEBUFFER_RENDERABLE"/>
+        <enum value="0x828A" name="GL_FRAMEBUFFER_RENDERABLE_LAYERED"/>
+        <enum value="0x828B" name="GL_FRAMEBUFFER_BLEND"/>
+        <enum value="0x828C" name="GL_READ_PIXELS"/>
+        <enum value="0x828D" name="GL_READ_PIXELS_FORMAT"/>
+        <enum value="0x828E" name="GL_READ_PIXELS_TYPE"/>
+        <enum value="0x828F" name="GL_TEXTURE_IMAGE_FORMAT"/>
+        <enum value="0x8290" name="GL_TEXTURE_IMAGE_TYPE"/>
+        <enum value="0x8291" name="GL_GET_TEXTURE_IMAGE_FORMAT"/>
+        <enum value="0x8292" name="GL_GET_TEXTURE_IMAGE_TYPE"/>
+        <enum value="0x8293" name="GL_MIPMAP"/>
+        <enum value="0x8294" name="GL_MANUAL_GENERATE_MIPMAP"/>
+        <enum value="0x8295" name="GL_AUTO_GENERATE_MIPMAP" comment="Should be deprecated"/>
+        <enum value="0x8296" name="GL_COLOR_ENCODING"/>
+        <enum value="0x8297" name="GL_SRGB_READ"/>
+        <enum value="0x8298" name="GL_SRGB_WRITE"/>
+        <enum value="0x8299" name="GL_SRGB_DECODE_ARB"/>
+        <enum value="0x829A" name="GL_FILTER"/>
+        <enum value="0x829B" name="GL_VERTEX_TEXTURE"/>
+        <enum value="0x829C" name="GL_TESS_CONTROL_TEXTURE"/>
+        <enum value="0x829D" name="GL_TESS_EVALUATION_TEXTURE"/>
+        <enum value="0x829E" name="GL_GEOMETRY_TEXTURE"/>
+        <enum value="0x829F" name="GL_FRAGMENT_TEXTURE"/>
+        <enum value="0x82A0" name="GL_COMPUTE_TEXTURE"/>
+        <enum value="0x82A1" name="GL_TEXTURE_SHADOW"/>
+        <enum value="0x82A2" name="GL_TEXTURE_GATHER"/>
+        <enum value="0x82A3" name="GL_TEXTURE_GATHER_SHADOW"/>
+        <enum value="0x82A4" name="GL_SHADER_IMAGE_LOAD"/>
+        <enum value="0x82A5" name="GL_SHADER_IMAGE_STORE"/>
+        <enum value="0x82A6" name="GL_SHADER_IMAGE_ATOMIC"/>
+        <enum value="0x82A7" name="GL_IMAGE_TEXEL_SIZE"/>
+        <enum value="0x82A8" name="GL_IMAGE_COMPATIBILITY_CLASS"/>
+        <enum value="0x82A9" name="GL_IMAGE_PIXEL_FORMAT"/>
+        <enum value="0x82AA" name="GL_IMAGE_PIXEL_TYPE"/>
+            <unused start="0x82AB" vendor="ARB"/>
+        <enum value="0x82AC" name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST"/>
+        <enum value="0x82AD" name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST"/>
+        <enum value="0x82AE" name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE"/>
+        <enum value="0x82AF" name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x82B0" end="0x830F" vendor="ARB" comment="Range reclaimed from ADD on 2012/05/10">
+            <unused start="0x82B0" vendor="ARB"/>
+        <enum value="0x82B1" name="GL_TEXTURE_COMPRESSED_BLOCK_WIDTH"/>
+        <enum value="0x82B2" name="GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT"/>
+        <enum value="0x82B3" name="GL_TEXTURE_COMPRESSED_BLOCK_SIZE"/>
+        <enum value="0x82B4" name="GL_CLEAR_BUFFER"/>
+        <enum value="0x82B5" name="GL_TEXTURE_VIEW"/>
+        <enum value="0x82B6" name="GL_VIEW_COMPATIBILITY_CLASS"/>
+        <enum value="0x82B7" name="GL_FULL_SUPPORT"/>
+        <enum value="0x82B8" name="GL_CAVEAT_SUPPORT"/>
+        <enum value="0x82B9" name="GL_IMAGE_CLASS_4_X_32"/>
+        <enum value="0x82BA" name="GL_IMAGE_CLASS_2_X_32"/>
+        <enum value="0x82BB" name="GL_IMAGE_CLASS_1_X_32"/>
+        <enum value="0x82BC" name="GL_IMAGE_CLASS_4_X_16"/>
+        <enum value="0x82BD" name="GL_IMAGE_CLASS_2_X_16"/>
+        <enum value="0x82BE" name="GL_IMAGE_CLASS_1_X_16"/>
+        <enum value="0x82BF" name="GL_IMAGE_CLASS_4_X_8"/>
+        <enum value="0x82C0" name="GL_IMAGE_CLASS_2_X_8"/>
+        <enum value="0x82C1" name="GL_IMAGE_CLASS_1_X_8"/>
+        <enum value="0x82C2" name="GL_IMAGE_CLASS_11_11_10"/>
+        <enum value="0x82C3" name="GL_IMAGE_CLASS_10_10_10_2"/>
+        <enum value="0x82C4" name="GL_VIEW_CLASS_128_BITS"/>
+        <enum value="0x82C5" name="GL_VIEW_CLASS_96_BITS"/>
+        <enum value="0x82C6" name="GL_VIEW_CLASS_64_BITS"/>
+        <enum value="0x82C7" name="GL_VIEW_CLASS_48_BITS"/>
+        <enum value="0x82C8" name="GL_VIEW_CLASS_32_BITS"/>
+        <enum value="0x82C9" name="GL_VIEW_CLASS_24_BITS"/>
+        <enum value="0x82CA" name="GL_VIEW_CLASS_16_BITS"/>
+        <enum value="0x82CB" name="GL_VIEW_CLASS_8_BITS"/>
+        <enum value="0x82CC" name="GL_VIEW_CLASS_S3TC_DXT1_RGB"/>
+        <enum value="0x82CD" name="GL_VIEW_CLASS_S3TC_DXT1_RGBA"/>
+        <enum value="0x82CE" name="GL_VIEW_CLASS_S3TC_DXT3_RGBA"/>
+        <enum value="0x82CF" name="GL_VIEW_CLASS_S3TC_DXT5_RGBA"/>
+        <enum value="0x82D0" name="GL_VIEW_CLASS_RGTC1_RED"/>
+        <enum value="0x82D1" name="GL_VIEW_CLASS_RGTC2_RG"/>
+        <enum value="0x82D2" name="GL_VIEW_CLASS_BPTC_UNORM"/>
+        <enum value="0x82D3" name="GL_VIEW_CLASS_BPTC_FLOAT"/>
+        <enum value="0x82D4" name="GL_VERTEX_ATTRIB_BINDING"/>
+        <enum value="0x82D5" name="GL_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+        <enum value="0x82D6" name="GL_VERTEX_BINDING_DIVISOR"/>
+        <enum value="0x82D7" name="GL_VERTEX_BINDING_OFFSET"/>
+        <enum value="0x82D8" name="GL_VERTEX_BINDING_STRIDE"/>
+        <enum value="0x82D9" name="GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+        <enum value="0x82DA" name="GL_MAX_VERTEX_ATTRIB_BINDINGS"/>
+        <enum value="0x82DB" name="GL_TEXTURE_VIEW_MIN_LEVEL"/>
+        <enum value="0x82DB" name="GL_TEXTURE_VIEW_MIN_LEVEL_EXT"/>
+        <enum value="0x82DB" name="GL_TEXTURE_VIEW_MIN_LEVEL_OES"/>
+        <enum value="0x82DC" name="GL_TEXTURE_VIEW_NUM_LEVELS"/>
+        <enum value="0x82DC" name="GL_TEXTURE_VIEW_NUM_LEVELS_EXT"/>
+        <enum value="0x82DC" name="GL_TEXTURE_VIEW_NUM_LEVELS_OES"/>
+        <enum value="0x82DD" name="GL_TEXTURE_VIEW_MIN_LAYER"/>
+        <enum value="0x82DD" name="GL_TEXTURE_VIEW_MIN_LAYER_EXT"/>
+        <enum value="0x82DD" name="GL_TEXTURE_VIEW_MIN_LAYER_OES"/>
+        <enum value="0x82DE" name="GL_TEXTURE_VIEW_NUM_LAYERS"/>
+        <enum value="0x82DE" name="GL_TEXTURE_VIEW_NUM_LAYERS_EXT"/>
+        <enum value="0x82DE" name="GL_TEXTURE_VIEW_NUM_LAYERS_OES"/>
+        <enum value="0x82DF" name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+        <enum value="0x82E0" name="GL_BUFFER"/>
+        <enum value="0x82E0" name="GL_BUFFER_KHR"/>
+        <enum value="0x82E1" name="GL_SHADER"/>
+        <enum value="0x82E1" name="GL_SHADER_KHR"/>
+        <enum value="0x82E2" name="GL_PROGRAM"/>
+        <enum value="0x82E2" name="GL_PROGRAM_KHR"/>
+        <enum value="0x82E3" name="GL_QUERY"/>
+        <enum value="0x82E3" name="GL_QUERY_KHR"/>
+        <enum value="0x82E4" name="GL_PROGRAM_PIPELINE"/>
+        <enum value="0x82E4" name="GL_PROGRAM_PIPELINE_KHR"/>
+        <enum value="0x82E5" name="GL_MAX_VERTEX_ATTRIB_STRIDE"/>
+        <enum value="0x82E6" name="GL_SAMPLER"/>
+        <enum value="0x82E6" name="GL_SAMPLER_KHR"/>
+        <enum value="0x82E7" name="GL_DISPLAY_LIST"/>
+        <enum value="0x82E8" name="GL_MAX_LABEL_LENGTH"/>
+        <enum value="0x82E8" name="GL_MAX_LABEL_LENGTH_KHR"/>
+        <enum value="0x82E9" name="GL_NUM_SHADING_LANGUAGE_VERSIONS"/>
+        <enum value="0x82EA" name="GL_QUERY_TARGET"/>
+        <!-- 0x82EB = GL_TEXTURE_BINDING was removed in GL 4.5 and
+             ARB_direct_state_access in February 2015 after determining it
+             was not well defined or implementable. -->
+            <unused start="0x82EB" vendor="ARB" comment="Reserved. Formerly used for GL_TEXTURE_BINDING."/>
+        <enum value="0x82EC" name="GL_TRANSFORM_FEEDBACK_OVERFLOW"/>
+        <enum value="0x82EC" name="GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB" alias="GL_TRANSFORM_FEEDBACK_OVERFLOW"/>
+        <enum value="0x82ED" name="GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW"/>
+        <enum value="0x82ED" name="GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB" alias="GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW"/>
+        <enum value="0x82EE" name="GL_VERTICES_SUBMITTED"/>
+        <enum value="0x82EE" name="GL_VERTICES_SUBMITTED_ARB" alias="GL_VERTICES_SUBMITTED"/>
+        <enum value="0x82EF" name="GL_PRIMITIVES_SUBMITTED"/>
+        <enum value="0x82EF" name="GL_PRIMITIVES_SUBMITTED_ARB" alias="GL_PRIMITIVES_SUBMITTED"/>
+        <enum value="0x82F0" name="GL_VERTEX_SHADER_INVOCATIONS"/>
+        <enum value="0x82F0" name="GL_VERTEX_SHADER_INVOCATIONS_ARB" alias="GL_VERTEX_SHADER_INVOCATIONS"/>
+        <enum value="0x82F1" name="GL_TESS_CONTROL_SHADER_PATCHES"/>
+        <enum value="0x82F1" name="GL_TESS_CONTROL_SHADER_PATCHES_ARB" alias="GL_TESS_CONTROL_SHADER_PATCHES"/>
+        <enum value="0x82F2" name="GL_TESS_EVALUATION_SHADER_INVOCATIONS"/>
+        <enum value="0x82F2" name="GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB" alias="GL_TESS_EVALUATION_SHADER_INVOCATIONS"/>
+        <enum value="0x82F3" name="GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED"/>
+        <enum value="0x82F3" name="GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB" alias="GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED"/>
+        <enum value="0x82F4" name="GL_FRAGMENT_SHADER_INVOCATIONS"/>
+        <enum value="0x82F4" name="GL_FRAGMENT_SHADER_INVOCATIONS_ARB" alias="GL_FRAGMENT_SHADER_INVOCATIONS"/>
+        <enum value="0x82F5" name="GL_COMPUTE_SHADER_INVOCATIONS"/>
+        <enum value="0x82F5" name="GL_COMPUTE_SHADER_INVOCATIONS_ARB" alias="GL_COMPUTE_SHADER_INVOCATIONS"/>
+        <enum value="0x82F6" name="GL_CLIPPING_INPUT_PRIMITIVES"/>
+        <enum value="0x82F6" name="GL_CLIPPING_INPUT_PRIMITIVES_ARB" alias="GL_CLIPPING_INPUT_PRIMITIVES"/>
+        <enum value="0x82F7" name="GL_CLIPPING_OUTPUT_PRIMITIVES"/>
+        <enum value="0x82F7" name="GL_CLIPPING_OUTPUT_PRIMITIVES_ARB" alias="GL_CLIPPING_OUTPUT_PRIMITIVES"/>
+        <enum value="0x82F8" name="GL_SPARSE_BUFFER_PAGE_SIZE_ARB"/>
+        <enum value="0x82F9" name="GL_MAX_CULL_DISTANCES"/>
+        <enum value="0x82F9" name="GL_MAX_CULL_DISTANCES_EXT" alias="GL_MAX_CULL_DISTANCES"/>
+        <enum value="0x82FA" name="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES"/>
+        <enum value="0x82FA" name="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES_EXT" alias="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES"/>
+        <enum value="0x82FB" name="GL_CONTEXT_RELEASE_BEHAVIOR"/>
+        <enum value="0x82FB" name="GL_CONTEXT_RELEASE_BEHAVIOR_KHR"/>
+        <enum value="0x82FC" name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH"/>
+        <enum value="0x82FC" name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR"/>
+        <enum value="0x82FD" name="GL_ROBUST_GPU_TIMEOUT_MS_KHR" comment="Reserved for future"/>
+            <unused start="0x82FE" end="0x830F" vendor="ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8310" end="0x832F" vendor="SGI">
+        <enum value="0x8310" name="GL_DEPTH_PASS_INSTRUMENT_SGIX"/>
+        <enum value="0x8311" name="GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX"/>
+        <enum value="0x8312" name="GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX"/>
+        <enum value="0x8313" name="GL_FRAGMENTS_INSTRUMENT_SGIX"/>
+        <enum value="0x8314" name="GL_FRAGMENTS_INSTRUMENT_COUNTERS_SGIX"/>
+        <enum value="0x8315" name="GL_FRAGMENTS_INSTRUMENT_MAX_SGIX"/>
+        <enum value="0x8316" name="GL_CONVOLUTION_HINT_SGIX"/>
+            <unused start="0x8317" comment="Incomplete extension SGIX_color_matrix_accuracy"/>
+            <!-- <enum value="0x8317" name="GL_COLOR_MATRIX_HINT"/> -->
+        <enum value="0x8318" name="GL_YCRCB_SGIX"/>
+        <enum value="0x8319" name="GL_YCRCBA_SGIX"/>
+        <enum value="0x831A" name="GL_UNPACK_COMPRESSED_SIZE_SGIX"/>
+        <enum value="0x831B" name="GL_PACK_MAX_COMPRESSED_SIZE_SGIX"/>
+        <enum value="0x831C" name="GL_PACK_COMPRESSED_SIZE_SGIX"/>
+        <enum value="0x831D" name="GL_SLIM8U_SGIX"/>
+        <enum value="0x831E" name="GL_SLIM10U_SGIX"/>
+        <enum value="0x831F" name="GL_SLIM12S_SGIX"/>
+        <enum value="0x8320" name="GL_ALPHA_MIN_SGIX"/>
+        <enum value="0x8321" name="GL_ALPHA_MAX_SGIX"/>
+        <enum value="0x8322" name="GL_SCALEBIAS_HINT_SGIX"/>
+            <unused start="0x8323" end="0x8328" comment="Incomplete extension SGIX_fog_layers"/>
+            <!-- <enum value="0x8323" name="GL_FOG_TYPE_SGIX"/> -->
+            <!-- <enum value="0x8324" name="GL_UNIFORM_SGIX"/> -->
+            <!-- <enum value="0x8325" name="GL_LAYERED_SGIX"/> -->
+            <!-- <enum value="0x8326" name="GL_FOG_GROUND_PLANE_SGIX"/> -->
+            <!-- <enum value="0x8327" name="GL_FOG_LAYERS_POINTS_SGIX"/> -->
+            <!-- <enum value="0x8328" name="GL_MAX_FOG_LAYERS_POINTS_SGIX"/> -->
+        <enum value="0x8329" name="GL_ASYNC_MARKER_SGIX"/>
+            <unused start="0x832A" comment="Incomplete extension SGIX_texture_phase"/>
+            <!-- <enum value="0x832A" name="GL_PHASE_SGIX"/> -->
+        <enum value="0x832B" name="GL_PIXEL_TEX_GEN_MODE_SGIX"/>
+        <enum value="0x832C" name="GL_ASYNC_HISTOGRAM_SGIX"/>
+        <enum value="0x832D" name="GL_MAX_ASYNC_HISTOGRAM_SGIX"/>
+            <unused start="0x832E" end="0x832F" comment="Incomplete extension SGIX_texture_mipmap_anisotropic"/>
+            <!-- <enum value="0x832E" name="GL_TEXTURE_MIPMAP_ANISOTROPY_SGIX"/> -->
+            <!-- <enum value="0x832F" name="GL_MAX_MIPMAP_ANISOTROPY_SGIX"/> -->
+    </enums>
+
+    <enums namespace="GL" start="0x8330" end="0x833F" vendor="SUN">
+        <enum value="0x8330" name="GL_PIXEL_TRANSFORM_2D_EXT"/>
+        <enum value="0x8331" name="GL_PIXEL_MAG_FILTER_EXT"/>
+        <enum value="0x8332" name="GL_PIXEL_MIN_FILTER_EXT"/>
+        <enum value="0x8333" name="GL_PIXEL_CUBIC_WEIGHT_EXT"/>
+        <enum value="0x8334" name="GL_CUBIC_EXT"/>
+        <enum value="0x8335" name="GL_AVERAGE_EXT"/>
+        <enum value="0x8336" name="GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT"/>
+        <enum value="0x8337" name="GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT"/>
+        <enum value="0x8338" name="GL_PIXEL_TRANSFORM_2D_MATRIX_EXT"/>
+            <unused start="0x8339" end="0x833F" vendor="SUN"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8340" end="0x836F" vendor="SGI">
+            <unused start="0x8340" end="0x8348" comment="Incomplete extension SGIX_cube_map"/>
+            <!-- <enum value="0x8340" name="GL_ENV_MAP_SGIX"/> -->
+            <!-- <enum value="0x8341" name="GL_CUBE_MAP_SGIX"/> -->
+            <!-- <enum value="0x8342" name="GL_CUBE_MAP_ZP_SGIX"/> -->
+            <!-- <enum value="0x8343" name="GL_CUBE_MAP_ZN_SGIX"/> -->
+            <!-- <enum value="0x8344" name="GL_CUBE_MAP_XN_SGIX"/> -->
+            <!-- <enum value="0x8345" name="GL_CUBE_MAP_XP_SGIX"/> -->
+            <!-- <enum value="0x8346" name="GL_CUBE_MAP_YN_SGIX"/> -->
+            <!-- <enum value="0x8347" name="GL_CUBE_MAP_YP_SGIX"/> -->
+            <!-- <enum value="0x8348" name="GL_CUBE_MAP_BINDING_SGIX"/> -->
+        <enum value="0x8349" name="GL_FRAGMENT_MATERIAL_EXT"/>
+        <enum value="0x834A" name="GL_FRAGMENT_NORMAL_EXT"/>
+            <!-- Unfortunately, there was a collision promoting to EXT
+                 from SGIX. Use fog_coord's value of 0x8452 instead of
+                 the old assigned FRAGMENT_DEPTH_EXT (0x834B). -->
+        <enum value="0x834C" name="GL_FRAGMENT_COLOR_EXT"/>
+        <enum value="0x834D" name="GL_ATTENUATION_EXT"/>
+        <enum value="0x834E" name="GL_SHADOW_ATTENUATION_EXT"/>
+        <enum value="0x834F" name="GL_TEXTURE_APPLICATION_MODE_EXT"/>
+        <enum value="0x8350" name="GL_TEXTURE_LIGHT_EXT"/>
+        <enum value="0x8351" name="GL_TEXTURE_MATERIAL_FACE_EXT"/>
+        <enum value="0x8352" name="GL_TEXTURE_MATERIAL_PARAMETER_EXT"/>
+        <enum value="0x8353" name="GL_PIXEL_TEXTURE_SGIS"/>
+        <enum value="0x8354" name="GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS"/>
+        <enum value="0x8355" name="GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS"/>
+        <enum value="0x8356" name="GL_PIXEL_GROUP_COLOR_SGIS"/>
+            <unused start="0x8357" end="0x8359" comment="Incomplete extension SGIX_pixel_texture_bits"/>
+            <!-- <enum value="0x8357" name="GL_COLOR_TO_TEXTURE_COORD_SGIX"/> -->
+            <!-- <enum value="0x8358" name="GL_COLOR_BIT_PATTERN_SGIX"/> -->
+            <!-- <enum value="0x8359" name="GL_COLOR_VALUE_SGIX"/> -->
+            <unused start="0x835A" comment="Incomplete extension SGIX_pixel_texture_lod"/>
+            <!-- <enum value="0x835A" name="GL_PIXEL_TEX_GEN_LAMBDA_SOURCE_SGIX"/> -->
+        <enum value="0x835B" name="GL_LINE_QUALITY_HINT_SGIX"/>
+        <enum value="0x835C" name="GL_ASYNC_TEX_IMAGE_SGIX"/>
+        <enum value="0x835D" name="GL_ASYNC_DRAW_PIXELS_SGIX"/>
+        <enum value="0x835E" name="GL_ASYNC_READ_PIXELS_SGIX"/>
+        <enum value="0x835F" name="GL_MAX_ASYNC_TEX_IMAGE_SGIX"/>
+        <enum value="0x8360" name="GL_MAX_ASYNC_DRAW_PIXELS_SGIX"/>
+        <enum value="0x8361" name="GL_MAX_ASYNC_READ_PIXELS_SGIX"/>
+        <enum value="0x8362" name="GL_UNSIGNED_BYTE_2_3_3_REV"/>
+        <enum value="0x8362" name="GL_UNSIGNED_BYTE_2_3_3_REV_EXT"/>
+        <enum value="0x8363" name="GL_UNSIGNED_SHORT_5_6_5"/>
+        <enum value="0x8363" name="GL_UNSIGNED_SHORT_5_6_5_EXT"/>
+        <enum value="0x8364" name="GL_UNSIGNED_SHORT_5_6_5_REV"/>
+        <enum value="0x8364" name="GL_UNSIGNED_SHORT_5_6_5_REV_EXT"/>
+        <enum value="0x8365" name="GL_UNSIGNED_SHORT_4_4_4_4_REV"/>
+        <enum value="0x8365" name="GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT"/>
+        <enum value="0x8365" name="GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG"/>
+        <enum value="0x8366" name="GL_UNSIGNED_SHORT_1_5_5_5_REV"/>
+        <enum value="0x8366" name="GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT"/>
+        <enum value="0x8367" name="GL_UNSIGNED_INT_8_8_8_8_REV"/>
+        <enum value="0x8367" name="GL_UNSIGNED_INT_8_8_8_8_REV_EXT"/>
+        <enum value="0x8368" name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+        <enum value="0x8368" name="GL_UNSIGNED_INT_2_10_10_10_REV_EXT"/>
+        <enum value="0x8369" name="GL_TEXTURE_MAX_CLAMP_S_SGIX"/>
+        <enum value="0x836A" name="GL_TEXTURE_MAX_CLAMP_T_SGIX"/>
+        <enum value="0x836B" name="GL_TEXTURE_MAX_CLAMP_R_SGIX"/>
+            <unused start="0x836C" end="0x836E" comment="Incomplete extension SGIX_fog_texture"/>
+            <!-- <enum value="0x836C" name="GL_FRAGMENT_FOG_SGIX"/> -->
+            <!-- <enum value="0x836D" name="GL_TEXTURE_FOG_SGIX"/> -->
+            <!-- <enum value="0x836E" name="GL_FOG_PATCHY_FACTOR_SGIX"/> -->
+            <unused start="0x836F" comment="Incomplete extension SGIX_fog_factor_to_alpha"/>
+            <!-- <enum value="0x836F" name="GL_FOG_FACTOR_TO_ALPHA_SGIX"/> -->
+    </enums>
+
+    <enums namespace="GL" start="0x8370" end="0x837F" vendor="HP">
+            <!-- NOTE: IBM is using values in this range, because of a
+                 bobble when an employee left DEC for IBM at the same
+                 time as they were assigned the range. their registry
+                 became inconsistent. It's unknown whether HP has any
+                 conflicts. They have never reported using any values in
+                 this range. Lesson: assigned ranges belong to vendors,
+                 not engineers! -->
+        <enum value="0x8370" name="GL_MIRRORED_REPEAT"/>
+        <enum value="0x8370" name="GL_MIRRORED_REPEAT_ARB"/>
+        <enum value="0x8370" name="GL_MIRRORED_REPEAT_IBM"/>
+        <enum value="0x8370" name="GL_MIRRORED_REPEAT_OES"/>
+            <unused start="0x8371" end="0x837F" vendor="HP"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8380" end="0x839F" vendor="IBM">
+            <unused start="0x8380" end="0x839F" vendor="IBM"/>
+    </enums>
+
+    <enums namespace="GL" start="0x83A0" end="0x83BF" vendor="S3">
+        <enum value="0x83A0" name="GL_RGB_S3TC"/>
+        <enum value="0x83A1" name="GL_RGB4_S3TC"/>
+        <enum value="0x83A2" name="GL_RGBA_S3TC"/>
+        <enum value="0x83A3" name="GL_RGBA4_S3TC"/>
+        <enum value="0x83A4" name="GL_RGBA_DXT5_S3TC"/>
+        <enum value="0x83A5" name="GL_RGBA4_DXT5_S3TC"/>
+            <unused start="0x83A6" end="0x83BF" vendor="S3"/>
+    </enums>
+
+    <enums namespace="GL" start="0x83C0" end="0x83EF" vendor="SGI" comment="Most of this could be reclaimed">
+            <unused start="0x83C0" end="0x83CA" comment="Withdrawn extension SGIS_multitexture"/>
+            <!-- <enum value="0x83C0" name="GL_SELECTED_TEXTURE_SGIS"/> -->
+            <!-- <enum value="0x83C1" name="GL_SELECTED_TEXTURE_COORD_SET_SGIS"/> -->
+            <!-- <enum value="0x83C2" name="GL_SELECTED_TEXTURE_TRANSFORM_SGIS"/> -->
+            <!-- <enum value="0x83C3" name="GL_MAX_TEXTURES_SGIS"/> -->
+            <!-- <enum value="0x83C4" name="GL_MAX_TEXTURE_COORD_SETS_SGIS"/> -->
+            <!-- <enum value="0x83C5" name="GL_TEXTURE_COORD_SET_INTERLEAVE_FACTOR_SGIS"/> -->
+            <!-- <enum value="0x83C6" name="GL_TEXTURE_ENV_COORD_SET_SGIS"/> -->
+            <!-- <enum value="0x83C7" name="GL_TEXTURE0_SGIS"/> -->
+            <!-- <enum value="0x83C8" name="GL_TEXTURE1_SGIS"/> -->
+            <!-- <enum value="0x83C9" name="GL_TEXTURE2_SGIS"/> -->
+            <!-- <enum value="0x83CA" name="GL_TEXTURE3_SGIS"/> -->
+            <unused start="0x83CB" end="0x83E5" vendor="SGI"/>
+            <unused start="0x83E6" end="0x83E9" comment="Incomplete extension SGIX_bali_g_instruments"/>
+            <!-- <enum value="0x83E6" name="GL_BALI_NUM_TRIS_CULLED_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x83E7" name="GL_BALI_NUM_PRIMS_CLIPPED_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x83E8" name="GL_BALI_NUM_PRIMS_REJECT_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x83E9" name="GL_BALI_NUM_PRIMS_CLIP_RESULT_INSTRUMENT_SGIX"/> -->
+            <unused start="0x83EA" end="0x83EC" comment="Incomplete extension SGIX_bali_r_instruments"/>
+            <!-- <enum value="0x83EA" name="GL_BALI_FRAGMENTS_GENERATED_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x83EB" name="GL_BALI_DEPTH_PASS_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x83EC" name="GL_BALI_R_CHIP_COUNT_SGIX"/> -->
+            <unused start="0x83ED" comment="Incomplete extension SGIX_occlusion_instrument"/>
+            <!-- <enum value="0x83ED" name="GL_OCCLUSION_INSTRUMENT_SGIX"/> -->
+        <enum value="0x83EE" name="GL_VERTEX_PRECLIP_SGIX"/>
+        <enum value="0x83EF" name="GL_VERTEX_PRECLIP_HINT_SGIX"/>
+    </enums>
+
+    <enums namespace="GL" start="0x83F0" end="0x83FF" vendor="INTEL">
+            <!-- This block was reclaimed from NTP, who never shipped
+                 it, and reassigned to Intel. -->
+        <enum value="0x83F0" name="GL_COMPRESSED_RGB_S3TC_DXT1_EXT"/>
+        <enum value="0x83F1" name="GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"/>
+        <enum value="0x83F2" name="GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE"/>
+        <enum value="0x83F2" name="GL_COMPRESSED_RGBA_S3TC_DXT3_EXT"/>
+        <enum value="0x83F3" name="GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE"/>
+        <enum value="0x83F3" name="GL_COMPRESSED_RGBA_S3TC_DXT5_EXT"/>
+        <enum value="0x83F4" name="GL_PARALLEL_ARRAYS_INTEL"/>
+        <enum value="0x83F5" name="GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL"/>
+        <enum value="0x83F6" name="GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL"/>
+        <enum value="0x83F7" name="GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL"/>
+        <enum value="0x83F8" name="GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL"/>
+        <enum value="0x83F9" name="GL_PERFQUERY_DONOT_FLUSH_INTEL"/>
+        <enum value="0x83FA" name="GL_PERFQUERY_FLUSH_INTEL"/>
+        <enum value="0x83FB" name="GL_PERFQUERY_WAIT_INTEL"/>
+        <enum value="0x83FC" name="GL_BLACKHOLE_RENDER_INTEL"/>
+            <unused start="0x83FD" vendor="INTEL"/>
+        <enum value="0x83FE" name="GL_CONSERVATIVE_RASTERIZATION_INTEL"/>
+        <enum value="0x83FF" name="GL_TEXTURE_MEMORY_LAYOUT_INTEL"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8400" end="0x846F" vendor="SGI">
+        <enum value="0x8400" name="GL_FRAGMENT_LIGHTING_SGIX"/>
+        <enum value="0x8401" name="GL_FRAGMENT_COLOR_MATERIAL_SGIX"/>
+        <enum value="0x8402" name="GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX"/>
+        <enum value="0x8403" name="GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX"/>
+        <enum value="0x8404" name="GL_MAX_FRAGMENT_LIGHTS_SGIX"/>
+        <enum value="0x8405" name="GL_MAX_ACTIVE_LIGHTS_SGIX"/>
+        <enum value="0x8406" name="GL_CURRENT_RASTER_NORMAL_SGIX"/>
+        <enum value="0x8407" name="GL_LIGHT_ENV_MODE_SGIX"/>
+        <enum value="0x8408" name="GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX"/>
+        <enum value="0x8409" name="GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX"/>
+        <enum value="0x840A" name="GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX"/>
+        <enum value="0x840B" name="GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX"/>
+        <enum value="0x840C" name="GL_FRAGMENT_LIGHT0_SGIX"/>
+        <enum value="0x840D" name="GL_FRAGMENT_LIGHT1_SGIX"/>
+        <enum value="0x840E" name="GL_FRAGMENT_LIGHT2_SGIX"/>
+        <enum value="0x840F" name="GL_FRAGMENT_LIGHT3_SGIX"/>
+        <enum value="0x8410" name="GL_FRAGMENT_LIGHT4_SGIX"/>
+        <enum value="0x8411" name="GL_FRAGMENT_LIGHT5_SGIX"/>
+        <enum value="0x8412" name="GL_FRAGMENT_LIGHT6_SGIX"/>
+        <enum value="0x8413" name="GL_FRAGMENT_LIGHT7_SGIX"/>
+            <unused start="0x8414" end="0x842D" vendor="SGI"/>
+        <enum value="0x842E" name="GL_PACK_RESAMPLE_SGIX" comment="Formerly 0x842C in SGI specfile"/>
+        <enum value="0x842F" name="GL_UNPACK_RESAMPLE_SGIX" comment="Formerly 0x842D in SGI specfile"/>
+        <enum value="0x8430" name="GL_RESAMPLE_DECIMATE_SGIX" comment="Formerly 0x8430 in SGI specfile"/>
+            <unused start="0x8431" end="0x8432" vendor="SGI"/>
+        <enum value="0x8433" name="GL_RESAMPLE_REPLICATE_SGIX" comment="Formerly 0x842E in SGI specfile"/>
+        <enum value="0x8434" name="GL_RESAMPLE_ZERO_FILL_SGIX" comment="Formerly 0x842F in SGI specfile"/>
+            <unused start="0x8435" vendor="SGI"/>
+            <!-- Incomplete extension SGIX_fragment_lighting -->
+            <!-- <enum value="0x8436"      name="GL_EYE_SPACE_SGIX"/> -->
+            <!-- <enum value="0x8437"      name="GL_TANGENT_SPACE_SGIX"/> -->
+            <!-- <enum value="0x8438"      name="GL_OBJECT_SPACE_SGIX"/> -->
+            <!-- <enum value="0x8439"      name="GL_TANGENT_ARRAY_SGIX"/> -->
+            <!-- <enum value="0x843A"      name="GL_BINORMAL_ARRAY_SGIX"/> -->
+            <!-- <enum value="0x843B"      name="GL_CURRENT_TANGENT_SGIX"/> -->
+            <!-- <enum value="0x843C"      name="GL_CURRENT_BINORMAL_SGIX"/> -->
+            <!-- <enum value="0x843D"      name="GL_FRAGMENT_LIGHT_SPACE_SGIX"/> -->
+            <!-- <enum value="0x843E"      name="GL_TANGENT_ARRAY_TYPE_SGIX"/> -->
+            <!-- <enum value="0x843F"      name="GL_TANGENT_ARRAY_STRIDE_SGIX"/> -->
+            <!-- <enum value="0x8440"      name="GL_TANGENT_ARRAY_COUNT_SGIX"/> -->
+            <!-- <enum value="0x8441"      name="GL_BINORMAL_ARRAY_TYPE_SGIX"/> -->
+            <!-- <enum value="0x8442"      name="GL_BINORMAL_ARRAY_STRIDE_SGIX"/> -->
+            <!-- <enum value="0x8443"      name="GL_BINORMAL_ARRAY_COUNT_SGIX"/> -->
+            <!-- <enum value="0x8444"      name="GL_TANGENT_ARRAY_POINTER_SGIX"/> -->
+            <!-- <enum value="0x8445"      name="GL_BINORMAL_ARRAY_POINTER_SGIX"/> -->
+            <!-- <enum value="0x8446"      name="GL_MAP1_TANGENT_SGIX"/> -->
+            <!-- <enum value="0x8447"      name="GL_MAP2_TANGENT_SGIX"/> -->
+            <!-- <enum value="0x8448"      name="GL_MAP1_BINORMAL_SGIX"/> -->
+            <!-- <enum value="0x8449"      name="GL_MAP2_BINORMAL_SGIX"/> -->
+        <enum value="0x8439" name="GL_TANGENT_ARRAY_EXT"/>
+        <enum value="0x843A" name="GL_BINORMAL_ARRAY_EXT"/>
+        <enum value="0x843B" name="GL_CURRENT_TANGENT_EXT"/>
+        <enum value="0x843C" name="GL_CURRENT_BINORMAL_EXT"/>
+            <unused start="0x844D" vendor="SGI"/>
+        <enum value="0x843E" name="GL_TANGENT_ARRAY_TYPE_EXT"/>
+        <enum value="0x843F" name="GL_TANGENT_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8440" name="GL_BINORMAL_ARRAY_TYPE_EXT"/>
+        <enum value="0x8441" name="GL_BINORMAL_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8442" name="GL_TANGENT_ARRAY_POINTER_EXT"/>
+        <enum value="0x8443" name="GL_BINORMAL_ARRAY_POINTER_EXT"/>
+        <enum value="0x8444" name="GL_MAP1_TANGENT_EXT"/>
+        <enum value="0x8445" name="GL_MAP2_TANGENT_EXT"/>
+        <enum value="0x8446" name="GL_MAP1_BINORMAL_EXT"/>
+        <enum value="0x8447" name="GL_MAP2_BINORMAL_EXT"/>
+            <unused start="0x8448" end="0x8449" comment="Incomplete extension SGIX_fragment_lighting"/>
+            <unused start="0x844A" end="0x844C" comment="Incomplete extension SGIX_bali_timer_instruments"/>
+            <!-- <enum value="0x844A" name="GL_BALI_GEOM_TIMER_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x844B" name="GL_BALI_RASTER_TIMER_INSTRUMENT_SGIX"/> -->
+            <!-- <enum value="0x844C" name="GL_BALI_INSTRUMENT_TIME_UNIT_SGIX"/> -->
+        <enum value="0x844D" name="GL_NEAREST_CLIPMAP_NEAREST_SGIX"/>
+        <enum value="0x844E" name="GL_NEAREST_CLIPMAP_LINEAR_SGIX"/>
+        <enum value="0x844F" name="GL_LINEAR_CLIPMAP_NEAREST_SGIX"/>
+            <!-- 0x8450-0x845F range brokered for Id Software -->
+        <enum value="0x8450" name="GL_FOG_COORDINATE_SOURCE"/>
+        <enum value="0x8450" name="GL_FOG_COORDINATE_SOURCE_EXT"/>
+        <enum value="0x8450" name="GL_FOG_COORD_SRC" alias="GL_FOG_COORDINATE_SOURCE"/>
+        <enum value="0x8451" name="GL_FOG_COORDINATE"/>
+        <enum value="0x8451" name="GL_FOG_COORD" alias="GL_FOG_COORDINATE"/>
+        <enum value="0x8451" name="GL_FOG_COORDINATE_EXT"/>
+        <enum value="0x8452" name="GL_FRAGMENT_DEPTH"/>
+        <enum value="0x8452" name="GL_FRAGMENT_DEPTH_EXT"/>
+        <enum value="0x8453" name="GL_CURRENT_FOG_COORDINATE"/>
+        <enum value="0x8453" name="GL_CURRENT_FOG_COORD" alias="GL_CURRENT_FOG_COORDINATE"/>
+        <enum value="0x8453" name="GL_CURRENT_FOG_COORDINATE_EXT"/>
+        <enum value="0x8454" name="GL_FOG_COORDINATE_ARRAY_TYPE"/>
+        <enum value="0x8454" name="GL_FOG_COORDINATE_ARRAY_TYPE_EXT"/>
+        <enum value="0x8454" name="GL_FOG_COORD_ARRAY_TYPE" alias="GL_FOG_COORDINATE_ARRAY_TYPE"/>
+        <enum value="0x8455" name="GL_FOG_COORDINATE_ARRAY_STRIDE"/>
+        <enum value="0x8455" name="GL_FOG_COORDINATE_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8455" name="GL_FOG_COORD_ARRAY_STRIDE" alias="GL_FOG_COORDINATE_ARRAY_STRIDE"/>
+        <enum value="0x8456" name="GL_FOG_COORDINATE_ARRAY_POINTER"/>
+        <enum value="0x8456" name="GL_FOG_COORDINATE_ARRAY_POINTER_EXT"/>
+        <enum value="0x8456" name="GL_FOG_COORD_ARRAY_POINTER" alias="GL_FOG_COORDINATE_ARRAY_POINTER"/>
+        <enum value="0x8457" name="GL_FOG_COORDINATE_ARRAY"/>
+        <enum value="0x8457" name="GL_FOG_COORDINATE_ARRAY_EXT"/>
+        <enum value="0x8457" name="GL_FOG_COORD_ARRAY" alias="GL_FOG_COORDINATE_ARRAY"/>
+        <enum value="0x8458" name="GL_COLOR_SUM"/>
+        <enum value="0x8458" name="GL_COLOR_SUM_ARB"/>
+        <enum value="0x8458" name="GL_COLOR_SUM_EXT"/>
+        <enum value="0x8459" name="GL_CURRENT_SECONDARY_COLOR"/>
+        <enum value="0x8459" name="GL_CURRENT_SECONDARY_COLOR_EXT"/>
+        <enum value="0x845A" name="GL_SECONDARY_COLOR_ARRAY_SIZE"/>
+        <enum value="0x845A" name="GL_SECONDARY_COLOR_ARRAY_SIZE_EXT"/>
+        <enum value="0x845B" name="GL_SECONDARY_COLOR_ARRAY_TYPE"/>
+        <enum value="0x845B" name="GL_SECONDARY_COLOR_ARRAY_TYPE_EXT"/>
+        <enum value="0x845C" name="GL_SECONDARY_COLOR_ARRAY_STRIDE"/>
+        <enum value="0x845C" name="GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT"/>
+        <enum value="0x845D" name="GL_SECONDARY_COLOR_ARRAY_POINTER"/>
+        <enum value="0x845D" name="GL_SECONDARY_COLOR_ARRAY_POINTER_EXT"/>
+        <enum value="0x845E" name="GL_SECONDARY_COLOR_ARRAY"/>
+        <enum value="0x845E" name="GL_SECONDARY_COLOR_ARRAY_EXT"/>
+        <enum value="0x845F" name="GL_CURRENT_RASTER_SECONDARY_COLOR"/>
+            <unused start="0x8460" end="0x846B" comment="Incomplete extension SGIX_icc_texture"/>
+            <!-- <enum value="0x8460" name="GL_RGB_ICC_SGIX"/> -->
+            <!-- <enum value="0x8461" name="GL_RGBA_ICC_SGIX"/> -->
+            <!-- <enum value="0x8462" name="GL_ALPHA_ICC_SGIX"/> -->
+            <!-- <enum value="0x8463" name="GL_LUMINANCE_ICC_SGIX"/> -->
+            <!-- <enum value="0x8464" name="GL_INTENSITY_ICC_SGIX"/> -->
+            <!-- <enum value="0x8465" name="GL_LUMINANCE_ALPHA_ICC_SGIX"/> -->
+            <!-- <enum value="0x8466" name="GL_R5_G6_B5_ICC_SGIX"/> -->
+            <!-- <enum value="0x8467" name="GL_R5_G6_B5_A8_ICC_SGIX"/> -->
+            <!-- <enum value="0x8468" name="GL_ALPHA16_ICC_SGIX"/> -->
+            <!-- <enum value="0x8469" name="GL_LUMINANCE16_ICC_SGIX"/> -->
+            <!-- <enum value="0x846A" name="GL_INTENSITY16_ICC_SGIX"/> -->
+            <!-- <enum value="0x846B" name="GL_LUMINANCE16_ALPHA8_ICC_SGIX"/> -->
+            <unused start="0x846C" vendor="SGI"/>
+        <enum value="0x846D" name="GL_ALIASED_POINT_SIZE_RANGE"/>
+        <enum value="0x846E" name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <unused start="0x846F" vendor="SGI"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8470" end="0x848F" vendor="AMD">
+            <unused start="0x8470" end="0x848F" vendor="AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8490" end="0x849F" vendor="REND">
+        <enum value="0x8490" name="GL_SCREEN_COORDINATES_REND"/>
+        <enum value="0x8491" name="GL_INVERTED_SCREEN_W_REND"/>
+            <unused start="0x8492" end="0x849F" vendor="REND"/>
+    </enums>
+
+    <enums namespace="GL" start="0x84A0" end="0x84BF" vendor="AMD">
+            <unused start="0x84A0" end="0x84BF" vendor="AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x84C0" end="0x84EF" vendor="ARB">
+        <enum value="0x84C0" name="GL_TEXTURE0"/>
+        <enum value="0x84C0" name="GL_TEXTURE0_ARB"/>
+        <enum value="0x84C1" name="GL_TEXTURE1"/>
+        <enum value="0x84C1" name="GL_TEXTURE1_ARB"/>
+        <enum value="0x84C2" name="GL_TEXTURE2"/>
+        <enum value="0x84C2" name="GL_TEXTURE2_ARB"/>
+        <enum value="0x84C3" name="GL_TEXTURE3"/>
+        <enum value="0x84C3" name="GL_TEXTURE3_ARB"/>
+        <enum value="0x84C4" name="GL_TEXTURE4"/>
+        <enum value="0x84C4" name="GL_TEXTURE4_ARB"/>
+        <enum value="0x84C5" name="GL_TEXTURE5"/>
+        <enum value="0x84C5" name="GL_TEXTURE5_ARB"/>
+        <enum value="0x84C6" name="GL_TEXTURE6"/>
+        <enum value="0x84C6" name="GL_TEXTURE6_ARB"/>
+        <enum value="0x84C7" name="GL_TEXTURE7"/>
+        <enum value="0x84C7" name="GL_TEXTURE7_ARB"/>
+        <enum value="0x84C8" name="GL_TEXTURE8"/>
+        <enum value="0x84C8" name="GL_TEXTURE8_ARB"/>
+        <enum value="0x84C9" name="GL_TEXTURE9"/>
+        <enum value="0x84C9" name="GL_TEXTURE9_ARB"/>
+        <enum value="0x84CA" name="GL_TEXTURE10"/>
+        <enum value="0x84CA" name="GL_TEXTURE10_ARB"/>
+        <enum value="0x84CB" name="GL_TEXTURE11"/>
+        <enum value="0x84CB" name="GL_TEXTURE11_ARB"/>
+        <enum value="0x84CC" name="GL_TEXTURE12"/>
+        <enum value="0x84CC" name="GL_TEXTURE12_ARB"/>
+        <enum value="0x84CD" name="GL_TEXTURE13"/>
+        <enum value="0x84CD" name="GL_TEXTURE13_ARB"/>
+        <enum value="0x84CE" name="GL_TEXTURE14"/>
+        <enum value="0x84CE" name="GL_TEXTURE14_ARB"/>
+        <enum value="0x84CF" name="GL_TEXTURE15"/>
+        <enum value="0x84CF" name="GL_TEXTURE15_ARB"/>
+        <enum value="0x84D0" name="GL_TEXTURE16"/>
+        <enum value="0x84D0" name="GL_TEXTURE16_ARB"/>
+        <enum value="0x84D1" name="GL_TEXTURE17"/>
+        <enum value="0x84D1" name="GL_TEXTURE17_ARB"/>
+        <enum value="0x84D2" name="GL_TEXTURE18"/>
+        <enum value="0x84D2" name="GL_TEXTURE18_ARB"/>
+        <enum value="0x84D3" name="GL_TEXTURE19"/>
+        <enum value="0x84D3" name="GL_TEXTURE19_ARB"/>
+        <enum value="0x84D4" name="GL_TEXTURE20"/>
+        <enum value="0x84D4" name="GL_TEXTURE20_ARB"/>
+        <enum value="0x84D5" name="GL_TEXTURE21"/>
+        <enum value="0x84D5" name="GL_TEXTURE21_ARB"/>
+        <enum value="0x84D6" name="GL_TEXTURE22"/>
+        <enum value="0x84D6" name="GL_TEXTURE22_ARB"/>
+        <enum value="0x84D7" name="GL_TEXTURE23"/>
+        <enum value="0x84D7" name="GL_TEXTURE23_ARB"/>
+        <enum value="0x84D8" name="GL_TEXTURE24"/>
+        <enum value="0x84D8" name="GL_TEXTURE24_ARB"/>
+        <enum value="0x84D9" name="GL_TEXTURE25"/>
+        <enum value="0x84D9" name="GL_TEXTURE25_ARB"/>
+        <enum value="0x84DA" name="GL_TEXTURE26"/>
+        <enum value="0x84DA" name="GL_TEXTURE26_ARB"/>
+        <enum value="0x84DB" name="GL_TEXTURE27"/>
+        <enum value="0x84DB" name="GL_TEXTURE27_ARB"/>
+        <enum value="0x84DC" name="GL_TEXTURE28"/>
+        <enum value="0x84DC" name="GL_TEXTURE28_ARB"/>
+        <enum value="0x84DD" name="GL_TEXTURE29"/>
+        <enum value="0x84DD" name="GL_TEXTURE29_ARB"/>
+        <enum value="0x84DE" name="GL_TEXTURE30"/>
+        <enum value="0x84DE" name="GL_TEXTURE30_ARB"/>
+        <enum value="0x84DF" name="GL_TEXTURE31"/>
+        <enum value="0x84DF" name="GL_TEXTURE31_ARB"/>
+        <enum value="0x84E0" name="GL_ACTIVE_TEXTURE"/>
+        <enum value="0x84E0" name="GL_ACTIVE_TEXTURE_ARB"/>
+        <enum value="0x84E1" name="GL_CLIENT_ACTIVE_TEXTURE"/>
+        <enum value="0x84E1" name="GL_CLIENT_ACTIVE_TEXTURE_ARB"/>
+        <enum value="0x84E2" name="GL_MAX_TEXTURE_UNITS"/>
+        <enum value="0x84E2" name="GL_MAX_TEXTURE_UNITS_ARB"/>
+        <enum value="0x84E3" name="GL_TRANSPOSE_MODELVIEW_MATRIX"/>
+        <enum value="0x84E3" name="GL_TRANSPOSE_MODELVIEW_MATRIX_ARB"/>
+        <enum value="0x84E3" name="GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV"/>
+        <enum value="0x84E4" name="GL_TRANSPOSE_PROJECTION_MATRIX"/>
+        <enum value="0x84E4" name="GL_TRANSPOSE_PROJECTION_MATRIX_ARB"/>
+        <enum value="0x84E4" name="GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV"/>
+        <enum value="0x84E5" name="GL_TRANSPOSE_TEXTURE_MATRIX"/>
+        <enum value="0x84E5" name="GL_TRANSPOSE_TEXTURE_MATRIX_ARB"/>
+        <enum value="0x84E6" name="GL_TRANSPOSE_COLOR_MATRIX"/>
+        <enum value="0x84E6" name="GL_TRANSPOSE_COLOR_MATRIX_ARB"/>
+        <enum value="0x84E7" name="GL_SUBTRACT"/>
+        <enum value="0x84E7" name="GL_SUBTRACT_ARB"/>
+        <enum value="0x84E8" name="GL_MAX_RENDERBUFFER_SIZE"/>
+        <enum value="0x84E8" name="GL_MAX_RENDERBUFFER_SIZE_EXT"/>
+        <enum value="0x84E8" name="GL_MAX_RENDERBUFFER_SIZE_OES"/>
+        <enum value="0x84E9" name="GL_COMPRESSED_ALPHA"/>
+        <enum value="0x84E9" name="GL_COMPRESSED_ALPHA_ARB"/>
+        <enum value="0x84EA" name="GL_COMPRESSED_LUMINANCE"/>
+        <enum value="0x84EA" name="GL_COMPRESSED_LUMINANCE_ARB"/>
+        <enum value="0x84EB" name="GL_COMPRESSED_LUMINANCE_ALPHA"/>
+        <enum value="0x84EB" name="GL_COMPRESSED_LUMINANCE_ALPHA_ARB"/>
+        <enum value="0x84EC" name="GL_COMPRESSED_INTENSITY"/>
+        <enum value="0x84EC" name="GL_COMPRESSED_INTENSITY_ARB"/>
+        <enum value="0x84ED" name="GL_COMPRESSED_RGB"/>
+        <enum value="0x84ED" name="GL_COMPRESSED_RGB_ARB"/>
+        <enum value="0x84EE" name="GL_COMPRESSED_RGBA"/>
+        <enum value="0x84EE" name="GL_COMPRESSED_RGBA_ARB"/>
+        <enum value="0x84EF" name="GL_TEXTURE_COMPRESSION_HINT"/>
+        <enum value="0x84EF" name="GL_TEXTURE_COMPRESSION_HINT_ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x84F0" end="0x855F" vendor="NV">
+        <enum value="0x84F0" name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+        <enum value="0x84F1" name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+        <enum value="0x84F2" name="GL_ALL_COMPLETED_NV"/>
+        <enum value="0x84F3" name="GL_FENCE_STATUS_NV"/>
+        <enum value="0x84F4" name="GL_FENCE_CONDITION_NV"/>
+        <enum value="0x84F5" name="GL_TEXTURE_RECTANGLE"/>
+        <enum value="0x84F5" name="GL_TEXTURE_RECTANGLE_ARB"/>
+        <enum value="0x84F5" name="GL_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x84F6" name="GL_TEXTURE_BINDING_RECTANGLE"/>
+        <enum value="0x84F6" name="GL_TEXTURE_BINDING_RECTANGLE_ARB"/>
+        <enum value="0x84F6" name="GL_TEXTURE_BINDING_RECTANGLE_NV"/>
+        <enum value="0x84F7" name="GL_PROXY_TEXTURE_RECTANGLE"/>
+        <enum value="0x84F7" name="GL_PROXY_TEXTURE_RECTANGLE_ARB"/>
+        <enum value="0x84F7" name="GL_PROXY_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x84F8" name="GL_MAX_RECTANGLE_TEXTURE_SIZE"/>
+        <enum value="0x84F8" name="GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB"/>
+        <enum value="0x84F8" name="GL_MAX_RECTANGLE_TEXTURE_SIZE_NV"/>
+        <enum value="0x84F9" name="GL_DEPTH_STENCIL"/>
+        <enum value="0x84F9" name="GL_DEPTH_STENCIL_EXT"/>
+        <enum value="0x84F9" name="GL_DEPTH_STENCIL_NV"/>
+        <enum value="0x84F9" name="GL_DEPTH_STENCIL_OES"/>
+        <enum value="0x84FA" name="GL_UNSIGNED_INT_24_8"/>
+        <enum value="0x84FA" name="GL_UNSIGNED_INT_24_8_EXT"/>
+        <enum value="0x84FA" name="GL_UNSIGNED_INT_24_8_NV"/>
+        <enum value="0x84FA" name="GL_UNSIGNED_INT_24_8_OES"/>
+            <unused start="0x84FB" end="0x84FC" vendor="NV"/>
+        <enum value="0x84FD" name="GL_MAX_TEXTURE_LOD_BIAS"/>
+        <enum value="0x84FD" name="GL_MAX_TEXTURE_LOD_BIAS_EXT"/>
+        <enum value="0x84FE" name="GL_TEXTURE_MAX_ANISOTROPY"/>
+        <enum value="0x84FE" name="GL_TEXTURE_MAX_ANISOTROPY_EXT" alias="GL_TEXTURE_MAX_ANISOTROPY"/>
+        <enum value="0x84FF" name="GL_MAX_TEXTURE_MAX_ANISOTROPY"/>
+        <enum value="0x84FF" name="GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT" alias="GL_MAX_TEXTURE_MAX_ANISOTROPY"/>
+        <enum value="0x8500" name="GL_TEXTURE_FILTER_CONTROL"/>
+        <enum value="0x8500" name="GL_TEXTURE_FILTER_CONTROL_EXT"/>
+        <enum value="0x8501" name="GL_TEXTURE_LOD_BIAS"/>
+        <enum value="0x8501" name="GL_TEXTURE_LOD_BIAS_EXT"/>
+        <enum value="0x8502" name="GL_MODELVIEW1_STACK_DEPTH_EXT"/>
+        <enum value="0x8503" name="GL_COMBINE4_NV"/>
+        <enum value="0x8504" name="GL_MAX_SHININESS_NV"/>
+        <enum value="0x8505" name="GL_MAX_SPOT_EXPONENT_NV"/>
+        <enum value="0x8506" name="GL_MODELVIEW1_MATRIX_EXT"/>
+        <enum value="0x8507" name="GL_INCR_WRAP"/>
+        <enum value="0x8507" name="GL_INCR_WRAP_EXT"/>
+        <enum value="0x8507" name="GL_INCR_WRAP_OES"/>
+        <enum value="0x8508" name="GL_DECR_WRAP"/>
+        <enum value="0x8508" name="GL_DECR_WRAP_EXT"/>
+        <enum value="0x8508" name="GL_DECR_WRAP_OES"/>
+        <enum value="0x8509" name="GL_VERTEX_WEIGHTING_EXT"/>
+        <enum value="0x850A" name="GL_MODELVIEW1_ARB"/>
+        <enum value="0x850A" name="GL_MODELVIEW1_EXT"/>
+        <enum value="0x850B" name="GL_CURRENT_VERTEX_WEIGHT_EXT"/>
+        <enum value="0x850C" name="GL_VERTEX_WEIGHT_ARRAY_EXT"/>
+        <enum value="0x850D" name="GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT"/>
+        <enum value="0x850E" name="GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT"/>
+        <enum value="0x850F" name="GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT"/>
+        <enum value="0x8510" name="GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT"/>
+        <enum value="0x8511" name="GL_NORMAL_MAP"/>
+        <enum value="0x8511" name="GL_NORMAL_MAP_ARB"/>
+        <enum value="0x8511" name="GL_NORMAL_MAP_EXT"/>
+        <enum value="0x8511" name="GL_NORMAL_MAP_NV"/>
+        <enum value="0x8511" name="GL_NORMAL_MAP_OES"/>
+        <enum value="0x8512" name="GL_REFLECTION_MAP"/>
+        <enum value="0x8512" name="GL_REFLECTION_MAP_ARB"/>
+        <enum value="0x8512" name="GL_REFLECTION_MAP_EXT"/>
+        <enum value="0x8512" name="GL_REFLECTION_MAP_NV"/>
+        <enum value="0x8512" name="GL_REFLECTION_MAP_OES"/>
+        <enum value="0x8513" name="GL_TEXTURE_CUBE_MAP"/>
+        <enum value="0x8513" name="GL_TEXTURE_CUBE_MAP_ARB"/>
+        <enum value="0x8513" name="GL_TEXTURE_CUBE_MAP_EXT"/>
+        <enum value="0x8513" name="GL_TEXTURE_CUBE_MAP_OES"/>
+        <enum value="0x8514" name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+        <enum value="0x8514" name="GL_TEXTURE_BINDING_CUBE_MAP_ARB"/>
+        <enum value="0x8514" name="GL_TEXTURE_BINDING_CUBE_MAP_EXT"/>
+        <enum value="0x8514" name="GL_TEXTURE_BINDING_CUBE_MAP_OES"/>
+        <enum value="0x8515" name="GL_TEXTURE_CUBE_MAP_POSITIVE_X"/>
+        <enum value="0x8515" name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB"/>
+        <enum value="0x8515" name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT"/>
+        <enum value="0x8515" name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES"/>
+        <enum value="0x8516" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X"/>
+        <enum value="0x8516" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB"/>
+        <enum value="0x8516" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT"/>
+        <enum value="0x8516" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES"/>
+        <enum value="0x8517" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y"/>
+        <enum value="0x8517" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB"/>
+        <enum value="0x8517" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT"/>
+        <enum value="0x8517" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES"/>
+        <enum value="0x8518" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"/>
+        <enum value="0x8518" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB"/>
+        <enum value="0x8518" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT"/>
+        <enum value="0x8518" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES"/>
+        <enum value="0x8519" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z"/>
+        <enum value="0x8519" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB"/>
+        <enum value="0x8519" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT"/>
+        <enum value="0x8519" name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES"/>
+        <enum value="0x851A" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"/>
+        <enum value="0x851A" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB"/>
+        <enum value="0x851A" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT"/>
+        <enum value="0x851A" name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES"/>
+        <enum value="0x851B" name="GL_PROXY_TEXTURE_CUBE_MAP"/>
+        <enum value="0x851B" name="GL_PROXY_TEXTURE_CUBE_MAP_ARB"/>
+        <enum value="0x851B" name="GL_PROXY_TEXTURE_CUBE_MAP_EXT"/>
+        <enum value="0x851C" name="GL_MAX_CUBE_MAP_TEXTURE_SIZE"/>
+        <enum value="0x851C" name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB"/>
+        <enum value="0x851C" name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT"/>
+        <enum value="0x851C" name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES"/>
+        <enum value="0x851D" name="GL_VERTEX_ARRAY_RANGE_APPLE"/>
+        <enum value="0x851D" name="GL_VERTEX_ARRAY_RANGE_NV"/>
+        <enum value="0x851E" name="GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE"/>
+        <enum value="0x851E" name="GL_VERTEX_ARRAY_RANGE_LENGTH_NV"/>
+        <enum value="0x851F" name="GL_VERTEX_ARRAY_RANGE_VALID_NV"/>
+        <enum value="0x851F" name="GL_VERTEX_ARRAY_STORAGE_HINT_APPLE"/>
+        <enum value="0x8520" name="GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV"/>
+        <enum value="0x8521" name="GL_VERTEX_ARRAY_RANGE_POINTER_APPLE"/>
+        <enum value="0x8521" name="GL_VERTEX_ARRAY_RANGE_POINTER_NV"/>
+        <enum value="0x8522" name="GL_REGISTER_COMBINERS_NV"/>
+        <enum value="0x8523" name="GL_VARIABLE_A_NV"/>
+        <enum value="0x8524" name="GL_VARIABLE_B_NV"/>
+        <enum value="0x8525" name="GL_VARIABLE_C_NV"/>
+        <enum value="0x8526" name="GL_VARIABLE_D_NV"/>
+        <enum value="0x8527" name="GL_VARIABLE_E_NV"/>
+        <enum value="0x8528" name="GL_VARIABLE_F_NV"/>
+        <enum value="0x8529" name="GL_VARIABLE_G_NV"/>
+        <enum value="0x852A" name="GL_CONSTANT_COLOR0_NV"/>
+        <enum value="0x852B" name="GL_CONSTANT_COLOR1_NV"/>
+        <enum value="0x852C" name="GL_PRIMARY_COLOR_NV"/>
+        <enum value="0x852D" name="GL_SECONDARY_COLOR_NV"/>
+        <enum value="0x852E" name="GL_SPARE0_NV"/>
+        <enum value="0x852F" name="GL_SPARE1_NV"/>
+        <enum value="0x8530" name="GL_DISCARD_NV"/>
+        <enum value="0x8531" name="GL_E_TIMES_F_NV"/>
+        <enum value="0x8532" name="GL_SPARE0_PLUS_SECONDARY_COLOR_NV"/>
+        <enum value="0x8533" name="GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV"/>
+        <enum value="0x8534" name="GL_MULTISAMPLE_FILTER_HINT_NV"/>
+        <enum value="0x8535" name="GL_PER_STAGE_CONSTANTS_NV"/>
+        <enum value="0x8536" name="GL_UNSIGNED_IDENTITY_NV"/>
+        <enum value="0x8537" name="GL_UNSIGNED_INVERT_NV"/>
+        <enum value="0x8538" name="GL_EXPAND_NORMAL_NV"/>
+        <enum value="0x8539" name="GL_EXPAND_NEGATE_NV"/>
+        <enum value="0x853A" name="GL_HALF_BIAS_NORMAL_NV"/>
+        <enum value="0x853B" name="GL_HALF_BIAS_NEGATE_NV"/>
+        <enum value="0x853C" name="GL_SIGNED_IDENTITY_NV"/>
+        <enum value="0x853D" name="GL_SIGNED_NEGATE_NV"/>
+        <enum value="0x853E" name="GL_SCALE_BY_TWO_NV"/>
+        <enum value="0x853F" name="GL_SCALE_BY_FOUR_NV"/>
+        <enum value="0x8540" name="GL_SCALE_BY_ONE_HALF_NV"/>
+        <enum value="0x8541" name="GL_BIAS_BY_NEGATIVE_ONE_HALF_NV"/>
+        <enum value="0x8542" name="GL_COMBINER_INPUT_NV"/>
+        <enum value="0x8543" name="GL_COMBINER_MAPPING_NV"/>
+        <enum value="0x8544" name="GL_COMBINER_COMPONENT_USAGE_NV"/>
+        <enum value="0x8545" name="GL_COMBINER_AB_DOT_PRODUCT_NV"/>
+        <enum value="0x8546" name="GL_COMBINER_CD_DOT_PRODUCT_NV"/>
+        <enum value="0x8547" name="GL_COMBINER_MUX_SUM_NV"/>
+        <enum value="0x8548" name="GL_COMBINER_SCALE_NV"/>
+        <enum value="0x8549" name="GL_COMBINER_BIAS_NV"/>
+        <enum value="0x854A" name="GL_COMBINER_AB_OUTPUT_NV"/>
+        <enum value="0x854B" name="GL_COMBINER_CD_OUTPUT_NV"/>
+        <enum value="0x854C" name="GL_COMBINER_SUM_OUTPUT_NV"/>
+        <enum value="0x854D" name="GL_MAX_GENERAL_COMBINERS_NV"/>
+        <enum value="0x854E" name="GL_NUM_GENERAL_COMBINERS_NV"/>
+        <enum value="0x854F" name="GL_COLOR_SUM_CLAMP_NV"/>
+        <enum value="0x8550" name="GL_COMBINER0_NV"/>
+        <enum value="0x8551" name="GL_COMBINER1_NV"/>
+        <enum value="0x8552" name="GL_COMBINER2_NV"/>
+        <enum value="0x8553" name="GL_COMBINER3_NV"/>
+        <enum value="0x8554" name="GL_COMBINER4_NV"/>
+        <enum value="0x8555" name="GL_COMBINER5_NV"/>
+        <enum value="0x8556" name="GL_COMBINER6_NV"/>
+        <enum value="0x8557" name="GL_COMBINER7_NV"/>
+        <enum value="0x8558" name="GL_PRIMITIVE_RESTART_NV"/>
+        <enum value="0x8559" name="GL_PRIMITIVE_RESTART_INDEX_NV"/>
+        <enum value="0x855A" name="GL_FOG_DISTANCE_MODE_NV"/>
+        <enum value="0x855B" name="GL_EYE_RADIAL_NV"/>
+        <enum value="0x855C" name="GL_EYE_PLANE_ABSOLUTE_NV"/>
+        <enum value="0x855D" name="GL_EMBOSS_LIGHT_NV"/>
+        <enum value="0x855E" name="GL_EMBOSS_CONSTANT_NV"/>
+        <enum value="0x855F" name="GL_EMBOSS_MAP_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8560" end="0x856F" vendor="ZiiLabs">
+        <enum value="0x8560" name="GL_RED_MIN_CLAMP_INGR"/>
+        <enum value="0x8561" name="GL_GREEN_MIN_CLAMP_INGR"/>
+        <enum value="0x8562" name="GL_BLUE_MIN_CLAMP_INGR"/>
+        <enum value="0x8563" name="GL_ALPHA_MIN_CLAMP_INGR"/>
+        <enum value="0x8564" name="GL_RED_MAX_CLAMP_INGR"/>
+        <enum value="0x8565" name="GL_GREEN_MAX_CLAMP_INGR"/>
+        <enum value="0x8566" name="GL_BLUE_MAX_CLAMP_INGR"/>
+        <enum value="0x8567" name="GL_ALPHA_MAX_CLAMP_INGR"/>
+        <enum value="0x8568" name="GL_INTERLACE_READ_INGR"/>
+            <unused start="0x8569" end="0x856F" vendor="ZiiLabs"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8570" end="0x859F" group="RegisterCombinerPname" vendor="AMD/NV">
+        <enum value="0x8570" name="GL_COMBINE"/>
+        <enum value="0x8570" name="GL_COMBINE_ARB"/>
+        <enum value="0x8570" name="GL_COMBINE_EXT"/>
+        <enum value="0x8571" name="GL_COMBINE_RGB"/>
+        <enum value="0x8571" name="GL_COMBINE_RGB_ARB"/>
+        <enum value="0x8571" name="GL_COMBINE_RGB_EXT"/>
+        <enum value="0x8572" name="GL_COMBINE_ALPHA"/>
+        <enum value="0x8572" name="GL_COMBINE_ALPHA_ARB"/>
+        <enum value="0x8572" name="GL_COMBINE_ALPHA_EXT"/>
+        <enum value="0x8573" name="GL_RGB_SCALE"/>
+        <enum value="0x8573" name="GL_RGB_SCALE_ARB"/>
+        <enum value="0x8573" name="GL_RGB_SCALE_EXT"/>
+        <enum value="0x8574" name="GL_ADD_SIGNED"/>
+        <enum value="0x8574" name="GL_ADD_SIGNED_ARB"/>
+        <enum value="0x8574" name="GL_ADD_SIGNED_EXT"/>
+        <enum value="0x8575" name="GL_INTERPOLATE"/>
+        <enum value="0x8575" name="GL_INTERPOLATE_ARB"/>
+        <enum value="0x8575" name="GL_INTERPOLATE_EXT"/>
+        <enum value="0x8576" name="GL_CONSTANT"/>
+        <enum value="0x8576" name="GL_CONSTANT_ARB"/>
+        <enum value="0x8576" name="GL_CONSTANT_EXT"/>
+        <enum value="0x8576" name="GL_CONSTANT_NV"/>
+        <enum value="0x8577" name="GL_PRIMARY_COLOR"/>
+        <enum value="0x8577" name="GL_PRIMARY_COLOR_ARB"/>
+        <enum value="0x8577" name="GL_PRIMARY_COLOR_EXT"/>
+        <enum value="0x8578" name="GL_PREVIOUS"/>
+        <enum value="0x8578" name="GL_PREVIOUS_ARB"/>
+        <enum value="0x8578" name="GL_PREVIOUS_EXT"/>
+            <unused start="0x8579" end="0x857F" comment="Additional combiner enums only"/>
+        <enum value="0x8580" name="GL_SOURCE0_RGB"/>
+        <enum value="0x8580" name="GL_SOURCE0_RGB_ARB"/>
+        <enum value="0x8580" name="GL_SOURCE0_RGB_EXT"/>
+        <enum value="0x8580" name="GL_SRC0_RGB" alias="GL_SOURCE0_RGB"/>
+        <enum value="0x8581" name="GL_SOURCE1_RGB"/>
+        <enum value="0x8581" name="GL_SOURCE1_RGB_ARB"/>
+        <enum value="0x8581" name="GL_SOURCE1_RGB_EXT"/>
+        <enum value="0x8581" name="GL_SRC1_RGB" alias="GL_SOURCE1_RGB"/>
+        <enum value="0x8582" name="GL_SOURCE2_RGB"/>
+        <enum value="0x8582" name="GL_SOURCE2_RGB_ARB"/>
+        <enum value="0x8582" name="GL_SOURCE2_RGB_EXT"/>
+        <enum value="0x8582" name="GL_SRC2_RGB" alias="GL_SOURCE2_RGB"/>
+        <enum value="0x8583" name="GL_SOURCE3_RGB_NV"/>
+            <unused start="0x8584" end="0x8587" comment="Additional combiner enums only"/>
+        <enum value="0x8588" name="GL_SOURCE0_ALPHA"/>
+        <enum value="0x8588" name="GL_SOURCE0_ALPHA_ARB"/>
+        <enum value="0x8588" name="GL_SOURCE0_ALPHA_EXT"/>
+        <enum value="0x8588" name="GL_SRC0_ALPHA" alias="GL_SOURCE0_ALPHA"/>
+        <enum value="0x8589" name="GL_SOURCE1_ALPHA"/>
+        <enum value="0x8589" name="GL_SOURCE1_ALPHA_ARB"/>
+        <enum value="0x8589" name="GL_SOURCE1_ALPHA_EXT"/>
+        <enum value="0x8589" name="GL_SRC1_ALPHA" alias="GL_SOURCE1_ALPHA"/>
+        <enum value="0x8589" name="GL_SRC1_ALPHA_EXT"/>
+        <enum value="0x858A" name="GL_SOURCE2_ALPHA"/>
+        <enum value="0x858A" name="GL_SOURCE2_ALPHA_ARB"/>
+        <enum value="0x858A" name="GL_SOURCE2_ALPHA_EXT"/>
+        <enum value="0x858A" name="GL_SRC2_ALPHA" alias="GL_SOURCE2_ALPHA"/>
+        <enum value="0x858B" name="GL_SOURCE3_ALPHA_NV"/>
+            <unused start="0x858C" end="0x858F" comment="Additional combiner enums only"/>
+        <enum value="0x8590" name="GL_OPERAND0_RGB"/>
+        <enum value="0x8590" name="GL_OPERAND0_RGB_ARB"/>
+        <enum value="0x8590" name="GL_OPERAND0_RGB_EXT"/>
+        <enum value="0x8591" name="GL_OPERAND1_RGB"/>
+        <enum value="0x8591" name="GL_OPERAND1_RGB_ARB"/>
+        <enum value="0x8591" name="GL_OPERAND1_RGB_EXT"/>
+        <enum value="0x8592" name="GL_OPERAND2_RGB"/>
+        <enum value="0x8592" name="GL_OPERAND2_RGB_ARB"/>
+        <enum value="0x8592" name="GL_OPERAND2_RGB_EXT"/>
+        <enum value="0x8593" name="GL_OPERAND3_RGB_NV"/>
+            <unused start="0x8594" end="0x8597" comment="Additional combiner enums only"/>
+        <enum value="0x8598" name="GL_OPERAND0_ALPHA"/>
+        <enum value="0x8598" name="GL_OPERAND0_ALPHA_ARB"/>
+        <enum value="0x8598" name="GL_OPERAND0_ALPHA_EXT"/>
+        <enum value="0x8599" name="GL_OPERAND1_ALPHA"/>
+        <enum value="0x8599" name="GL_OPERAND1_ALPHA_ARB"/>
+        <enum value="0x8599" name="GL_OPERAND1_ALPHA_EXT"/>
+        <enum value="0x859A" name="GL_OPERAND2_ALPHA"/>
+        <enum value="0x859A" name="GL_OPERAND2_ALPHA_ARB"/>
+        <enum value="0x859A" name="GL_OPERAND2_ALPHA_EXT"/>
+        <enum value="0x859B" name="GL_OPERAND3_ALPHA_NV"/>
+            <unused start="0x859C" end="0x859F" comment="Additional combiner enums only"/>
+    </enums>
+
+    <enums namespace="GL" start="0x85A0" end="0x85AF" vendor="SGI">
+        <enum value="0x85A0" name="GL_PACK_SUBSAMPLE_RATE_SGIX"/>
+        <enum value="0x85A1" name="GL_UNPACK_SUBSAMPLE_RATE_SGIX"/>
+        <enum value="0x85A2" name="GL_PIXEL_SUBSAMPLE_4444_SGIX"/>
+        <enum value="0x85A3" name="GL_PIXEL_SUBSAMPLE_2424_SGIX"/>
+        <enum value="0x85A4" name="GL_PIXEL_SUBSAMPLE_4242_SGIX"/>
+            <unused start="0x85A5" end="0x85AD" comment="Incomplete extension SGIS_color_range"/>
+            <!-- <enum value="0x85A5" name="GL_EXTENDED_RANGE_SGIS"/> -->
+            <!-- <enum value="0x85A6" name="GL_MIN_RED_SGIS"/> -->
+            <!-- <enum value="0x85A7" name="GL_MAX_RED_SGIS"/> -->
+            <!-- <enum value="0x85A8" name="GL_MIN_GREEN_SGIS"/> -->
+            <!-- <enum value="0x85A9" name="GL_MAX_GREEN_SGIS"/> -->
+            <!-- <enum value="0x85AA" name="GL_MIN_BLUE_SGIS"/> -->
+            <!-- <enum value="0x85AB" name="GL_MAX_BLUE_SGIS"/> -->
+            <!-- <enum value="0x85AC" name="GL_MIN_ALPHA_SGIS"/> -->
+            <!-- <enum value="0x85AD" name="GL_MAX_ALPHA_SGIS"/> -->
+        <enum value="0x85AE" name="GL_PERTURB_EXT"/>
+        <enum value="0x85AF" name="GL_TEXTURE_NORMAL_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x85B0" end="0x85BF" vendor="APPLE">
+        <enum value="0x85B0" name="GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE"/>
+        <enum value="0x85B1" name="GL_TRANSFORM_HINT_APPLE"/>
+        <enum value="0x85B2" name="GL_UNPACK_CLIENT_STORAGE_APPLE"/>
+        <enum value="0x85B3" name="GL_BUFFER_OBJECT_APPLE"/>
+        <enum value="0x85B4" name="GL_STORAGE_CLIENT_APPLE"/>
+        <enum value="0x85B5" name="GL_VERTEX_ARRAY_BINDING"/>
+        <enum value="0x85B5" name="GL_VERTEX_ARRAY_BINDING_APPLE"/>
+        <enum value="0x85B5" name="GL_VERTEX_ARRAY_BINDING_OES"/>
+            <unused start="0x85B6" vendor="APPLE" comment="Unknown extension (Khronos bug 632)"/>
+            <!-- <enum value="0x85B6" name="GL_TEXTURE_MINIMIZE_STORAGE_APPLE"/> -->
+        <enum value="0x85B7" name="GL_TEXTURE_RANGE_LENGTH_APPLE"/>
+        <enum value="0x85B8" name="GL_TEXTURE_RANGE_POINTER_APPLE"/>
+        <enum value="0x85B9" name="GL_YCBCR_422_APPLE"/>
+        <enum value="0x85BA" name="GL_UNSIGNED_SHORT_8_8_APPLE"/>
+        <enum value="0x85BA" name="GL_UNSIGNED_SHORT_8_8_MESA"/>
+        <enum value="0x85BB" name="GL_UNSIGNED_SHORT_8_8_REV_APPLE"/>
+        <enum value="0x85BB" name="GL_UNSIGNED_SHORT_8_8_REV_MESA"/>
+        <enum value="0x85BC" name="GL_TEXTURE_STORAGE_HINT_APPLE"/>
+        <enum value="0x85BD" name="GL_STORAGE_PRIVATE_APPLE"/>
+        <enum value="0x85BE" name="GL_STORAGE_CACHED_APPLE"/>
+        <enum value="0x85BF" name="GL_STORAGE_SHARED_APPLE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x85C0" end="0x85CF" vendor="SUN">
+        <enum value="0x85C0" name="GL_REPLACEMENT_CODE_ARRAY_SUN"/>
+        <enum value="0x85C1" name="GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN"/>
+        <enum value="0x85C2" name="GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN"/>
+        <enum value="0x85C3" name="GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN"/>
+        <enum value="0x85C4" name="GL_R1UI_V3F_SUN"/>
+        <enum value="0x85C5" name="GL_R1UI_C4UB_V3F_SUN"/>
+        <enum value="0x85C6" name="GL_R1UI_C3F_V3F_SUN"/>
+        <enum value="0x85C7" name="GL_R1UI_N3F_V3F_SUN"/>
+        <enum value="0x85C8" name="GL_R1UI_C4F_N3F_V3F_SUN"/>
+        <enum value="0x85C9" name="GL_R1UI_T2F_V3F_SUN"/>
+        <enum value="0x85CA" name="GL_R1UI_T2F_N3F_V3F_SUN"/>
+        <enum value="0x85CB" name="GL_R1UI_T2F_C4F_N3F_V3F_SUN"/>
+        <enum value="0x85CC" name="GL_SLICE_ACCUM_SUN"/>
+            <unused start="0x85CD" end="0x85CF" vendor="SUN"/>
+    </enums>
+
+    <enums namespace="GL" start="0x85D0" end="0x85DF" vendor="ZiiLabs" comment="3Dlabs private extension for Autodesk">
+            <unused start="0x85D0" end="0x85D1" comment="Unknown 3Dlabs private extension for Autodesk (but we know the enum values)"/>
+            <!-- <enum value="0x85D0" name="GL_FACET_NORMAL_AUTODESK"/> -->
+            <!-- <enum value="0x85D1" name="GL_FACET_NORMAL_ARRAY_AUTODESK"/> -->
+            <unused start="0x85D2" end="0x85DF" vendor="ZiiLabs"/>
+    </enums>
+
+    <enums namespace="GL" start="0x85E0" end="0x85FF" vendor="SGI">
+            <unused start="0x85E0" end="0x85FB" comment="Incomplete extension SGIX_texture_range"/>
+            <!-- <enum value="0x85E0" name="GL_RGB_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E1" name="GL_RGBA_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E2" name="GL_ALPHA_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E3" name="GL_LUMINANCE_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E4" name="GL_INTENSITY_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E5" name="GL_LUMINANCE_ALPHA_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E6" name="GL_RGB16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E7" name="GL_RGBA16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E8" name="GL_ALPHA16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85E9" name="GL_LUMINANCE16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85EA" name="GL_INTENSITY16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85EB" name="GL_LUMINANCE16_ALPHA16_SIGNED_SGIX"/> -->
+            <!-- <enum value="0x85EC" name="GL_RGB_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85ED" name="GL_RGBA_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85EE" name="GL_ALPHA_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85EF" name="GL_LUMINANCE_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F0" name="GL_INTENSITY_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F1" name="GL_LUMINANCE_ALPHA_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F2" name="GL_RGB16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F3" name="GL_RGBA16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F4" name="GL_ALPHA16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F5" name="GL_LUMINANCE16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F6" name="GL_INTENSITY16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F7" name="GL_LUMINANCE16_ALPHA16_EXTENDED_RANGE_SGIX"/> -->
+            <!-- <enum value="0x85F8" name="GL_MIN_LUMINANCE_SGIS"/> -->
+            <!-- <enum value="0x85F9" name="GL_MAX_LUMINANCE_SGIS"/> -->
+            <!-- <enum value="0x85FA" name="GL_MIN_INTENSITY_SGIS"/> -->
+            <!-- <enum value="0x85FB" name="GL_MAX_INTENSITY_SGIS"/> -->
+            <unused start="0x85FC" end="0x85FF" vendor="SGI"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8600" end="0x861F" vendor="SUN">
+            <unused start="0x8600" end="0x8613" vendor="SUN"/>
+        <enum value="0x8614" name="GL_QUAD_MESH_SUN"/>
+        <enum value="0x8615" name="GL_TRIANGLE_MESH_SUN"/>
+            <unused start="0x8614" end="0x861F" vendor="SUN"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8620" end="0x867F" vendor="NV">
+        <enum value="0x8620" name="GL_VERTEX_PROGRAM_ARB"/>
+        <enum value="0x8620" name="GL_VERTEX_PROGRAM_NV"/>
+        <enum value="0x8621" name="GL_VERTEX_STATE_PROGRAM_NV"/>
+        <enum value="0x8622" name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+        <enum value="0x8622" name="GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB"/>
+        <enum value="0x8623" name="GL_ATTRIB_ARRAY_SIZE_NV"/>
+        <enum value="0x8623" name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+        <enum value="0x8623" name="GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB"/>
+        <enum value="0x8624" name="GL_ATTRIB_ARRAY_STRIDE_NV"/>
+        <enum value="0x8624" name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+        <enum value="0x8624" name="GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB"/>
+        <enum value="0x8625" name="GL_ATTRIB_ARRAY_TYPE_NV"/>
+        <enum value="0x8625" name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+        <enum value="0x8625" name="GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB"/>
+        <enum value="0x8626" name="GL_CURRENT_ATTRIB_NV"/>
+        <enum value="0x8626" name="GL_CURRENT_VERTEX_ATTRIB"/>
+        <enum value="0x8626" name="GL_CURRENT_VERTEX_ATTRIB_ARB"/>
+        <enum value="0x8627" name="GL_PROGRAM_LENGTH_ARB"/>
+        <enum value="0x8627" name="GL_PROGRAM_LENGTH_NV"/>
+        <enum value="0x8628" name="GL_PROGRAM_STRING_ARB"/>
+        <enum value="0x8628" name="GL_PROGRAM_STRING_NV"/>
+        <enum value="0x8629" name="GL_MODELVIEW_PROJECTION_NV"/>
+        <enum value="0x862A" name="GL_IDENTITY_NV"/>
+        <enum value="0x862B" name="GL_INVERSE_NV"/>
+        <enum value="0x862C" name="GL_TRANSPOSE_NV"/>
+        <enum value="0x862D" name="GL_INVERSE_TRANSPOSE_NV"/>
+        <enum value="0x862E" name="GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB"/>
+        <enum value="0x862E" name="GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV"/>
+        <enum value="0x862F" name="GL_MAX_PROGRAM_MATRICES_ARB"/>
+        <enum value="0x862F" name="GL_MAX_TRACK_MATRICES_NV"/>
+        <enum value="0x8630" name="GL_MATRIX0_NV"/>
+        <enum value="0x8631" name="GL_MATRIX1_NV"/>
+        <enum value="0x8632" name="GL_MATRIX2_NV"/>
+        <enum value="0x8633" name="GL_MATRIX3_NV"/>
+        <enum value="0x8634" name="GL_MATRIX4_NV"/>
+        <enum value="0x8635" name="GL_MATRIX5_NV"/>
+        <enum value="0x8636" name="GL_MATRIX6_NV"/>
+        <enum value="0x8637" name="GL_MATRIX7_NV"/>
+            <unused start="0x8638" end="0x863F" comment="Reserved for MATRIX{8-15}_NV"/>
+            <!-- <enum value="0x8638" name="GL_MATRIX8_NV"/> -->
+            <!-- <enum value="0x8639" name="GL_MATRIX9_NV"/> -->
+            <!-- <enum value="0x863A" name="GL_MATRIX10_NV"/> -->
+            <!-- <enum value="0x863B" name="GL_MATRIX11_NV"/> -->
+            <!-- <enum value="0x863C" name="GL_MATRIX12_NV"/> -->
+            <!-- <enum value="0x863D" name="GL_MATRIX13_NV"/> -->
+            <!-- <enum value="0x863E" name="GL_MATRIX14_NV"/> -->
+            <!-- <enum value="0x863F" name="GL_MATRIX15_NV"/> -->
+        <enum value="0x8640" name="GL_CURRENT_MATRIX_STACK_DEPTH_ARB"/>
+        <enum value="0x8640" name="GL_CURRENT_MATRIX_STACK_DEPTH_NV"/>
+        <enum value="0x8641" name="GL_CURRENT_MATRIX_ARB"/>
+        <enum value="0x8641" name="GL_CURRENT_MATRIX_NV"/>
+        <enum value="0x8642" name="GL_VERTEX_PROGRAM_POINT_SIZE"/>
+        <enum value="0x8642" name="GL_VERTEX_PROGRAM_POINT_SIZE_ARB"/>
+        <enum value="0x8642" name="GL_VERTEX_PROGRAM_POINT_SIZE_NV"/>
+        <enum value="0x8642" name="GL_PROGRAM_POINT_SIZE" alias="GL_VERTEX_PROGRAM_POINT_SIZE"/>
+        <enum value="0x8642" name="GL_PROGRAM_POINT_SIZE_ARB"/>
+        <enum value="0x8642" name="GL_PROGRAM_POINT_SIZE_EXT"/>
+        <enum value="0x8643" name="GL_VERTEX_PROGRAM_TWO_SIDE"/>
+        <enum value="0x8643" name="GL_VERTEX_PROGRAM_TWO_SIDE_ARB"/>
+        <enum value="0x8643" name="GL_VERTEX_PROGRAM_TWO_SIDE_NV"/>
+        <enum value="0x8644" name="GL_PROGRAM_PARAMETER_NV"/>
+        <enum value="0x8645" name="GL_ATTRIB_ARRAY_POINTER_NV"/>
+        <enum value="0x8645" name="GL_VERTEX_ATTRIB_ARRAY_POINTER"/>
+        <enum value="0x8645" name="GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB"/>
+        <enum value="0x8646" name="GL_PROGRAM_TARGET_NV"/>
+        <enum value="0x8647" name="GL_PROGRAM_RESIDENT_NV"/>
+        <enum value="0x8648" name="GL_TRACK_MATRIX_NV"/>
+        <enum value="0x8649" name="GL_TRACK_MATRIX_TRANSFORM_NV"/>
+        <enum value="0x864A" name="GL_VERTEX_PROGRAM_BINDING_NV"/>
+        <enum value="0x864B" name="GL_PROGRAM_ERROR_POSITION_ARB"/>
+        <enum value="0x864B" name="GL_PROGRAM_ERROR_POSITION_NV"/>
+        <enum value="0x864C" name="GL_OFFSET_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x864D" name="GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV"/>
+        <enum value="0x864E" name="GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x864F" name="GL_DEPTH_CLAMP"/>
+        <enum value="0x864F" name="GL_DEPTH_CLAMP_NV"/>
+        <enum value="0x864F" name="GL_DEPTH_CLAMP_EXT"/>
+        <enum value="0x8650" name="GL_VERTEX_ATTRIB_ARRAY0_NV"/>
+        <enum value="0x8651" name="GL_VERTEX_ATTRIB_ARRAY1_NV"/>
+        <enum value="0x8652" name="GL_VERTEX_ATTRIB_ARRAY2_NV"/>
+        <enum value="0x8653" name="GL_VERTEX_ATTRIB_ARRAY3_NV"/>
+        <enum value="0x8654" name="GL_VERTEX_ATTRIB_ARRAY4_NV"/>
+        <enum value="0x8655" name="GL_VERTEX_ATTRIB_ARRAY5_NV"/>
+        <enum value="0x8656" name="GL_VERTEX_ATTRIB_ARRAY6_NV"/>
+        <enum value="0x8657" name="GL_VERTEX_ATTRIB_ARRAY7_NV"/>
+        <enum value="0x8658" name="GL_VERTEX_ATTRIB_ARRAY8_NV"/>
+        <enum value="0x8659" name="GL_VERTEX_ATTRIB_ARRAY9_NV"/>
+        <enum value="0x865A" name="GL_VERTEX_ATTRIB_ARRAY10_NV"/>
+        <enum value="0x865B" name="GL_VERTEX_ATTRIB_ARRAY11_NV"/>
+        <enum value="0x865C" name="GL_VERTEX_ATTRIB_ARRAY12_NV"/>
+        <enum value="0x865D" name="GL_VERTEX_ATTRIB_ARRAY13_NV"/>
+        <enum value="0x865E" name="GL_VERTEX_ATTRIB_ARRAY14_NV"/>
+        <enum value="0x865F" name="GL_VERTEX_ATTRIB_ARRAY15_NV"/>
+        <enum value="0x8660" name="GL_MAP1_VERTEX_ATTRIB0_4_NV"/>
+        <enum value="0x8661" name="GL_MAP1_VERTEX_ATTRIB1_4_NV"/>
+        <enum value="0x8662" name="GL_MAP1_VERTEX_ATTRIB2_4_NV"/>
+        <enum value="0x8663" name="GL_MAP1_VERTEX_ATTRIB3_4_NV"/>
+        <enum value="0x8664" name="GL_MAP1_VERTEX_ATTRIB4_4_NV"/>
+        <enum value="0x8665" name="GL_MAP1_VERTEX_ATTRIB5_4_NV"/>
+        <enum value="0x8666" name="GL_MAP1_VERTEX_ATTRIB6_4_NV"/>
+        <enum value="0x8667" name="GL_MAP1_VERTEX_ATTRIB7_4_NV"/>
+        <enum value="0x8668" name="GL_MAP1_VERTEX_ATTRIB8_4_NV"/>
+        <enum value="0x8669" name="GL_MAP1_VERTEX_ATTRIB9_4_NV"/>
+        <enum value="0x866A" name="GL_MAP1_VERTEX_ATTRIB10_4_NV"/>
+        <enum value="0x866B" name="GL_MAP1_VERTEX_ATTRIB11_4_NV"/>
+        <enum value="0x866C" name="GL_MAP1_VERTEX_ATTRIB12_4_NV"/>
+        <enum value="0x866D" name="GL_MAP1_VERTEX_ATTRIB13_4_NV"/>
+        <enum value="0x866E" name="GL_MAP1_VERTEX_ATTRIB14_4_NV"/>
+        <enum value="0x866F" name="GL_MAP1_VERTEX_ATTRIB15_4_NV"/>
+        <enum value="0x8670" name="GL_MAP2_VERTEX_ATTRIB0_4_NV"/>
+        <enum value="0x8671" name="GL_MAP2_VERTEX_ATTRIB1_4_NV"/>
+        <enum value="0x8672" name="GL_MAP2_VERTEX_ATTRIB2_4_NV"/>
+        <enum value="0x8673" name="GL_MAP2_VERTEX_ATTRIB3_4_NV"/>
+        <enum value="0x8674" name="GL_MAP2_VERTEX_ATTRIB4_4_NV"/>
+        <enum value="0x8675" name="GL_MAP2_VERTEX_ATTRIB5_4_NV"/>
+        <enum value="0x8676" name="GL_MAP2_VERTEX_ATTRIB6_4_NV"/>
+        <enum value="0x8677" name="GL_MAP2_VERTEX_ATTRIB7_4_NV"/>
+        <enum value="0x8677" name="GL_PROGRAM_BINDING_ARB" comment="NOT an alias. Accidental reuse of GL_MAP2_VERTEX_ATTRIB7_4_NV"/>
+        <enum value="0x8678" name="GL_MAP2_VERTEX_ATTRIB8_4_NV"/>
+        <enum value="0x8679" name="GL_MAP2_VERTEX_ATTRIB9_4_NV"/>
+        <enum value="0x867A" name="GL_MAP2_VERTEX_ATTRIB10_4_NV"/>
+        <enum value="0x867B" name="GL_MAP2_VERTEX_ATTRIB11_4_NV"/>
+        <enum value="0x867C" name="GL_MAP2_VERTEX_ATTRIB12_4_NV"/>
+        <enum value="0x867D" name="GL_MAP2_VERTEX_ATTRIB13_4_NV"/>
+        <enum value="0x867E" name="GL_MAP2_VERTEX_ATTRIB14_4_NV"/>
+        <enum value="0x867F" name="GL_MAP2_VERTEX_ATTRIB15_4_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8680" end="0x869F" vendor="Pixelfusion">
+            <unused start="0x8680" end="0x869F" vendor="Pixelfusion"/>
+    </enums>
+
+    <enums namespace="GL" start="0x86A0" end="0x86AF" vendor="ARB">
+        <enum value="0x86A0" name="GL_TEXTURE_COMPRESSED_IMAGE_SIZE"/>
+        <enum value="0x86A0" name="GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB"/>
+        <enum value="0x86A1" name="GL_TEXTURE_COMPRESSED"/>
+        <enum value="0x86A1" name="GL_TEXTURE_COMPRESSED_ARB"/>
+        <enum value="0x86A2" name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+        <enum value="0x86A2" name="GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB"/>
+        <enum value="0x86A3" name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+        <enum value="0x86A3" name="GL_COMPRESSED_TEXTURE_FORMATS_ARB"/>
+        <enum value="0x86A4" name="GL_MAX_VERTEX_UNITS_ARB"/>
+        <enum value="0x86A4" name="GL_MAX_VERTEX_UNITS_OES"/>
+        <enum value="0x86A5" name="GL_ACTIVE_VERTEX_UNITS_ARB"/>
+        <enum value="0x86A6" name="GL_WEIGHT_SUM_UNITY_ARB"/>
+        <enum value="0x86A7" name="GL_VERTEX_BLEND_ARB"/>
+        <enum value="0x86A8" name="GL_CURRENT_WEIGHT_ARB"/>
+        <enum value="0x86A9" name="GL_WEIGHT_ARRAY_TYPE_ARB"/>
+        <enum value="0x86A9" name="GL_WEIGHT_ARRAY_TYPE_OES"/>
+        <enum value="0x86AA" name="GL_WEIGHT_ARRAY_STRIDE_ARB"/>
+        <enum value="0x86AA" name="GL_WEIGHT_ARRAY_STRIDE_OES"/>
+        <enum value="0x86AB" name="GL_WEIGHT_ARRAY_SIZE_ARB"/>
+        <enum value="0x86AB" name="GL_WEIGHT_ARRAY_SIZE_OES"/>
+        <enum value="0x86AC" name="GL_WEIGHT_ARRAY_POINTER_ARB"/>
+        <enum value="0x86AC" name="GL_WEIGHT_ARRAY_POINTER_OES"/>
+        <enum value="0x86AD" name="GL_WEIGHT_ARRAY_ARB"/>
+        <enum value="0x86AD" name="GL_WEIGHT_ARRAY_OES"/>
+        <enum value="0x86AE" name="GL_DOT3_RGB"/>
+        <enum value="0x86AE" name="GL_DOT3_RGB_ARB"/>
+        <enum value="0x86AF" name="GL_DOT3_RGBA"/>
+        <enum value="0x86AF" name="GL_DOT3_RGBA_ARB"/>
+        <enum value="0x86AF" name="GL_DOT3_RGBA_IMG"/>
+    </enums>
+
+    <enums namespace="GL" start="0x86B0" end="0x86BF" vendor="3DFX">
+        <enum value="0x86B0" name="GL_COMPRESSED_RGB_FXT1_3DFX"/>
+        <enum value="0x86B1" name="GL_COMPRESSED_RGBA_FXT1_3DFX"/>
+        <enum value="0x86B2" name="GL_MULTISAMPLE_3DFX"/>
+        <enum value="0x86B3" name="GL_SAMPLE_BUFFERS_3DFX"/>
+        <enum value="0x86B4" name="GL_SAMPLES_3DFX"/>
+            <unused start="0x86B5" end="0x86BF" vendor="3DFX"/>
+    </enums>
+
+    <enums namespace="GL" start="0x86C0" end="0x871F" vendor="NV">
+        <enum value="0x86C0" name="GL_EVAL_2D_NV"/>
+        <enum value="0x86C1" name="GL_EVAL_TRIANGULAR_2D_NV"/>
+        <enum value="0x86C2" name="GL_MAP_TESSELLATION_NV"/>
+        <enum value="0x86C3" name="GL_MAP_ATTRIB_U_ORDER_NV"/>
+        <enum value="0x86C4" name="GL_MAP_ATTRIB_V_ORDER_NV"/>
+        <enum value="0x86C5" name="GL_EVAL_FRACTIONAL_TESSELLATION_NV"/>
+        <enum value="0x86C6" name="GL_EVAL_VERTEX_ATTRIB0_NV"/>
+        <enum value="0x86C7" name="GL_EVAL_VERTEX_ATTRIB1_NV"/>
+        <enum value="0x86C8" name="GL_EVAL_VERTEX_ATTRIB2_NV"/>
+        <enum value="0x86C9" name="GL_EVAL_VERTEX_ATTRIB3_NV"/>
+        <enum value="0x86CA" name="GL_EVAL_VERTEX_ATTRIB4_NV"/>
+        <enum value="0x86CB" name="GL_EVAL_VERTEX_ATTRIB5_NV"/>
+        <enum value="0x86CC" name="GL_EVAL_VERTEX_ATTRIB6_NV"/>
+        <enum value="0x86CD" name="GL_EVAL_VERTEX_ATTRIB7_NV"/>
+        <enum value="0x86CE" name="GL_EVAL_VERTEX_ATTRIB8_NV"/>
+        <enum value="0x86CF" name="GL_EVAL_VERTEX_ATTRIB9_NV"/>
+        <enum value="0x86D0" name="GL_EVAL_VERTEX_ATTRIB10_NV"/>
+        <enum value="0x86D1" name="GL_EVAL_VERTEX_ATTRIB11_NV"/>
+        <enum value="0x86D2" name="GL_EVAL_VERTEX_ATTRIB12_NV"/>
+        <enum value="0x86D3" name="GL_EVAL_VERTEX_ATTRIB13_NV"/>
+        <enum value="0x86D4" name="GL_EVAL_VERTEX_ATTRIB14_NV"/>
+        <enum value="0x86D5" name="GL_EVAL_VERTEX_ATTRIB15_NV"/>
+        <enum value="0x86D6" name="GL_MAX_MAP_TESSELLATION_NV"/>
+        <enum value="0x86D7" name="GL_MAX_RATIONAL_EVAL_ORDER_NV"/>
+        <enum value="0x86D8" name="GL_MAX_PROGRAM_PATCH_ATTRIBS_NV"/>
+        <enum value="0x86D9" name="GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV"/>
+        <enum value="0x86DA" name="GL_UNSIGNED_INT_S8_S8_8_8_NV"/>
+        <enum value="0x86DB" name="GL_UNSIGNED_INT_8_8_S8_S8_REV_NV"/>
+        <enum value="0x86DC" name="GL_DSDT_MAG_INTENSITY_NV"/>
+        <enum value="0x86DD" name="GL_SHADER_CONSISTENT_NV"/>
+        <enum value="0x86DE" name="GL_TEXTURE_SHADER_NV"/>
+        <enum value="0x86DF" name="GL_SHADER_OPERATION_NV"/>
+        <enum value="0x86E0" name="GL_CULL_MODES_NV"/>
+        <enum value="0x86E1" name="GL_OFFSET_TEXTURE_MATRIX_NV"/>
+        <enum value="0x86E1" name="GL_OFFSET_TEXTURE_2D_MATRIX_NV" alias="GL_OFFSET_TEXTURE_MATRIX_NV"/>
+        <enum value="0x86E2" name="GL_OFFSET_TEXTURE_SCALE_NV"/>
+        <enum value="0x86E2" name="GL_OFFSET_TEXTURE_2D_SCALE_NV" alias="GL_OFFSET_TEXTURE_SCALE_NV"/>
+        <enum value="0x86E3" name="GL_OFFSET_TEXTURE_BIAS_NV"/>
+        <enum value="0x86E3" name="GL_OFFSET_TEXTURE_2D_BIAS_NV" alias="GL_OFFSET_TEXTURE_BIAS_NV"/>
+        <enum value="0x86E4" name="GL_PREVIOUS_TEXTURE_INPUT_NV"/>
+        <enum value="0x86E5" name="GL_CONST_EYE_NV"/>
+        <enum value="0x86E6" name="GL_PASS_THROUGH_NV"/>
+        <enum value="0x86E7" name="GL_CULL_FRAGMENT_NV"/>
+        <enum value="0x86E8" name="GL_OFFSET_TEXTURE_2D_NV"/>
+        <enum value="0x86E9" name="GL_DEPENDENT_AR_TEXTURE_2D_NV"/>
+        <enum value="0x86EA" name="GL_DEPENDENT_GB_TEXTURE_2D_NV"/>
+        <enum value="0x86EB" name="GL_SURFACE_STATE_NV"/>
+        <enum value="0x86EC" name="GL_DOT_PRODUCT_NV"/>
+        <enum value="0x86ED" name="GL_DOT_PRODUCT_DEPTH_REPLACE_NV"/>
+        <enum value="0x86EE" name="GL_DOT_PRODUCT_TEXTURE_2D_NV"/>
+        <enum value="0x86EF" name="GL_DOT_PRODUCT_TEXTURE_3D_NV"/>
+        <enum value="0x86F0" name="GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV"/>
+        <enum value="0x86F1" name="GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV"/>
+        <enum value="0x86F2" name="GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV"/>
+        <enum value="0x86F3" name="GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV"/>
+        <enum value="0x86F4" name="GL_HILO_NV"/>
+        <enum value="0x86F5" name="GL_DSDT_NV"/>
+        <enum value="0x86F6" name="GL_DSDT_MAG_NV"/>
+        <enum value="0x86F7" name="GL_DSDT_MAG_VIB_NV"/>
+        <enum value="0x86F8" name="GL_HILO16_NV"/>
+        <enum value="0x86F9" name="GL_SIGNED_HILO_NV"/>
+        <enum value="0x86FA" name="GL_SIGNED_HILO16_NV"/>
+        <enum value="0x86FB" name="GL_SIGNED_RGBA_NV"/>
+        <enum value="0x86FC" name="GL_SIGNED_RGBA8_NV"/>
+        <enum value="0x86FD" name="GL_SURFACE_REGISTERED_NV"/>
+        <enum value="0x86FE" name="GL_SIGNED_RGB_NV"/>
+        <enum value="0x86FF" name="GL_SIGNED_RGB8_NV"/>
+        <enum value="0x8700" name="GL_SURFACE_MAPPED_NV"/>
+        <enum value="0x8701" name="GL_SIGNED_LUMINANCE_NV"/>
+        <enum value="0x8702" name="GL_SIGNED_LUMINANCE8_NV"/>
+        <enum value="0x8703" name="GL_SIGNED_LUMINANCE_ALPHA_NV"/>
+        <enum value="0x8704" name="GL_SIGNED_LUMINANCE8_ALPHA8_NV"/>
+        <enum value="0x8705" name="GL_SIGNED_ALPHA_NV"/>
+        <enum value="0x8706" name="GL_SIGNED_ALPHA8_NV"/>
+        <enum value="0x8707" name="GL_SIGNED_INTENSITY_NV"/>
+        <enum value="0x8708" name="GL_SIGNED_INTENSITY8_NV"/>
+        <enum value="0x8709" name="GL_DSDT8_NV"/>
+        <enum value="0x870A" name="GL_DSDT8_MAG8_NV"/>
+        <enum value="0x870B" name="GL_DSDT8_MAG8_INTENSITY8_NV"/>
+        <enum value="0x870C" name="GL_SIGNED_RGB_UNSIGNED_ALPHA_NV"/>
+        <enum value="0x870D" name="GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV"/>
+        <enum value="0x870E" name="GL_HI_SCALE_NV"/>
+        <enum value="0x870F" name="GL_LO_SCALE_NV"/>
+        <enum value="0x8710" name="GL_DS_SCALE_NV"/>
+        <enum value="0x8711" name="GL_DT_SCALE_NV"/>
+        <enum value="0x8712" name="GL_MAGNITUDE_SCALE_NV"/>
+        <enum value="0x8713" name="GL_VIBRANCE_SCALE_NV"/>
+        <enum value="0x8714" name="GL_HI_BIAS_NV"/>
+        <enum value="0x8715" name="GL_LO_BIAS_NV"/>
+        <enum value="0x8716" name="GL_DS_BIAS_NV"/>
+        <enum value="0x8717" name="GL_DT_BIAS_NV"/>
+        <enum value="0x8718" name="GL_MAGNITUDE_BIAS_NV"/>
+        <enum value="0x8719" name="GL_VIBRANCE_BIAS_NV"/>
+        <enum value="0x871A" name="GL_TEXTURE_BORDER_VALUES_NV"/>
+        <enum value="0x871B" name="GL_TEXTURE_HI_SIZE_NV"/>
+        <enum value="0x871C" name="GL_TEXTURE_LO_SIZE_NV"/>
+        <enum value="0x871D" name="GL_TEXTURE_DS_SIZE_NV"/>
+        <enum value="0x871E" name="GL_TEXTURE_DT_SIZE_NV"/>
+        <enum value="0x871F" name="GL_TEXTURE_MAG_SIZE_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8720" end="0x873F" vendor="ARB">
+            <unused start="0x8720" end="0x8721" comment="MODELVIEW0/1 already exist"/>
+        <enum value="0x8722" name="GL_MODELVIEW2_ARB"/>
+        <enum value="0x8723" name="GL_MODELVIEW3_ARB"/>
+        <enum value="0x8724" name="GL_MODELVIEW4_ARB"/>
+        <enum value="0x8725" name="GL_MODELVIEW5_ARB"/>
+        <enum value="0x8726" name="GL_MODELVIEW6_ARB"/>
+        <enum value="0x8727" name="GL_MODELVIEW7_ARB"/>
+        <enum value="0x8728" name="GL_MODELVIEW8_ARB"/>
+        <enum value="0x8729" name="GL_MODELVIEW9_ARB"/>
+        <enum value="0x872A" name="GL_MODELVIEW10_ARB"/>
+        <enum value="0x872B" name="GL_MODELVIEW11_ARB"/>
+        <enum value="0x872C" name="GL_MODELVIEW12_ARB"/>
+        <enum value="0x872D" name="GL_MODELVIEW13_ARB"/>
+        <enum value="0x872E" name="GL_MODELVIEW14_ARB"/>
+        <enum value="0x872F" name="GL_MODELVIEW15_ARB"/>
+        <enum value="0x8730" name="GL_MODELVIEW16_ARB"/>
+        <enum value="0x8731" name="GL_MODELVIEW17_ARB"/>
+        <enum value="0x8732" name="GL_MODELVIEW18_ARB"/>
+        <enum value="0x8733" name="GL_MODELVIEW19_ARB"/>
+        <enum value="0x8734" name="GL_MODELVIEW20_ARB"/>
+        <enum value="0x8735" name="GL_MODELVIEW21_ARB"/>
+        <enum value="0x8736" name="GL_MODELVIEW22_ARB"/>
+        <enum value="0x8737" name="GL_MODELVIEW23_ARB"/>
+        <enum value="0x8738" name="GL_MODELVIEW24_ARB"/>
+        <enum value="0x8739" name="GL_MODELVIEW25_ARB"/>
+        <enum value="0x873A" name="GL_MODELVIEW26_ARB"/>
+        <enum value="0x873B" name="GL_MODELVIEW27_ARB"/>
+        <enum value="0x873C" name="GL_MODELVIEW28_ARB"/>
+        <enum value="0x873D" name="GL_MODELVIEW29_ARB"/>
+        <enum value="0x873E" name="GL_MODELVIEW30_ARB"/>
+        <enum value="0x873F" name="GL_MODELVIEW31_ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8740" end="0x874F" vendor="AMD">
+        <enum value="0x8740" name="GL_DOT3_RGB_EXT"/>
+        <enum value="0x8740" name="GL_Z400_BINARY_AMD" comment="NOT an alias. Accidental reuse of GL_DOT3_RGB_EXT"/>
+        <enum value="0x8741" name="GL_DOT3_RGBA_EXT"/>
+        <enum value="0x8741" name="GL_PROGRAM_BINARY_LENGTH_OES" comment="NOT an alias. Accidental reuse of GL_DOT3_RGBA_EXT"/>
+        <enum value="0x8741" name="GL_PROGRAM_BINARY_LENGTH"/>
+        <enum value="0x8742" name="GL_MIRROR_CLAMP_ATI"/>
+        <enum value="0x8742" name="GL_MIRROR_CLAMP_EXT"/>
+        <enum value="0x8743" name="GL_MIRROR_CLAMP_TO_EDGE"/>
+        <enum value="0x8743" name="GL_MIRROR_CLAMP_TO_EDGE_ATI"/>
+        <enum value="0x8743" name="GL_MIRROR_CLAMP_TO_EDGE_EXT"/>
+        <enum value="0x8744" name="GL_MODULATE_ADD_ATI"/>
+        <enum value="0x8745" name="GL_MODULATE_SIGNED_ADD_ATI"/>
+        <enum value="0x8746" name="GL_MODULATE_SUBTRACT_ATI"/>
+            <unused start="0x8747" end="0x8749" vendor="AMD"/>
+        <enum value="0x874A" name="GL_SET_AMD"/>
+        <enum value="0x874B" name="GL_REPLACE_VALUE_AMD"/>
+        <enum value="0x874C" name="GL_STENCIL_OP_VALUE_AMD"/>
+        <enum value="0x874D" name="GL_STENCIL_BACK_OP_VALUE_AMD"/>
+        <enum value="0x874E" name="GL_VERTEX_ATTRIB_ARRAY_LONG"/>
+        <enum value="0x874F" name="GL_OCCLUSION_QUERY_EVENT_MASK_AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8750" end="0x875F" vendor="MESA">
+        <enum value="0x8750" name="GL_DEPTH_STENCIL_MESA"/>
+        <enum value="0x8751" name="GL_UNSIGNED_INT_24_8_MESA"/>
+        <enum value="0x8752" name="GL_UNSIGNED_INT_8_24_REV_MESA"/>
+        <enum value="0x8753" name="GL_UNSIGNED_SHORT_15_1_MESA"/>
+        <enum value="0x8754" name="GL_UNSIGNED_SHORT_1_15_REV_MESA"/>
+        <enum value="0x8755" name="GL_TRACE_MASK_MESA"/>
+        <enum value="0x8756" name="GL_TRACE_NAME_MESA"/>
+        <enum value="0x8757" name="GL_YCBCR_MESA"/>
+        <enum value="0x8758" name="GL_PACK_INVERT_MESA"/>
+        <enum value="0x8759" name="GL_DEBUG_OBJECT_MESA" comment="NOT an alias. Accidental reuse of GL_TEXTURE_1D_STACK_MESAX"/>
+        <enum value="0x8759" name="GL_TEXTURE_1D_STACK_MESAX"/>
+        <enum value="0x875A" name="GL_DEBUG_PRINT_MESA" comment="NOT an alias. Accidental reuse of GL_TEXTURE_2D_STACK_MESAX"/>
+        <enum value="0x875A" name="GL_TEXTURE_2D_STACK_MESAX"/>
+        <enum value="0x875B" name="GL_DEBUG_ASSERT_MESA" comment="NOT an alias. Accidental reuse of GL_PROXY_TEXTURE_1D_STACK_MESAX"/>
+        <enum value="0x875B" name="GL_PROXY_TEXTURE_1D_STACK_MESAX"/>
+        <enum value="0x875C" name="GL_PROXY_TEXTURE_2D_STACK_MESAX"/>
+        <enum value="0x875D" name="GL_TEXTURE_1D_STACK_BINDING_MESAX"/>
+        <enum value="0x875E" name="GL_TEXTURE_2D_STACK_BINDING_MESAX"/>
+        <enum value="0x875F" name="GL_PROGRAM_BINARY_FORMAT_MESA"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8760" end="0x883F" vendor="AMD">
+        <enum value="0x8760" name="GL_STATIC_ATI"/>
+        <enum value="0x8761" name="GL_DYNAMIC_ATI"/>
+        <enum value="0x8762" name="GL_PRESERVE_ATI"/>
+        <enum value="0x8763" name="GL_DISCARD_ATI"/>
+        <enum value="0x8764" name="GL_BUFFER_SIZE"/>
+        <enum value="0x8764" name="GL_BUFFER_SIZE_ARB"/>
+        <enum value="0x8764" name="GL_OBJECT_BUFFER_SIZE_ATI"/>
+        <enum value="0x8765" name="GL_BUFFER_USAGE"/>
+        <enum value="0x8765" name="GL_BUFFER_USAGE_ARB"/>
+        <enum value="0x8765" name="GL_OBJECT_BUFFER_USAGE_ATI"/>
+        <enum value="0x8766" name="GL_ARRAY_OBJECT_BUFFER_ATI"/>
+        <enum value="0x8767" name="GL_ARRAY_OBJECT_OFFSET_ATI"/>
+        <enum value="0x8768" name="GL_ELEMENT_ARRAY_ATI"/>
+        <enum value="0x8769" name="GL_ELEMENT_ARRAY_TYPE_ATI"/>
+        <enum value="0x876A" name="GL_ELEMENT_ARRAY_POINTER_ATI"/>
+        <enum value="0x876B" name="GL_MAX_VERTEX_STREAMS_ATI"/>
+        <enum value="0x876C" name="GL_VERTEX_STREAM0_ATI"/>
+        <enum value="0x876D" name="GL_VERTEX_STREAM1_ATI"/>
+        <enum value="0x876E" name="GL_VERTEX_STREAM2_ATI"/>
+        <enum value="0x876F" name="GL_VERTEX_STREAM3_ATI"/>
+        <enum value="0x8770" name="GL_VERTEX_STREAM4_ATI"/>
+        <enum value="0x8771" name="GL_VERTEX_STREAM5_ATI"/>
+        <enum value="0x8772" name="GL_VERTEX_STREAM6_ATI"/>
+        <enum value="0x8773" name="GL_VERTEX_STREAM7_ATI"/>
+        <enum value="0x8774" name="GL_VERTEX_SOURCE_ATI"/>
+        <enum value="0x8775" name="GL_BUMP_ROT_MATRIX_ATI"/>
+        <enum value="0x8776" name="GL_BUMP_ROT_MATRIX_SIZE_ATI"/>
+        <enum value="0x8777" name="GL_BUMP_NUM_TEX_UNITS_ATI"/>
+        <enum value="0x8778" name="GL_BUMP_TEX_UNITS_ATI"/>
+        <enum value="0x8779" name="GL_DUDV_ATI"/>
+        <enum value="0x877A" name="GL_DU8DV8_ATI"/>
+        <enum value="0x877B" name="GL_BUMP_ENVMAP_ATI"/>
+        <enum value="0x877C" name="GL_BUMP_TARGET_ATI"/>
+            <unused start="0x877D" end="0x877F" vendor="AMD"/>
+        <enum value="0x8780" name="GL_VERTEX_SHADER_EXT"/>
+        <enum value="0x8781" name="GL_VERTEX_SHADER_BINDING_EXT"/>
+        <enum value="0x8782" name="GL_OP_INDEX_EXT"/>
+        <enum value="0x8783" name="GL_OP_NEGATE_EXT"/>
+        <enum value="0x8784" name="GL_OP_DOT3_EXT"/>
+        <enum value="0x8785" name="GL_OP_DOT4_EXT"/>
+        <enum value="0x8786" name="GL_OP_MUL_EXT"/>
+        <enum value="0x8787" name="GL_OP_ADD_EXT"/>
+        <enum value="0x8788" name="GL_OP_MADD_EXT"/>
+        <enum value="0x8789" name="GL_OP_FRAC_EXT"/>
+        <enum value="0x878A" name="GL_OP_MAX_EXT"/>
+        <enum value="0x878B" name="GL_OP_MIN_EXT"/>
+        <enum value="0x878C" name="GL_OP_SET_GE_EXT"/>
+        <enum value="0x878D" name="GL_OP_SET_LT_EXT"/>
+        <enum value="0x878E" name="GL_OP_CLAMP_EXT"/>
+        <enum value="0x878F" name="GL_OP_FLOOR_EXT"/>
+        <enum value="0x8790" name="GL_OP_ROUND_EXT"/>
+        <enum value="0x8791" name="GL_OP_EXP_BASE_2_EXT"/>
+        <enum value="0x8792" name="GL_OP_LOG_BASE_2_EXT"/>
+        <enum value="0x8793" name="GL_OP_POWER_EXT"/>
+        <enum value="0x8794" name="GL_OP_RECIP_EXT"/>
+        <enum value="0x8795" name="GL_OP_RECIP_SQRT_EXT"/>
+        <enum value="0x8796" name="GL_OP_SUB_EXT"/>
+        <enum value="0x8797" name="GL_OP_CROSS_PRODUCT_EXT"/>
+        <enum value="0x8798" name="GL_OP_MULTIPLY_MATRIX_EXT"/>
+        <enum value="0x8799" name="GL_OP_MOV_EXT"/>
+        <enum value="0x879A" name="GL_OUTPUT_VERTEX_EXT"/>
+        <enum value="0x879B" name="GL_OUTPUT_COLOR0_EXT"/>
+        <enum value="0x879C" name="GL_OUTPUT_COLOR1_EXT"/>
+        <enum value="0x879D" name="GL_OUTPUT_TEXTURE_COORD0_EXT"/>
+        <enum value="0x879E" name="GL_OUTPUT_TEXTURE_COORD1_EXT"/>
+        <enum value="0x879F" name="GL_OUTPUT_TEXTURE_COORD2_EXT"/>
+        <enum value="0x87A0" name="GL_OUTPUT_TEXTURE_COORD3_EXT"/>
+        <enum value="0x87A1" name="GL_OUTPUT_TEXTURE_COORD4_EXT"/>
+        <enum value="0x87A2" name="GL_OUTPUT_TEXTURE_COORD5_EXT"/>
+        <enum value="0x87A3" name="GL_OUTPUT_TEXTURE_COORD6_EXT"/>
+        <enum value="0x87A4" name="GL_OUTPUT_TEXTURE_COORD7_EXT"/>
+        <enum value="0x87A5" name="GL_OUTPUT_TEXTURE_COORD8_EXT"/>
+        <enum value="0x87A6" name="GL_OUTPUT_TEXTURE_COORD9_EXT"/>
+        <enum value="0x87A7" name="GL_OUTPUT_TEXTURE_COORD10_EXT"/>
+        <enum value="0x87A8" name="GL_OUTPUT_TEXTURE_COORD11_EXT"/>
+        <enum value="0x87A9" name="GL_OUTPUT_TEXTURE_COORD12_EXT"/>
+        <enum value="0x87AA" name="GL_OUTPUT_TEXTURE_COORD13_EXT"/>
+        <enum value="0x87AB" name="GL_OUTPUT_TEXTURE_COORD14_EXT"/>
+        <enum value="0x87AC" name="GL_OUTPUT_TEXTURE_COORD15_EXT"/>
+        <enum value="0x87AD" name="GL_OUTPUT_TEXTURE_COORD16_EXT"/>
+        <enum value="0x87AE" name="GL_OUTPUT_TEXTURE_COORD17_EXT"/>
+        <enum value="0x87AF" name="GL_OUTPUT_TEXTURE_COORD18_EXT"/>
+        <enum value="0x87B0" name="GL_OUTPUT_TEXTURE_COORD19_EXT"/>
+        <enum value="0x87B1" name="GL_OUTPUT_TEXTURE_COORD20_EXT"/>
+        <enum value="0x87B2" name="GL_OUTPUT_TEXTURE_COORD21_EXT"/>
+        <enum value="0x87B3" name="GL_OUTPUT_TEXTURE_COORD22_EXT"/>
+        <enum value="0x87B4" name="GL_OUTPUT_TEXTURE_COORD23_EXT"/>
+        <enum value="0x87B5" name="GL_OUTPUT_TEXTURE_COORD24_EXT"/>
+        <enum value="0x87B6" name="GL_OUTPUT_TEXTURE_COORD25_EXT"/>
+        <enum value="0x87B7" name="GL_OUTPUT_TEXTURE_COORD26_EXT"/>
+        <enum value="0x87B8" name="GL_OUTPUT_TEXTURE_COORD27_EXT"/>
+        <enum value="0x87B9" name="GL_OUTPUT_TEXTURE_COORD28_EXT"/>
+        <enum value="0x87BA" name="GL_OUTPUT_TEXTURE_COORD29_EXT"/>
+        <enum value="0x87BB" name="GL_OUTPUT_TEXTURE_COORD30_EXT"/>
+        <enum value="0x87BC" name="GL_OUTPUT_TEXTURE_COORD31_EXT"/>
+        <enum value="0x87BD" name="GL_OUTPUT_FOG_EXT"/>
+        <enum value="0x87BE" name="GL_SCALAR_EXT"/>
+        <enum value="0x87BF" name="GL_VECTOR_EXT"/>
+        <enum value="0x87C0" name="GL_MATRIX_EXT"/>
+        <enum value="0x87C1" name="GL_VARIANT_EXT"/>
+        <enum value="0x87C2" name="GL_INVARIANT_EXT"/>
+        <enum value="0x87C3" name="GL_LOCAL_CONSTANT_EXT"/>
+        <enum value="0x87C4" name="GL_LOCAL_EXT"/>
+        <enum value="0x87C5" name="GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+        <enum value="0x87C6" name="GL_MAX_VERTEX_SHADER_VARIANTS_EXT"/>
+        <enum value="0x87C7" name="GL_MAX_VERTEX_SHADER_INVARIANTS_EXT"/>
+        <enum value="0x87C8" name="GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+        <enum value="0x87C9" name="GL_MAX_VERTEX_SHADER_LOCALS_EXT"/>
+        <enum value="0x87CA" name="GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+        <enum value="0x87CB" name="GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT"/>
+        <enum value="0x87CC" name="GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+        <enum value="0x87CD" name="GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT"/>
+        <enum value="0x87CE" name="GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT"/>
+        <enum value="0x87CF" name="GL_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+        <enum value="0x87D0" name="GL_VERTEX_SHADER_VARIANTS_EXT"/>
+        <enum value="0x87D1" name="GL_VERTEX_SHADER_INVARIANTS_EXT"/>
+        <enum value="0x87D2" name="GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+        <enum value="0x87D3" name="GL_VERTEX_SHADER_LOCALS_EXT"/>
+        <enum value="0x87D4" name="GL_VERTEX_SHADER_OPTIMIZED_EXT"/>
+        <enum value="0x87D5" name="GL_X_EXT"/>
+        <enum value="0x87D6" name="GL_Y_EXT"/>
+        <enum value="0x87D7" name="GL_Z_EXT"/>
+        <enum value="0x87D8" name="GL_W_EXT"/>
+        <enum value="0x87D9" name="GL_NEGATIVE_X_EXT"/>
+        <enum value="0x87DA" name="GL_NEGATIVE_Y_EXT"/>
+        <enum value="0x87DB" name="GL_NEGATIVE_Z_EXT"/>
+        <enum value="0x87DC" name="GL_NEGATIVE_W_EXT"/>
+        <enum value="0x87DD" name="GL_ZERO_EXT"/>
+        <enum value="0x87DE" name="GL_ONE_EXT"/>
+        <enum value="0x87DF" name="GL_NEGATIVE_ONE_EXT"/>
+        <enum value="0x87E0" name="GL_NORMALIZED_RANGE_EXT"/>
+        <enum value="0x87E1" name="GL_FULL_RANGE_EXT"/>
+        <enum value="0x87E2" name="GL_CURRENT_VERTEX_EXT"/>
+        <enum value="0x87E3" name="GL_MVP_MATRIX_EXT"/>
+        <enum value="0x87E4" name="GL_VARIANT_VALUE_EXT"/>
+        <enum value="0x87E5" name="GL_VARIANT_DATATYPE_EXT"/>
+        <enum value="0x87E6" name="GL_VARIANT_ARRAY_STRIDE_EXT"/>
+        <enum value="0x87E7" name="GL_VARIANT_ARRAY_TYPE_EXT"/>
+        <enum value="0x87E8" name="GL_VARIANT_ARRAY_EXT"/>
+        <enum value="0x87E9" name="GL_VARIANT_ARRAY_POINTER_EXT"/>
+        <enum value="0x87EA" name="GL_INVARIANT_VALUE_EXT"/>
+        <enum value="0x87EB" name="GL_INVARIANT_DATATYPE_EXT"/>
+        <enum value="0x87EC" name="GL_LOCAL_CONSTANT_VALUE_EXT"/>
+        <enum value="0x87ED" name="GL_LOCAL_CONSTANT_DATATYPE_EXT"/>
+        <enum value="0x87EE" name="GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD"/>
+        <enum value="0x87F0" name="GL_PN_TRIANGLES_ATI"/>
+        <enum value="0x87F1" name="GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI"/>
+        <enum value="0x87F2" name="GL_PN_TRIANGLES_POINT_MODE_ATI"/>
+        <enum value="0x87F3" name="GL_PN_TRIANGLES_NORMAL_MODE_ATI"/>
+        <enum value="0x87F4" name="GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI"/>
+        <enum value="0x87F5" name="GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI"/>
+        <enum value="0x87F6" name="GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI"/>
+        <enum value="0x87F7" name="GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI"/>
+        <enum value="0x87F8" name="GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI"/>
+        <enum value="0x87F9" name="GL_3DC_X_AMD"/>
+        <enum value="0x87FA" name="GL_3DC_XY_AMD"/>
+        <enum value="0x87FB" name="GL_VBO_FREE_MEMORY_ATI"/>
+        <enum value="0x87FC" name="GL_TEXTURE_FREE_MEMORY_ATI"/>
+        <enum value="0x87FD" name="GL_RENDERBUFFER_FREE_MEMORY_ATI"/>
+        <enum value="0x87FE" name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+        <enum value="0x87FE" name="GL_NUM_PROGRAM_BINARY_FORMATS_OES"/>
+        <enum value="0x87FF" name="GL_PROGRAM_BINARY_FORMATS"/>
+        <enum value="0x87FF" name="GL_PROGRAM_BINARY_FORMATS_OES"/>
+        <enum value="0x8800" name="GL_STENCIL_BACK_FUNC"/>
+        <enum value="0x8800" name="GL_STENCIL_BACK_FUNC_ATI"/>
+        <enum value="0x8801" name="GL_STENCIL_BACK_FAIL"/>
+        <enum value="0x8801" name="GL_STENCIL_BACK_FAIL_ATI"/>
+        <enum value="0x8802" name="GL_STENCIL_BACK_PASS_DEPTH_FAIL"/>
+        <enum value="0x8802" name="GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI"/>
+        <enum value="0x8803" name="GL_STENCIL_BACK_PASS_DEPTH_PASS"/>
+        <enum value="0x8803" name="GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI"/>
+        <enum value="0x8804" name="GL_FRAGMENT_PROGRAM_ARB"/>
+        <enum value="0x8805" name="GL_PROGRAM_ALU_INSTRUCTIONS_ARB"/>
+        <enum value="0x8806" name="GL_PROGRAM_TEX_INSTRUCTIONS_ARB"/>
+        <enum value="0x8807" name="GL_PROGRAM_TEX_INDIRECTIONS_ARB"/>
+        <enum value="0x8808" name="GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB"/>
+        <enum value="0x8809" name="GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB"/>
+        <enum value="0x880A" name="GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB"/>
+        <enum value="0x880B" name="GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB"/>
+        <enum value="0x880C" name="GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB"/>
+        <enum value="0x880D" name="GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB"/>
+        <enum value="0x880E" name="GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB"/>
+        <enum value="0x880F" name="GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB"/>
+        <enum value="0x8810" name="GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB"/>
+            <unused start="0x8811" end="0x8813" vendor="AMD"/>
+        <enum value="0x8814" name="GL_RGBA32F"/>
+        <enum value="0x8814" name="GL_RGBA32F_ARB"/>
+        <enum value="0x8814" name="GL_RGBA32F_EXT"/>
+        <enum value="0x8814" name="GL_RGBA_FLOAT32_APPLE"/>
+        <enum value="0x8814" name="GL_RGBA_FLOAT32_ATI"/>
+        <enum value="0x8815" name="GL_RGB32F"/>
+        <enum value="0x8815" name="GL_RGB32F_ARB"/>
+        <enum value="0x8815" name="GL_RGB32F_EXT"/>
+        <enum value="0x8815" name="GL_RGB_FLOAT32_APPLE"/>
+        <enum value="0x8815" name="GL_RGB_FLOAT32_ATI"/>
+        <enum value="0x8816" name="GL_ALPHA32F_ARB"/>
+        <enum value="0x8816" name="GL_ALPHA32F_EXT"/>
+        <enum value="0x8816" name="GL_ALPHA_FLOAT32_APPLE"/>
+        <enum value="0x8816" name="GL_ALPHA_FLOAT32_ATI"/>
+        <enum value="0x8817" name="GL_INTENSITY32F_ARB"/>
+        <enum value="0x8817" name="GL_INTENSITY_FLOAT32_APPLE"/>
+        <enum value="0x8817" name="GL_INTENSITY_FLOAT32_ATI"/>
+        <enum value="0x8818" name="GL_LUMINANCE32F_ARB"/>
+        <enum value="0x8818" name="GL_LUMINANCE32F_EXT"/>
+        <enum value="0x8818" name="GL_LUMINANCE_FLOAT32_APPLE"/>
+        <enum value="0x8818" name="GL_LUMINANCE_FLOAT32_ATI"/>
+        <enum value="0x8819" name="GL_LUMINANCE_ALPHA32F_ARB"/>
+        <enum value="0x8819" name="GL_LUMINANCE_ALPHA32F_EXT"/>
+        <enum value="0x8819" name="GL_LUMINANCE_ALPHA_FLOAT32_APPLE"/>
+        <enum value="0x8819" name="GL_LUMINANCE_ALPHA_FLOAT32_ATI"/>
+        <enum value="0x881A" name="GL_RGBA16F"/>
+        <enum value="0x881A" name="GL_RGBA16F_ARB"/>
+        <enum value="0x881A" name="GL_RGBA16F_EXT"/>
+        <enum value="0x881A" name="GL_RGBA_FLOAT16_APPLE"/>
+        <enum value="0x881A" name="GL_RGBA_FLOAT16_ATI"/>
+        <enum value="0x881B" name="GL_RGB16F"/>
+        <enum value="0x881B" name="GL_RGB16F_ARB"/>
+        <enum value="0x881B" name="GL_RGB16F_EXT"/>
+        <enum value="0x881B" name="GL_RGB_FLOAT16_APPLE"/>
+        <enum value="0x881B" name="GL_RGB_FLOAT16_ATI"/>
+        <enum value="0x881C" name="GL_ALPHA16F_ARB"/>
+        <enum value="0x881C" name="GL_ALPHA16F_EXT"/>
+        <enum value="0x881C" name="GL_ALPHA_FLOAT16_APPLE"/>
+        <enum value="0x881C" name="GL_ALPHA_FLOAT16_ATI"/>
+        <enum value="0x881D" name="GL_INTENSITY16F_ARB"/>
+        <enum value="0x881D" name="GL_INTENSITY_FLOAT16_APPLE"/>
+        <enum value="0x881D" name="GL_INTENSITY_FLOAT16_ATI"/>
+        <enum value="0x881E" name="GL_LUMINANCE16F_ARB"/>
+        <enum value="0x881E" name="GL_LUMINANCE16F_EXT"/>
+        <enum value="0x881E" name="GL_LUMINANCE_FLOAT16_APPLE"/>
+        <enum value="0x881E" name="GL_LUMINANCE_FLOAT16_ATI"/>
+        <enum value="0x881F" name="GL_LUMINANCE_ALPHA16F_ARB"/>
+        <enum value="0x881F" name="GL_LUMINANCE_ALPHA16F_EXT"/>
+        <enum value="0x881F" name="GL_LUMINANCE_ALPHA_FLOAT16_APPLE"/>
+        <enum value="0x881F" name="GL_LUMINANCE_ALPHA_FLOAT16_ATI"/>
+            <!-- RGBA_FLOAT_MODE_ARB equivalent to TYPE_RGBA_FLOAT_ATI -->
+        <enum value="0x8820" name="GL_RGBA_FLOAT_MODE_ARB"/>
+        <enum value="0x8820" name="GL_RGBA_FLOAT_MODE_ATI"/>
+            <unused start="0x8821" end="0x8822" vendor="AMD"/>
+        <enum value="0x8823" name="GL_WRITEONLY_RENDERING_QCOM"/>
+        <enum value="0x8824" name="GL_MAX_DRAW_BUFFERS"/>
+        <enum value="0x8824" name="GL_MAX_DRAW_BUFFERS_ARB"/>
+        <enum value="0x8824" name="GL_MAX_DRAW_BUFFERS_ATI"/>
+        <enum value="0x8824" name="GL_MAX_DRAW_BUFFERS_EXT"/>
+        <enum value="0x8824" name="GL_MAX_DRAW_BUFFERS_NV"/>
+        <enum value="0x8825" name="GL_DRAW_BUFFER0"/>
+        <enum value="0x8825" name="GL_DRAW_BUFFER0_ARB"/>
+        <enum value="0x8825" name="GL_DRAW_BUFFER0_ATI"/>
+        <enum value="0x8825" name="GL_DRAW_BUFFER0_EXT"/>
+        <enum value="0x8825" name="GL_DRAW_BUFFER0_NV"/>
+        <enum value="0x8826" name="GL_DRAW_BUFFER1"/>
+        <enum value="0x8826" name="GL_DRAW_BUFFER1_ARB"/>
+        <enum value="0x8826" name="GL_DRAW_BUFFER1_ATI"/>
+        <enum value="0x8826" name="GL_DRAW_BUFFER1_EXT"/>
+        <enum value="0x8826" name="GL_DRAW_BUFFER1_NV"/>
+        <enum value="0x8827" name="GL_DRAW_BUFFER2"/>
+        <enum value="0x8827" name="GL_DRAW_BUFFER2_ARB"/>
+        <enum value="0x8827" name="GL_DRAW_BUFFER2_ATI"/>
+        <enum value="0x8827" name="GL_DRAW_BUFFER2_EXT"/>
+        <enum value="0x8827" name="GL_DRAW_BUFFER2_NV"/>
+        <enum value="0x8828" name="GL_DRAW_BUFFER3"/>
+        <enum value="0x8828" name="GL_DRAW_BUFFER3_ARB"/>
+        <enum value="0x8828" name="GL_DRAW_BUFFER3_ATI"/>
+        <enum value="0x8828" name="GL_DRAW_BUFFER3_EXT"/>
+        <enum value="0x8828" name="GL_DRAW_BUFFER3_NV"/>
+        <enum value="0x8829" name="GL_DRAW_BUFFER4"/>
+        <enum value="0x8829" name="GL_DRAW_BUFFER4_ARB"/>
+        <enum value="0x8829" name="GL_DRAW_BUFFER4_ATI"/>
+        <enum value="0x8829" name="GL_DRAW_BUFFER4_EXT"/>
+        <enum value="0x8829" name="GL_DRAW_BUFFER4_NV"/>
+        <enum value="0x882A" name="GL_DRAW_BUFFER5"/>
+        <enum value="0x882A" name="GL_DRAW_BUFFER5_ARB"/>
+        <enum value="0x882A" name="GL_DRAW_BUFFER5_ATI"/>
+        <enum value="0x882A" name="GL_DRAW_BUFFER5_EXT"/>
+        <enum value="0x882A" name="GL_DRAW_BUFFER5_NV"/>
+        <enum value="0x882B" name="GL_DRAW_BUFFER6"/>
+        <enum value="0x882B" name="GL_DRAW_BUFFER6_ARB"/>
+        <enum value="0x882B" name="GL_DRAW_BUFFER6_ATI"/>
+        <enum value="0x882B" name="GL_DRAW_BUFFER6_EXT"/>
+        <enum value="0x882B" name="GL_DRAW_BUFFER6_NV"/>
+        <enum value="0x882C" name="GL_DRAW_BUFFER7"/>
+        <enum value="0x882C" name="GL_DRAW_BUFFER7_ARB"/>
+        <enum value="0x882C" name="GL_DRAW_BUFFER7_ATI"/>
+        <enum value="0x882C" name="GL_DRAW_BUFFER7_EXT"/>
+        <enum value="0x882C" name="GL_DRAW_BUFFER7_NV"/>
+        <enum value="0x882D" name="GL_DRAW_BUFFER8"/>
+        <enum value="0x882D" name="GL_DRAW_BUFFER8_ARB"/>
+        <enum value="0x882D" name="GL_DRAW_BUFFER8_ATI"/>
+        <enum value="0x882D" name="GL_DRAW_BUFFER8_EXT"/>
+        <enum value="0x882D" name="GL_DRAW_BUFFER8_NV"/>
+        <enum value="0x882E" name="GL_DRAW_BUFFER9"/>
+        <enum value="0x882E" name="GL_DRAW_BUFFER9_ARB"/>
+        <enum value="0x882E" name="GL_DRAW_BUFFER9_ATI"/>
+        <enum value="0x882E" name="GL_DRAW_BUFFER9_EXT"/>
+        <enum value="0x882E" name="GL_DRAW_BUFFER9_NV"/>
+        <enum value="0x882F" name="GL_DRAW_BUFFER10"/>
+        <enum value="0x882F" name="GL_DRAW_BUFFER10_ARB"/>
+        <enum value="0x882F" name="GL_DRAW_BUFFER10_ATI"/>
+        <enum value="0x882F" name="GL_DRAW_BUFFER10_EXT"/>
+        <enum value="0x882F" name="GL_DRAW_BUFFER10_NV"/>
+        <enum value="0x8830" name="GL_DRAW_BUFFER11"/>
+        <enum value="0x8830" name="GL_DRAW_BUFFER11_ARB"/>
+        <enum value="0x8830" name="GL_DRAW_BUFFER11_ATI"/>
+        <enum value="0x8830" name="GL_DRAW_BUFFER11_EXT"/>
+        <enum value="0x8830" name="GL_DRAW_BUFFER11_NV"/>
+        <enum value="0x8831" name="GL_DRAW_BUFFER12"/>
+        <enum value="0x8831" name="GL_DRAW_BUFFER12_ARB"/>
+        <enum value="0x8831" name="GL_DRAW_BUFFER12_ATI"/>
+        <enum value="0x8831" name="GL_DRAW_BUFFER12_EXT"/>
+        <enum value="0x8831" name="GL_DRAW_BUFFER12_NV"/>
+        <enum value="0x8832" name="GL_DRAW_BUFFER13"/>
+        <enum value="0x8832" name="GL_DRAW_BUFFER13_ARB"/>
+        <enum value="0x8832" name="GL_DRAW_BUFFER13_ATI"/>
+        <enum value="0x8832" name="GL_DRAW_BUFFER13_EXT"/>
+        <enum value="0x8832" name="GL_DRAW_BUFFER13_NV"/>
+        <enum value="0x8833" name="GL_DRAW_BUFFER14"/>
+        <enum value="0x8833" name="GL_DRAW_BUFFER14_ARB"/>
+        <enum value="0x8833" name="GL_DRAW_BUFFER14_ATI"/>
+        <enum value="0x8833" name="GL_DRAW_BUFFER14_EXT"/>
+        <enum value="0x8833" name="GL_DRAW_BUFFER14_NV"/>
+        <enum value="0x8834" name="GL_DRAW_BUFFER15"/>
+        <enum value="0x8834" name="GL_DRAW_BUFFER15_ARB"/>
+        <enum value="0x8834" name="GL_DRAW_BUFFER15_ATI"/>
+        <enum value="0x8834" name="GL_DRAW_BUFFER15_EXT"/>
+        <enum value="0x8834" name="GL_DRAW_BUFFER15_NV"/>
+        <enum value="0x8835" name="GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI"/>
+            <unused start="0x8836" vendor="AMD"/>
+        <enum value="0x8837" name="GL_COMPRESSED_LUMINANCE_ALPHA_3DC_ATI" comment="Defined by Mesa but not ATI"/>
+            <unused start="0x8838" end="0x883C" vendor="AMD"/>
+        <enum value="0x883D" name="GL_BLEND_EQUATION_ALPHA"/>
+        <enum value="0x883D" name="GL_BLEND_EQUATION_ALPHA_EXT"/>
+        <enum value="0x883D" name="GL_BLEND_EQUATION_ALPHA_OES"/>
+            <unused start="0x883E" vendor="AMD"/>
+        <enum value="0x883F" name="GL_SUBSAMPLE_DISTANCE_AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8840" end="0x884F" vendor="ARB">
+        <enum value="0x8840" name="GL_MATRIX_PALETTE_ARB"/>
+        <enum value="0x8840" name="GL_MATRIX_PALETTE_OES"/>
+        <enum value="0x8841" name="GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB"/>
+        <enum value="0x8842" name="GL_MAX_PALETTE_MATRICES_ARB"/>
+        <enum value="0x8842" name="GL_MAX_PALETTE_MATRICES_OES"/>
+        <enum value="0x8843" name="GL_CURRENT_PALETTE_MATRIX_ARB"/>
+        <enum value="0x8843" name="GL_CURRENT_PALETTE_MATRIX_OES"/>
+        <enum value="0x8844" name="GL_MATRIX_INDEX_ARRAY_ARB"/>
+        <enum value="0x8844" name="GL_MATRIX_INDEX_ARRAY_OES"/>
+        <enum value="0x8845" name="GL_CURRENT_MATRIX_INDEX_ARB"/>
+        <enum value="0x8846" name="GL_MATRIX_INDEX_ARRAY_SIZE_ARB"/>
+        <enum value="0x8846" name="GL_MATRIX_INDEX_ARRAY_SIZE_OES"/>
+        <enum value="0x8847" name="GL_MATRIX_INDEX_ARRAY_TYPE_ARB"/>
+        <enum value="0x8847" name="GL_MATRIX_INDEX_ARRAY_TYPE_OES"/>
+        <enum value="0x8848" name="GL_MATRIX_INDEX_ARRAY_STRIDE_ARB"/>
+        <enum value="0x8848" name="GL_MATRIX_INDEX_ARRAY_STRIDE_OES"/>
+        <enum value="0x8849" name="GL_MATRIX_INDEX_ARRAY_POINTER_ARB"/>
+        <enum value="0x8849" name="GL_MATRIX_INDEX_ARRAY_POINTER_OES"/>
+        <enum value="0x884A" name="GL_TEXTURE_DEPTH_SIZE"/>
+        <enum value="0x884A" name="GL_TEXTURE_DEPTH_SIZE_ARB"/>
+        <enum value="0x884B" name="GL_DEPTH_TEXTURE_MODE"/>
+        <enum value="0x884B" name="GL_DEPTH_TEXTURE_MODE_ARB"/>
+        <enum value="0x884C" name="GL_TEXTURE_COMPARE_MODE"/>
+        <enum value="0x884C" name="GL_TEXTURE_COMPARE_MODE_ARB"/>
+        <enum value="0x884C" name="GL_TEXTURE_COMPARE_MODE_EXT"/>
+        <enum value="0x884D" name="GL_TEXTURE_COMPARE_FUNC"/>
+        <enum value="0x884D" name="GL_TEXTURE_COMPARE_FUNC_ARB"/>
+        <enum value="0x884D" name="GL_TEXTURE_COMPARE_FUNC_EXT"/>
+        <enum value="0x884E" name="GL_COMPARE_R_TO_TEXTURE"/>
+        <enum value="0x884E" name="GL_COMPARE_R_TO_TEXTURE_ARB"/>
+        <enum value="0x884E" name="GL_COMPARE_REF_DEPTH_TO_TEXTURE_EXT"/>
+        <enum value="0x884E" name="GL_COMPARE_REF_TO_TEXTURE" alias="GL_COMPARE_R_TO_TEXTURE"/>
+        <enum value="0x884E" name="GL_COMPARE_REF_TO_TEXTURE_EXT"/>
+        <enum value="0x884F" name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8850" end="0x891F" vendor="NV">
+        <enum value="0x8850" name="GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV"/>
+        <enum value="0x8851" name="GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV"/>
+        <enum value="0x8852" name="GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x8853" name="GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV"/>
+        <enum value="0x8854" name="GL_OFFSET_HILO_TEXTURE_2D_NV"/>
+        <enum value="0x8855" name="GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x8856" name="GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV"/>
+        <enum value="0x8857" name="GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV"/>
+        <enum value="0x8858" name="GL_DEPENDENT_HILO_TEXTURE_2D_NV"/>
+        <enum value="0x8859" name="GL_DEPENDENT_RGB_TEXTURE_3D_NV"/>
+        <enum value="0x885A" name="GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV"/>
+        <enum value="0x885B" name="GL_DOT_PRODUCT_PASS_THROUGH_NV"/>
+        <enum value="0x885C" name="GL_DOT_PRODUCT_TEXTURE_1D_NV"/>
+        <enum value="0x885D" name="GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV"/>
+        <enum value="0x885E" name="GL_HILO8_NV"/>
+        <enum value="0x885F" name="GL_SIGNED_HILO8_NV"/>
+        <enum value="0x8860" name="GL_FORCE_BLUE_TO_ONE_NV"/>
+        <enum value="0x8861" name="GL_POINT_SPRITE"/>
+        <enum value="0x8861" name="GL_POINT_SPRITE_ARB"/>
+        <enum value="0x8861" name="GL_POINT_SPRITE_NV"/>
+        <enum value="0x8861" name="GL_POINT_SPRITE_OES"/>
+        <enum value="0x8862" name="GL_COORD_REPLACE"/>
+        <enum value="0x8862" name="GL_COORD_REPLACE_ARB"/>
+        <enum value="0x8862" name="GL_COORD_REPLACE_NV"/>
+        <enum value="0x8862" name="GL_COORD_REPLACE_OES"/>
+        <enum value="0x8863" name="GL_POINT_SPRITE_R_MODE_NV"/>
+        <enum value="0x8864" name="GL_PIXEL_COUNTER_BITS_NV"/>
+        <enum value="0x8864" name="GL_QUERY_COUNTER_BITS"/>
+        <enum value="0x8864" name="GL_QUERY_COUNTER_BITS_ARB"/>
+        <enum value="0x8864" name="GL_QUERY_COUNTER_BITS_EXT"/>
+        <enum value="0x8865" name="GL_CURRENT_OCCLUSION_QUERY_ID_NV"/>
+        <enum value="0x8865" name="GL_CURRENT_QUERY"/>
+        <enum value="0x8865" name="GL_CURRENT_QUERY_ARB"/>
+        <enum value="0x8865" name="GL_CURRENT_QUERY_EXT"/>
+        <enum value="0x8866" name="GL_PIXEL_COUNT_NV"/>
+        <enum value="0x8866" name="GL_QUERY_RESULT"/>
+        <enum value="0x8866" name="GL_QUERY_RESULT_ARB"/>
+        <enum value="0x8866" name="GL_QUERY_RESULT_EXT"/>
+        <enum value="0x8867" name="GL_PIXEL_COUNT_AVAILABLE_NV"/>
+        <enum value="0x8867" name="GL_QUERY_RESULT_AVAILABLE"/>
+        <enum value="0x8867" name="GL_QUERY_RESULT_AVAILABLE_ARB"/>
+        <enum value="0x8867" name="GL_QUERY_RESULT_AVAILABLE_EXT"/>
+        <enum value="0x8868" name="GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV"/>
+        <enum value="0x8869" name="GL_MAX_VERTEX_ATTRIBS"/>
+        <enum value="0x8869" name="GL_MAX_VERTEX_ATTRIBS_ARB"/>
+        <enum value="0x886A" name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+        <enum value="0x886A" name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB"/>
+            <unused start="0x886B" vendor="NV"/>
+        <enum value="0x886C" name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS"/>
+        <enum value="0x886C" name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT"/>
+        <enum value="0x886C" name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES"/>
+        <enum value="0x886D" name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS"/>
+        <enum value="0x886D" name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT"/>
+        <enum value="0x886D" name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES"/>
+        <enum value="0x886E" name="GL_DEPTH_STENCIL_TO_RGBA_NV"/>
+        <enum value="0x886F" name="GL_DEPTH_STENCIL_TO_BGRA_NV"/>
+        <enum value="0x8870" name="GL_FRAGMENT_PROGRAM_NV"/>
+        <enum value="0x8871" name="GL_MAX_TEXTURE_COORDS"/>
+        <enum value="0x8871" name="GL_MAX_TEXTURE_COORDS_ARB"/>
+        <enum value="0x8871" name="GL_MAX_TEXTURE_COORDS_NV"/>
+        <enum value="0x8872" name="GL_MAX_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8872" name="GL_MAX_TEXTURE_IMAGE_UNITS_ARB"/>
+        <enum value="0x8872" name="GL_MAX_TEXTURE_IMAGE_UNITS_NV"/>
+        <enum value="0x8873" name="GL_FRAGMENT_PROGRAM_BINDING_NV"/>
+        <enum value="0x8874" name="GL_PROGRAM_ERROR_STRING_ARB"/>
+        <enum value="0x8874" name="GL_PROGRAM_ERROR_STRING_NV"/>
+        <enum value="0x8875" name="GL_PROGRAM_FORMAT_ASCII_ARB"/>
+        <enum value="0x8876" name="GL_PROGRAM_FORMAT_ARB"/>
+            <unused start="0x8877" vendor="NV" comment="Should have been assigned to PROGRAM_BINDING_ARB"/>
+        <enum value="0x8878" name="GL_WRITE_PIXEL_DATA_RANGE_NV"/>
+        <enum value="0x8879" name="GL_READ_PIXEL_DATA_RANGE_NV"/>
+        <enum value="0x887A" name="GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV"/>
+        <enum value="0x887B" name="GL_READ_PIXEL_DATA_RANGE_LENGTH_NV"/>
+        <enum value="0x887C" name="GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV"/>
+        <enum value="0x887D" name="GL_READ_PIXEL_DATA_RANGE_POINTER_NV"/>
+            <unused start="0x887E" vendor="NV"/>
+        <enum value="0x887F" name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+        <enum value="0x887F" name="GL_GEOMETRY_SHADER_INVOCATIONS_EXT"/>
+        <enum value="0x887F" name="GL_GEOMETRY_SHADER_INVOCATIONS_OES"/>
+        <enum value="0x8880" name="GL_FLOAT_R_NV"/>
+        <enum value="0x8881" name="GL_FLOAT_RG_NV"/>
+        <enum value="0x8882" name="GL_FLOAT_RGB_NV"/>
+        <enum value="0x8883" name="GL_FLOAT_RGBA_NV"/>
+        <enum value="0x8884" name="GL_FLOAT_R16_NV"/>
+        <enum value="0x8885" name="GL_FLOAT_R32_NV"/>
+        <enum value="0x8886" name="GL_FLOAT_RG16_NV"/>
+        <enum value="0x8887" name="GL_FLOAT_RG32_NV"/>
+        <enum value="0x8888" name="GL_FLOAT_RGB16_NV"/>
+        <enum value="0x8889" name="GL_FLOAT_RGB32_NV"/>
+        <enum value="0x888A" name="GL_FLOAT_RGBA16_NV"/>
+        <enum value="0x888B" name="GL_FLOAT_RGBA32_NV"/>
+        <enum value="0x888C" name="GL_TEXTURE_FLOAT_COMPONENTS_NV"/>
+        <enum value="0x888D" name="GL_FLOAT_CLEAR_COLOR_VALUE_NV"/>
+        <enum value="0x888E" name="GL_FLOAT_RGBA_MODE_NV"/>
+        <enum value="0x888F" name="GL_TEXTURE_UNSIGNED_REMAP_MODE_NV"/>
+        <enum value="0x8890" name="GL_DEPTH_BOUNDS_TEST_EXT"/>
+        <enum value="0x8891" name="GL_DEPTH_BOUNDS_EXT"/>
+        <enum value="0x8892" name="GL_ARRAY_BUFFER"/>
+        <enum value="0x8892" name="GL_ARRAY_BUFFER_ARB"/>
+        <enum value="0x8893" name="GL_ELEMENT_ARRAY_BUFFER"/>
+        <enum value="0x8893" name="GL_ELEMENT_ARRAY_BUFFER_ARB"/>
+        <enum value="0x8894" name="GL_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8894" name="GL_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x8895" name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8895" name="GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x8896" name="GL_VERTEX_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8896" name="GL_VERTEX_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x8897" name="GL_NORMAL_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8897" name="GL_NORMAL_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x8898" name="GL_COLOR_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8898" name="GL_COLOR_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x8899" name="GL_INDEX_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x8899" name="GL_INDEX_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889A" name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889A" name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889B" name="GL_EDGE_FLAG_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889B" name="GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889C" name="GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889C" name="GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889D" name="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889D" name="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889D" name="GL_FOG_COORD_ARRAY_BUFFER_BINDING" alias="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889E" name="GL_WEIGHT_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889E" name="GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x889E" name="GL_WEIGHT_ARRAY_BUFFER_BINDING_OES"/>
+        <enum value="0x889F" name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"/>
+        <enum value="0x889F" name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB"/>
+        <enum value="0x88A0" name="GL_PROGRAM_INSTRUCTIONS_ARB"/>
+        <enum value="0x88A1" name="GL_MAX_PROGRAM_INSTRUCTIONS_ARB"/>
+        <enum value="0x88A2" name="GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+        <enum value="0x88A3" name="GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+        <enum value="0x88A4" name="GL_PROGRAM_TEMPORARIES_ARB"/>
+        <enum value="0x88A5" name="GL_MAX_PROGRAM_TEMPORARIES_ARB"/>
+        <enum value="0x88A6" name="GL_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+        <enum value="0x88A7" name="GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+        <enum value="0x88A8" name="GL_PROGRAM_PARAMETERS_ARB"/>
+        <enum value="0x88A9" name="GL_MAX_PROGRAM_PARAMETERS_ARB"/>
+        <enum value="0x88AA" name="GL_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+        <enum value="0x88AB" name="GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+        <enum value="0x88AC" name="GL_PROGRAM_ATTRIBS_ARB"/>
+        <enum value="0x88AD" name="GL_MAX_PROGRAM_ATTRIBS_ARB"/>
+        <enum value="0x88AE" name="GL_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+        <enum value="0x88AF" name="GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+        <enum value="0x88B0" name="GL_PROGRAM_ADDRESS_REGISTERS_ARB"/>
+        <enum value="0x88B1" name="GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB"/>
+        <enum value="0x88B2" name="GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB"/>
+        <enum value="0x88B3" name="GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB"/>
+        <enum value="0x88B4" name="GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB"/>
+        <enum value="0x88B5" name="GL_MAX_PROGRAM_ENV_PARAMETERS_ARB"/>
+        <enum value="0x88B6" name="GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB"/>
+        <enum value="0x88B7" name="GL_TRANSPOSE_CURRENT_MATRIX_ARB"/>
+        <enum value="0x88B8" name="GL_READ_ONLY"/>
+        <enum value="0x88B8" name="GL_READ_ONLY_ARB"/>
+        <enum value="0x88B9" name="GL_WRITE_ONLY"/>
+        <enum value="0x88B9" name="GL_WRITE_ONLY_ARB"/>
+        <enum value="0x88B9" name="GL_WRITE_ONLY_OES"/>
+        <enum value="0x88BA" name="GL_READ_WRITE"/>
+        <enum value="0x88BA" name="GL_READ_WRITE_ARB"/>
+        <enum value="0x88BB" name="GL_BUFFER_ACCESS"/>
+        <enum value="0x88BB" name="GL_BUFFER_ACCESS_ARB"/>
+        <enum value="0x88BB" name="GL_BUFFER_ACCESS_OES"/>
+        <enum value="0x88BC" name="GL_BUFFER_MAPPED"/>
+        <enum value="0x88BC" name="GL_BUFFER_MAPPED_ARB"/>
+        <enum value="0x88BC" name="GL_BUFFER_MAPPED_OES"/>
+        <enum value="0x88BD" name="GL_BUFFER_MAP_POINTER"/>
+        <enum value="0x88BD" name="GL_BUFFER_MAP_POINTER_ARB"/>
+        <enum value="0x88BD" name="GL_BUFFER_MAP_POINTER_OES"/>
+        <enum value="0x88BE" name="GL_WRITE_DISCARD_NV"/>
+        <enum value="0x88BF" name="GL_TIME_ELAPSED"/>
+        <enum value="0x88BF" name="GL_TIME_ELAPSED_EXT"/>
+        <enum value="0x88C0" name="GL_MATRIX0_ARB"/>
+        <enum value="0x88C1" name="GL_MATRIX1_ARB"/>
+        <enum value="0x88C2" name="GL_MATRIX2_ARB"/>
+        <enum value="0x88C3" name="GL_MATRIX3_ARB"/>
+        <enum value="0x88C4" name="GL_MATRIX4_ARB"/>
+        <enum value="0x88C5" name="GL_MATRIX5_ARB"/>
+        <enum value="0x88C6" name="GL_MATRIX6_ARB"/>
+        <enum value="0x88C7" name="GL_MATRIX7_ARB"/>
+        <enum value="0x88C8" name="GL_MATRIX8_ARB"/>
+        <enum value="0x88C9" name="GL_MATRIX9_ARB"/>
+        <enum value="0x88CA" name="GL_MATRIX10_ARB"/>
+        <enum value="0x88CB" name="GL_MATRIX11_ARB"/>
+        <enum value="0x88CC" name="GL_MATRIX12_ARB"/>
+        <enum value="0x88CD" name="GL_MATRIX13_ARB"/>
+        <enum value="0x88CE" name="GL_MATRIX14_ARB"/>
+        <enum value="0x88CF" name="GL_MATRIX15_ARB"/>
+        <enum value="0x88D0" name="GL_MATRIX16_ARB"/>
+        <enum value="0x88D1" name="GL_MATRIX17_ARB"/>
+        <enum value="0x88D2" name="GL_MATRIX18_ARB"/>
+        <enum value="0x88D3" name="GL_MATRIX19_ARB"/>
+        <enum value="0x88D4" name="GL_MATRIX20_ARB"/>
+        <enum value="0x88D5" name="GL_MATRIX21_ARB"/>
+        <enum value="0x88D6" name="GL_MATRIX22_ARB"/>
+        <enum value="0x88D7" name="GL_MATRIX23_ARB"/>
+        <enum value="0x88D8" name="GL_MATRIX24_ARB"/>
+        <enum value="0x88D9" name="GL_MATRIX25_ARB"/>
+        <enum value="0x88DA" name="GL_MATRIX26_ARB"/>
+        <enum value="0x88DB" name="GL_MATRIX27_ARB"/>
+        <enum value="0x88DC" name="GL_MATRIX28_ARB"/>
+        <enum value="0x88DD" name="GL_MATRIX29_ARB"/>
+        <enum value="0x88DE" name="GL_MATRIX30_ARB"/>
+        <enum value="0x88DF" name="GL_MATRIX31_ARB"/>
+        <enum value="0x88E0" name="GL_STREAM_DRAW"/>
+        <enum value="0x88E0" name="GL_STREAM_DRAW_ARB"/>
+        <enum value="0x88E1" name="GL_STREAM_READ"/>
+        <enum value="0x88E1" name="GL_STREAM_READ_ARB"/>
+        <enum value="0x88E2" name="GL_STREAM_COPY"/>
+        <enum value="0x88E2" name="GL_STREAM_COPY_ARB"/>
+            <unused start="0x88E3" vendor="NV" comment="To extend ARB_vbo"/>
+        <enum value="0x88E4" name="GL_STATIC_DRAW"/>
+        <enum value="0x88E4" name="GL_STATIC_DRAW_ARB"/>
+        <enum value="0x88E5" name="GL_STATIC_READ"/>
+        <enum value="0x88E5" name="GL_STATIC_READ_ARB"/>
+        <enum value="0x88E6" name="GL_STATIC_COPY"/>
+        <enum value="0x88E6" name="GL_STATIC_COPY_ARB"/>
+            <unused start="0x88E7" vendor="NV" comment="To extend ARB_vbo"/>
+        <enum value="0x88E8" name="GL_DYNAMIC_DRAW"/>
+        <enum value="0x88E8" name="GL_DYNAMIC_DRAW_ARB"/>
+        <enum value="0x88E9" name="GL_DYNAMIC_READ"/>
+        <enum value="0x88E9" name="GL_DYNAMIC_READ_ARB"/>
+        <enum value="0x88EA" name="GL_DYNAMIC_COPY"/>
+        <enum value="0x88EA" name="GL_DYNAMIC_COPY_ARB"/>
+        <enum value="0x88EB" name="GL_PIXEL_PACK_BUFFER"/>
+        <enum value="0x88EB" name="GL_PIXEL_PACK_BUFFER_ARB"/>
+        <enum value="0x88EB" name="GL_PIXEL_PACK_BUFFER_EXT"/>
+        <enum value="0x88EB" name="GL_PIXEL_PACK_BUFFER_NV"/>
+        <enum value="0x88EC" name="GL_PIXEL_UNPACK_BUFFER"/>
+        <enum value="0x88EC" name="GL_PIXEL_UNPACK_BUFFER_ARB"/>
+        <enum value="0x88EC" name="GL_PIXEL_UNPACK_BUFFER_EXT"/>
+        <enum value="0x88EC" name="GL_PIXEL_UNPACK_BUFFER_NV"/>
+        <enum value="0x88ED" name="GL_PIXEL_PACK_BUFFER_BINDING"/>
+        <enum value="0x88ED" name="GL_PIXEL_PACK_BUFFER_BINDING_ARB"/>
+        <enum value="0x88ED" name="GL_PIXEL_PACK_BUFFER_BINDING_EXT"/>
+        <enum value="0x88ED" name="GL_PIXEL_PACK_BUFFER_BINDING_NV"/>
+        <enum value="0x88EE" name="GL_ETC1_SRGB8_NV"/>
+        <enum value="0x88EF" name="GL_PIXEL_UNPACK_BUFFER_BINDING"/>
+        <enum value="0x88EF" name="GL_PIXEL_UNPACK_BUFFER_BINDING_ARB"/>
+        <enum value="0x88EF" name="GL_PIXEL_UNPACK_BUFFER_BINDING_EXT"/>
+        <enum value="0x88EF" name="GL_PIXEL_UNPACK_BUFFER_BINDING_NV"/>
+        <enum value="0x88F0" name="GL_DEPTH24_STENCIL8"/>
+        <enum value="0x88F0" name="GL_DEPTH24_STENCIL8_EXT"/>
+        <enum value="0x88F0" name="GL_DEPTH24_STENCIL8_OES"/>
+        <enum value="0x88F1" name="GL_TEXTURE_STENCIL_SIZE"/>
+        <enum value="0x88F1" name="GL_TEXTURE_STENCIL_SIZE_EXT"/>
+        <enum value="0x88F2" name="GL_STENCIL_TAG_BITS_EXT"/>
+        <enum value="0x88F3" name="GL_STENCIL_CLEAR_TAG_VALUE_EXT"/>
+        <enum value="0x88F4" name="GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV"/>
+        <enum value="0x88F5" name="GL_MAX_PROGRAM_CALL_DEPTH_NV"/>
+        <enum value="0x88F6" name="GL_MAX_PROGRAM_IF_DEPTH_NV"/>
+        <enum value="0x88F7" name="GL_MAX_PROGRAM_LOOP_DEPTH_NV"/>
+        <enum value="0x88F8" name="GL_MAX_PROGRAM_LOOP_COUNT_NV"/>
+        <enum value="0x88F9" name="GL_SRC1_COLOR"/>
+        <enum value="0x88F9" name="GL_SRC1_COLOR_EXT"/>
+        <enum value="0x88FA" name="GL_ONE_MINUS_SRC1_COLOR"/>
+        <enum value="0x88FA" name="GL_ONE_MINUS_SRC1_COLOR_EXT"/>
+        <enum value="0x88FB" name="GL_ONE_MINUS_SRC1_ALPHA"/>
+        <enum value="0x88FB" name="GL_ONE_MINUS_SRC1_ALPHA_EXT"/>
+        <enum value="0x88FC" name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS"/>
+        <enum value="0x88FC" name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT"/>
+        <enum value="0x88FD" name="GL_VERTEX_ATTRIB_ARRAY_INTEGER"/>
+        <enum value="0x88FD" name="GL_VERTEX_ATTRIB_ARRAY_INTEGER_EXT"/>
+        <enum value="0x88FD" name="GL_VERTEX_ATTRIB_ARRAY_INTEGER_NV"/>
+        <enum value="0x88FE" name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR"/>
+        <enum value="0x88FE" name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE"/>
+        <enum value="0x88FE" name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ARB"/>
+        <enum value="0x88FE" name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_EXT"/>
+        <enum value="0x88FE" name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_NV"/>
+        <enum value="0x88FF" name="GL_MAX_ARRAY_TEXTURE_LAYERS"/>
+        <enum value="0x88FF" name="GL_MAX_ARRAY_TEXTURE_LAYERS_EXT"/>
+        <enum value="0x8904" name="GL_MIN_PROGRAM_TEXEL_OFFSET"/>
+        <enum value="0x8904" name="GL_MIN_PROGRAM_TEXEL_OFFSET_EXT"/>
+        <enum value="0x8904" name="GL_MIN_PROGRAM_TEXEL_OFFSET_NV"/>
+        <enum value="0x8905" name="GL_MAX_PROGRAM_TEXEL_OFFSET"/>
+        <enum value="0x8905" name="GL_MAX_PROGRAM_TEXEL_OFFSET_EXT"/>
+        <enum value="0x8905" name="GL_MAX_PROGRAM_TEXEL_OFFSET_NV"/>
+        <enum value="0x8906" name="GL_PROGRAM_ATTRIB_COMPONENTS_NV"/>
+        <enum value="0x8907" name="GL_PROGRAM_RESULT_COMPONENTS_NV"/>
+        <enum value="0x8908" name="GL_MAX_PROGRAM_ATTRIB_COMPONENTS_NV"/>
+        <enum value="0x8909" name="GL_MAX_PROGRAM_RESULT_COMPONENTS_NV"/>
+        <enum value="0x8910" name="GL_STENCIL_TEST_TWO_SIDE_EXT"/>
+        <enum value="0x8911" name="GL_ACTIVE_STENCIL_FACE_EXT"/>
+        <enum value="0x8912" name="GL_MIRROR_CLAMP_TO_BORDER_EXT"/>
+            <unused start="0x8913" vendor="NV"/>
+        <enum value="0x8914" name="GL_SAMPLES_PASSED"/>
+        <enum value="0x8914" name="GL_SAMPLES_PASSED_ARB"/>
+            <unused start="0x8915" vendor="NV"/>
+        <enum value="0x8916" name="GL_GEOMETRY_VERTICES_OUT"/>
+        <enum value="0x8916" name="GL_GEOMETRY_LINKED_VERTICES_OUT_EXT"/>
+        <enum value="0x8916" name="GL_GEOMETRY_LINKED_VERTICES_OUT_OES"/>
+        <enum value="0x8917" name="GL_GEOMETRY_INPUT_TYPE"/>
+        <enum value="0x8917" name="GL_GEOMETRY_LINKED_INPUT_TYPE_EXT"/>
+        <enum value="0x8917" name="GL_GEOMETRY_LINKED_INPUT_TYPE_OES"/>
+        <enum value="0x8918" name="GL_GEOMETRY_OUTPUT_TYPE"/>
+        <enum value="0x8918" name="GL_GEOMETRY_LINKED_OUTPUT_TYPE_EXT"/>
+        <enum value="0x8918" name="GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES"/>
+        <enum value="0x8919" name="GL_SAMPLER_BINDING"/>
+        <enum value="0x891A" name="GL_CLAMP_VERTEX_COLOR"/>
+        <enum value="0x891A" name="GL_CLAMP_VERTEX_COLOR_ARB"/>
+        <enum value="0x891B" name="GL_CLAMP_FRAGMENT_COLOR"/>
+        <enum value="0x891B" name="GL_CLAMP_FRAGMENT_COLOR_ARB"/>
+        <enum value="0x891C" name="GL_CLAMP_READ_COLOR"/>
+        <enum value="0x891C" name="GL_CLAMP_READ_COLOR_ARB"/>
+        <enum value="0x891D" name="GL_FIXED_ONLY"/>
+        <enum value="0x891D" name="GL_FIXED_ONLY_ARB"/>
+        <enum value="0x891E" name="GL_TESS_CONTROL_PROGRAM_NV"/>
+        <enum value="0x891F" name="GL_TESS_EVALUATION_PROGRAM_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8920" end="0x897F" vendor="AMD">
+        <enum value="0x8920" name="GL_FRAGMENT_SHADER_ATI"/>
+        <enum value="0x8921" name="GL_REG_0_ATI"/>
+        <enum value="0x8922" name="GL_REG_1_ATI"/>
+        <enum value="0x8923" name="GL_REG_2_ATI"/>
+        <enum value="0x8924" name="GL_REG_3_ATI"/>
+        <enum value="0x8925" name="GL_REG_4_ATI"/>
+        <enum value="0x8926" name="GL_REG_5_ATI"/>
+        <enum value="0x8927" name="GL_REG_6_ATI"/>
+        <enum value="0x8928" name="GL_REG_7_ATI"/>
+        <enum value="0x8929" name="GL_REG_8_ATI"/>
+        <enum value="0x892A" name="GL_REG_9_ATI"/>
+        <enum value="0x892B" name="GL_REG_10_ATI"/>
+        <enum value="0x892C" name="GL_REG_11_ATI"/>
+        <enum value="0x892D" name="GL_REG_12_ATI"/>
+        <enum value="0x892E" name="GL_REG_13_ATI"/>
+        <enum value="0x892F" name="GL_REG_14_ATI"/>
+        <enum value="0x8930" name="GL_REG_15_ATI"/>
+        <enum value="0x8931" name="GL_REG_16_ATI"/>
+        <enum value="0x8932" name="GL_REG_17_ATI"/>
+        <enum value="0x8933" name="GL_REG_18_ATI"/>
+        <enum value="0x8934" name="GL_REG_19_ATI"/>
+        <enum value="0x8935" name="GL_REG_20_ATI"/>
+        <enum value="0x8936" name="GL_REG_21_ATI"/>
+        <enum value="0x8937" name="GL_REG_22_ATI"/>
+        <enum value="0x8938" name="GL_REG_23_ATI"/>
+        <enum value="0x8939" name="GL_REG_24_ATI"/>
+        <enum value="0x893A" name="GL_REG_25_ATI"/>
+        <enum value="0x893B" name="GL_REG_26_ATI"/>
+        <enum value="0x893C" name="GL_REG_27_ATI"/>
+        <enum value="0x893D" name="GL_REG_28_ATI"/>
+        <enum value="0x893E" name="GL_REG_29_ATI"/>
+        <enum value="0x893F" name="GL_REG_30_ATI"/>
+        <enum value="0x8940" name="GL_REG_31_ATI"/>
+        <enum value="0x8941" name="GL_CON_0_ATI"/>
+        <enum value="0x8942" name="GL_CON_1_ATI"/>
+        <enum value="0x8943" name="GL_CON_2_ATI"/>
+        <enum value="0x8944" name="GL_CON_3_ATI"/>
+        <enum value="0x8945" name="GL_CON_4_ATI"/>
+        <enum value="0x8946" name="GL_CON_5_ATI"/>
+        <enum value="0x8947" name="GL_CON_6_ATI"/>
+        <enum value="0x8948" name="GL_CON_7_ATI"/>
+        <enum value="0x8949" name="GL_CON_8_ATI"/>
+        <enum value="0x894A" name="GL_CON_9_ATI"/>
+        <enum value="0x894B" name="GL_CON_10_ATI"/>
+        <enum value="0x894C" name="GL_CON_11_ATI"/>
+        <enum value="0x894D" name="GL_CON_12_ATI"/>
+        <enum value="0x894E" name="GL_CON_13_ATI"/>
+        <enum value="0x894F" name="GL_CON_14_ATI"/>
+        <enum value="0x8950" name="GL_CON_15_ATI"/>
+        <enum value="0x8951" name="GL_CON_16_ATI"/>
+        <enum value="0x8952" name="GL_CON_17_ATI"/>
+        <enum value="0x8953" name="GL_CON_18_ATI"/>
+        <enum value="0x8954" name="GL_CON_19_ATI"/>
+        <enum value="0x8955" name="GL_CON_20_ATI"/>
+        <enum value="0x8956" name="GL_CON_21_ATI"/>
+        <enum value="0x8957" name="GL_CON_22_ATI"/>
+        <enum value="0x8958" name="GL_CON_23_ATI"/>
+        <enum value="0x8959" name="GL_CON_24_ATI"/>
+        <enum value="0x895A" name="GL_CON_25_ATI"/>
+        <enum value="0x895B" name="GL_CON_26_ATI"/>
+        <enum value="0x895C" name="GL_CON_27_ATI"/>
+        <enum value="0x895D" name="GL_CON_28_ATI"/>
+        <enum value="0x895E" name="GL_CON_29_ATI"/>
+        <enum value="0x895F" name="GL_CON_30_ATI"/>
+        <enum value="0x8960" name="GL_CON_31_ATI"/>
+        <enum value="0x8961" name="GL_MOV_ATI"/>
+        <enum value="0x8963" name="GL_ADD_ATI"/>
+        <enum value="0x8964" name="GL_MUL_ATI"/>
+        <enum value="0x8965" name="GL_SUB_ATI"/>
+        <enum value="0x8966" name="GL_DOT3_ATI"/>
+        <enum value="0x8967" name="GL_DOT4_ATI"/>
+        <enum value="0x8968" name="GL_MAD_ATI"/>
+        <enum value="0x8969" name="GL_LERP_ATI"/>
+        <enum value="0x896A" name="GL_CND_ATI"/>
+        <enum value="0x896B" name="GL_CND0_ATI"/>
+        <enum value="0x896C" name="GL_DOT2_ADD_ATI"/>
+        <enum value="0x896D" name="GL_SECONDARY_INTERPOLATOR_ATI"/>
+        <enum value="0x896E" name="GL_NUM_FRAGMENT_REGISTERS_ATI"/>
+        <enum value="0x896F" name="GL_NUM_FRAGMENT_CONSTANTS_ATI"/>
+        <enum value="0x8970" name="GL_NUM_PASSES_ATI"/>
+        <enum value="0x8971" name="GL_NUM_INSTRUCTIONS_PER_PASS_ATI"/>
+        <enum value="0x8972" name="GL_NUM_INSTRUCTIONS_TOTAL_ATI"/>
+        <enum value="0x8973" name="GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI"/>
+        <enum value="0x8974" name="GL_NUM_LOOPBACK_COMPONENTS_ATI"/>
+        <enum value="0x8975" name="GL_COLOR_ALPHA_PAIRING_ATI"/>
+        <enum value="0x8976" name="GL_SWIZZLE_STR_ATI"/>
+        <enum value="0x8977" name="GL_SWIZZLE_STQ_ATI"/>
+        <enum value="0x8978" name="GL_SWIZZLE_STR_DR_ATI"/>
+        <enum value="0x8979" name="GL_SWIZZLE_STQ_DQ_ATI"/>
+        <enum value="0x897A" name="GL_SWIZZLE_STRQ_ATI"/>
+        <enum value="0x897B" name="GL_SWIZZLE_STRQ_DQ_ATI"/>
+            <unused start="0x897C" end="0x897F" vendor="AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8980" end="0x898F" vendor="OML">
+        <enum value="0x8980" name="GL_INTERLACE_OML"/>
+        <enum value="0x8981" name="GL_INTERLACE_READ_OML"/>
+        <enum value="0x8982" name="GL_FORMAT_SUBSAMPLE_24_24_OML"/>
+        <enum value="0x8983" name="GL_FORMAT_SUBSAMPLE_244_244_OML"/>
+        <enum value="0x8984" name="GL_PACK_RESAMPLE_OML"/>
+        <enum value="0x8985" name="GL_UNPACK_RESAMPLE_OML"/>
+        <enum value="0x8986" name="GL_RESAMPLE_REPLICATE_OML"/>
+        <enum value="0x8987" name="GL_RESAMPLE_ZERO_FILL_OML"/>
+        <enum value="0x8988" name="GL_RESAMPLE_AVERAGE_OML"/>
+        <enum value="0x8989" name="GL_RESAMPLE_DECIMATE_OML"/>
+        <enum value="0x898A" name="GL_POINT_SIZE_ARRAY_TYPE_OES"/>
+        <enum value="0x898B" name="GL_POINT_SIZE_ARRAY_STRIDE_OES"/>
+        <enum value="0x898C" name="GL_POINT_SIZE_ARRAY_POINTER_OES"/>
+        <enum value="0x898D" name="GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+        <enum value="0x898E" name="GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+        <enum value="0x898F" name="GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8990" end="0x899F" vendor="ZiiLabs">
+            <unused start="0x8990" end="0x899F" vendor="ZiiLabs"/>
+    </enums>
+
+    <enums namespace="GL" start="0x89A0" end="0x89FF" vendor="Matrox">
+            <unused start="0x89A0" end="0x89FF" vendor="Matrox"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8A00" end="0x8A7F" vendor="APPLE">
+        <enum value="0x8A00" name="GL_VERTEX_ATTRIB_MAP1_APPLE"/>
+        <enum value="0x8A01" name="GL_VERTEX_ATTRIB_MAP2_APPLE"/>
+        <enum value="0x8A02" name="GL_VERTEX_ATTRIB_MAP1_SIZE_APPLE"/>
+        <enum value="0x8A03" name="GL_VERTEX_ATTRIB_MAP1_COEFF_APPLE"/>
+        <enum value="0x8A04" name="GL_VERTEX_ATTRIB_MAP1_ORDER_APPLE"/>
+        <enum value="0x8A05" name="GL_VERTEX_ATTRIB_MAP1_DOMAIN_APPLE"/>
+        <enum value="0x8A06" name="GL_VERTEX_ATTRIB_MAP2_SIZE_APPLE"/>
+        <enum value="0x8A07" name="GL_VERTEX_ATTRIB_MAP2_COEFF_APPLE"/>
+        <enum value="0x8A08" name="GL_VERTEX_ATTRIB_MAP2_ORDER_APPLE"/>
+        <enum value="0x8A09" name="GL_VERTEX_ATTRIB_MAP2_DOMAIN_APPLE"/>
+        <enum value="0x8A0A" name="GL_DRAW_PIXELS_APPLE"/>
+        <enum value="0x8A0B" name="GL_FENCE_APPLE"/>
+        <enum value="0x8A0C" name="GL_ELEMENT_ARRAY_APPLE"/>
+        <enum value="0x8A0D" name="GL_ELEMENT_ARRAY_TYPE_APPLE"/>
+        <enum value="0x8A0E" name="GL_ELEMENT_ARRAY_POINTER_APPLE"/>
+        <enum value="0x8A0F" name="GL_COLOR_FLOAT_APPLE"/>
+            <unused start="0x8A10" vendor="APPLE" comment="Unknown extension (Khronos bug 632)"/>
+            <!-- <enum value="0x8A10" name="GL_MIN_PBUFFER_VIEWPORT_DIMS_APPLE"/> -->
+        <enum value="0x8A11" name="GL_UNIFORM_BUFFER"/>
+        <enum value="0x8A12" name="GL_BUFFER_SERIALIZED_MODIFY_APPLE"/>
+        <enum value="0x8A13" name="GL_BUFFER_FLUSHING_UNMAP_APPLE"/>
+        <enum value="0x8A14" name="GL_AUX_DEPTH_STENCIL_APPLE"/>
+        <enum value="0x8A15" name="GL_PACK_ROW_BYTES_APPLE"/>
+        <enum value="0x8A16" name="GL_UNPACK_ROW_BYTES_APPLE"/>
+            <unused start="0x8A17" end="0x8A18" vendor="APPLE"/>
+        <enum value="0x8A19" name="GL_RELEASED_APPLE"/>
+        <enum value="0x8A1A" name="GL_VOLATILE_APPLE"/>
+        <enum value="0x8A1B" name="GL_RETAINED_APPLE"/>
+        <enum value="0x8A1C" name="GL_UNDEFINED_APPLE"/>
+        <enum value="0x8A1D" name="GL_PURGEABLE_APPLE"/>
+            <unused start="0x8A1E" vendor="APPLE"/>
+        <enum value="0x8A1F" name="GL_RGB_422_APPLE"/>
+            <unused start="0x8A20" end="0x8A27" vendor="APPLE"/>
+        <enum value="0x8A28" name="GL_UNIFORM_BUFFER_BINDING"/>
+        <enum value="0x8A29" name="GL_UNIFORM_BUFFER_START"/>
+        <enum value="0x8A2A" name="GL_UNIFORM_BUFFER_SIZE"/>
+        <enum value="0x8A2B" name="GL_MAX_VERTEX_UNIFORM_BLOCKS"/>
+        <enum value="0x8A2C" name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS"/>
+        <enum value="0x8A2C" name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS_EXT"/>
+        <enum value="0x8A2C" name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES"/>
+        <enum value="0x8A2D" name="GL_MAX_FRAGMENT_UNIFORM_BLOCKS"/>
+        <enum value="0x8A2E" name="GL_MAX_COMBINED_UNIFORM_BLOCKS"/>
+        <enum value="0x8A2F" name="GL_MAX_UNIFORM_BUFFER_BINDINGS"/>
+        <enum value="0x8A30" name="GL_MAX_UNIFORM_BLOCK_SIZE"/>
+        <enum value="0x8A31" name="GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS"/>
+        <enum value="0x8A32" name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS"/>
+        <enum value="0x8A32" name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8A32" name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8A33" name="GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS"/>
+        <enum value="0x8A34" name="GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"/>
+        <enum value="0x8A35" name="GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH"/>
+        <enum value="0x8A36" name="GL_ACTIVE_UNIFORM_BLOCKS"/>
+        <enum value="0x8A37" name="GL_UNIFORM_TYPE"/>
+        <enum value="0x8A38" name="GL_UNIFORM_SIZE"/>
+        <enum value="0x8A39" name="GL_UNIFORM_NAME_LENGTH"/>
+        <enum value="0x8A3A" name="GL_UNIFORM_BLOCK_INDEX"/>
+        <enum value="0x8A3B" name="GL_UNIFORM_OFFSET"/>
+        <enum value="0x8A3C" name="GL_UNIFORM_ARRAY_STRIDE"/>
+        <enum value="0x8A3D" name="GL_UNIFORM_MATRIX_STRIDE"/>
+        <enum value="0x8A3E" name="GL_UNIFORM_IS_ROW_MAJOR"/>
+        <enum value="0x8A3F" name="GL_UNIFORM_BLOCK_BINDING"/>
+        <enum value="0x8A40" name="GL_UNIFORM_BLOCK_DATA_SIZE"/>
+        <enum value="0x8A41" name="GL_UNIFORM_BLOCK_NAME_LENGTH"/>
+        <enum value="0x8A42" name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS"/>
+        <enum value="0x8A43" name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES"/>
+        <enum value="0x8A44" name="GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER"/>
+        <enum value="0x8A45" name="GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER"/>
+        <enum value="0x8A46" name="GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <unused start="0x8A47" vendor="APPLE"/>
+        <enum value="0x8A48" name="GL_TEXTURE_SRGB_DECODE_EXT"/>
+        <enum value="0x8A49" name="GL_DECODE_EXT"/>
+        <enum value="0x8A4A" name="GL_SKIP_DECODE_EXT"/>
+            <unused start="0x8A4B" end="0x8A4E" vendor="APPLE"/>
+        <enum value="0x8A4F" name="GL_PROGRAM_PIPELINE_OBJECT_EXT"/>
+            <unused start="0x8A50" vendor="APPLE"/>
+        <enum value="0x8A51" name="GL_RGB_RAW_422_APPLE"/>
+        <enum value="0x8A52" name="GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT"/>
+        <enum value="0x8A53" name="GL_SYNC_OBJECT_APPLE"/>
+        <enum value="0x8A54" name="GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT"/>
+        <enum value="0x8A55" name="GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT"/>
+        <enum value="0x8A56" name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT"/>
+        <enum value="0x8A57" name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT"/>
+            <unused start="0x8A58" end="0x8A7F" vendor="APPLE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8A80" end="0x8AEF" vendor="Matrox">
+            <unused start="0x8A80" end="0x8AEF" vendor="Matrox"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8AF0" end="0x8B2F" vendor="Chromium" comment="For Brian Paul">
+            <unused start="0x8AF0" end="0x8B2F" vendor="Chromium"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B30" end="0x8B3F" group="ShaderType" vendor="ARB">
+        <enum value="0x8B30" name="GL_FRAGMENT_SHADER"/>
+        <enum value="0x8B30" name="GL_FRAGMENT_SHADER_ARB"/>
+        <enum value="0x8B31" name="GL_VERTEX_SHADER"/>
+        <enum value="0x8B31" name="GL_VERTEX_SHADER_ARB"/>
+            <unused start="0x8B32" end="0x8B3F" comment="For shader types"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B40" end="0x8B47" group="ContainerType" vendor="ARB">
+        <enum value="0x8B40" name="GL_PROGRAM_OBJECT_ARB"/>
+        <enum value="0x8B40" name="GL_PROGRAM_OBJECT_EXT"/>
+            <unused start="0x8B41" end="0x8B47" comment="For container types"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B48" end="0x8B4F" vendor="ARB">
+        <enum value="0x8B48" name="GL_SHADER_OBJECT_ARB"/>
+        <enum value="0x8B48" name="GL_SHADER_OBJECT_EXT"/>
+        <enum value="0x8B49" name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS"/>
+        <enum value="0x8B49" name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB"/>
+        <enum value="0x8B4A" name="GL_MAX_VERTEX_UNIFORM_COMPONENTS"/>
+        <enum value="0x8B4A" name="GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB"/>
+        <enum value="0x8B4B" name="GL_MAX_VARYING_FLOATS"/>
+        <enum value="0x8B4B" name="GL_MAX_VARYING_COMPONENTS" alias="MAX_VARYING_FLOATS"/>
+        <enum value="0x8B4B" name="GL_MAX_VARYING_COMPONENTS_EXT"/>
+        <enum value="0x8B4B" name="GL_MAX_VARYING_FLOATS_ARB"/>
+        <enum value="0x8B4C" name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8B4C" name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB"/>
+        <enum value="0x8B4D" name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8B4D" name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB"/>
+        <enum value="0x8B4E" name="GL_OBJECT_TYPE_ARB"/>
+        <enum value="0x8B4F" name="GL_SHADER_TYPE"/>
+        <enum value="0x8B4F" name="GL_OBJECT_SUBTYPE_ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B50" end="0x8B7F" group="AttributeType" vendor="ARB">
+        <enum value="0x8B50" name="GL_FLOAT_VEC2"/>
+        <enum value="0x8B50" name="GL_FLOAT_VEC2_ARB"/>
+        <enum value="0x8B51" name="GL_FLOAT_VEC3"/>
+        <enum value="0x8B51" name="GL_FLOAT_VEC3_ARB"/>
+        <enum value="0x8B52" name="GL_FLOAT_VEC4"/>
+        <enum value="0x8B52" name="GL_FLOAT_VEC4_ARB"/>
+        <enum value="0x8B53" name="GL_INT_VEC2"/>
+        <enum value="0x8B53" name="GL_INT_VEC2_ARB"/>
+        <enum value="0x8B54" name="GL_INT_VEC3"/>
+        <enum value="0x8B54" name="GL_INT_VEC3_ARB"/>
+        <enum value="0x8B55" name="GL_INT_VEC4"/>
+        <enum value="0x8B55" name="GL_INT_VEC4_ARB"/>
+        <enum value="0x8B56" name="GL_BOOL"/>
+        <enum value="0x8B56" name="GL_BOOL_ARB"/>
+        <enum value="0x8B57" name="GL_BOOL_VEC2"/>
+        <enum value="0x8B57" name="GL_BOOL_VEC2_ARB"/>
+        <enum value="0x8B58" name="GL_BOOL_VEC3"/>
+        <enum value="0x8B58" name="GL_BOOL_VEC3_ARB"/>
+        <enum value="0x8B59" name="GL_BOOL_VEC4"/>
+        <enum value="0x8B59" name="GL_BOOL_VEC4_ARB"/>
+        <enum value="0x8B5A" name="GL_FLOAT_MAT2"/>
+        <enum value="0x8B5A" name="GL_FLOAT_MAT2_ARB"/>
+        <enum value="0x8B5B" name="GL_FLOAT_MAT3"/>
+        <enum value="0x8B5B" name="GL_FLOAT_MAT3_ARB"/>
+        <enum value="0x8B5C" name="GL_FLOAT_MAT4"/>
+        <enum value="0x8B5C" name="GL_FLOAT_MAT4_ARB"/>
+        <enum value="0x8B5D" name="GL_SAMPLER_1D"/>
+        <enum value="0x8B5D" name="GL_SAMPLER_1D_ARB"/>
+        <enum value="0x8B5E" name="GL_SAMPLER_2D"/>
+        <enum value="0x8B5E" name="GL_SAMPLER_2D_ARB"/>
+        <enum value="0x8B5F" name="GL_SAMPLER_3D"/>
+        <enum value="0x8B5F" name="GL_SAMPLER_3D_ARB"/>
+        <enum value="0x8B5F" name="GL_SAMPLER_3D_OES"/>
+        <enum value="0x8B60" name="GL_SAMPLER_CUBE"/>
+        <enum value="0x8B60" name="GL_SAMPLER_CUBE_ARB"/>
+        <enum value="0x8B61" name="GL_SAMPLER_1D_SHADOW"/>
+        <enum value="0x8B61" name="GL_SAMPLER_1D_SHADOW_ARB"/>
+        <enum value="0x8B62" name="GL_SAMPLER_2D_SHADOW"/>
+        <enum value="0x8B62" name="GL_SAMPLER_2D_SHADOW_ARB"/>
+        <enum value="0x8B62" name="GL_SAMPLER_2D_SHADOW_EXT"/>
+        <enum value="0x8B63" name="GL_SAMPLER_2D_RECT"/>
+        <enum value="0x8B63" name="GL_SAMPLER_2D_RECT_ARB"/>
+        <enum value="0x8B64" name="GL_SAMPLER_2D_RECT_SHADOW"/>
+        <enum value="0x8B64" name="GL_SAMPLER_2D_RECT_SHADOW_ARB"/>
+        <enum value="0x8B65" name="GL_FLOAT_MAT2x3"/>
+        <enum value="0x8B65" name="GL_FLOAT_MAT2x3_NV"/>
+        <enum value="0x8B66" name="GL_FLOAT_MAT2x4"/>
+        <enum value="0x8B66" name="GL_FLOAT_MAT2x4_NV"/>
+        <enum value="0x8B67" name="GL_FLOAT_MAT3x2"/>
+        <enum value="0x8B67" name="GL_FLOAT_MAT3x2_NV"/>
+        <enum value="0x8B68" name="GL_FLOAT_MAT3x4"/>
+        <enum value="0x8B68" name="GL_FLOAT_MAT3x4_NV"/>
+        <enum value="0x8B69" name="GL_FLOAT_MAT4x2"/>
+        <enum value="0x8B69" name="GL_FLOAT_MAT4x2_NV"/>
+        <enum value="0x8B6A" name="GL_FLOAT_MAT4x3"/>
+        <enum value="0x8B6A" name="GL_FLOAT_MAT4x3_NV"/>
+            <unused start="0x8B6B" end="0x8B7F" comment="For attribute types"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B80" end="0x8B8F" vendor="ARB">
+        <enum value="0x8B80" name="GL_DELETE_STATUS"/>
+        <enum value="0x8B80" name="GL_OBJECT_DELETE_STATUS_ARB"/>
+        <enum value="0x8B81" name="GL_COMPILE_STATUS"/>
+        <enum value="0x8B81" name="GL_OBJECT_COMPILE_STATUS_ARB"/>
+        <enum value="0x8B82" name="GL_LINK_STATUS"/>
+        <enum value="0x8B82" name="GL_OBJECT_LINK_STATUS_ARB"/>
+        <enum value="0x8B83" name="GL_VALIDATE_STATUS"/>
+        <enum value="0x8B83" name="GL_OBJECT_VALIDATE_STATUS_ARB"/>
+        <enum value="0x8B84" name="GL_INFO_LOG_LENGTH"/>
+        <enum value="0x8B84" name="GL_OBJECT_INFO_LOG_LENGTH_ARB"/>
+        <enum value="0x8B85" name="GL_ATTACHED_SHADERS"/>
+        <enum value="0x8B85" name="GL_OBJECT_ATTACHED_OBJECTS_ARB"/>
+        <enum value="0x8B86" name="GL_ACTIVE_UNIFORMS"/>
+        <enum value="0x8B86" name="GL_OBJECT_ACTIVE_UNIFORMS_ARB"/>
+        <enum value="0x8B87" name="GL_ACTIVE_UNIFORM_MAX_LENGTH"/>
+        <enum value="0x8B87" name="GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB"/>
+        <enum value="0x8B88" name="GL_SHADER_SOURCE_LENGTH"/>
+        <enum value="0x8B88" name="GL_OBJECT_SHADER_SOURCE_LENGTH_ARB"/>
+        <enum value="0x8B89" name="GL_ACTIVE_ATTRIBUTES"/>
+        <enum value="0x8B89" name="GL_OBJECT_ACTIVE_ATTRIBUTES_ARB"/>
+        <enum value="0x8B8A" name="GL_ACTIVE_ATTRIBUTE_MAX_LENGTH"/>
+        <enum value="0x8B8A" name="GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB"/>
+        <enum value="0x8B8B" name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT"/>
+        <enum value="0x8B8B" name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB"/>
+        <enum value="0x8B8B" name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES"/>
+        <enum value="0x8B8C" name="GL_SHADING_LANGUAGE_VERSION"/>
+        <enum value="0x8B8C" name="GL_SHADING_LANGUAGE_VERSION_ARB"/>
+        <enum value="0x8B8D" name="GL_CURRENT_PROGRAM"/>
+        <enum value="0x8B8D" api="gl" name="GL_ACTIVE_PROGRAM_EXT" alias="GL_CURRENT_PROGRAM" comment="For the OpenGL version of EXT_separate_shader_objects"/>
+            <unused start="0x8B8E" end="0x8B8F" vendor="ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8B90" end="0x8B9F" vendor="OES">
+        <enum value="0x8B90" name="GL_PALETTE4_RGB8_OES"/>
+        <enum value="0x8B91" name="GL_PALETTE4_RGBA8_OES"/>
+        <enum value="0x8B92" name="GL_PALETTE4_R5_G6_B5_OES"/>
+        <enum value="0x8B93" name="GL_PALETTE4_RGBA4_OES"/>
+        <enum value="0x8B94" name="GL_PALETTE4_RGB5_A1_OES"/>
+        <enum value="0x8B95" name="GL_PALETTE8_RGB8_OES"/>
+        <enum value="0x8B96" name="GL_PALETTE8_RGBA8_OES"/>
+        <enum value="0x8B97" name="GL_PALETTE8_R5_G6_B5_OES"/>
+        <enum value="0x8B98" name="GL_PALETTE8_RGBA4_OES"/>
+        <enum value="0x8B99" name="GL_PALETTE8_RGB5_A1_OES"/>
+        <enum value="0x8B9A" name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+        <enum value="0x8B9A" name="GL_IMPLEMENTATION_COLOR_READ_TYPE_OES"/>
+        <enum value="0x8B9B" name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+        <enum value="0x8B9B" name="GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES"/>
+        <enum value="0x8B9C" name="GL_POINT_SIZE_ARRAY_OES"/>
+        <enum value="0x8B9D" name="GL_TEXTURE_CROP_RECT_OES"/>
+        <enum value="0x8B9E" name="GL_MATRIX_INDEX_ARRAY_BUFFER_BINDING_OES"/>
+        <enum value="0x8B9F" name="GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8BA0" end="0x8BAF" vendor="Seaweed">
+            <unused start="0x8BA0" end="0x8BAF" vendor="Seaweed"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8BB0" end="0x8BBF" vendor="MESA">
+        <enum value="0x8BB0" name="GL_FRAGMENT_PROGRAM_POSITION_MESA"/>
+        <enum value="0x8BB1" name="GL_FRAGMENT_PROGRAM_CALLBACK_MESA"/>
+        <enum value="0x8BB2" name="GL_FRAGMENT_PROGRAM_CALLBACK_FUNC_MESA"/>
+        <enum value="0x8BB3" name="GL_FRAGMENT_PROGRAM_CALLBACK_DATA_MESA"/>
+        <enum value="0x8BB4" name="GL_VERTEX_PROGRAM_POSITION_MESA"/>
+        <enum value="0x8BB5" name="GL_VERTEX_PROGRAM_CALLBACK_MESA"/>
+        <enum value="0x8BB6" name="GL_VERTEX_PROGRAM_CALLBACK_FUNC_MESA"/>
+        <enum value="0x8BB7" name="GL_VERTEX_PROGRAM_CALLBACK_DATA_MESA"/>
+        <enum value="0x8BB8" name="GL_TILE_RASTER_ORDER_FIXED_MESA"/>
+        <enum value="0x8BB9" name="GL_TILE_RASTER_ORDER_INCREASING_X_MESA"/>
+        <enum value="0x8BBA" name="GL_TILE_RASTER_ORDER_INCREASING_Y_MESA"/>
+        <enum value="0x8BBB" name="GL_FRAMEBUFFER_FLIP_Y_MESA" />
+    </enums>
+
+    <enums namespace="GL" start="0x8BC0" end="0x8BFF" vendor="QCOM" comment="Reassigned from AMD to QCOM">
+        <enum value="0x8BC0" name="GL_COUNTER_TYPE_AMD"/>
+        <enum value="0x8BC1" name="GL_COUNTER_RANGE_AMD"/>
+        <enum value="0x8BC2" name="GL_UNSIGNED_INT64_AMD"/>
+        <enum value="0x8BC3" name="GL_PERCENTAGE_AMD"/>
+        <enum value="0x8BC4" name="GL_PERFMON_RESULT_AVAILABLE_AMD"/>
+        <enum value="0x8BC5" name="GL_PERFMON_RESULT_SIZE_AMD"/>
+        <enum value="0x8BC6" name="GL_PERFMON_RESULT_AMD"/>
+            <unused start="0x8BC7" end="0x8BD1" vendor="QCOM"/>
+        <enum value="0x8BD2" name="GL_TEXTURE_WIDTH_QCOM"/>
+        <enum value="0x8BD3" name="GL_TEXTURE_HEIGHT_QCOM"/>
+        <enum value="0x8BD4" name="GL_TEXTURE_DEPTH_QCOM"/>
+        <enum value="0x8BD5" name="GL_TEXTURE_INTERNAL_FORMAT_QCOM"/>
+        <enum value="0x8BD6" name="GL_TEXTURE_FORMAT_QCOM"/>
+        <enum value="0x8BD7" name="GL_TEXTURE_TYPE_QCOM"/>
+        <enum value="0x8BD8" name="GL_TEXTURE_IMAGE_VALID_QCOM"/>
+        <enum value="0x8BD9" name="GL_TEXTURE_NUM_LEVELS_QCOM"/>
+        <enum value="0x8BDA" name="GL_TEXTURE_TARGET_QCOM"/>
+        <enum value="0x8BDB" name="GL_TEXTURE_OBJECT_VALID_QCOM"/>
+        <enum value="0x8BDC" name="GL_STATE_RESTORE"/>
+            <unused start="0x8BDD" end="0x8BE6" vendor="QCOM"/>
+        <enum value="0x8BE7" name="GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT"/>
+            <unused start="0x8BE8" end="0x8BEF" vendor="QCOM"/>
+        <enum value="0x8BFA" name="GL_TEXTURE_PROTECTED_EXT"/>
+        <enum value="0x8BFB" name="GL_TEXTURE_FOVEATED_FEATURE_BITS_QCOM"/>
+        <enum value="0x8BFC" name="GL_TEXTURE_FOVEATED_MIN_PIXEL_DENSITY_QCOM"/>
+        <enum value="0x8BFD" name="GL_TEXTURE_FOVEATED_FEATURE_QUERY_QCOM"/>
+        <enum value="0x8BFE" name="GL_TEXTURE_FOVEATED_NUM_FOCAL_POINTS_QUERY_QCOM"/>
+        <enum value="0x8BFF" name="GL_FRAMEBUFFER_INCOMPLETE_FOVEATION_QCOM"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8C00" end="0x8C0F" vendor="IMG">
+        <enum value="0x8C00" name="GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG"/>
+        <enum value="0x8C01" name="GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG"/>
+        <enum value="0x8C02" name="GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG"/>
+        <enum value="0x8C03" name="GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG"/>
+        <enum value="0x8C04" name="GL_MODULATE_COLOR_IMG"/>
+        <enum value="0x8C05" name="GL_RECIP_ADD_SIGNED_ALPHA_IMG"/>
+        <enum value="0x8C06" name="GL_TEXTURE_ALPHA_MODULATE_IMG"/>
+        <enum value="0x8C07" name="GL_FACTOR_ALPHA_MODULATE_IMG"/>
+        <enum value="0x8C08" name="GL_FRAGMENT_ALPHA_MODULATE_IMG"/>
+        <enum value="0x8C09" name="GL_ADD_BLEND_IMG"/>
+        <enum value="0x8C0A" name="GL_SGX_BINARY_IMG"/>
+            <unused start="0x8C0B" end="0x8C0F" vendor="IMG"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8C10" end="0x8C8F" vendor="NV" comment="For Pat Brown">
+        <enum value="0x8C10" name="GL_TEXTURE_RED_TYPE"/>
+        <enum value="0x8C10" name="GL_TEXTURE_RED_TYPE_ARB"/>
+        <enum value="0x8C11" name="GL_TEXTURE_GREEN_TYPE"/>
+        <enum value="0x8C11" name="GL_TEXTURE_GREEN_TYPE_ARB"/>
+        <enum value="0x8C12" name="GL_TEXTURE_BLUE_TYPE"/>
+        <enum value="0x8C12" name="GL_TEXTURE_BLUE_TYPE_ARB"/>
+        <enum value="0x8C13" name="GL_TEXTURE_ALPHA_TYPE"/>
+        <enum value="0x8C13" name="GL_TEXTURE_ALPHA_TYPE_ARB"/>
+        <enum value="0x8C14" name="GL_TEXTURE_LUMINANCE_TYPE"/>
+        <enum value="0x8C14" name="GL_TEXTURE_LUMINANCE_TYPE_ARB"/>
+        <enum value="0x8C15" name="GL_TEXTURE_INTENSITY_TYPE"/>
+        <enum value="0x8C15" name="GL_TEXTURE_INTENSITY_TYPE_ARB"/>
+        <enum value="0x8C16" name="GL_TEXTURE_DEPTH_TYPE"/>
+        <enum value="0x8C16" name="GL_TEXTURE_DEPTH_TYPE_ARB"/>
+        <enum value="0x8C17" name="GL_UNSIGNED_NORMALIZED"/>
+        <enum value="0x8C17" name="GL_UNSIGNED_NORMALIZED_ARB"/>
+        <enum value="0x8C17" name="GL_UNSIGNED_NORMALIZED_EXT"/>
+        <enum value="0x8C18" name="GL_TEXTURE_1D_ARRAY"/>
+        <enum value="0x8C18" name="GL_TEXTURE_1D_ARRAY_EXT"/>
+        <enum value="0x8C19" name="GL_PROXY_TEXTURE_1D_ARRAY"/>
+        <enum value="0x8C19" name="GL_PROXY_TEXTURE_1D_ARRAY_EXT"/>
+        <enum value="0x8C1A" name="GL_TEXTURE_2D_ARRAY"/>
+        <enum value="0x8C1A" name="GL_TEXTURE_2D_ARRAY_EXT"/>
+        <enum value="0x8C1B" name="GL_PROXY_TEXTURE_2D_ARRAY"/>
+        <enum value="0x8C1B" name="GL_PROXY_TEXTURE_2D_ARRAY_EXT"/>
+        <enum value="0x8C1C" name="GL_TEXTURE_BINDING_1D_ARRAY"/>
+        <enum value="0x8C1C" name="GL_TEXTURE_BINDING_1D_ARRAY_EXT"/>
+        <enum value="0x8C1D" name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+        <enum value="0x8C1D" name="GL_TEXTURE_BINDING_2D_ARRAY_EXT"/>
+            <unused start="0x8C1E" end="0x8C25" vendor="NV"/>
+        <enum value="0x8C26" name="GL_GEOMETRY_PROGRAM_NV"/>
+        <enum value="0x8C27" name="GL_MAX_PROGRAM_OUTPUT_VERTICES_NV"/>
+        <enum value="0x8C28" name="GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV"/>
+        <enum value="0x8C29" name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8C29" name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB"/>
+        <enum value="0x8C29" name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT"/>
+        <enum value="0x8C29" name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_ARB"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_EXT"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_OES"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_BINDING" comment="Equivalent to GL_TEXTURE_BUFFER_ARB query, but named more consistently"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_BINDING_EXT"/>
+        <enum value="0x8C2A" name="GL_TEXTURE_BUFFER_BINDING_OES"/>
+        <enum value="0x8C2B" name="GL_MAX_TEXTURE_BUFFER_SIZE"/>
+        <enum value="0x8C2B" name="GL_MAX_TEXTURE_BUFFER_SIZE_ARB"/>
+        <enum value="0x8C2B" name="GL_MAX_TEXTURE_BUFFER_SIZE_EXT"/>
+        <enum value="0x8C2B" name="GL_MAX_TEXTURE_BUFFER_SIZE_OES"/>
+        <enum value="0x8C2C" name="GL_TEXTURE_BINDING_BUFFER"/>
+        <enum value="0x8C2C" name="GL_TEXTURE_BINDING_BUFFER_ARB"/>
+        <enum value="0x8C2C" name="GL_TEXTURE_BINDING_BUFFER_EXT"/>
+        <enum value="0x8C2C" name="GL_TEXTURE_BINDING_BUFFER_OES"/>
+        <enum value="0x8C2D" name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING"/>
+        <enum value="0x8C2D" name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB"/>
+        <enum value="0x8C2D" name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT"/>
+        <enum value="0x8C2D" name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_OES"/>
+        <enum value="0x8C2E" name="GL_TEXTURE_BUFFER_FORMAT_ARB"/>
+        <enum value="0x8C2E" name="GL_TEXTURE_BUFFER_FORMAT_EXT"/>
+        <enum value="0x8C2F" name="GL_ANY_SAMPLES_PASSED"/>
+        <enum value="0x8C2F" name="GL_ANY_SAMPLES_PASSED_EXT"/>
+            <unused start="0x8C30" end="0x8C35" vendor="NV"/>
+        <enum value="0x8C36" name="GL_SAMPLE_SHADING"/>
+        <enum value="0x8C36" name="GL_SAMPLE_SHADING_ARB"/>
+        <enum value="0x8C36" name="GL_SAMPLE_SHADING_OES"/>
+        <enum value="0x8C37" name="GL_MIN_SAMPLE_SHADING_VALUE"/>
+        <enum value="0x8C37" name="GL_MIN_SAMPLE_SHADING_VALUE_ARB"/>
+        <enum value="0x8C37" name="GL_MIN_SAMPLE_SHADING_VALUE_OES"/>
+            <unused start="0x8C38" end="0x8C39" vendor="NV"/>
+        <enum value="0x8C3A" name="GL_R11F_G11F_B10F"/>
+        <enum value="0x8C3A" name="GL_R11F_G11F_B10F_APPLE"/>
+        <enum value="0x8C3A" name="GL_R11F_G11F_B10F_EXT"/>
+        <enum value="0x8C3B" name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+        <enum value="0x8C3B" name="GL_UNSIGNED_INT_10F_11F_11F_REV_APPLE"/>
+        <enum value="0x8C3B" name="GL_UNSIGNED_INT_10F_11F_11F_REV_EXT"/>
+        <enum value="0x8C3C" name="GL_RGBA_SIGNED_COMPONENTS_EXT"/>
+        <enum value="0x8C3D" name="GL_RGB9_E5"/>
+        <enum value="0x8C3D" name="GL_RGB9_E5_APPLE"/>
+        <enum value="0x8C3D" name="GL_RGB9_E5_EXT"/>
+        <enum value="0x8C3E" name="GL_UNSIGNED_INT_5_9_9_9_REV"/>
+        <enum value="0x8C3E" name="GL_UNSIGNED_INT_5_9_9_9_REV_APPLE"/>
+        <enum value="0x8C3E" name="GL_UNSIGNED_INT_5_9_9_9_REV_EXT"/>
+        <enum value="0x8C3F" name="GL_TEXTURE_SHARED_SIZE"/>
+        <enum value="0x8C3F" name="GL_TEXTURE_SHARED_SIZE_EXT"/>
+        <enum value="0x8C40" name="GL_SRGB"/>
+        <enum value="0x8C40" name="GL_SRGB_EXT"/>
+        <enum value="0x8C41" name="GL_SRGB8"/>
+        <enum value="0x8C41" name="GL_SRGB8_EXT"/>
+        <enum value="0x8C41" name="GL_SRGB8_NV"/>
+        <enum value="0x8C42" name="GL_SRGB_ALPHA"/>
+        <enum value="0x8C42" name="GL_SRGB_ALPHA_EXT"/>
+        <enum value="0x8C43" name="GL_SRGB8_ALPHA8"/>
+        <enum value="0x8C43" name="GL_SRGB8_ALPHA8_EXT"/>
+        <enum value="0x8C44" name="GL_SLUMINANCE_ALPHA"/>
+        <enum value="0x8C44" name="GL_SLUMINANCE_ALPHA_EXT"/>
+        <enum value="0x8C44" name="GL_SLUMINANCE_ALPHA_NV"/>
+        <enum value="0x8C45" name="GL_SLUMINANCE8_ALPHA8"/>
+        <enum value="0x8C45" name="GL_SLUMINANCE8_ALPHA8_EXT"/>
+        <enum value="0x8C45" name="GL_SLUMINANCE8_ALPHA8_NV"/>
+        <enum value="0x8C46" name="GL_SLUMINANCE"/>
+        <enum value="0x8C46" name="GL_SLUMINANCE_EXT"/>
+        <enum value="0x8C46" name="GL_SLUMINANCE_NV"/>
+        <enum value="0x8C47" name="GL_SLUMINANCE8"/>
+        <enum value="0x8C47" name="GL_SLUMINANCE8_EXT"/>
+        <enum value="0x8C47" name="GL_SLUMINANCE8_NV"/>
+        <enum value="0x8C48" name="GL_COMPRESSED_SRGB"/>
+        <enum value="0x8C48" name="GL_COMPRESSED_SRGB_EXT"/>
+        <enum value="0x8C49" name="GL_COMPRESSED_SRGB_ALPHA"/>
+        <enum value="0x8C49" name="GL_COMPRESSED_SRGB_ALPHA_EXT"/>
+        <enum value="0x8C4A" name="GL_COMPRESSED_SLUMINANCE"/>
+        <enum value="0x8C4A" name="GL_COMPRESSED_SLUMINANCE_EXT"/>
+        <enum value="0x8C4B" name="GL_COMPRESSED_SLUMINANCE_ALPHA"/>
+        <enum value="0x8C4B" name="GL_COMPRESSED_SLUMINANCE_ALPHA_EXT"/>
+        <enum value="0x8C4C" name="GL_COMPRESSED_SRGB_S3TC_DXT1_EXT"/>
+        <enum value="0x8C4C" name="GL_COMPRESSED_SRGB_S3TC_DXT1_NV"/>
+        <enum value="0x8C4D" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT"/>
+        <enum value="0x8C4D" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV"/>
+        <enum value="0x8C4E" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT"/>
+        <enum value="0x8C4E" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV"/>
+        <enum value="0x8C4F" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT"/>
+        <enum value="0x8C4F" name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV"/>
+            <unused start="0x8C50" end="0x8C6F" vendor="NV"/>
+        <enum value="0x8C70" name="GL_COMPRESSED_LUMINANCE_LATC1_EXT"/>
+        <enum value="0x8C71" name="GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT"/>
+        <enum value="0x8C72" name="GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT"/>
+        <enum value="0x8C73" name="GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT"/>
+        <enum value="0x8C74" name="GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV"/>
+        <enum value="0x8C75" name="GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV"/>
+        <enum value="0x8C76" name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH"/>
+        <enum value="0x8C76" name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT"/>
+        <enum value="0x8C77" name="GL_BACK_PRIMARY_COLOR_NV"/>
+        <enum value="0x8C78" name="GL_BACK_SECONDARY_COLOR_NV"/>
+        <enum value="0x8C79" name="GL_TEXTURE_COORD_NV"/>
+        <enum value="0x8C7A" name="GL_CLIP_DISTANCE_NV"/>
+        <enum value="0x8C7B" name="GL_VERTEX_ID_NV"/>
+        <enum value="0x8C7C" name="GL_PRIMITIVE_ID_NV"/>
+        <enum value="0x8C7D" name="GL_GENERIC_ATTRIB_NV"/>
+        <enum value="0x8C7E" name="GL_TRANSFORM_FEEDBACK_ATTRIBS_NV"/>
+        <enum value="0x8C7F" name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE"/>
+        <enum value="0x8C7F" name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE_EXT"/>
+        <enum value="0x8C7F" name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV"/>
+        <enum value="0x8C80" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS"/>
+        <enum value="0x8C80" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT"/>
+        <enum value="0x8C80" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV"/>
+        <enum value="0x8C81" name="GL_ACTIVE_VARYINGS_NV"/>
+        <enum value="0x8C82" name="GL_ACTIVE_VARYING_MAX_LENGTH_NV"/>
+        <enum value="0x8C83" name="GL_TRANSFORM_FEEDBACK_VARYINGS"/>
+        <enum value="0x8C83" name="GL_TRANSFORM_FEEDBACK_VARYINGS_EXT"/>
+        <enum value="0x8C83" name="GL_TRANSFORM_FEEDBACK_VARYINGS_NV"/>
+        <enum value="0x8C84" name="GL_TRANSFORM_FEEDBACK_BUFFER_START"/>
+        <enum value="0x8C84" name="GL_TRANSFORM_FEEDBACK_BUFFER_START_EXT"/>
+        <enum value="0x8C84" name="GL_TRANSFORM_FEEDBACK_BUFFER_START_NV"/>
+        <enum value="0x8C85" name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"/>
+        <enum value="0x8C85" name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_EXT"/>
+        <enum value="0x8C85" name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV"/>
+        <enum value="0x8C86" name="GL_TRANSFORM_FEEDBACK_RECORD_NV"/>
+        <enum value="0x8C87" name="GL_PRIMITIVES_GENERATED"/>
+        <enum value="0x8C87" name="GL_PRIMITIVES_GENERATED_EXT"/>
+        <enum value="0x8C87" name="GL_PRIMITIVES_GENERATED_NV"/>
+        <enum value="0x8C87" name="GL_PRIMITIVES_GENERATED_OES"/>
+        <enum value="0x8C88" name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"/>
+        <enum value="0x8C88" name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_EXT"/>
+        <enum value="0x8C88" name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV"/>
+        <enum value="0x8C89" name="GL_RASTERIZER_DISCARD"/>
+        <enum value="0x8C89" name="GL_RASTERIZER_DISCARD_EXT"/>
+        <enum value="0x8C89" name="GL_RASTERIZER_DISCARD_NV"/>
+        <enum value="0x8C8A" name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS"/>
+        <enum value="0x8C8A" name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT"/>
+        <enum value="0x8C8A" name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV"/>
+        <enum value="0x8C8B" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS"/>
+        <enum value="0x8C8B" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_EXT"/>
+        <enum value="0x8C8B" name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV"/>
+        <enum value="0x8C8C" name="GL_INTERLEAVED_ATTRIBS"/>
+        <enum value="0x8C8C" name="GL_INTERLEAVED_ATTRIBS_EXT"/>
+        <enum value="0x8C8C" name="GL_INTERLEAVED_ATTRIBS_NV"/>
+        <enum value="0x8C8D" name="GL_SEPARATE_ATTRIBS"/>
+        <enum value="0x8C8D" name="GL_SEPARATE_ATTRIBS_EXT"/>
+        <enum value="0x8C8D" name="GL_SEPARATE_ATTRIBS_NV"/>
+        <enum value="0x8C8E" name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+        <enum value="0x8C8E" name="GL_TRANSFORM_FEEDBACK_BUFFER_EXT"/>
+        <enum value="0x8C8E" name="GL_TRANSFORM_FEEDBACK_BUFFER_NV"/>
+        <enum value="0x8C8F" name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"/>
+        <enum value="0x8C8F" name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_EXT"/>
+        <enum value="0x8C8F" name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8C90" end="0x8C9F" vendor="QCOM" comment="For Affie Munshi. Reassigned from AMD to QCOM (bug 5874)">
+            <unused start="0x8C90" end="0x8C91" vendor="QCOM"/>
+        <enum value="0x8C92" name="GL_ATC_RGB_AMD"/>
+        <enum value="0x8C93" name="GL_ATC_RGBA_EXPLICIT_ALPHA_AMD"/>
+            <unused start="0x8C94" end="0x8C9F" vendor="QCOM"/>
+    </enums>
+    <enums namespace="GL" start="0x8CA0" end="0x8CAF" vendor="ARB">
+        <enum value="0x8CA0" name="GL_POINT_SPRITE_COORD_ORIGIN"/>
+        <enum value="0x8CA1" name="GL_LOWER_LEFT"/>
+        <enum value="0x8CA1" name="GL_LOWER_LEFT_EXT" alias="GL_LOWER_LEFT"/>
+        <enum value="0x8CA2" name="GL_UPPER_LEFT"/>
+        <enum value="0x8CA2" name="GL_UPPER_LEFT_EXT" alias="GL_UPPER_LEFT"/>
+        <enum value="0x8CA3" name="GL_STENCIL_BACK_REF"/>
+        <enum value="0x8CA4" name="GL_STENCIL_BACK_VALUE_MASK"/>
+        <enum value="0x8CA5" name="GL_STENCIL_BACK_WRITEMASK"/>
+        <enum value="0x8CA6" name="GL_DRAW_FRAMEBUFFER_BINDING"/>
+        <enum value="0x8CA6" name="GL_DRAW_FRAMEBUFFER_BINDING_ANGLE"/>
+        <enum value="0x8CA6" name="GL_DRAW_FRAMEBUFFER_BINDING_APPLE"/>
+        <enum value="0x8CA6" name="GL_DRAW_FRAMEBUFFER_BINDING_EXT"/>
+        <enum value="0x8CA6" name="GL_DRAW_FRAMEBUFFER_BINDING_NV"/>
+        <enum value="0x8CA6" name="GL_FRAMEBUFFER_BINDING"/>
+        <enum value="0x8CA6" name="GL_FRAMEBUFFER_BINDING_ANGLE"/>
+        <enum value="0x8CA6" name="GL_FRAMEBUFFER_BINDING_EXT"/>
+        <enum value="0x8CA6" name="GL_FRAMEBUFFER_BINDING_OES"/>
+        <enum value="0x8CA7" name="GL_RENDERBUFFER_BINDING"/>
+        <enum value="0x8CA7" name="GL_RENDERBUFFER_BINDING_ANGLE"/>
+        <enum value="0x8CA7" name="GL_RENDERBUFFER_BINDING_EXT"/>
+        <enum value="0x8CA7" name="GL_RENDERBUFFER_BINDING_OES"/>
+        <enum value="0x8CA8" name="GL_READ_FRAMEBUFFER"/>
+        <enum value="0x8CA8" name="GL_READ_FRAMEBUFFER_ANGLE"/>
+        <enum value="0x8CA8" name="GL_READ_FRAMEBUFFER_APPLE"/>
+        <enum value="0x8CA8" name="GL_READ_FRAMEBUFFER_EXT"/>
+        <enum value="0x8CA8" name="GL_READ_FRAMEBUFFER_NV"/>
+        <enum value="0x8CA9" name="GL_DRAW_FRAMEBUFFER"/>
+        <enum value="0x8CA9" name="GL_DRAW_FRAMEBUFFER_ANGLE"/>
+        <enum value="0x8CA9" name="GL_DRAW_FRAMEBUFFER_APPLE"/>
+        <enum value="0x8CA9" name="GL_DRAW_FRAMEBUFFER_EXT"/>
+        <enum value="0x8CA9" name="GL_DRAW_FRAMEBUFFER_NV"/>
+        <enum value="0x8CAA" name="GL_READ_FRAMEBUFFER_BINDING"/>
+        <enum value="0x8CAA" name="GL_READ_FRAMEBUFFER_BINDING_ANGLE"/>
+        <enum value="0x8CAA" name="GL_READ_FRAMEBUFFER_BINDING_APPLE"/>
+        <enum value="0x8CAA" name="GL_READ_FRAMEBUFFER_BINDING_EXT"/>
+        <enum value="0x8CAA" name="GL_READ_FRAMEBUFFER_BINDING_NV"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_COVERAGE_SAMPLES_NV"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_SAMPLES"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_SAMPLES_ANGLE"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_SAMPLES_APPLE"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_SAMPLES_EXT"/>
+        <enum value="0x8CAB" name="GL_RENDERBUFFER_SAMPLES_NV"/>
+        <enum value="0x8CAC" name="GL_DEPTH_COMPONENT32F"/>
+        <enum value="0x8CAD" name="GL_DEPTH32F_STENCIL8"/>
+            <unused start="0x8CAE" end="0x8CAF" vendor="ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8CB0" end="0x8CCF" vendor="ZiiLabs" comment="For Barthold Lichtenbelt 2004/12/1">
+            <unused start="0x8CB0" end="0x8CCF" vendor="ZiiLabs"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8CD0" end="0x8D5F" vendor="ARB" comment="Framebuffer object specification + headroom">
+        <enum value="0x8CD0" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"/>
+        <enum value="0x8CD0" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT"/>
+        <enum value="0x8CD0" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_OES"/>
+        <enum value="0x8CD1" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+        <enum value="0x8CD1" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT"/>
+        <enum value="0x8CD1" name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_OES"/>
+        <enum value="0x8CD2" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+        <enum value="0x8CD2" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT"/>
+        <enum value="0x8CD2" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_OES"/>
+        <enum value="0x8CD3" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"/>
+        <enum value="0x8CD3" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT"/>
+        <enum value="0x8CD3" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_OES"/>
+        <enum value="0x8CD4" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT"/>
+        <enum value="0x8CD4" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES"/>
+        <enum value="0x8CD4" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+        <enum value="0x8CD4" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT"/>
+        <enum value="0x8CD5" name="GL_FRAMEBUFFER_COMPLETE"/>
+        <enum value="0x8CD5" name="GL_FRAMEBUFFER_COMPLETE_EXT"/>
+        <enum value="0x8CD5" name="GL_FRAMEBUFFER_COMPLETE_OES"/>
+        <enum value="0x8CD6" name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+        <enum value="0x8CD6" name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT"/>
+        <enum value="0x8CD6" name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_OES"/>
+        <enum value="0x8CD7" name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+        <enum value="0x8CD7" name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT"/>
+        <enum value="0x8CD7" name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_OES"/>
+            <unused start="0x8CD8" vendor="ARB" comment="Removed 2005/09/26 in revision #117 of the FBO extension spec"/>
+            <!-- <enum value="0x8CD8" name="GL_FRAMEBUFFER_INCOMPLETE_DUPLICATE_ATTACHMENT_EXT"/> -->
+        <enum value="0x8CD9" name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS"/>
+        <enum value="0x8CD9" name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT"/>
+        <enum value="0x8CD9" name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_OES"/>
+        <enum value="0x8CDA" name="GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT"/>
+        <enum value="0x8CDA" name="GL_FRAMEBUFFER_INCOMPLETE_FORMATS_OES"/>
+        <enum value="0x8CDB" name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER"/>
+        <enum value="0x8CDB" name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT"/>
+        <enum value="0x8CDB" name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_OES"/>
+        <enum value="0x8CDC" name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER"/>
+        <enum value="0x8CDC" name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT"/>
+        <enum value="0x8CDC" name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_OES"/>
+        <enum value="0x8CDD" name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+        <enum value="0x8CDD" name="GL_FRAMEBUFFER_UNSUPPORTED_EXT"/>
+        <enum value="0x8CDD" name="GL_FRAMEBUFFER_UNSUPPORTED_OES"/>
+            <unused start="0x8CDE" vendor="ARB" comment="Removed 2005/05/31 in revision #113 of the FBO extension spec"/>
+            <!-- <enum value="0x8CDE" name="GL_FRAMEBUFFER_STATUS_ERROR_EXT"/> -->
+        <enum value="0x8CDF" name="GL_MAX_COLOR_ATTACHMENTS"/>
+        <enum value="0x8CDF" name="GL_MAX_COLOR_ATTACHMENTS_EXT"/>
+        <enum value="0x8CDF" name="GL_MAX_COLOR_ATTACHMENTS_NV"/>
+        <enum value="0x8CE0" name="GL_COLOR_ATTACHMENT0"/>
+        <enum value="0x8CE0" name="GL_COLOR_ATTACHMENT0_EXT"/>
+        <enum value="0x8CE0" name="GL_COLOR_ATTACHMENT0_NV"/>
+        <enum value="0x8CE0" name="GL_COLOR_ATTACHMENT0_OES"/>
+        <enum value="0x8CE1" name="GL_COLOR_ATTACHMENT1"/>
+        <enum value="0x8CE1" name="GL_COLOR_ATTACHMENT1_EXT"/>
+        <enum value="0x8CE1" name="GL_COLOR_ATTACHMENT1_NV"/>
+        <enum value="0x8CE2" name="GL_COLOR_ATTACHMENT2"/>
+        <enum value="0x8CE2" name="GL_COLOR_ATTACHMENT2_EXT"/>
+        <enum value="0x8CE2" name="GL_COLOR_ATTACHMENT2_NV"/>
+        <enum value="0x8CE3" name="GL_COLOR_ATTACHMENT3"/>
+        <enum value="0x8CE3" name="GL_COLOR_ATTACHMENT3_EXT"/>
+        <enum value="0x8CE3" name="GL_COLOR_ATTACHMENT3_NV"/>
+        <enum value="0x8CE4" name="GL_COLOR_ATTACHMENT4"/>
+        <enum value="0x8CE4" name="GL_COLOR_ATTACHMENT4_EXT"/>
+        <enum value="0x8CE4" name="GL_COLOR_ATTACHMENT4_NV"/>
+        <enum value="0x8CE5" name="GL_COLOR_ATTACHMENT5"/>
+        <enum value="0x8CE5" name="GL_COLOR_ATTACHMENT5_EXT"/>
+        <enum value="0x8CE5" name="GL_COLOR_ATTACHMENT5_NV"/>
+        <enum value="0x8CE6" name="GL_COLOR_ATTACHMENT6"/>
+        <enum value="0x8CE6" name="GL_COLOR_ATTACHMENT6_EXT"/>
+        <enum value="0x8CE6" name="GL_COLOR_ATTACHMENT6_NV"/>
+        <enum value="0x8CE7" name="GL_COLOR_ATTACHMENT7"/>
+        <enum value="0x8CE7" name="GL_COLOR_ATTACHMENT7_EXT"/>
+        <enum value="0x8CE7" name="GL_COLOR_ATTACHMENT7_NV"/>
+        <enum value="0x8CE8" name="GL_COLOR_ATTACHMENT8"/>
+        <enum value="0x8CE8" name="GL_COLOR_ATTACHMENT8_EXT"/>
+        <enum value="0x8CE8" name="GL_COLOR_ATTACHMENT8_NV"/>
+        <enum value="0x8CE9" name="GL_COLOR_ATTACHMENT9"/>
+        <enum value="0x8CE9" name="GL_COLOR_ATTACHMENT9_EXT"/>
+        <enum value="0x8CE9" name="GL_COLOR_ATTACHMENT9_NV"/>
+        <enum value="0x8CEA" name="GL_COLOR_ATTACHMENT10"/>
+        <enum value="0x8CEA" name="GL_COLOR_ATTACHMENT10_EXT"/>
+        <enum value="0x8CEA" name="GL_COLOR_ATTACHMENT10_NV"/>
+        <enum value="0x8CEB" name="GL_COLOR_ATTACHMENT11"/>
+        <enum value="0x8CEB" name="GL_COLOR_ATTACHMENT11_EXT"/>
+        <enum value="0x8CEB" name="GL_COLOR_ATTACHMENT11_NV"/>
+        <enum value="0x8CEC" name="GL_COLOR_ATTACHMENT12"/>
+        <enum value="0x8CEC" name="GL_COLOR_ATTACHMENT12_EXT"/>
+        <enum value="0x8CEC" name="GL_COLOR_ATTACHMENT12_NV"/>
+        <enum value="0x8CED" name="GL_COLOR_ATTACHMENT13"/>
+        <enum value="0x8CED" name="GL_COLOR_ATTACHMENT13_EXT"/>
+        <enum value="0x8CED" name="GL_COLOR_ATTACHMENT13_NV"/>
+        <enum value="0x8CEE" name="GL_COLOR_ATTACHMENT14"/>
+        <enum value="0x8CEE" name="GL_COLOR_ATTACHMENT14_EXT"/>
+        <enum value="0x8CEE" name="GL_COLOR_ATTACHMENT14_NV"/>
+        <enum value="0x8CEF" name="GL_COLOR_ATTACHMENT15"/>
+        <enum value="0x8CEF" name="GL_COLOR_ATTACHMENT15_EXT"/>
+        <enum value="0x8CEF" name="GL_COLOR_ATTACHMENT15_NV"/>
+        <enum value="0x8CF0" name="GL_COLOR_ATTACHMENT16"/>
+        <enum value="0x8CF1" name="GL_COLOR_ATTACHMENT17"/>
+        <enum value="0x8CF2" name="GL_COLOR_ATTACHMENT18"/>
+        <enum value="0x8CF3" name="GL_COLOR_ATTACHMENT19"/>
+        <enum value="0x8CF4" name="GL_COLOR_ATTACHMENT20"/>
+        <enum value="0x8CF5" name="GL_COLOR_ATTACHMENT21"/>
+        <enum value="0x8CF6" name="GL_COLOR_ATTACHMENT22"/>
+        <enum value="0x8CF7" name="GL_COLOR_ATTACHMENT23"/>
+        <enum value="0x8CF8" name="GL_COLOR_ATTACHMENT24"/>
+        <enum value="0x8CF9" name="GL_COLOR_ATTACHMENT25"/>
+        <enum value="0x8CFA" name="GL_COLOR_ATTACHMENT26"/>
+        <enum value="0x8CFB" name="GL_COLOR_ATTACHMENT27"/>
+        <enum value="0x8CFC" name="GL_COLOR_ATTACHMENT28"/>
+        <enum value="0x8CFD" name="GL_COLOR_ATTACHMENT29"/>
+        <enum value="0x8CFE" name="GL_COLOR_ATTACHMENT30"/>
+        <enum value="0x8CFF" name="GL_COLOR_ATTACHMENT31"/>
+        <enum value="0x8D00" name="GL_DEPTH_ATTACHMENT"/>
+        <enum value="0x8D00" name="GL_DEPTH_ATTACHMENT_EXT"/>
+        <enum value="0x8D00" name="GL_DEPTH_ATTACHMENT_OES"/>
+            <unused start="0x8D01" end="0x8D1F" vendor="ARB" comment="For depth attachments 16-31"/>
+        <enum value="0x8D20" name="GL_STENCIL_ATTACHMENT"/>
+        <enum value="0x8D20" name="GL_STENCIL_ATTACHMENT_EXT"/>
+        <enum value="0x8D20" name="GL_STENCIL_ATTACHMENT_OES"/>
+            <unused start="0x8D21" end="0x8D3F" vendor="ARB" comment="For stencil attachments 16-31"/>
+        <enum value="0x8D40" name="GL_FRAMEBUFFER"/>
+        <enum value="0x8D40" name="GL_FRAMEBUFFER_EXT"/>
+        <enum value="0x8D40" name="GL_FRAMEBUFFER_OES"/>
+        <enum value="0x8D41" name="GL_RENDERBUFFER"/>
+        <enum value="0x8D41" name="GL_RENDERBUFFER_EXT"/>
+        <enum value="0x8D41" name="GL_RENDERBUFFER_OES"/>
+        <enum value="0x8D42" name="GL_RENDERBUFFER_WIDTH"/>
+        <enum value="0x8D42" name="GL_RENDERBUFFER_WIDTH_EXT"/>
+        <enum value="0x8D42" name="GL_RENDERBUFFER_WIDTH_OES"/>
+        <enum value="0x8D43" name="GL_RENDERBUFFER_HEIGHT"/>
+        <enum value="0x8D43" name="GL_RENDERBUFFER_HEIGHT_EXT"/>
+        <enum value="0x8D43" name="GL_RENDERBUFFER_HEIGHT_OES"/>
+        <enum value="0x8D44" name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+        <enum value="0x8D44" name="GL_RENDERBUFFER_INTERNAL_FORMAT_EXT"/>
+        <enum value="0x8D44" name="GL_RENDERBUFFER_INTERNAL_FORMAT_OES"/>
+            <unused start="0x8D45" vendor="ARB" comment="Was for GL_STENCIL_INDEX_EXT, but now use core STENCIL_INDEX instead"/>
+        <enum value="0x8D46" name="GL_STENCIL_INDEX1"/>
+        <enum value="0x8D46" name="GL_STENCIL_INDEX1_EXT"/>
+        <enum value="0x8D46" name="GL_STENCIL_INDEX1_OES"/>
+        <enum value="0x8D47" name="GL_STENCIL_INDEX4"/>
+        <enum value="0x8D47" name="GL_STENCIL_INDEX4_EXT"/>
+        <enum value="0x8D47" name="GL_STENCIL_INDEX4_OES"/>
+        <enum value="0x8D48" name="GL_STENCIL_INDEX8"/>
+        <enum value="0x8D48" name="GL_STENCIL_INDEX8_EXT"/>
+        <enum value="0x8D48" name="GL_STENCIL_INDEX8_OES"/>
+        <enum value="0x8D49" name="GL_STENCIL_INDEX16"/>
+        <enum value="0x8D49" name="GL_STENCIL_INDEX16_EXT"/>
+            <unused start="0x8D4A" end="0x8D4F" vendor="ARB" comment="For additional stencil formats"/>
+        <enum value="0x8D50" name="GL_RENDERBUFFER_RED_SIZE"/>
+        <enum value="0x8D50" name="GL_RENDERBUFFER_RED_SIZE_EXT"/>
+        <enum value="0x8D50" name="GL_RENDERBUFFER_RED_SIZE_OES"/>
+        <enum value="0x8D51" name="GL_RENDERBUFFER_GREEN_SIZE"/>
+        <enum value="0x8D51" name="GL_RENDERBUFFER_GREEN_SIZE_EXT"/>
+        <enum value="0x8D51" name="GL_RENDERBUFFER_GREEN_SIZE_OES"/>
+        <enum value="0x8D52" name="GL_RENDERBUFFER_BLUE_SIZE"/>
+        <enum value="0x8D52" name="GL_RENDERBUFFER_BLUE_SIZE_EXT"/>
+        <enum value="0x8D52" name="GL_RENDERBUFFER_BLUE_SIZE_OES"/>
+        <enum value="0x8D53" name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+        <enum value="0x8D53" name="GL_RENDERBUFFER_ALPHA_SIZE_EXT"/>
+        <enum value="0x8D53" name="GL_RENDERBUFFER_ALPHA_SIZE_OES"/>
+        <enum value="0x8D54" name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+        <enum value="0x8D54" name="GL_RENDERBUFFER_DEPTH_SIZE_EXT"/>
+        <enum value="0x8D54" name="GL_RENDERBUFFER_DEPTH_SIZE_OES"/>
+        <enum value="0x8D55" name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+        <enum value="0x8D55" name="GL_RENDERBUFFER_STENCIL_SIZE_EXT"/>
+        <enum value="0x8D55" name="GL_RENDERBUFFER_STENCIL_SIZE_OES"/>
+        <enum value="0x8D56" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+        <enum value="0x8D56" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE"/>
+        <enum value="0x8D56" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE"/>
+        <enum value="0x8D56" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT"/>
+        <enum value="0x8D56" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_NV"/>
+        <enum value="0x8D57" name="GL_MAX_SAMPLES"/>
+        <enum value="0x8D57" name="GL_MAX_SAMPLES_ANGLE"/>
+        <enum value="0x8D57" name="GL_MAX_SAMPLES_APPLE"/>
+        <enum value="0x8D57" name="GL_MAX_SAMPLES_EXT"/>
+        <enum value="0x8D57" name="GL_MAX_SAMPLES_NV"/>
+            <unused start="0x8D58" end="0x8D5F" vendor="ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8D60" end="0x8D6F" vendor="OES">
+        <enum value="0x8D60" name="GL_TEXTURE_GEN_STR_OES"/>
+        <enum value="0x8D61" name="GL_HALF_FLOAT_OES"/>
+        <enum value="0x8D62" name="GL_RGB565_OES"/>
+        <enum value="0x8D62" name="GL_RGB565"/>
+            <unused start="0x8D63" vendor="OES" comment="Was GL_TEXTURE_IMMUTABLE_LEVELS in draft ES 3.0 spec"/>
+        <enum value="0x8D64" name="GL_ETC1_RGB8_OES"/>
+        <enum value="0x8D65" name="GL_TEXTURE_EXTERNAL_OES"/>
+        <enum value="0x8D66" name="GL_SAMPLER_EXTERNAL_OES"/>
+        <enum value="0x8D67" name="GL_TEXTURE_BINDING_EXTERNAL_OES"/>
+        <enum value="0x8D68" name="GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES"/>
+        <enum value="0x8D69" name="GL_PRIMITIVE_RESTART_FIXED_INDEX"/>
+        <enum value="0x8D6A" name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE"/>
+        <enum value="0x8D6A" name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT"/>
+        <enum value="0x8D6B" name="GL_MAX_ELEMENT_INDEX"/>
+        <enum value="0x8D6C" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT"/>
+            <unused start="0x8D6D" end="0x8D6F" vendor="OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8D70" end="0x8DEF" vendor="NV" comment="For Pat Brown 2005/10/13">
+        <enum value="0x8D70" name="GL_RGBA32UI"/>
+        <enum value="0x8D70" name="GL_RGBA32UI_EXT"/>
+        <enum value="0x8D71" name="GL_RGB32UI"/>
+        <enum value="0x8D71" name="GL_RGB32UI_EXT"/>
+        <enum value="0x8D72" name="GL_ALPHA32UI_EXT"/>
+        <enum value="0x8D73" name="GL_INTENSITY32UI_EXT"/>
+        <enum value="0x8D74" name="GL_LUMINANCE32UI_EXT"/>
+        <enum value="0x8D75" name="GL_LUMINANCE_ALPHA32UI_EXT"/>
+        <enum value="0x8D76" name="GL_RGBA16UI"/>
+        <enum value="0x8D76" name="GL_RGBA16UI_EXT"/>
+        <enum value="0x8D77" name="GL_RGB16UI"/>
+        <enum value="0x8D77" name="GL_RGB16UI_EXT"/>
+        <enum value="0x8D78" name="GL_ALPHA16UI_EXT"/>
+        <enum value="0x8D79" name="GL_INTENSITY16UI_EXT"/>
+        <enum value="0x8D7A" name="GL_LUMINANCE16UI_EXT"/>
+        <enum value="0x8D7B" name="GL_LUMINANCE_ALPHA16UI_EXT"/>
+        <enum value="0x8D7C" name="GL_RGBA8UI"/>
+        <enum value="0x8D7C" name="GL_RGBA8UI_EXT"/>
+        <enum value="0x8D7D" name="GL_RGB8UI"/>
+        <enum value="0x8D7D" name="GL_RGB8UI_EXT"/>
+        <enum value="0x8D7E" name="GL_ALPHA8UI_EXT"/>
+        <enum value="0x8D7F" name="GL_INTENSITY8UI_EXT"/>
+        <enum value="0x8D80" name="GL_LUMINANCE8UI_EXT"/>
+        <enum value="0x8D81" name="GL_LUMINANCE_ALPHA8UI_EXT"/>
+        <enum value="0x8D82" name="GL_RGBA32I"/>
+        <enum value="0x8D82" name="GL_RGBA32I_EXT"/>
+        <enum value="0x8D83" name="GL_RGB32I"/>
+        <enum value="0x8D83" name="GL_RGB32I_EXT"/>
+        <enum value="0x8D84" name="GL_ALPHA32I_EXT"/>
+        <enum value="0x8D85" name="GL_INTENSITY32I_EXT"/>
+        <enum value="0x8D86" name="GL_LUMINANCE32I_EXT"/>
+        <enum value="0x8D87" name="GL_LUMINANCE_ALPHA32I_EXT"/>
+        <enum value="0x8D88" name="GL_RGBA16I"/>
+        <enum value="0x8D88" name="GL_RGBA16I_EXT"/>
+        <enum value="0x8D89" name="GL_RGB16I"/>
+        <enum value="0x8D89" name="GL_RGB16I_EXT"/>
+        <enum value="0x8D8A" name="GL_ALPHA16I_EXT"/>
+        <enum value="0x8D8B" name="GL_INTENSITY16I_EXT"/>
+        <enum value="0x8D8C" name="GL_LUMINANCE16I_EXT"/>
+        <enum value="0x8D8D" name="GL_LUMINANCE_ALPHA16I_EXT"/>
+        <enum value="0x8D8E" name="GL_RGBA8I"/>
+        <enum value="0x8D8E" name="GL_RGBA8I_EXT"/>
+        <enum value="0x8D8F" name="GL_RGB8I"/>
+        <enum value="0x8D8F" name="GL_RGB8I_EXT"/>
+        <enum value="0x8D90" name="GL_ALPHA8I_EXT"/>
+        <enum value="0x8D91" name="GL_INTENSITY8I_EXT"/>
+        <enum value="0x8D92" name="GL_LUMINANCE8I_EXT"/>
+        <enum value="0x8D93" name="GL_LUMINANCE_ALPHA8I_EXT"/>
+        <enum value="0x8D94" name="GL_RED_INTEGER"/>
+        <enum value="0x8D94" name="GL_RED_INTEGER_EXT"/>
+        <enum value="0x8D95" name="GL_GREEN_INTEGER"/>
+        <enum value="0x8D95" name="GL_GREEN_INTEGER_EXT"/>
+        <enum value="0x8D96" name="GL_BLUE_INTEGER"/>
+        <enum value="0x8D96" name="GL_BLUE_INTEGER_EXT"/>
+        <enum value="0x8D97" name="GL_ALPHA_INTEGER"/>
+        <enum value="0x8D97" name="GL_ALPHA_INTEGER_EXT"/>
+        <enum value="0x8D98" name="GL_RGB_INTEGER"/>
+        <enum value="0x8D98" name="GL_RGB_INTEGER_EXT"/>
+        <enum value="0x8D99" name="GL_RGBA_INTEGER"/>
+        <enum value="0x8D99" name="GL_RGBA_INTEGER_EXT"/>
+        <enum value="0x8D9A" name="GL_BGR_INTEGER"/>
+        <enum value="0x8D9A" name="GL_BGR_INTEGER_EXT"/>
+        <enum value="0x8D9B" name="GL_BGRA_INTEGER"/>
+        <enum value="0x8D9B" name="GL_BGRA_INTEGER_EXT"/>
+        <enum value="0x8D9C" name="GL_LUMINANCE_INTEGER_EXT"/>
+        <enum value="0x8D9D" name="GL_LUMINANCE_ALPHA_INTEGER_EXT"/>
+        <enum value="0x8D9E" name="GL_RGBA_INTEGER_MODE_EXT"/>
+        <enum value="0x8D9F" name="GL_INT_2_10_10_10_REV"/>
+        <enum value="0x8DA0" name="GL_MAX_PROGRAM_PARAMETER_BUFFER_BINDINGS_NV"/>
+        <enum value="0x8DA1" name="GL_MAX_PROGRAM_PARAMETER_BUFFER_SIZE_NV"/>
+        <enum value="0x8DA2" name="GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV"/>
+        <enum value="0x8DA3" name="GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV"/>
+        <enum value="0x8DA4" name="GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV"/>
+        <enum value="0x8DA5" name="GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV"/>
+        <enum value="0x8DA6" name="GL_MAX_PROGRAM_GENERIC_RESULTS_NV"/>
+        <enum value="0x8DA7" name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED"/>
+        <enum value="0x8DA7" name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_ARB"/>
+        <enum value="0x8DA7" name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT"/>
+        <enum value="0x8DA7" name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES"/>
+        <enum value="0x8DA8" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS"/>
+        <enum value="0x8DA8" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_ARB"/>
+        <enum value="0x8DA8" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT"/>
+        <enum value="0x8DA8" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES"/>
+        <enum value="0x8DA9" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB"/>
+        <enum value="0x8DA9" name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT"/>
+            <!-- Also see the odd namespace "NVTransformFeedbackToken" above -->
+        <enum value="0x8DAA" name="GL_LAYER_NV"/>
+        <enum value="0x8DAB" name="GL_DEPTH_COMPONENT32F_NV"/>
+        <enum value="0x8DAC" name="GL_DEPTH32F_STENCIL8_NV"/>
+        <enum value="0x8DAD" name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV"/>
+        <enum value="0x8DAD" name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV_NV"/>
+        <enum value="0x8DAE" name="GL_SHADER_INCLUDE_ARB"/>
+        <enum value="0x8DAF" name="GL_DEPTH_BUFFER_FLOAT_MODE_NV"/>
+            <unused start="0x8DB0" end="0x8DB8" vendor="NV"/>
+        <enum value="0x8DB9" name="GL_FRAMEBUFFER_SRGB"/>
+        <enum value="0x8DB9" name="GL_FRAMEBUFFER_SRGB_EXT"/>
+        <enum value="0x8DBA" name="GL_FRAMEBUFFER_SRGB_CAPABLE_EXT"/>
+        <enum value="0x8DBB" name="GL_COMPRESSED_RED_RGTC1"/>
+        <enum value="0x8DBB" name="GL_COMPRESSED_RED_RGTC1_EXT"/>
+        <enum value="0x8DBC" name="GL_COMPRESSED_SIGNED_RED_RGTC1"/>
+        <enum value="0x8DBC" name="GL_COMPRESSED_SIGNED_RED_RGTC1_EXT"/>
+        <enum value="0x8DBD" name="GL_COMPRESSED_RED_GREEN_RGTC2_EXT"/>
+        <enum value="0x8DBD" name="GL_COMPRESSED_RG_RGTC2"/>
+        <enum value="0x8DBE" name="GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT"/>
+        <enum value="0x8DBE" name="GL_COMPRESSED_SIGNED_RG_RGTC2"/>
+        <enum value="0x8DC0" name="GL_SAMPLER_1D_ARRAY"/>
+        <enum value="0x8DC0" name="GL_SAMPLER_1D_ARRAY_EXT"/>
+        <enum value="0x8DC1" name="GL_SAMPLER_2D_ARRAY"/>
+        <enum value="0x8DC1" name="GL_SAMPLER_2D_ARRAY_EXT"/>
+        <enum value="0x8DC2" name="GL_SAMPLER_BUFFER"/>
+        <enum value="0x8DC2" name="GL_SAMPLER_BUFFER_EXT"/>
+        <enum value="0x8DC2" name="GL_SAMPLER_BUFFER_OES"/>
+        <enum value="0x8DC3" name="GL_SAMPLER_1D_ARRAY_SHADOW"/>
+        <enum value="0x8DC3" name="GL_SAMPLER_1D_ARRAY_SHADOW_EXT"/>
+        <enum value="0x8DC4" name="GL_SAMPLER_2D_ARRAY_SHADOW"/>
+        <enum value="0x8DC4" name="GL_SAMPLER_2D_ARRAY_SHADOW_EXT"/>
+        <enum value="0x8DC4" name="GL_SAMPLER_2D_ARRAY_SHADOW_NV"/>
+        <enum value="0x8DC5" name="GL_SAMPLER_CUBE_SHADOW"/>
+        <enum value="0x8DC5" name="GL_SAMPLER_CUBE_SHADOW_EXT"/>
+        <enum value="0x8DC5" name="GL_SAMPLER_CUBE_SHADOW_NV"/>
+        <enum value="0x8DC6" name="GL_UNSIGNED_INT_VEC2"/>
+        <enum value="0x8DC6" name="GL_UNSIGNED_INT_VEC2_EXT"/>
+        <enum value="0x8DC7" name="GL_UNSIGNED_INT_VEC3"/>
+        <enum value="0x8DC7" name="GL_UNSIGNED_INT_VEC3_EXT"/>
+        <enum value="0x8DC8" name="GL_UNSIGNED_INT_VEC4"/>
+        <enum value="0x8DC8" name="GL_UNSIGNED_INT_VEC4_EXT"/>
+        <enum value="0x8DC9" name="GL_INT_SAMPLER_1D"/>
+        <enum value="0x8DC9" name="GL_INT_SAMPLER_1D_EXT"/>
+        <enum value="0x8DCA" name="GL_INT_SAMPLER_2D"/>
+        <enum value="0x8DCA" name="GL_INT_SAMPLER_2D_EXT"/>
+        <enum value="0x8DCB" name="GL_INT_SAMPLER_3D"/>
+        <enum value="0x8DCB" name="GL_INT_SAMPLER_3D_EXT"/>
+        <enum value="0x8DCC" name="GL_INT_SAMPLER_CUBE"/>
+        <enum value="0x8DCC" name="GL_INT_SAMPLER_CUBE_EXT"/>
+        <enum value="0x8DCD" name="GL_INT_SAMPLER_2D_RECT"/>
+        <enum value="0x8DCD" name="GL_INT_SAMPLER_2D_RECT_EXT"/>
+        <enum value="0x8DCE" name="GL_INT_SAMPLER_1D_ARRAY"/>
+        <enum value="0x8DCE" name="GL_INT_SAMPLER_1D_ARRAY_EXT"/>
+        <enum value="0x8DCF" name="GL_INT_SAMPLER_2D_ARRAY"/>
+        <enum value="0x8DCF" name="GL_INT_SAMPLER_2D_ARRAY_EXT"/>
+        <enum value="0x8DD0" name="GL_INT_SAMPLER_BUFFER"/>
+        <enum value="0x8DD0" name="GL_INT_SAMPLER_BUFFER_EXT"/>
+        <enum value="0x8DD0" name="GL_INT_SAMPLER_BUFFER_OES"/>
+        <enum value="0x8DD1" name="GL_UNSIGNED_INT_SAMPLER_1D"/>
+        <enum value="0x8DD1" name="GL_UNSIGNED_INT_SAMPLER_1D_EXT"/>
+        <enum value="0x8DD2" name="GL_UNSIGNED_INT_SAMPLER_2D"/>
+        <enum value="0x8DD2" name="GL_UNSIGNED_INT_SAMPLER_2D_EXT"/>
+        <enum value="0x8DD3" name="GL_UNSIGNED_INT_SAMPLER_3D"/>
+        <enum value="0x8DD3" name="GL_UNSIGNED_INT_SAMPLER_3D_EXT"/>
+        <enum value="0x8DD4" name="GL_UNSIGNED_INT_SAMPLER_CUBE"/>
+        <enum value="0x8DD4" name="GL_UNSIGNED_INT_SAMPLER_CUBE_EXT"/>
+        <enum value="0x8DD5" name="GL_UNSIGNED_INT_SAMPLER_2D_RECT"/>
+        <enum value="0x8DD5" name="GL_UNSIGNED_INT_SAMPLER_2D_RECT_EXT"/>
+        <enum value="0x8DD6" name="GL_UNSIGNED_INT_SAMPLER_1D_ARRAY"/>
+        <enum value="0x8DD6" name="GL_UNSIGNED_INT_SAMPLER_1D_ARRAY_EXT"/>
+        <enum value="0x8DD7" name="GL_UNSIGNED_INT_SAMPLER_2D_ARRAY"/>
+        <enum value="0x8DD7" name="GL_UNSIGNED_INT_SAMPLER_2D_ARRAY_EXT"/>
+        <enum value="0x8DD8" name="GL_UNSIGNED_INT_SAMPLER_BUFFER"/>
+        <enum value="0x8DD8" name="GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT"/>
+        <enum value="0x8DD8" name="GL_UNSIGNED_INT_SAMPLER_BUFFER_OES"/>
+        <enum value="0x8DD9" name="GL_GEOMETRY_SHADER"/>
+        <enum value="0x8DD9" name="GL_GEOMETRY_SHADER_ARB"/>
+        <enum value="0x8DD9" name="GL_GEOMETRY_SHADER_EXT"/>
+        <enum value="0x8DD9" name="GL_GEOMETRY_SHADER_OES"/>
+        <enum value="0x8DDA" name="GL_GEOMETRY_VERTICES_OUT_ARB"/>
+        <enum value="0x8DDA" name="GL_GEOMETRY_VERTICES_OUT_EXT"/>
+        <enum value="0x8DDB" name="GL_GEOMETRY_INPUT_TYPE_ARB"/>
+        <enum value="0x8DDB" name="GL_GEOMETRY_INPUT_TYPE_EXT"/>
+        <enum value="0x8DDC" name="GL_GEOMETRY_OUTPUT_TYPE_ARB"/>
+        <enum value="0x8DDC" name="GL_GEOMETRY_OUTPUT_TYPE_EXT"/>
+        <enum value="0x8DDD" name="GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB"/>
+        <enum value="0x8DDD" name="GL_MAX_GEOMETRY_VARYING_COMPONENTS_EXT"/>
+        <enum value="0x8DDE" name="GL_MAX_VERTEX_VARYING_COMPONENTS_ARB"/>
+        <enum value="0x8DDE" name="GL_MAX_VERTEX_VARYING_COMPONENTS_EXT"/>
+        <enum value="0x8DDF" name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS"/>
+        <enum value="0x8DDF" name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB"/>
+        <enum value="0x8DDF" name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8DDF" name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8DE0" name="GL_MAX_GEOMETRY_OUTPUT_VERTICES"/>
+        <enum value="0x8DE0" name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB"/>
+        <enum value="0x8DE0" name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT"/>
+        <enum value="0x8DE0" name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES"/>
+        <enum value="0x8DE1" name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS"/>
+        <enum value="0x8DE1" name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB"/>
+        <enum value="0x8DE1" name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT"/>
+        <enum value="0x8DE1" name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES"/>
+        <enum value="0x8DE2" name="GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT"/>
+        <enum value="0x8DE3" name="GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT"/>
+        <enum value="0x8DE4" name="GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT"/>
+        <enum value="0x8DE5" name="GL_ACTIVE_SUBROUTINES"/>
+        <enum value="0x8DE6" name="GL_ACTIVE_SUBROUTINE_UNIFORMS"/>
+        <enum value="0x8DE7" name="GL_MAX_SUBROUTINES"/>
+        <enum value="0x8DE8" name="GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS"/>
+        <enum value="0x8DE9" name="GL_NAMED_STRING_LENGTH_ARB"/>
+        <enum value="0x8DEA" name="GL_NAMED_STRING_TYPE_ARB"/>
+            <unused start="0x8DEB" end="0x8DEC" vendor="NV"/>
+        <enum value="0x8DED" name="GL_MAX_BINDABLE_UNIFORM_SIZE_EXT"/>
+        <enum value="0x8DEE" name="GL_UNIFORM_BUFFER_EXT"/>
+        <enum value="0x8DEF" name="GL_UNIFORM_BUFFER_BINDING_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8DF0" end="0x8E0F" vendor="OES">
+        <enum value="0x8DF0" name="GL_LOW_FLOAT"/>
+        <enum value="0x8DF1" name="GL_MEDIUM_FLOAT"/>
+        <enum value="0x8DF2" name="GL_HIGH_FLOAT"/>
+        <enum value="0x8DF3" name="GL_LOW_INT"/>
+        <enum value="0x8DF4" name="GL_MEDIUM_INT"/>
+        <enum value="0x8DF5" name="GL_HIGH_INT"/>
+        <enum value="0x8DF6" name="GL_UNSIGNED_INT_10_10_10_2_OES"/>
+        <enum value="0x8DF7" name="GL_INT_10_10_10_2_OES"/>
+        <enum value="0x8DF8" name="GL_SHADER_BINARY_FORMATS"/>
+        <enum value="0x8DF9" name="GL_NUM_SHADER_BINARY_FORMATS"/>
+        <enum value="0x8DFA" name="GL_SHADER_COMPILER"/>
+        <enum value="0x8DFB" name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+        <enum value="0x8DFC" name="GL_MAX_VARYING_VECTORS"/>
+        <enum value="0x8DFD" name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+            <unused start="0x8DFE" end="0x8E0F" vendor="OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8E10" end="0x8E8F" vendor="NV" comment="For Michael Gold 2006/08/07">
+        <enum value="0x8E10" name="GL_RENDERBUFFER_COLOR_SAMPLES_NV"/>
+        <enum value="0x8E11" name="GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV"/>
+        <enum value="0x8E12" name="GL_MULTISAMPLE_COVERAGE_MODES_NV"/>
+        <enum value="0x8E13" name="GL_QUERY_WAIT"/>
+        <enum value="0x8E13" name="GL_QUERY_WAIT_NV"/>
+        <enum value="0x8E14" name="GL_QUERY_NO_WAIT"/>
+        <enum value="0x8E14" name="GL_QUERY_NO_WAIT_NV"/>
+        <enum value="0x8E15" name="GL_QUERY_BY_REGION_WAIT"/>
+        <enum value="0x8E15" name="GL_QUERY_BY_REGION_WAIT_NV"/>
+        <enum value="0x8E16" name="GL_QUERY_BY_REGION_NO_WAIT"/>
+        <enum value="0x8E16" name="GL_QUERY_BY_REGION_NO_WAIT_NV"/>
+        <enum value="0x8E17" name="GL_QUERY_WAIT_INVERTED"/>
+        <enum value="0x8E18" name="GL_QUERY_NO_WAIT_INVERTED"/>
+        <enum value="0x8E19" name="GL_QUERY_BY_REGION_WAIT_INVERTED"/>
+        <enum value="0x8E1A" name="GL_QUERY_BY_REGION_NO_WAIT_INVERTED"/>
+        <enum value="0x8E1B" name="GL_POLYGON_OFFSET_CLAMP"/>
+        <enum value="0x8E1B" name="GL_POLYGON_OFFSET_CLAMP_EXT" alias="GL_POLYGON_OFFSET_CLAMP"/>
+            <unused start="0x8E1C" end="0x8E1D" vendor="NV"/>
+        <enum value="0x8E1E" name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+        <enum value="0x8E1E" name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8E1E" name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8E1F" name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+        <enum value="0x8E1F" name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8E1F" name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8E20" name="GL_COLOR_SAMPLES_NV"/>
+            <unused start="0x8E21" vendor="NV"/>
+        <enum value="0x8E22" name="GL_TRANSFORM_FEEDBACK"/>
+        <enum value="0x8E22" name="GL_TRANSFORM_FEEDBACK_NV"/>
+        <enum value="0x8E23" name="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED"/>
+        <enum value="0x8E23" name="GL_TRANSFORM_FEEDBACK_PAUSED" alias="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED"/>
+        <enum value="0x8E23" name="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED_NV"/>
+        <enum value="0x8E24" name="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE"/>
+        <enum value="0x8E24" name="GL_TRANSFORM_FEEDBACK_ACTIVE" alias="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE"/>
+        <enum value="0x8E24" name="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE_NV"/>
+        <enum value="0x8E25" name="GL_TRANSFORM_FEEDBACK_BINDING"/>
+        <enum value="0x8E25" name="GL_TRANSFORM_FEEDBACK_BINDING_NV"/>
+        <enum value="0x8E26" name="GL_FRAME_NV"/>
+        <enum value="0x8E27" name="GL_FIELDS_NV"/>
+        <enum value="0x8E28" name="GL_CURRENT_TIME_NV"/>
+        <enum value="0x8E28" name="GL_TIMESTAMP"/>
+        <enum value="0x8E28" name="GL_TIMESTAMP_EXT"/>
+        <enum value="0x8E29" name="GL_NUM_FILL_STREAMS_NV"/>
+        <enum value="0x8E2A" name="GL_PRESENT_TIME_NV"/>
+        <enum value="0x8E2B" name="GL_PRESENT_DURATION_NV"/>
+        <enum value="0x8E2C" name="GL_DEPTH_COMPONENT16_NONLINEAR_NV"/>
+        <enum value="0x8E2D" name="GL_PROGRAM_MATRIX_EXT"/>
+        <enum value="0x8E2E" name="GL_TRANSPOSE_PROGRAM_MATRIX_EXT"/>
+        <enum value="0x8E2F" name="GL_PROGRAM_MATRIX_STACK_DEPTH_EXT"/>
+            <unused start="0x8E30" end="0x8E41" vendor="NV"/>
+        <enum value="0x8E42" name="GL_TEXTURE_SWIZZLE_R"/>
+        <enum value="0x8E42" name="GL_TEXTURE_SWIZZLE_R_EXT"/>
+        <enum value="0x8E43" name="GL_TEXTURE_SWIZZLE_G"/>
+        <enum value="0x8E43" name="GL_TEXTURE_SWIZZLE_G_EXT"/>
+        <enum value="0x8E44" name="GL_TEXTURE_SWIZZLE_B"/>
+        <enum value="0x8E44" name="GL_TEXTURE_SWIZZLE_B_EXT"/>
+        <enum value="0x8E45" name="GL_TEXTURE_SWIZZLE_A"/>
+        <enum value="0x8E45" name="GL_TEXTURE_SWIZZLE_A_EXT"/>
+        <enum value="0x8E46" name="GL_TEXTURE_SWIZZLE_RGBA"/>
+        <enum value="0x8E46" name="GL_TEXTURE_SWIZZLE_RGBA_EXT"/>
+        <enum value="0x8E47" name="GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS"/>
+        <enum value="0x8E48" name="GL_ACTIVE_SUBROUTINE_MAX_LENGTH"/>
+        <enum value="0x8E49" name="GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH"/>
+        <enum value="0x8E4A" name="GL_NUM_COMPATIBLE_SUBROUTINES"/>
+        <enum value="0x8E4B" name="GL_COMPATIBLE_SUBROUTINES"/>
+        <enum value="0x8E4C" name="GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION"/>
+        <enum value="0x8E4C" name="GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT"/>
+        <enum value="0x8E4D" name="GL_FIRST_VERTEX_CONVENTION"/>
+        <enum value="0x8E4D" name="GL_FIRST_VERTEX_CONVENTION_EXT"/>
+        <enum value="0x8E4D" name="GL_FIRST_VERTEX_CONVENTION_OES"/>
+        <enum value="0x8E4E" name="GL_LAST_VERTEX_CONVENTION"/>
+        <enum value="0x8E4E" name="GL_LAST_VERTEX_CONVENTION_EXT"/>
+        <enum value="0x8E4E" name="GL_LAST_VERTEX_CONVENTION_OES"/>
+        <enum value="0x8E4F" name="GL_PROVOKING_VERTEX"/>
+        <enum value="0x8E4F" name="GL_PROVOKING_VERTEX_EXT"/>
+        <enum value="0x8E50" name="GL_SAMPLE_POSITION"/>
+        <enum value="0x8E50" name="GL_SAMPLE_POSITION_NV"/>
+        <enum value="0x8E50" name="GL_SAMPLE_LOCATION_ARB" alias="GL_SAMPLE_POSITION"/>
+        <enum value="0x8E50" name="GL_SAMPLE_LOCATION_NV" alias="GL_SAMPLE_POSITION_NV"/>
+        <enum value="0x8E51" name="GL_SAMPLE_MASK"/>
+        <enum value="0x8E51" name="GL_SAMPLE_MASK_NV"/>
+        <enum value="0x8E52" name="GL_SAMPLE_MASK_VALUE"/>
+        <enum value="0x8E52" name="GL_SAMPLE_MASK_VALUE_NV"/>
+        <enum value="0x8E53" name="GL_TEXTURE_BINDING_RENDERBUFFER_NV"/>
+        <enum value="0x8E54" name="GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV"/>
+        <enum value="0x8E55" name="GL_TEXTURE_RENDERBUFFER_NV"/>
+        <enum value="0x8E56" name="GL_SAMPLER_RENDERBUFFER_NV"/>
+        <enum value="0x8E57" name="GL_INT_SAMPLER_RENDERBUFFER_NV"/>
+        <enum value="0x8E58" name="GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV"/>
+        <enum value="0x8E59" name="GL_MAX_SAMPLE_MASK_WORDS"/>
+        <enum value="0x8E59" name="GL_MAX_SAMPLE_MASK_WORDS_NV"/>
+        <enum value="0x8E5A" name="GL_MAX_GEOMETRY_PROGRAM_INVOCATIONS_NV"/>
+        <enum value="0x8E5A" name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS"/>
+        <enum value="0x8E5A" name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS_EXT"/>
+        <enum value="0x8E5A" name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES"/>
+        <enum value="0x8E5B" name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET"/>
+        <enum value="0x8E5B" name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_OES"/>
+        <enum value="0x8E5B" name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_NV"/>
+        <enum value="0x8E5C" name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET"/>
+        <enum value="0x8E5C" name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_OES"/>
+        <enum value="0x8E5C" name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_NV"/>
+        <enum value="0x8E5D" name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS"/>
+        <enum value="0x8E5D" name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS_OES"/>
+        <enum value="0x8E5D" name="GL_FRAGMENT_PROGRAM_INTERPOLATION_OFFSET_BITS_NV"/>
+        <enum value="0x8E5E" name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+        <enum value="0x8E5E" name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_ARB"/>
+        <enum value="0x8E5E" name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_NV"/>
+        <enum value="0x8E5F" name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+        <enum value="0x8E5F" name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_ARB"/>
+        <enum value="0x8E5F" name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_NV"/>
+        <enum value="0x8E60" name="GL_MAX_MESH_UNIFORM_BLOCKS_NV"/>
+        <enum value="0x8E61" name="GL_MAX_MESH_TEXTURE_IMAGE_UNITS_NV"/>
+        <enum value="0x8E62" name="GL_MAX_MESH_IMAGE_UNIFORMS_NV"/>
+        <enum value="0x8E63" name="GL_MAX_MESH_UNIFORM_COMPONENTS_NV"/>
+        <enum value="0x8E64" name="GL_MAX_MESH_ATOMIC_COUNTER_BUFFERS_NV"/>
+        <enum value="0x8E65" name="GL_MAX_MESH_ATOMIC_COUNTERS_NV"/>
+        <enum value="0x8E66" name="GL_MAX_MESH_SHADER_STORAGE_BLOCKS_NV"/>
+        <enum value="0x8E67" name="GL_MAX_COMBINED_MESH_UNIFORM_COMPONENTS_NV"/>
+        <enum value="0x8E68" name="GL_MAX_TASK_UNIFORM_BLOCKS_NV"/>
+        <enum value="0x8E69" name="GL_MAX_TASK_TEXTURE_IMAGE_UNITS_NV"/>
+        <enum value="0x8E6A" name="GL_MAX_TASK_IMAGE_UNIFORMS_NV"/>
+        <enum value="0x8E6B" name="GL_MAX_TASK_UNIFORM_COMPONENTS_NV"/>
+        <enum value="0x8E6C" name="GL_MAX_TASK_ATOMIC_COUNTER_BUFFERS_NV"/>
+        <enum value="0x8E6D" name="GL_MAX_TASK_ATOMIC_COUNTERS_NV"/>
+        <enum value="0x8E6E" name="GL_MAX_TASK_SHADER_STORAGE_BLOCKS_NV"/>
+        <enum value="0x8E6F" name="GL_MAX_COMBINED_TASK_UNIFORM_COMPONENTS_NV"/>
+        <enum value="0x8E70" name="GL_MAX_TRANSFORM_FEEDBACK_BUFFERS"/>
+        <enum value="0x8E71" name="GL_MAX_VERTEX_STREAMS"/>
+        <enum value="0x8E72" name="GL_PATCH_VERTICES"/>
+        <enum value="0x8E72" name="GL_PATCH_VERTICES_EXT"/>
+        <enum value="0x8E72" name="GL_PATCH_VERTICES_OES"/>
+        <enum value="0x8E73" name="GL_PATCH_DEFAULT_INNER_LEVEL"/>
+        <enum value="0x8E73" name="GL_PATCH_DEFAULT_INNER_LEVEL_EXT"/>
+        <enum value="0x8E74" name="GL_PATCH_DEFAULT_OUTER_LEVEL"/>
+        <enum value="0x8E74" name="GL_PATCH_DEFAULT_OUTER_LEVEL_EXT"/>
+        <enum value="0x8E75" name="GL_TESS_CONTROL_OUTPUT_VERTICES"/>
+        <enum value="0x8E75" name="GL_TESS_CONTROL_OUTPUT_VERTICES_EXT"/>
+        <enum value="0x8E75" name="GL_TESS_CONTROL_OUTPUT_VERTICES_OES"/>
+        <enum value="0x8E76" name="GL_TESS_GEN_MODE"/>
+        <enum value="0x8E76" name="GL_TESS_GEN_MODE_EXT"/>
+        <enum value="0x8E76" name="GL_TESS_GEN_MODE_OES"/>
+        <enum value="0x8E77" name="GL_TESS_GEN_SPACING"/>
+        <enum value="0x8E77" name="GL_TESS_GEN_SPACING_EXT"/>
+        <enum value="0x8E77" name="GL_TESS_GEN_SPACING_OES"/>
+        <enum value="0x8E78" name="GL_TESS_GEN_VERTEX_ORDER"/>
+        <enum value="0x8E78" name="GL_TESS_GEN_VERTEX_ORDER_EXT"/>
+        <enum value="0x8E78" name="GL_TESS_GEN_VERTEX_ORDER_OES"/>
+        <enum value="0x8E79" name="GL_TESS_GEN_POINT_MODE"/>
+        <enum value="0x8E79" name="GL_TESS_GEN_POINT_MODE_EXT"/>
+        <enum value="0x8E79" name="GL_TESS_GEN_POINT_MODE_OES"/>
+        <enum value="0x8E7A" name="GL_ISOLINES"/>
+        <enum value="0x8E7A" name="GL_ISOLINES_EXT"/>
+        <enum value="0x8E7A" name="GL_ISOLINES_OES"/>
+        <enum value="0x8E7B" name="GL_FRACTIONAL_ODD"/>
+        <enum value="0x8E7B" name="GL_FRACTIONAL_ODD_EXT"/>
+        <enum value="0x8E7B" name="GL_FRACTIONAL_ODD_OES"/>
+        <enum value="0x8E7C" name="GL_FRACTIONAL_EVEN"/>
+        <enum value="0x8E7C" name="GL_FRACTIONAL_EVEN_EXT"/>
+        <enum value="0x8E7C" name="GL_FRACTIONAL_EVEN_OES"/>
+        <enum value="0x8E7D" name="GL_MAX_PATCH_VERTICES"/>
+        <enum value="0x8E7D" name="GL_MAX_PATCH_VERTICES_EXT"/>
+        <enum value="0x8E7D" name="GL_MAX_PATCH_VERTICES_OES"/>
+        <enum value="0x8E7E" name="GL_MAX_TESS_GEN_LEVEL"/>
+        <enum value="0x8E7E" name="GL_MAX_TESS_GEN_LEVEL_EXT"/>
+        <enum value="0x8E7E" name="GL_MAX_TESS_GEN_LEVEL_OES"/>
+        <enum value="0x8E7F" name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+        <enum value="0x8E7F" name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8E7F" name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8E80" name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+        <enum value="0x8E80" name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT"/>
+        <enum value="0x8E80" name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES"/>
+        <enum value="0x8E81" name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8E81" name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_EXT"/>
+        <enum value="0x8E81" name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES"/>
+        <enum value="0x8E82" name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x8E82" name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_EXT"/>
+        <enum value="0x8E82" name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES"/>
+        <enum value="0x8E83" name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS"/>
+        <enum value="0x8E83" name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_EXT"/>
+        <enum value="0x8E83" name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES"/>
+        <enum value="0x8E84" name="GL_MAX_TESS_PATCH_COMPONENTS"/>
+        <enum value="0x8E84" name="GL_MAX_TESS_PATCH_COMPONENTS_EXT"/>
+        <enum value="0x8E84" name="GL_MAX_TESS_PATCH_COMPONENTS_OES"/>
+        <enum value="0x8E85" name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS"/>
+        <enum value="0x8E85" name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_EXT"/>
+        <enum value="0x8E85" name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES"/>
+        <enum value="0x8E86" name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS"/>
+        <enum value="0x8E86" name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_EXT"/>
+        <enum value="0x8E86" name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES"/>
+        <enum value="0x8E87" name="GL_TESS_EVALUATION_SHADER"/>
+        <enum value="0x8E87" name="GL_TESS_EVALUATION_SHADER_EXT"/>
+        <enum value="0x8E87" name="GL_TESS_EVALUATION_SHADER_OES"/>
+        <enum value="0x8E88" name="GL_TESS_CONTROL_SHADER"/>
+        <enum value="0x8E88" name="GL_TESS_CONTROL_SHADER_EXT"/>
+        <enum value="0x8E88" name="GL_TESS_CONTROL_SHADER_OES"/>
+        <enum value="0x8E89" name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS"/>
+        <enum value="0x8E89" name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_EXT"/>
+        <enum value="0x8E89" name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES"/>
+        <enum value="0x8E8A" name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS"/>
+        <enum value="0x8E8A" name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_EXT"/>
+        <enum value="0x8E8A" name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES"/>
+            <unused start="0x8E8B" vendor="NV"/>
+        <enum value="0x8E8C" name="GL_COMPRESSED_RGBA_BPTC_UNORM"/>
+        <enum value="0x8E8C" name="GL_COMPRESSED_RGBA_BPTC_UNORM_ARB"/>
+        <enum value="0x8E8C" name="GL_COMPRESSED_RGBA_BPTC_UNORM_EXT"/>
+        <enum value="0x8E8D" name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM"/>
+        <enum value="0x8E8D" name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB"/>
+        <enum value="0x8E8D" name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT"/>
+        <enum value="0x8E8E" name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT"/>
+        <enum value="0x8E8E" name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB"/>
+        <enum value="0x8E8E" name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT"/>
+        <enum value="0x8E8F" name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT"/>
+        <enum value="0x8E8F" name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB"/>
+        <enum value="0x8E8F" name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8E90" end="0x8E9F" vendor="QNX" comment="For QNX_texture_tiling, QNX_complex_polygon, QNX_stippled_lines (Khronos bug 696)">
+            <unused start="0x8E90" end="0x8E9F" vendor="QNX"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8EA0" end="0x8EAF" vendor="IMG">
+            <unused start="0x8EA0" end="0x8EAF" vendor="IMG"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8EB0" end="0x8EBF" vendor="OES" comment="For Affie Munshi 2007/07/20">
+            <unused start="0x8EB0" end="0x8EBF" vendor="OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8EC0" end="0x8ECF" vendor="Vincent">
+            <unused start="0x8EC0" end="0x8ECF" vendor="Vincent"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8ED0" end="0x8F4F" vendor="NV" comment="For Pat Brown, Khronos bug 3191">
+        <enum value="0x8ED0" name="GL_COVERAGE_COMPONENT_NV"/>
+        <enum value="0x8ED1" name="GL_COVERAGE_COMPONENT4_NV"/>
+        <enum value="0x8ED2" name="GL_COVERAGE_ATTACHMENT_NV"/>
+        <enum value="0x8ED3" name="GL_COVERAGE_BUFFERS_NV"/>
+        <enum value="0x8ED4" name="GL_COVERAGE_SAMPLES_NV"/>
+        <enum value="0x8ED5" name="GL_COVERAGE_ALL_FRAGMENTS_NV"/>
+        <enum value="0x8ED6" name="GL_COVERAGE_EDGE_FRAGMENTS_NV"/>
+        <enum value="0x8ED7" name="GL_COVERAGE_AUTOMATIC_NV"/>
+            <unused start="0x8ED8" end="0x8F0F" vendor="NV"/>
+        <enum value="0x8F10" name="GL_INCLUSIVE_EXT"/>
+        <enum value="0x8F11" name="GL_EXCLUSIVE_EXT"/>
+        <enum value="0x8F12" name="GL_WINDOW_RECTANGLE_EXT"/>
+        <enum value="0x8F13" name="GL_WINDOW_RECTANGLE_MODE_EXT"/>
+        <enum value="0x8F14" name="GL_MAX_WINDOW_RECTANGLES_EXT"/>
+        <enum value="0x8F15" name="GL_NUM_WINDOW_RECTANGLES_EXT"/>
+            <unused start="0x8F16" end="0x8F1C" vendor="NV"/>
+        <enum value="0x8F1D" name="GL_BUFFER_GPU_ADDRESS_NV"/>
+        <enum value="0x8F1E" name="GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV"/>
+        <enum value="0x8F1F" name="GL_ELEMENT_ARRAY_UNIFIED_NV"/>
+        <enum value="0x8F20" name="GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F21" name="GL_VERTEX_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F22" name="GL_NORMAL_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F23" name="GL_COLOR_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F24" name="GL_INDEX_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F25" name="GL_TEXTURE_COORD_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F26" name="GL_EDGE_FLAG_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F27" name="GL_SECONDARY_COLOR_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F28" name="GL_FOG_COORD_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F29" name="GL_ELEMENT_ARRAY_ADDRESS_NV"/>
+        <enum value="0x8F2A" name="GL_VERTEX_ATTRIB_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F2B" name="GL_VERTEX_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F2C" name="GL_NORMAL_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F2D" name="GL_COLOR_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F2E" name="GL_INDEX_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F2F" name="GL_TEXTURE_COORD_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F30" name="GL_EDGE_FLAG_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F31" name="GL_SECONDARY_COLOR_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F32" name="GL_FOG_COORD_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F33" name="GL_ELEMENT_ARRAY_LENGTH_NV"/>
+        <enum value="0x8F34" name="GL_GPU_ADDRESS_NV"/>
+        <enum value="0x8F35" name="GL_MAX_SHADER_BUFFER_ADDRESS_NV"/>
+        <enum value="0x8F36" name="GL_COPY_READ_BUFFER"/>
+        <enum value="0x8F36" name="GL_COPY_READ_BUFFER_NV"/>
+        <enum value="0x8F36" name="GL_COPY_READ_BUFFER_BINDING" alias="GL_COPY_READ_BUFFER"/>
+        <enum value="0x8F37" name="GL_COPY_WRITE_BUFFER"/>
+        <enum value="0x8F37" name="GL_COPY_WRITE_BUFFER_NV"/>
+        <enum value="0x8F37" name="GL_COPY_WRITE_BUFFER_BINDING" alias="GL_COPY_WRITE_BUFFER"/>
+        <enum value="0x8F38" name="GL_MAX_IMAGE_UNITS"/>
+        <enum value="0x8F38" name="GL_MAX_IMAGE_UNITS_EXT"/>
+        <enum value="0x8F39" name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"/>
+        <enum value="0x8F39" name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS_EXT"/>
+        <enum value="0x8F39" name="GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES" alias="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"/>
+        <enum value="0x8F3A" name="GL_IMAGE_BINDING_NAME"/>
+        <enum value="0x8F3A" name="GL_IMAGE_BINDING_NAME_EXT"/>
+        <enum value="0x8F3B" name="GL_IMAGE_BINDING_LEVEL"/>
+        <enum value="0x8F3B" name="GL_IMAGE_BINDING_LEVEL_EXT"/>
+        <enum value="0x8F3C" name="GL_IMAGE_BINDING_LAYERED"/>
+        <enum value="0x8F3C" name="GL_IMAGE_BINDING_LAYERED_EXT"/>
+        <enum value="0x8F3D" name="GL_IMAGE_BINDING_LAYER"/>
+        <enum value="0x8F3D" name="GL_IMAGE_BINDING_LAYER_EXT"/>
+        <enum value="0x8F3E" name="GL_IMAGE_BINDING_ACCESS"/>
+        <enum value="0x8F3E" name="GL_IMAGE_BINDING_ACCESS_EXT"/>
+        <enum value="0x8F3F" name="GL_DRAW_INDIRECT_BUFFER"/>
+        <enum value="0x8F40" name="GL_DRAW_INDIRECT_UNIFIED_NV"/>
+        <enum value="0x8F41" name="GL_DRAW_INDIRECT_ADDRESS_NV"/>
+        <enum value="0x8F42" name="GL_DRAW_INDIRECT_LENGTH_NV"/>
+        <enum value="0x8F43" name="GL_DRAW_INDIRECT_BUFFER_BINDING"/>
+        <enum value="0x8F44" name="GL_MAX_PROGRAM_SUBROUTINE_PARAMETERS_NV"/>
+        <enum value="0x8F45" name="GL_MAX_PROGRAM_SUBROUTINE_NUM_NV"/>
+        <enum value="0x8F46" name="GL_DOUBLE_MAT2"/>
+        <enum value="0x8F46" name="GL_DOUBLE_MAT2_EXT"/>
+        <enum value="0x8F47" name="GL_DOUBLE_MAT3"/>
+        <enum value="0x8F47" name="GL_DOUBLE_MAT3_EXT"/>
+        <enum value="0x8F48" name="GL_DOUBLE_MAT4"/>
+        <enum value="0x8F48" name="GL_DOUBLE_MAT4_EXT"/>
+        <enum value="0x8F49" name="GL_DOUBLE_MAT2x3"/>
+        <enum value="0x8F49" name="GL_DOUBLE_MAT2x3_EXT"/>
+        <enum value="0x8F4A" name="GL_DOUBLE_MAT2x4"/>
+        <enum value="0x8F4A" name="GL_DOUBLE_MAT2x4_EXT"/>
+        <enum value="0x8F4B" name="GL_DOUBLE_MAT3x2"/>
+        <enum value="0x8F4B" name="GL_DOUBLE_MAT3x2_EXT"/>
+        <enum value="0x8F4C" name="GL_DOUBLE_MAT3x4"/>
+        <enum value="0x8F4C" name="GL_DOUBLE_MAT3x4_EXT"/>
+        <enum value="0x8F4D" name="GL_DOUBLE_MAT4x2"/>
+        <enum value="0x8F4D" name="GL_DOUBLE_MAT4x2_EXT"/>
+        <enum value="0x8F4E" name="GL_DOUBLE_MAT4x3"/>
+        <enum value="0x8F4E" name="GL_DOUBLE_MAT4x3_EXT"/>
+        <enum value="0x8F4F" name="GL_VERTEX_BINDING_BUFFER"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8F50" end="0x8F5F" vendor="ZiiLabs" comment="For Jon Kennedy, Khronos public bug 75">
+            <unused start="0x8F50" end="0x8F5F" vendor="ZiiLabs"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8F60" end="0x8F6F" vendor="ARM" comment="For Remi Pedersen, Khronos bug 3745">
+        <enum value="0x8F60" name="GL_MALI_SHADER_BINARY_ARM"/>
+        <enum value="0x8F61" name="GL_MALI_PROGRAM_BINARY_ARM"/>
+            <unused start="0x8F62" vendor="ARM"/>
+        <enum value="0x8F63" name="GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT"/>
+        <enum value="0x8F64" name="GL_SHADER_PIXEL_LOCAL_STORAGE_EXT"/>
+        <enum value="0x8F65" name="GL_FETCH_PER_SAMPLE_ARM"/>
+        <enum value="0x8F66" name="GL_FRAGMENT_SHADER_FRAMEBUFFER_FETCH_MRT_ARM"/>
+        <enum value="0x8F67" name="GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_SIZE_EXT"/>
+            <unused start="0x8F68" vendor="ARM"/>
+        <enum value="0x8F69" name="GL_TEXTURE_ASTC_DECODE_PRECISION_EXT"/>
+            <unused start="0x8F6A" end="0x8F6F" vendor="ARM"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8F70" end="0x8F7F" vendor="HI" comment="For Mark Callow, Khronos bug 4055. Shared with EGL.">
+            <unused start="0x8F70" end="0x8F7F" vendor="HI"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8F80" end="0x8F8F" vendor="Zebra" comment="For Mike Weiblen, public bug 910">
+            <unused start="0x8F80" end="0x8F8F" vendor="Zebra"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8F90" end="0x8F9F" vendor="ARB">
+        <enum value="0x8F90" name="GL_RED_SNORM"/>
+        <enum value="0x8F91" name="GL_RG_SNORM"/>
+        <enum value="0x8F92" name="GL_RGB_SNORM"/>
+        <enum value="0x8F93" name="GL_RGBA_SNORM"/>
+        <enum value="0x8F94" name="GL_R8_SNORM"/>
+        <enum value="0x8F95" name="GL_RG8_SNORM"/>
+        <enum value="0x8F96" name="GL_RGB8_SNORM"/>
+        <enum value="0x8F97" name="GL_RGBA8_SNORM"/>
+        <enum value="0x8F98" name="GL_R16_SNORM"/>
+        <enum value="0x8F98" name="GL_R16_SNORM_EXT"/>
+        <enum value="0x8F99" name="GL_RG16_SNORM"/>
+        <enum value="0x8F99" name="GL_RG16_SNORM_EXT"/>
+        <enum value="0x8F9A" name="GL_RGB16_SNORM"/>
+        <enum value="0x8F9A" name="GL_RGB16_SNORM_EXT"/>
+        <enum value="0x8F9B" name="GL_RGBA16_SNORM"/>
+        <enum value="0x8F9B" name="GL_RGBA16_SNORM_EXT"/>
+        <enum value="0x8F9C" name="GL_SIGNED_NORMALIZED"/>
+        <enum value="0x8F9D" name="GL_PRIMITIVE_RESTART"/>
+        <enum value="0x8F9E" name="GL_PRIMITIVE_RESTART_INDEX"/>
+        <enum value="0x8F9F" name="GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8FA0" end="0x8FBF" vendor="QCOM" comment="For Maurice Ribble, bug 4512">
+        <enum value="0x8FA0" name="GL_PERFMON_GLOBAL_MODE_QCOM"/>
+        <enum value="0x8FA1" name="GL_MAX_SHADER_SUBSAMPLED_IMAGE_UNITS_QCOM"/>
+            <unused start="0x8FA2" end="0x8FAF" vendor="QCOM"/>
+        <enum value="0x8FB0" name="GL_BINNING_CONTROL_HINT_QCOM"/>
+        <enum value="0x8FB1" name="GL_CPU_OPTIMIZED_QCOM"/>
+        <enum value="0x8FB2" name="GL_GPU_OPTIMIZED_QCOM"/>
+        <enum value="0x8FB3" name="GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM"/>
+            <unused start="0x8FB4" end="0x8FBA" vendor="QCOM"/>
+        <enum value="0x8FBB" name="GL_GPU_DISJOINT_EXT"/>
+            <unused start="0x8FBC" vendor="QCOM"/>
+        <enum value="0x8FBD" name="GL_SR8_EXT"/>
+        <enum value="0x8FBE" name="GL_SRG8_EXT"/>
+        <enum value="0x8FBF" name="GL_TEXTURE_FORMAT_SRGB_OVERRIDE_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8FC0" end="0x8FDF" vendor="VIV" comment="For Frido Garritsen, bug 4526">
+        <enum value="0x8FC4" name="GL_SHADER_BINARY_VIV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x8FE0" end="0x8FFF" vendor="NV" comment="For Pat Brown, bug 4935">
+        <enum value="0x8FE0" name="GL_INT8_NV"/>
+        <enum value="0x8FE1" name="GL_INT8_VEC2_NV"/>
+        <enum value="0x8FE2" name="GL_INT8_VEC3_NV"/>
+        <enum value="0x8FE3" name="GL_INT8_VEC4_NV"/>
+        <enum value="0x8FE4" name="GL_INT16_NV"/>
+        <enum value="0x8FE5" name="GL_INT16_VEC2_NV"/>
+        <enum value="0x8FE6" name="GL_INT16_VEC3_NV"/>
+        <enum value="0x8FE7" name="GL_INT16_VEC4_NV"/>
+        <enum value="0x8FE9" name="GL_INT64_VEC2_ARB"/>
+        <enum value="0x8FE9" name="GL_INT64_VEC2_NV"/>
+        <enum value="0x8FEA" name="GL_INT64_VEC3_ARB"/>
+        <enum value="0x8FEA" name="GL_INT64_VEC3_NV"/>
+        <enum value="0x8FEB" name="GL_INT64_VEC4_ARB"/>
+        <enum value="0x8FEB" name="GL_INT64_VEC4_NV"/>
+        <enum value="0x8FEC" name="GL_UNSIGNED_INT8_NV"/>
+        <enum value="0x8FED" name="GL_UNSIGNED_INT8_VEC2_NV"/>
+        <enum value="0x8FEE" name="GL_UNSIGNED_INT8_VEC3_NV"/>
+        <enum value="0x8FEF" name="GL_UNSIGNED_INT8_VEC4_NV"/>
+        <enum value="0x8FF0" name="GL_UNSIGNED_INT16_NV"/>
+        <enum value="0x8FF1" name="GL_UNSIGNED_INT16_VEC2_NV"/>
+        <enum value="0x8FF2" name="GL_UNSIGNED_INT16_VEC3_NV"/>
+        <enum value="0x8FF3" name="GL_UNSIGNED_INT16_VEC4_NV"/>
+        <enum value="0x8FF5" name="GL_UNSIGNED_INT64_VEC2_ARB"/>
+        <enum value="0x8FF5" name="GL_UNSIGNED_INT64_VEC2_NV"/>
+        <enum value="0x8FF6" name="GL_UNSIGNED_INT64_VEC3_ARB"/>
+        <enum value="0x8FF6" name="GL_UNSIGNED_INT64_VEC3_NV"/>
+        <enum value="0x8FF7" name="GL_UNSIGNED_INT64_VEC4_ARB"/>
+        <enum value="0x8FF7" name="GL_UNSIGNED_INT64_VEC4_NV"/>
+        <enum value="0x8FF8" name="GL_FLOAT16_NV"/>
+        <enum value="0x8FF9" name="GL_FLOAT16_VEC2_NV"/>
+        <enum value="0x8FFA" name="GL_FLOAT16_VEC3_NV"/>
+        <enum value="0x8FFB" name="GL_FLOAT16_VEC4_NV"/>
+        <enum value="0x8FFC" name="GL_DOUBLE_VEC2"/>
+        <enum value="0x8FFC" name="GL_DOUBLE_VEC2_EXT"/>
+        <enum value="0x8FFD" name="GL_DOUBLE_VEC3"/>
+        <enum value="0x8FFD" name="GL_DOUBLE_VEC3_EXT"/>
+        <enum value="0x8FFE" name="GL_DOUBLE_VEC4"/>
+        <enum value="0x8FFE" name="GL_DOUBLE_VEC4_EXT"/>
+            <unused start="0x8FFF" vendor="NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9000" end="0x901F" vendor="AMD" comment="For Bill Licea-Kane">
+        <enum value="0x9001" name="GL_SAMPLER_BUFFER_AMD"/>
+        <enum value="0x9002" name="GL_INT_SAMPLER_BUFFER_AMD"/>
+        <enum value="0x9003" name="GL_UNSIGNED_INT_SAMPLER_BUFFER_AMD"/>
+        <enum value="0x9004" name="GL_TESSELLATION_MODE_AMD"/>
+        <enum value="0x9005" name="GL_TESSELLATION_FACTOR_AMD"/>
+        <enum value="0x9006" name="GL_DISCRETE_AMD"/>
+        <enum value="0x9007" name="GL_CONTINUOUS_AMD"/>
+            <unused start="0x9008" vendor="AMD"/>
+        <enum value="0x9009" name="GL_TEXTURE_CUBE_MAP_ARRAY"/>
+        <enum value="0x9009" name="GL_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x9009" name="GL_TEXTURE_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x9009" name="GL_TEXTURE_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x900A" name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"/>
+        <enum value="0x900A" name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x900A" name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x900A" name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x900B" name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY"/>
+        <enum value="0x900B" name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x900C" name="GL_SAMPLER_CUBE_MAP_ARRAY"/>
+        <enum value="0x900C" name="GL_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x900C" name="GL_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x900C" name="GL_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x900D" name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW"/>
+        <enum value="0x900D" name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB"/>
+        <enum value="0x900D" name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_EXT"/>
+        <enum value="0x900D" name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES"/>
+        <enum value="0x900E" name="GL_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+        <enum value="0x900E" name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x900E" name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x900E" name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x900F" name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+        <enum value="0x900F" name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+        <enum value="0x900F" name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x900F" name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x9010" name="GL_ALPHA_SNORM"/>
+        <enum value="0x9011" name="GL_LUMINANCE_SNORM"/>
+        <enum value="0x9012" name="GL_LUMINANCE_ALPHA_SNORM"/>
+        <enum value="0x9013" name="GL_INTENSITY_SNORM"/>
+        <enum value="0x9014" name="GL_ALPHA8_SNORM"/>
+        <enum value="0x9015" name="GL_LUMINANCE8_SNORM"/>
+        <enum value="0x9016" name="GL_LUMINANCE8_ALPHA8_SNORM"/>
+        <enum value="0x9017" name="GL_INTENSITY8_SNORM"/>
+        <enum value="0x9018" name="GL_ALPHA16_SNORM"/>
+        <enum value="0x9019" name="GL_LUMINANCE16_SNORM"/>
+        <enum value="0x901A" name="GL_LUMINANCE16_ALPHA16_SNORM"/>
+        <enum value="0x901B" name="GL_INTENSITY16_SNORM"/>
+        <enum value="0x901C" name="GL_FACTOR_MIN_AMD"/>
+        <enum value="0x901D" name="GL_FACTOR_MAX_AMD"/>
+        <enum value="0x901E" name="GL_DEPTH_CLAMP_NEAR_AMD"/>
+        <enum value="0x901F" name="GL_DEPTH_CLAMP_FAR_AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9020" end="0x90FF" vendor="NV" comment="For Pat Brown, bug 4935">
+        <enum value="0x9020" name="GL_VIDEO_BUFFER_NV"/>
+        <enum value="0x9021" name="GL_VIDEO_BUFFER_BINDING_NV"/>
+        <enum value="0x9022" name="GL_FIELD_UPPER_NV"/>
+        <enum value="0x9023" name="GL_FIELD_LOWER_NV"/>
+        <enum value="0x9024" name="GL_NUM_VIDEO_CAPTURE_STREAMS_NV"/>
+        <enum value="0x9025" name="GL_NEXT_VIDEO_CAPTURE_BUFFER_STATUS_NV"/>
+        <enum value="0x9026" name="GL_VIDEO_CAPTURE_TO_422_SUPPORTED_NV"/>
+        <enum value="0x9027" name="GL_LAST_VIDEO_CAPTURE_STATUS_NV"/>
+        <enum value="0x9028" name="GL_VIDEO_BUFFER_PITCH_NV"/>
+        <enum value="0x9029" name="GL_VIDEO_COLOR_CONVERSION_MATRIX_NV"/>
+        <enum value="0x902A" name="GL_VIDEO_COLOR_CONVERSION_MAX_NV"/>
+        <enum value="0x902B" name="GL_VIDEO_COLOR_CONVERSION_MIN_NV"/>
+        <enum value="0x902C" name="GL_VIDEO_COLOR_CONVERSION_OFFSET_NV"/>
+        <enum value="0x902D" name="GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV"/>
+        <enum value="0x902E" name="GL_PARTIAL_SUCCESS_NV"/>
+        <enum value="0x902F" name="GL_SUCCESS_NV"/>
+        <enum value="0x9030" name="GL_FAILURE_NV"/>
+        <enum value="0x9031" name="GL_YCBYCR8_422_NV"/>
+        <enum value="0x9032" name="GL_YCBAYCR8A_4224_NV"/>
+        <enum value="0x9033" name="GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV"/>
+        <enum value="0x9034" name="GL_Z6Y10Z6CB10Z6A10Z6Y10Z6CR10Z6A10_4224_NV"/>
+        <enum value="0x9035" name="GL_Z4Y12Z4CB12Z4Y12Z4CR12_422_NV"/>
+        <enum value="0x9036" name="GL_Z4Y12Z4CB12Z4A12Z4Y12Z4CR12Z4A12_4224_NV"/>
+        <enum value="0x9037" name="GL_Z4Y12Z4CB12Z4CR12_444_NV"/>
+        <enum value="0x9038" name="GL_VIDEO_CAPTURE_FRAME_WIDTH_NV"/>
+        <enum value="0x9039" name="GL_VIDEO_CAPTURE_FRAME_HEIGHT_NV"/>
+        <enum value="0x903A" name="GL_VIDEO_CAPTURE_FIELD_UPPER_HEIGHT_NV"/>
+        <enum value="0x903B" name="GL_VIDEO_CAPTURE_FIELD_LOWER_HEIGHT_NV"/>
+        <enum value="0x903C" name="GL_VIDEO_CAPTURE_SURFACE_ORIGIN_NV"/>
+            <unused start="0x903D" end="0x9044" vendor="NV"/>
+        <enum value="0x9045" name="GL_TEXTURE_COVERAGE_SAMPLES_NV"/>
+        <enum value="0x9046" name="GL_TEXTURE_COLOR_SAMPLES_NV"/>
+        <enum value="0x9047" name="GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX"/>
+        <enum value="0x9048" name="GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX"/>
+        <enum value="0x9049" name="GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX"/>
+        <enum value="0x904A" name="GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX"/>
+        <enum value="0x904B" name="GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX"/>
+        <enum value="0x904C" name="GL_IMAGE_1D"/>
+        <enum value="0x904C" name="GL_IMAGE_1D_EXT"/>
+        <enum value="0x904D" name="GL_IMAGE_2D"/>
+        <enum value="0x904D" name="GL_IMAGE_2D_EXT"/>
+        <enum value="0x904E" name="GL_IMAGE_3D"/>
+        <enum value="0x904E" name="GL_IMAGE_3D_EXT"/>
+        <enum value="0x904F" name="GL_IMAGE_2D_RECT"/>
+        <enum value="0x904F" name="GL_IMAGE_2D_RECT_EXT"/>
+        <enum value="0x9050" name="GL_IMAGE_CUBE"/>
+        <enum value="0x9050" name="GL_IMAGE_CUBE_EXT"/>
+        <enum value="0x9051" name="GL_IMAGE_BUFFER"/>
+        <enum value="0x9051" name="GL_IMAGE_BUFFER_EXT"/>
+        <enum value="0x9051" name="GL_IMAGE_BUFFER_OES"/>
+        <enum value="0x9052" name="GL_IMAGE_1D_ARRAY"/>
+        <enum value="0x9052" name="GL_IMAGE_1D_ARRAY_EXT"/>
+        <enum value="0x9053" name="GL_IMAGE_2D_ARRAY"/>
+        <enum value="0x9053" name="GL_IMAGE_2D_ARRAY_EXT"/>
+        <enum value="0x9054" name="GL_IMAGE_CUBE_MAP_ARRAY"/>
+        <enum value="0x9054" name="GL_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x9054" name="GL_IMAGE_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x9055" name="GL_IMAGE_2D_MULTISAMPLE"/>
+        <enum value="0x9055" name="GL_IMAGE_2D_MULTISAMPLE_EXT"/>
+        <enum value="0x9056" name="GL_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x9056" name="GL_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+        <enum value="0x9057" name="GL_INT_IMAGE_1D"/>
+        <enum value="0x9057" name="GL_INT_IMAGE_1D_EXT"/>
+        <enum value="0x9058" name="GL_INT_IMAGE_2D"/>
+        <enum value="0x9058" name="GL_INT_IMAGE_2D_EXT"/>
+        <enum value="0x9059" name="GL_INT_IMAGE_3D"/>
+        <enum value="0x9059" name="GL_INT_IMAGE_3D_EXT"/>
+        <enum value="0x905A" name="GL_INT_IMAGE_2D_RECT"/>
+        <enum value="0x905A" name="GL_INT_IMAGE_2D_RECT_EXT"/>
+        <enum value="0x905B" name="GL_INT_IMAGE_CUBE"/>
+        <enum value="0x905B" name="GL_INT_IMAGE_CUBE_EXT"/>
+        <enum value="0x905C" name="GL_INT_IMAGE_BUFFER"/>
+        <enum value="0x905C" name="GL_INT_IMAGE_BUFFER_EXT"/>
+        <enum value="0x905C" name="GL_INT_IMAGE_BUFFER_OES"/>
+        <enum value="0x905D" name="GL_INT_IMAGE_1D_ARRAY"/>
+        <enum value="0x905D" name="GL_INT_IMAGE_1D_ARRAY_EXT"/>
+        <enum value="0x905E" name="GL_INT_IMAGE_2D_ARRAY"/>
+        <enum value="0x905E" name="GL_INT_IMAGE_2D_ARRAY_EXT"/>
+        <enum value="0x905F" name="GL_INT_IMAGE_CUBE_MAP_ARRAY"/>
+        <enum value="0x905F" name="GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x905F" name="GL_INT_IMAGE_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x9060" name="GL_INT_IMAGE_2D_MULTISAMPLE"/>
+        <enum value="0x9060" name="GL_INT_IMAGE_2D_MULTISAMPLE_EXT"/>
+        <enum value="0x9061" name="GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x9061" name="GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+        <enum value="0x9062" name="GL_UNSIGNED_INT_IMAGE_1D"/>
+        <enum value="0x9062" name="GL_UNSIGNED_INT_IMAGE_1D_EXT"/>
+        <enum value="0x9063" name="GL_UNSIGNED_INT_IMAGE_2D"/>
+        <enum value="0x9063" name="GL_UNSIGNED_INT_IMAGE_2D_EXT"/>
+        <enum value="0x9064" name="GL_UNSIGNED_INT_IMAGE_3D"/>
+        <enum value="0x9064" name="GL_UNSIGNED_INT_IMAGE_3D_EXT"/>
+        <enum value="0x9065" name="GL_UNSIGNED_INT_IMAGE_2D_RECT"/>
+        <enum value="0x9065" name="GL_UNSIGNED_INT_IMAGE_2D_RECT_EXT"/>
+        <enum value="0x9066" name="GL_UNSIGNED_INT_IMAGE_CUBE"/>
+        <enum value="0x9066" name="GL_UNSIGNED_INT_IMAGE_CUBE_EXT"/>
+        <enum value="0x9067" name="GL_UNSIGNED_INT_IMAGE_BUFFER"/>
+        <enum value="0x9067" name="GL_UNSIGNED_INT_IMAGE_BUFFER_EXT"/>
+        <enum value="0x9067" name="GL_UNSIGNED_INT_IMAGE_BUFFER_OES"/>
+        <enum value="0x9068" name="GL_UNSIGNED_INT_IMAGE_1D_ARRAY"/>
+        <enum value="0x9068" name="GL_UNSIGNED_INT_IMAGE_1D_ARRAY_EXT"/>
+        <enum value="0x9069" name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY"/>
+        <enum value="0x9069" name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY_EXT"/>
+        <enum value="0x906A" name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY"/>
+        <enum value="0x906A" name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+        <enum value="0x906A" name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES"/>
+        <enum value="0x906B" name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE"/>
+        <enum value="0x906B" name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_EXT"/>
+        <enum value="0x906C" name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x906C" name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+        <enum value="0x906D" name="GL_MAX_IMAGE_SAMPLES"/>
+        <enum value="0x906D" name="GL_MAX_IMAGE_SAMPLES_EXT"/>
+        <enum value="0x906E" name="GL_IMAGE_BINDING_FORMAT"/>
+        <enum value="0x906E" name="GL_IMAGE_BINDING_FORMAT_EXT"/>
+        <enum value="0x906F" name="GL_RGB10_A2UI"/>
+        <enum value="0x9070" name="GL_PATH_FORMAT_SVG_NV"/>
+        <enum value="0x9071" name="GL_PATH_FORMAT_PS_NV"/>
+        <enum value="0x9072" name="GL_STANDARD_FONT_NAME_NV"/>
+        <enum value="0x9073" name="GL_SYSTEM_FONT_NAME_NV"/>
+        <enum value="0x9074" name="GL_FILE_NAME_NV"/>
+        <enum value="0x9075" name="GL_PATH_STROKE_WIDTH_NV"/>
+        <enum value="0x9076" name="GL_PATH_END_CAPS_NV"/>
+        <enum value="0x9077" name="GL_PATH_INITIAL_END_CAP_NV"/>
+        <enum value="0x9078" name="GL_PATH_TERMINAL_END_CAP_NV"/>
+        <enum value="0x9079" name="GL_PATH_JOIN_STYLE_NV"/>
+        <enum value="0x907A" name="GL_PATH_MITER_LIMIT_NV"/>
+        <enum value="0x907B" name="GL_PATH_DASH_CAPS_NV"/>
+        <enum value="0x907C" name="GL_PATH_INITIAL_DASH_CAP_NV"/>
+        <enum value="0x907D" name="GL_PATH_TERMINAL_DASH_CAP_NV"/>
+        <enum value="0x907E" name="GL_PATH_DASH_OFFSET_NV"/>
+        <enum value="0x907F" name="GL_PATH_CLIENT_LENGTH_NV"/>
+        <enum value="0x9080" name="GL_PATH_FILL_MODE_NV"/>
+        <enum value="0x9081" name="GL_PATH_FILL_MASK_NV"/>
+        <enum value="0x9082" name="GL_PATH_FILL_COVER_MODE_NV"/>
+        <enum value="0x9083" name="GL_PATH_STROKE_COVER_MODE_NV"/>
+        <enum value="0x9084" name="GL_PATH_STROKE_MASK_NV"/>
+            <!-- <enum value="0x9085" name="GL_PATH_SAMPLE_QUALITY_NV"          comment="Removed from extension"/> -->
+            <!-- <enum value="0x9086" name="GL_PATH_STROKE_BOUND_NV"            comment="Removed from extension"/> -->
+            <!-- <enum value="0x9087" name="GL_PATH_STROKE_OVERSAMPLE_COUNT_NV" comment="Removed from extension"/> -->
+        <enum value="0x9088" name="GL_COUNT_UP_NV"/>
+        <enum value="0x9089" name="GL_COUNT_DOWN_NV"/>
+        <enum value="0x908A" name="GL_PATH_OBJECT_BOUNDING_BOX_NV"/>
+        <enum value="0x908B" name="GL_CONVEX_HULL_NV"/>
+            <!-- <enum value="0x908C" name="GL_MULTI_HULLS_NV"                  comment="Removed from extension"/> -->
+        <enum value="0x908D" name="GL_BOUNDING_BOX_NV"/>
+        <enum value="0x908E" name="GL_TRANSLATE_X_NV"/>
+        <enum value="0x908F" name="GL_TRANSLATE_Y_NV"/>
+        <enum value="0x9090" name="GL_TRANSLATE_2D_NV"/>
+        <enum value="0x9091" name="GL_TRANSLATE_3D_NV"/>
+        <enum value="0x9092" name="GL_AFFINE_2D_NV"/>
+            <!-- <enum value="0x9093" name="GL_PROJECTIVE_2D_NV"                comment="Removed from extension"/> -->
+        <enum value="0x9094" name="GL_AFFINE_3D_NV"/>
+            <!-- <enum value="0x9095" name="GL_PROJECTIVE_3D_NV"                comment="Removed from extension"/> -->
+        <enum value="0x9096" name="GL_TRANSPOSE_AFFINE_2D_NV"/>
+            <!-- <enum value="0x9097" name="GL_TRANSPOSE_PROJECTIVE_2D_NV"      comment="Removed from extension"/> -->
+        <enum value="0x9098" name="GL_TRANSPOSE_AFFINE_3D_NV"/>
+            <!-- <enum value="0x9099" name="GL_TRANSPOSE_PROJECTIVE_3D_NV"      comment="Removed from extension"/> -->
+        <enum value="0x909A" name="GL_UTF8_NV"/>
+        <enum value="0x909B" name="GL_UTF16_NV"/>
+        <enum value="0x909C" name="GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV"/>
+        <enum value="0x909D" name="GL_PATH_COMMAND_COUNT_NV"/>
+        <enum value="0x909E" name="GL_PATH_COORD_COUNT_NV"/>
+        <enum value="0x909F" name="GL_PATH_DASH_ARRAY_COUNT_NV"/>
+        <enum value="0x90A0" name="GL_PATH_COMPUTED_LENGTH_NV"/>
+        <enum value="0x90A1" name="GL_PATH_FILL_BOUNDING_BOX_NV"/>
+        <enum value="0x90A2" name="GL_PATH_STROKE_BOUNDING_BOX_NV"/>
+        <enum value="0x90A3" name="GL_SQUARE_NV"/>
+        <enum value="0x90A4" name="GL_ROUND_NV"/>
+        <enum value="0x90A5" name="GL_TRIANGULAR_NV"/>
+        <enum value="0x90A6" name="GL_BEVEL_NV"/>
+        <enum value="0x90A7" name="GL_MITER_REVERT_NV"/>
+        <enum value="0x90A8" name="GL_MITER_TRUNCATE_NV"/>
+        <enum value="0x90A9" name="GL_SKIP_MISSING_GLYPH_NV"/>
+        <enum value="0x90AA" name="GL_USE_MISSING_GLYPH_NV"/>
+        <enum value="0x90AB" name="GL_PATH_ERROR_POSITION_NV"/>
+        <enum value="0x90AC" name="GL_PATH_FOG_GEN_MODE_NV"/>
+        <enum value="0x90AD" name="GL_ACCUM_ADJACENT_PAIRS_NV"/>
+        <enum value="0x90AE" name="GL_ADJACENT_PAIRS_NV"/>
+        <enum value="0x90AF" name="GL_FIRST_TO_REST_NV"/>
+        <enum value="0x90B0" name="GL_PATH_GEN_MODE_NV"/>
+        <enum value="0x90B1" name="GL_PATH_GEN_COEFF_NV"/>
+        <enum value="0x90B2" name="GL_PATH_GEN_COLOR_FORMAT_NV"/>
+        <enum value="0x90B3" name="GL_PATH_GEN_COMPONENTS_NV"/>
+        <enum value="0x90B4" name="GL_PATH_DASH_OFFSET_RESET_NV"/>
+        <enum value="0x90B5" name="GL_MOVE_TO_RESETS_NV"/>
+        <enum value="0x90B6" name="GL_MOVE_TO_CONTINUES_NV"/>
+        <enum value="0x90B7" name="GL_PATH_STENCIL_FUNC_NV"/>
+        <enum value="0x90B8" name="GL_PATH_STENCIL_REF_NV"/>
+        <enum value="0x90B9" name="GL_PATH_STENCIL_VALUE_MASK_NV"/>
+        <enum value="0x90BA" name="GL_SCALED_RESOLVE_FASTEST_EXT"/>
+        <enum value="0x90BB" name="GL_SCALED_RESOLVE_NICEST_EXT"/>
+        <enum value="0x90BC" name="GL_MIN_MAP_BUFFER_ALIGNMENT"/>
+        <enum value="0x90BD" name="GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV"/>
+        <enum value="0x90BE" name="GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV"/>
+        <enum value="0x90BF" name="GL_PATH_COVER_DEPTH_FUNC_NV"/>
+            <unused start="0x90C0" end="0x90C6" vendor="NV"/>
+        <enum value="0x90C7" name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+        <enum value="0x90C8" name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE"/>
+        <enum value="0x90C9" name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS"/>
+        <enum value="0x90CA" name="GL_MAX_VERTEX_IMAGE_UNIFORMS"/>
+        <enum value="0x90CB" name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS"/>
+        <enum value="0x90CB" name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_EXT"/>
+        <enum value="0x90CB" name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES"/>
+        <enum value="0x90CC" name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS"/>
+        <enum value="0x90CC" name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_EXT"/>
+        <enum value="0x90CC" name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES"/>
+        <enum value="0x90CD" name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS"/>
+        <enum value="0x90CD" name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS_EXT"/>
+        <enum value="0x90CD" name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES"/>
+        <enum value="0x90CE" name="GL_MAX_FRAGMENT_IMAGE_UNIFORMS"/>
+        <enum value="0x90CF" name="GL_MAX_COMBINED_IMAGE_UNIFORMS"/>
+        <enum value="0x90D0" name="GL_MAX_DEEP_3D_TEXTURE_WIDTH_HEIGHT_NV"/>
+        <enum value="0x90D1" name="GL_MAX_DEEP_3D_TEXTURE_DEPTH_NV"/>
+        <enum value="0x90D2" name="GL_SHADER_STORAGE_BUFFER"/>
+        <enum value="0x90D3" name="GL_SHADER_STORAGE_BUFFER_BINDING"/>
+        <enum value="0x90D4" name="GL_SHADER_STORAGE_BUFFER_START"/>
+        <enum value="0x90D5" name="GL_SHADER_STORAGE_BUFFER_SIZE"/>
+        <enum value="0x90D6" name="GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90D7" name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90D7" name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_EXT"/>
+        <enum value="0x90D7" name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES"/>
+        <enum value="0x90D8" name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90D8" name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_EXT"/>
+        <enum value="0x90D8" name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES"/>
+        <enum value="0x90D9" name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90D9" name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_EXT"/>
+        <enum value="0x90D9" name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES"/>
+        <enum value="0x90DA" name="GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90DB" name="GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90DC" name="GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"/>
+        <enum value="0x90DD" name="GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"/>
+        <enum value="0x90DE" name="GL_MAX_SHADER_STORAGE_BLOCK_SIZE"/>
+        <enum value="0x90DF" name="GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"/>
+            <unused start="0x90E0" vendor="NV"/>
+        <enum value="0x90E1" name="GL_SYNC_X11_FENCE_EXT"/>
+            <unused start="0x90E2" end="0x90E9" vendor="NV"/>
+        <enum value="0x90EA" name="GL_DEPTH_STENCIL_TEXTURE_MODE"/>
+        <enum value="0x90EB" name="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+        <enum value="0x90EB" name="GL_MAX_COMPUTE_FIXED_GROUP_INVOCATIONS_ARB" alias="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+        <enum value="0x90EC" name="GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER"/>
+        <enum value="0x90ED" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER"/>
+        <enum value="0x90EE" name="GL_DISPATCH_INDIRECT_BUFFER"/>
+        <enum value="0x90EF" name="GL_DISPATCH_INDIRECT_BUFFER_BINDING"/>
+        <enum value="0x90F0" name="GL_COLOR_ATTACHMENT_EXT"/>
+        <enum value="0x90F1" name="GL_MULTIVIEW_EXT"/>
+        <enum value="0x90F2" name="GL_MAX_MULTIVIEW_BUFFERS_EXT"/>
+        <enum value="0x90F3" name="GL_CONTEXT_ROBUST_ACCESS"/>
+        <enum value="0x90F3" name="GL_CONTEXT_ROBUST_ACCESS_EXT"/>
+        <enum value="0x90F3" name="GL_CONTEXT_ROBUST_ACCESS_KHR"/>
+            <unused start="0x90F4" end="0x90FA" vendor="NV"/>
+        <enum value="0x90FB" name="GL_COMPUTE_PROGRAM_NV"/>
+        <enum value="0x90FC" name="GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV"/>
+            <unused start="0x90FD" end="0x90FF" vendor="NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9100" end="0x912F" vendor="ARB">
+        <enum value="0x9100" name="GL_TEXTURE_2D_MULTISAMPLE"/>
+        <enum value="0x9101" name="GL_PROXY_TEXTURE_2D_MULTISAMPLE"/>
+        <enum value="0x9102" name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x9102" name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES"/>
+        <enum value="0x9103" name="GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x9104" name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+        <enum value="0x9105" name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x9105" name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES"/>
+        <enum value="0x9106" name="GL_TEXTURE_SAMPLES"/>
+        <enum value="0x9107" name="GL_TEXTURE_FIXED_SAMPLE_LOCATIONS"/>
+        <enum value="0x9108" name="GL_SAMPLER_2D_MULTISAMPLE"/>
+        <enum value="0x9109" name="GL_INT_SAMPLER_2D_MULTISAMPLE"/>
+        <enum value="0x910A" name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE"/>
+        <enum value="0x910B" name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x910B" name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+        <enum value="0x910C" name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x910C" name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+        <enum value="0x910D" name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+        <enum value="0x910D" name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+        <enum value="0x910E" name="GL_MAX_COLOR_TEXTURE_SAMPLES"/>
+        <enum value="0x910F" name="GL_MAX_DEPTH_TEXTURE_SAMPLES"/>
+        <enum value="0x9110" name="GL_MAX_INTEGER_SAMPLES"/>
+        <enum value="0x9111" name="GL_MAX_SERVER_WAIT_TIMEOUT"/>
+        <enum value="0x9111" name="GL_MAX_SERVER_WAIT_TIMEOUT_APPLE"/>
+        <enum value="0x9112" name="GL_OBJECT_TYPE"/>
+        <enum value="0x9112" name="GL_OBJECT_TYPE_APPLE"/>
+        <enum value="0x9113" name="GL_SYNC_CONDITION"/>
+        <enum value="0x9113" name="GL_SYNC_CONDITION_APPLE"/>
+        <enum value="0x9114" name="GL_SYNC_STATUS"/>
+        <enum value="0x9114" name="GL_SYNC_STATUS_APPLE"/>
+        <enum value="0x9115" name="GL_SYNC_FLAGS"/>
+        <enum value="0x9115" name="GL_SYNC_FLAGS_APPLE"/>
+        <enum value="0x9116" name="GL_SYNC_FENCE"/>
+        <enum value="0x9116" name="GL_SYNC_FENCE_APPLE"/>
+        <enum value="0x9117" name="GL_SYNC_GPU_COMMANDS_COMPLETE"/>
+        <enum value="0x9117" name="GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE"/>
+        <enum value="0x9118" name="GL_UNSIGNALED"/>
+        <enum value="0x9118" name="GL_UNSIGNALED_APPLE"/>
+        <enum value="0x9119" name="GL_SIGNALED"/>
+        <enum value="0x9119" name="GL_SIGNALED_APPLE"/>
+        <enum value="0x911A" name="GL_ALREADY_SIGNALED"/>
+        <enum value="0x911A" name="GL_ALREADY_SIGNALED_APPLE"/>
+        <enum value="0x911B" name="GL_TIMEOUT_EXPIRED"/>
+        <enum value="0x911B" name="GL_TIMEOUT_EXPIRED_APPLE"/>
+        <enum value="0x911C" name="GL_CONDITION_SATISFIED"/>
+        <enum value="0x911C" name="GL_CONDITION_SATISFIED_APPLE"/>
+        <enum value="0x911D" name="GL_WAIT_FAILED"/>
+        <enum value="0x911D" name="GL_WAIT_FAILED_APPLE"/>
+        <enum value="0x911F" name="GL_BUFFER_ACCESS_FLAGS"/>
+        <enum value="0x9120" name="GL_BUFFER_MAP_LENGTH"/>
+        <enum value="0x9121" name="GL_BUFFER_MAP_OFFSET"/>
+        <enum value="0x9122" name="GL_MAX_VERTEX_OUTPUT_COMPONENTS"/>
+        <enum value="0x9123" name="GL_MAX_GEOMETRY_INPUT_COMPONENTS"/>
+        <enum value="0x9123" name="GL_MAX_GEOMETRY_INPUT_COMPONENTS_EXT"/>
+        <enum value="0x9123" name="GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES"/>
+        <enum value="0x9124" name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS"/>
+        <enum value="0x9124" name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_EXT"/>
+        <enum value="0x9124" name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES"/>
+        <enum value="0x9125" name="GL_MAX_FRAGMENT_INPUT_COMPONENTS"/>
+        <enum value="0x9126" name="GL_CONTEXT_PROFILE_MASK"/>
+        <enum value="0x9127" name="GL_UNPACK_COMPRESSED_BLOCK_WIDTH"/>
+        <enum value="0x9128" name="GL_UNPACK_COMPRESSED_BLOCK_HEIGHT"/>
+        <enum value="0x9129" name="GL_UNPACK_COMPRESSED_BLOCK_DEPTH"/>
+        <enum value="0x912A" name="GL_UNPACK_COMPRESSED_BLOCK_SIZE"/>
+        <enum value="0x912B" name="GL_PACK_COMPRESSED_BLOCK_WIDTH"/>
+        <enum value="0x912C" name="GL_PACK_COMPRESSED_BLOCK_HEIGHT"/>
+        <enum value="0x912D" name="GL_PACK_COMPRESSED_BLOCK_DEPTH"/>
+        <enum value="0x912E" name="GL_PACK_COMPRESSED_BLOCK_SIZE"/>
+        <enum value="0x912F" name="GL_TEXTURE_IMMUTABLE_FORMAT"/>
+        <enum value="0x912F" name="GL_TEXTURE_IMMUTABLE_FORMAT_EXT"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9130" end="0x913F" vendor="IMG" comment="Khronos bug 882">
+        <enum value="0x9130" name="GL_SGX_PROGRAM_BINARY_IMG"/>
+            <unused start="0x9131" end="0x9132" vendor="IMG"/>
+        <enum value="0x9133" name="GL_RENDERBUFFER_SAMPLES_IMG"/>
+        <enum value="0x9134" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG"/>
+        <enum value="0x9135" name="GL_MAX_SAMPLES_IMG"/>
+        <enum value="0x9136" name="GL_TEXTURE_SAMPLES_IMG"/>
+        <enum value="0x9137" name="GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG"/>
+        <enum value="0x9138" name="GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG"/>
+        <enum value="0x9139" name="GL_CUBIC_IMG"/>
+        <enum value="0x913A" name="GL_CUBIC_MIPMAP_NEAREST_IMG"/>
+        <enum value="0x913B" name="GL_CUBIC_MIPMAP_LINEAR_IMG"/>
+        <enum value="0x913C" name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_AND_DOWNSAMPLE_IMG"/>
+        <enum value="0x913D" name="GL_NUM_DOWNSAMPLE_SCALES_IMG"/>
+        <enum value="0x913E" name="GL_DOWNSAMPLE_SCALES_IMG"/>
+        <enum value="0x913F" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SCALE_IMG"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9140" end="0x923F" vendor="AMD" comment="Khronos bugs 5899, 6004">
+            <unused start="0x9140" end="0x9142" vendor="AMD"/>
+        <enum value="0x9143" name="GL_MAX_DEBUG_MESSAGE_LENGTH"/>
+        <enum value="0x9143" name="GL_MAX_DEBUG_MESSAGE_LENGTH_AMD"/>
+        <enum value="0x9143" name="GL_MAX_DEBUG_MESSAGE_LENGTH_ARB"/>
+        <enum value="0x9143" name="GL_MAX_DEBUG_MESSAGE_LENGTH_KHR"/>
+        <enum value="0x9144" name="GL_MAX_DEBUG_LOGGED_MESSAGES"/>
+        <enum value="0x9144" name="GL_MAX_DEBUG_LOGGED_MESSAGES_AMD"/>
+        <enum value="0x9144" name="GL_MAX_DEBUG_LOGGED_MESSAGES_ARB"/>
+        <enum value="0x9144" name="GL_MAX_DEBUG_LOGGED_MESSAGES_KHR"/>
+        <enum value="0x9145" name="GL_DEBUG_LOGGED_MESSAGES"/>
+        <enum value="0x9145" name="GL_DEBUG_LOGGED_MESSAGES_AMD"/>
+        <enum value="0x9145" name="GL_DEBUG_LOGGED_MESSAGES_ARB"/>
+        <enum value="0x9145" name="GL_DEBUG_LOGGED_MESSAGES_KHR"/>
+        <enum value="0x9146" name="GL_DEBUG_SEVERITY_HIGH"/>
+        <enum value="0x9146" name="GL_DEBUG_SEVERITY_HIGH_AMD"/>
+        <enum value="0x9146" name="GL_DEBUG_SEVERITY_HIGH_ARB"/>
+        <enum value="0x9146" name="GL_DEBUG_SEVERITY_HIGH_KHR"/>
+        <enum value="0x9147" name="GL_DEBUG_SEVERITY_MEDIUM"/>
+        <enum value="0x9147" name="GL_DEBUG_SEVERITY_MEDIUM_AMD"/>
+        <enum value="0x9147" name="GL_DEBUG_SEVERITY_MEDIUM_ARB"/>
+        <enum value="0x9147" name="GL_DEBUG_SEVERITY_MEDIUM_KHR"/>
+        <enum value="0x9148" name="GL_DEBUG_SEVERITY_LOW"/>
+        <enum value="0x9148" name="GL_DEBUG_SEVERITY_LOW_AMD"/>
+        <enum value="0x9148" name="GL_DEBUG_SEVERITY_LOW_ARB"/>
+        <enum value="0x9148" name="GL_DEBUG_SEVERITY_LOW_KHR"/>
+        <enum value="0x9149" name="GL_DEBUG_CATEGORY_API_ERROR_AMD"/>
+        <enum value="0x914A" name="GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD"/>
+        <enum value="0x914B" name="GL_DEBUG_CATEGORY_DEPRECATION_AMD"/>
+        <enum value="0x914C" name="GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD"/>
+        <enum value="0x914D" name="GL_DEBUG_CATEGORY_PERFORMANCE_AMD"/>
+        <enum value="0x914E" name="GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD"/>
+        <enum value="0x914F" name="GL_DEBUG_CATEGORY_APPLICATION_AMD"/>
+        <enum value="0x9150" name="GL_DEBUG_CATEGORY_OTHER_AMD"/>
+        <enum value="0x9151" name="GL_BUFFER_OBJECT_EXT"/>
+        <enum value="0x9151" name="GL_DATA_BUFFER_AMD"/>
+        <enum value="0x9152" name="GL_PERFORMANCE_MONITOR_AMD"/>
+        <enum value="0x9153" name="GL_QUERY_OBJECT_AMD"/>
+        <enum value="0x9153" name="GL_QUERY_OBJECT_EXT"/>
+        <enum value="0x9154" name="GL_VERTEX_ARRAY_OBJECT_AMD"/>
+        <enum value="0x9154" name="GL_VERTEX_ARRAY_OBJECT_EXT"/>
+        <enum value="0x9155" name="GL_SAMPLER_OBJECT_AMD"/>
+            <unused start="0x9156" end="0x915F" vendor="AMD"/>
+        <enum value="0x9160" name="GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD"/>
+            <unused start="0x9161" vendor="AMD"/>
+        <enum value="0x9192" name="GL_QUERY_BUFFER"/>
+        <enum value="0x9192" name="GL_QUERY_BUFFER_AMD"/>
+        <enum value="0x9193" name="GL_QUERY_BUFFER_BINDING"/>
+        <enum value="0x9193" name="GL_QUERY_BUFFER_BINDING_AMD"/>
+        <enum value="0x9194" name="GL_QUERY_RESULT_NO_WAIT"/>
+        <enum value="0x9194" name="GL_QUERY_RESULT_NO_WAIT_AMD"/>
+        <enum value="0x9195" name="GL_VIRTUAL_PAGE_SIZE_X_ARB"/>
+        <enum value="0x9195" name="GL_VIRTUAL_PAGE_SIZE_X_EXT"/>
+        <enum value="0x9195" name="GL_VIRTUAL_PAGE_SIZE_X_AMD"/>
+        <enum value="0x9196" name="GL_VIRTUAL_PAGE_SIZE_Y_ARB"/>
+        <enum value="0x9196" name="GL_VIRTUAL_PAGE_SIZE_Y_EXT"/>
+        <enum value="0x9196" name="GL_VIRTUAL_PAGE_SIZE_Y_AMD"/>
+        <enum value="0x9197" name="GL_VIRTUAL_PAGE_SIZE_Z_ARB"/>
+        <enum value="0x9197" name="GL_VIRTUAL_PAGE_SIZE_Z_EXT"/>
+        <enum value="0x9197" name="GL_VIRTUAL_PAGE_SIZE_Z_AMD"/>
+        <enum value="0x9198" name="GL_MAX_SPARSE_TEXTURE_SIZE_ARB"/>
+        <enum value="0x9198" name="GL_MAX_SPARSE_TEXTURE_SIZE_EXT"/>
+        <enum value="0x9198" name="GL_MAX_SPARSE_TEXTURE_SIZE_AMD"/>
+        <enum value="0x9199" name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_ARB"/>
+        <enum value="0x9199" name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_EXT"/>
+        <enum value="0x9199" name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_AMD"/>
+        <enum value="0x919A" name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS"/>
+        <enum value="0x919A" name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_ARB"/>
+        <enum value="0x919A" name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_EXT"/>
+        <enum value="0x919B" name="GL_MIN_SPARSE_LEVEL_AMD"/>
+        <enum value="0x919C" name="GL_MIN_LOD_WARNING_AMD"/>
+        <enum value="0x919D" name="GL_TEXTURE_BUFFER_OFFSET"/>
+        <enum value="0x919D" name="GL_TEXTURE_BUFFER_OFFSET_EXT"/>
+        <enum value="0x919D" name="GL_TEXTURE_BUFFER_OFFSET_OES"/>
+        <enum value="0x919E" name="GL_TEXTURE_BUFFER_SIZE"/>
+        <enum value="0x919E" name="GL_TEXTURE_BUFFER_SIZE_EXT"/>
+        <enum value="0x919E" name="GL_TEXTURE_BUFFER_SIZE_OES"/>
+        <enum value="0x919F" name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT"/>
+        <enum value="0x919F" name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_EXT"/>
+        <enum value="0x919F" name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_OES"/>
+        <enum value="0x91A0" name="GL_STREAM_RASTERIZATION_AMD"/>
+            <unused start="0x91A1" end="0x91A3" vendor="AMD"/>
+        <enum value="0x91A4" name="GL_VERTEX_ELEMENT_SWIZZLE_AMD"/>
+        <enum value="0x91A5" name="GL_VERTEX_ID_SWIZZLE_AMD"/>
+        <enum value="0x91A6" name="GL_TEXTURE_SPARSE_ARB"/>
+        <enum value="0x91A6" name="GL_TEXTURE_SPARSE_EXT"/>
+        <enum value="0x91A7" name="GL_VIRTUAL_PAGE_SIZE_INDEX_ARB"/>
+        <enum value="0x91A7" name="GL_VIRTUAL_PAGE_SIZE_INDEX_EXT"/>
+        <enum value="0x91A8" name="GL_NUM_VIRTUAL_PAGE_SIZES_ARB"/>
+        <enum value="0x91A8" name="GL_NUM_VIRTUAL_PAGE_SIZES_EXT"/>
+        <enum value="0x91A9" name="GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_ARB"/>
+        <enum value="0x91A9" name="GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT"/>
+        <enum value="0x91AA" name="GL_NUM_SPARSE_LEVELS_ARB"/>
+        <enum value="0x91AA" name="GL_NUM_SPARSE_LEVELS_EXT"/>
+            <unused start="0x91AB" end="0x91AD" vendor="AMD"/>
+        <enum value="0x91AE" name="GL_PIXELS_PER_SAMPLE_PATTERN_X_AMD"/>
+        <enum value="0x91AF" name="GL_PIXELS_PER_SAMPLE_PATTERN_Y_AMD"/>
+        <enum value="0x91B0" name="GL_MAX_SHADER_COMPILER_THREADS_KHR"/>
+        <enum value="0x91B0" name="GL_MAX_SHADER_COMPILER_THREADS_ARB" alias="GL_MAX_SHADER_COMPILER_THREADS_KHR"/>
+        <enum value="0x91B1" name="GL_COMPLETION_STATUS_KHR"/>
+        <enum value="0x91B1" name="GL_COMPLETION_STATUS_ARB" alias="GL_COMPLETION_STATUS_KHR"/>
+        <enum value="0x91B2" name="GL_RENDERBUFFER_STORAGE_SAMPLES_AMD"/>
+        <enum value="0x91B3" name="GL_MAX_COLOR_FRAMEBUFFER_SAMPLES_AMD"/>
+        <enum value="0x91B4" name="GL_MAX_COLOR_FRAMEBUFFER_STORAGE_SAMPLES_AMD"/>
+        <enum value="0x91B5" name="GL_MAX_DEPTH_STENCIL_FRAMEBUFFER_SAMPLES_AMD"/>
+        <enum value="0x91B6" name="GL_NUM_SUPPORTED_MULTISAMPLE_MODES_AMD"/>
+        <enum value="0x91B7" name="GL_SUPPORTED_MULTISAMPLE_MODES_AMD"/>
+            <unused start="0x91B8" end="0x91B8" vendor="AMD"/>
+        <enum value="0x91B9" name="GL_COMPUTE_SHADER"/>
+            <unused start="0x91BA" vendor="AMD"/>
+        <enum value="0x91BB" name="GL_MAX_COMPUTE_UNIFORM_BLOCKS"/>
+        <enum value="0x91BC" name="GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS"/>
+        <enum value="0x91BD" name="GL_MAX_COMPUTE_IMAGE_UNIFORMS"/>
+        <enum value="0x91BE" name="GL_MAX_COMPUTE_WORK_GROUP_COUNT"/>
+        <enum value="0x91BF" name="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+        <enum value="0x91BF" name="GL_MAX_COMPUTE_FIXED_GROUP_SIZE_ARB" alias="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+            <unused start="0x91C0" end="0x91C4" vendor="AMD"/>
+        <enum value="0x91C5" name="GL_FLOAT16_MAT2_AMD"/>
+        <enum value="0x91C6" name="GL_FLOAT16_MAT3_AMD"/>
+        <enum value="0x91C7" name="GL_FLOAT16_MAT4_AMD"/>
+        <enum value="0x91C8" name="GL_FLOAT16_MAT2x3_AMD"/>
+        <enum value="0x91C9" name="GL_FLOAT16_MAT2x4_AMD"/>
+        <enum value="0x91CA" name="GL_FLOAT16_MAT3x2_AMD"/>
+        <enum value="0x91CB" name="GL_FLOAT16_MAT3x4_AMD"/>
+        <enum value="0x91CC" name="GL_FLOAT16_MAT4x2_AMD"/>
+        <enum value="0x91CD" name="GL_FLOAT16_MAT4x3_AMD"/>
+            <unused start="0x91CE" end="0x923F" vendor="AMD"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9240" end="0x924F" vendor="WEBGL" comment="Khronos bug 6473,6884">
+        <enum value="0x9240" name="GL_UNPACK_FLIP_Y_WEBGL"/>
+        <enum value="0x9241" name="GL_UNPACK_PREMULTIPLY_ALPHA_WEBGL"/>
+        <enum value="0x9242" name="GL_CONTEXT_LOST_WEBGL"/>
+        <enum value="0x9243" name="GL_UNPACK_COLORSPACE_CONVERSION_WEBGL"/>
+        <enum value="0x9244" name="GL_BROWSER_DEFAULT_WEBGL"/>
+            <unused start="0x9245" end="0x924F" vendor="WEBGL"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9250" end="0x925F" vendor="DMP" comment="For Eisaku Ohbuchi via email">
+        <enum value="0x9250" name="GL_SHADER_BINARY_DMP"/>
+        <enum value="0x9251" name="GL_SMAPHS30_PROGRAM_BINARY_DMP"/>
+        <enum value="0x9252" name="GL_SMAPHS_PROGRAM_BINARY_DMP"/>
+        <enum value="0x9253" name="GL_DMP_PROGRAM_BINARY_DMP"/>
+            <unused start="0x9254" end="0x925F" vendor="DMP"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9260" end="0x926F" vendor="FJ" comment="Khronos bug 7486">
+        <enum value="0x9260" name="GL_GCCSO_SHADER_BINARY_FJ"/>
+            <unused start="0x9261" end="0x926F" vendor="FJ"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9270" end="0x927F" vendor="OES" comment="Khronos bug 7625">
+        <enum value="0x9270" name="GL_COMPRESSED_R11_EAC"/>
+        <enum value="0x9270" name="GL_COMPRESSED_R11_EAC_OES"/>
+        <enum value="0x9271" name="GL_COMPRESSED_SIGNED_R11_EAC"/>
+        <enum value="0x9271" name="GL_COMPRESSED_SIGNED_R11_EAC_OES"/>
+        <enum value="0x9272" name="GL_COMPRESSED_RG11_EAC"/>
+        <enum value="0x9272" name="GL_COMPRESSED_RG11_EAC_OES"/>
+        <enum value="0x9273" name="GL_COMPRESSED_SIGNED_RG11_EAC"/>
+        <enum value="0x9273" name="GL_COMPRESSED_SIGNED_RG11_EAC_OES"/>
+        <enum value="0x9274" name="GL_COMPRESSED_RGB8_ETC2"/>
+        <enum value="0x9274" name="GL_COMPRESSED_RGB8_ETC2_OES"/>
+        <enum value="0x9275" name="GL_COMPRESSED_SRGB8_ETC2"/>
+        <enum value="0x9275" name="GL_COMPRESSED_SRGB8_ETC2_OES"/>
+        <enum value="0x9276" name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+        <enum value="0x9276" name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2_OES"/>
+        <enum value="0x9277" name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+        <enum value="0x9277" name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2_OES"/>
+        <enum value="0x9278" name="GL_COMPRESSED_RGBA8_ETC2_EAC"/>
+        <enum value="0x9278" name="GL_COMPRESSED_RGBA8_ETC2_EAC_OES"/>
+        <enum value="0x9279" name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"/>
+        <enum value="0x9279" name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC_OES"/>
+            <unused start="0x927A" end="0x927F" vendor="OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9280" end="0x937F" vendor="NV" comment="Khronos bug 7658">
+        <enum value="0x9280" name="GL_BLEND_PREMULTIPLIED_SRC_NV"/>
+        <enum value="0x9281" name="GL_BLEND_OVERLAP_NV"/>
+        <enum value="0x9282" name="GL_UNCORRELATED_NV"/>
+        <enum value="0x9283" name="GL_DISJOINT_NV"/>
+        <enum value="0x9284" name="GL_CONJOINT_NV"/>
+        <enum value="0x9285" name="GL_BLEND_ADVANCED_COHERENT_KHR"/>
+        <enum value="0x9285" name="GL_BLEND_ADVANCED_COHERENT_NV"/>
+        <enum value="0x9286" name="GL_SRC_NV"/>
+        <enum value="0x9287" name="GL_DST_NV"/>
+        <enum value="0x9288" name="GL_SRC_OVER_NV"/>
+        <enum value="0x9289" name="GL_DST_OVER_NV"/>
+        <enum value="0x928A" name="GL_SRC_IN_NV"/>
+        <enum value="0x928B" name="GL_DST_IN_NV"/>
+        <enum value="0x928C" name="GL_SRC_OUT_NV"/>
+        <enum value="0x928D" name="GL_DST_OUT_NV"/>
+        <enum value="0x928E" name="GL_SRC_ATOP_NV"/>
+        <enum value="0x928F" name="GL_DST_ATOP_NV"/>
+            <unused start="0x9290" vendor="NV"/>
+        <enum value="0x9291" name="GL_PLUS_NV"/>
+        <enum value="0x9292" name="GL_PLUS_DARKER_NV"/>
+            <unused start="0x9293" vendor="NV"/>
+        <enum value="0x9294" name="GL_MULTIPLY"/>
+        <enum value="0x9294" name="GL_MULTIPLY_KHR"/>
+        <enum value="0x9294" name="GL_MULTIPLY_NV"/>
+        <enum value="0x9295" name="GL_SCREEN"/>
+        <enum value="0x9295" name="GL_SCREEN_KHR"/>
+        <enum value="0x9295" name="GL_SCREEN_NV"/>
+        <enum value="0x9296" name="GL_OVERLAY"/>
+        <enum value="0x9296" name="GL_OVERLAY_KHR"/>
+        <enum value="0x9296" name="GL_OVERLAY_NV"/>
+        <enum value="0x9297" name="GL_DARKEN"/>
+        <enum value="0x9297" name="GL_DARKEN_KHR"/>
+        <enum value="0x9297" name="GL_DARKEN_NV"/>
+        <enum value="0x9298" name="GL_LIGHTEN"/>
+        <enum value="0x9298" name="GL_LIGHTEN_KHR"/>
+        <enum value="0x9298" name="GL_LIGHTEN_NV"/>
+        <enum value="0x9299" name="GL_COLORDODGE"/>
+        <enum value="0x9299" name="GL_COLORDODGE_KHR"/>
+        <enum value="0x9299" name="GL_COLORDODGE_NV"/>
+        <enum value="0x929A" name="GL_COLORBURN"/>
+        <enum value="0x929A" name="GL_COLORBURN_KHR"/>
+        <enum value="0x929A" name="GL_COLORBURN_NV"/>
+        <enum value="0x929B" name="GL_HARDLIGHT"/>
+        <enum value="0x929B" name="GL_HARDLIGHT_KHR"/>
+        <enum value="0x929B" name="GL_HARDLIGHT_NV"/>
+        <enum value="0x929C" name="GL_SOFTLIGHT"/>
+        <enum value="0x929C" name="GL_SOFTLIGHT_KHR"/>
+        <enum value="0x929C" name="GL_SOFTLIGHT_NV"/>
+            <unused start="0x929D" vendor="NV"/>
+        <enum value="0x929E" name="GL_DIFFERENCE"/>
+        <enum value="0x929E" name="GL_DIFFERENCE_KHR"/>
+        <enum value="0x929E" name="GL_DIFFERENCE_NV"/>
+        <enum value="0x929F" name="GL_MINUS_NV"/>
+        <enum value="0x92A0" name="GL_EXCLUSION"/>
+        <enum value="0x92A0" name="GL_EXCLUSION_KHR"/>
+        <enum value="0x92A0" name="GL_EXCLUSION_NV"/>
+        <enum value="0x92A1" name="GL_CONTRAST_NV"/>
+            <unused start="0x92A2" vendor="NV"/>
+        <enum value="0x92A3" name="GL_INVERT_RGB_NV"/>
+        <enum value="0x92A4" name="GL_LINEARDODGE_NV"/>
+        <enum value="0x92A5" name="GL_LINEARBURN_NV"/>
+        <enum value="0x92A6" name="GL_VIVIDLIGHT_NV"/>
+        <enum value="0x92A7" name="GL_LINEARLIGHT_NV"/>
+        <enum value="0x92A8" name="GL_PINLIGHT_NV"/>
+        <enum value="0x92A9" name="GL_HARDMIX_NV"/>
+            <unused start="0x92AA" end="0x92AC" vendor="NV"/>
+        <enum value="0x92AD" name="GL_HSL_HUE"/>
+        <enum value="0x92AD" name="GL_HSL_HUE_KHR"/>
+        <enum value="0x92AD" name="GL_HSL_HUE_NV"/>
+        <enum value="0x92AE" name="GL_HSL_SATURATION"/>
+        <enum value="0x92AE" name="GL_HSL_SATURATION_KHR"/>
+        <enum value="0x92AE" name="GL_HSL_SATURATION_NV"/>
+        <enum value="0x92AF" name="GL_HSL_COLOR"/>
+        <enum value="0x92AF" name="GL_HSL_COLOR_KHR"/>
+        <enum value="0x92AF" name="GL_HSL_COLOR_NV"/>
+        <enum value="0x92B0" name="GL_HSL_LUMINOSITY"/>
+        <enum value="0x92B0" name="GL_HSL_LUMINOSITY_KHR"/>
+        <enum value="0x92B0" name="GL_HSL_LUMINOSITY_NV"/>
+        <enum value="0x92B1" name="GL_PLUS_CLAMPED_NV"/>
+        <enum value="0x92B2" name="GL_PLUS_CLAMPED_ALPHA_NV"/>
+        <enum value="0x92B3" name="GL_MINUS_CLAMPED_NV"/>
+        <enum value="0x92B4" name="GL_INVERT_OVG_NV"/>
+            <unused start="0x92B5" end="0x92B9" vendor="NV"/>
+        <enum value="0x92BA" name="GL_MAX_LGPU_GPUS_NVX"/>
+        <enum value="0x92BA" name="GL_MULTICAST_GPUS_NV"/>
+        <enum value="0x92BB" name="GL_PURGED_CONTEXT_RESET_NV"/>
+            <unused start="0x92BC" end="0x92BD" vendor="NV"/>
+        <enum value="0x92BE" name="GL_PRIMITIVE_BOUNDING_BOX_ARB"/>
+        <enum value="0x92BE" name="GL_PRIMITIVE_BOUNDING_BOX"/>
+        <enum value="0x92BE" name="GL_PRIMITIVE_BOUNDING_BOX_EXT"/>
+        <enum value="0x92BE" name="GL_PRIMITIVE_BOUNDING_BOX_OES"/>
+        <enum value="0x92BF" name="GL_ALPHA_TO_COVERAGE_DITHER_MODE_NV"/>
+        <enum value="0x92C0" name="GL_ATOMIC_COUNTER_BUFFER"/>
+        <enum value="0x92C1" name="GL_ATOMIC_COUNTER_BUFFER_BINDING"/>
+        <enum value="0x92C2" name="GL_ATOMIC_COUNTER_BUFFER_START"/>
+        <enum value="0x92C3" name="GL_ATOMIC_COUNTER_BUFFER_SIZE"/>
+        <enum value="0x92C4" name="GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE"/>
+        <enum value="0x92C5" name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS"/>
+        <enum value="0x92C6" name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES"/>
+        <enum value="0x92C7" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER"/>
+        <enum value="0x92C8" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+        <enum value="0x92C9" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+        <enum value="0x92CA" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER"/>
+        <enum value="0x92CB" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER"/>
+        <enum value="0x92CC" name="GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92CD" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92CD" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_EXT"/>
+        <enum value="0x92CD" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES"/>
+        <enum value="0x92CE" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92CE" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_EXT"/>
+        <enum value="0x92CE" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES"/>
+        <enum value="0x92CF" name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92CF" name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_EXT"/>
+        <enum value="0x92CF" name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES"/>
+        <enum value="0x92D0" name="GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92D1" name="GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92D2" name="GL_MAX_VERTEX_ATOMIC_COUNTERS"/>
+        <enum value="0x92D3" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS"/>
+        <enum value="0x92D3" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_EXT"/>
+        <enum value="0x92D3" name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES"/>
+        <enum value="0x92D4" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS"/>
+        <enum value="0x92D4" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_EXT"/>
+        <enum value="0x92D4" name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES"/>
+        <enum value="0x92D5" name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS"/>
+        <enum value="0x92D5" name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS_EXT"/>
+        <enum value="0x92D5" name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES"/>
+        <enum value="0x92D6" name="GL_MAX_FRAGMENT_ATOMIC_COUNTERS"/>
+        <enum value="0x92D7" name="GL_MAX_COMBINED_ATOMIC_COUNTERS"/>
+        <enum value="0x92D8" name="GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE"/>
+        <enum value="0x92D9" name="GL_ACTIVE_ATOMIC_COUNTER_BUFFERS"/>
+        <enum value="0x92DA" name="GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX"/>
+        <enum value="0x92DB" name="GL_UNSIGNED_INT_ATOMIC_COUNTER"/>
+        <enum value="0x92DC" name="GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS"/>
+        <enum value="0x92DD" name="GL_FRAGMENT_COVERAGE_TO_COLOR_NV"/>
+        <enum value="0x92DE" name="GL_FRAGMENT_COVERAGE_COLOR_NV"/>
+        <enum value="0x92DF" name="GL_MESH_OUTPUT_PER_VERTEX_GRANULARITY_NV"/>
+        <enum value="0x92E0" name="GL_DEBUG_OUTPUT"/>
+        <enum value="0x92E0" name="GL_DEBUG_OUTPUT_KHR"/>
+        <enum value="0x92E1" name="GL_UNIFORM"/>
+        <enum value="0x92E2" name="GL_UNIFORM_BLOCK"/>
+        <enum value="0x92E3" name="GL_PROGRAM_INPUT"/>
+        <enum value="0x92E4" name="GL_PROGRAM_OUTPUT"/>
+        <enum value="0x92E5" name="GL_BUFFER_VARIABLE"/>
+        <enum value="0x92E6" name="GL_SHADER_STORAGE_BLOCK"/>
+        <enum value="0x92E7" name="GL_IS_PER_PATCH"/>
+        <enum value="0x92E7" name="GL_IS_PER_PATCH_EXT"/>
+        <enum value="0x92E7" name="GL_IS_PER_PATCH_OES"/>
+        <enum value="0x92E8" name="GL_VERTEX_SUBROUTINE"/>
+        <enum value="0x92E9" name="GL_TESS_CONTROL_SUBROUTINE"/>
+        <enum value="0x92EA" name="GL_TESS_EVALUATION_SUBROUTINE"/>
+        <enum value="0x92EB" name="GL_GEOMETRY_SUBROUTINE"/>
+        <enum value="0x92EC" name="GL_FRAGMENT_SUBROUTINE"/>
+        <enum value="0x92ED" name="GL_COMPUTE_SUBROUTINE"/>
+        <enum value="0x92EE" name="GL_VERTEX_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92EF" name="GL_TESS_CONTROL_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92F0" name="GL_TESS_EVALUATION_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92F1" name="GL_GEOMETRY_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92F2" name="GL_FRAGMENT_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92F3" name="GL_COMPUTE_SUBROUTINE_UNIFORM"/>
+        <enum value="0x92F4" name="GL_TRANSFORM_FEEDBACK_VARYING"/>
+        <enum value="0x92F5" name="GL_ACTIVE_RESOURCES"/>
+        <enum value="0x92F6" name="GL_MAX_NAME_LENGTH"/>
+        <enum value="0x92F7" name="GL_MAX_NUM_ACTIVE_VARIABLES"/>
+        <enum value="0x92F8" name="GL_MAX_NUM_COMPATIBLE_SUBROUTINES"/>
+        <enum value="0x92F9" name="GL_NAME_LENGTH"/>
+        <enum value="0x92FA" name="GL_TYPE"/>
+        <enum value="0x92FB" name="GL_ARRAY_SIZE"/>
+        <enum value="0x92FC" name="GL_OFFSET"/>
+        <enum value="0x92FD" name="GL_BLOCK_INDEX"/>
+        <enum value="0x92FE" name="GL_ARRAY_STRIDE"/>
+        <enum value="0x92FF" name="GL_MATRIX_STRIDE"/>
+        <enum value="0x9300" name="GL_IS_ROW_MAJOR"/>
+        <enum value="0x9301" name="GL_ATOMIC_COUNTER_BUFFER_INDEX"/>
+        <enum value="0x9302" name="GL_BUFFER_BINDING"/>
+        <enum value="0x9303" name="GL_BUFFER_DATA_SIZE"/>
+        <enum value="0x9304" name="GL_NUM_ACTIVE_VARIABLES"/>
+        <enum value="0x9305" name="GL_ACTIVE_VARIABLES"/>
+        <enum value="0x9306" name="GL_REFERENCED_BY_VERTEX_SHADER"/>
+        <enum value="0x9307" name="GL_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+        <enum value="0x9307" name="GL_REFERENCED_BY_TESS_CONTROL_SHADER_EXT"/>
+        <enum value="0x9307" name="GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES"/>
+        <enum value="0x9308" name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+        <enum value="0x9308" name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER_EXT"/>
+        <enum value="0x9308" name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES"/>
+        <enum value="0x9309" name="GL_REFERENCED_BY_GEOMETRY_SHADER"/>
+        <enum value="0x9309" name="GL_REFERENCED_BY_GEOMETRY_SHADER_EXT"/>
+        <enum value="0x9309" name="GL_REFERENCED_BY_GEOMETRY_SHADER_OES"/>
+        <enum value="0x930A" name="GL_REFERENCED_BY_FRAGMENT_SHADER"/>
+        <enum value="0x930B" name="GL_REFERENCED_BY_COMPUTE_SHADER"/>
+        <enum value="0x930C" name="GL_TOP_LEVEL_ARRAY_SIZE"/>
+        <enum value="0x930D" name="GL_TOP_LEVEL_ARRAY_STRIDE"/>
+        <enum value="0x930E" name="GL_LOCATION"/>
+        <enum value="0x930F" name="GL_LOCATION_INDEX"/>
+        <enum value="0x930F" name="GL_LOCATION_INDEX_EXT"/>
+        <enum value="0x9310" name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+        <enum value="0x9311" name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+        <enum value="0x9312" name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+        <enum value="0x9312" name="GL_FRAMEBUFFER_DEFAULT_LAYERS_EXT"/>
+        <enum value="0x9312" name="GL_FRAMEBUFFER_DEFAULT_LAYERS_OES"/>
+        <enum value="0x9313" name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+        <enum value="0x9314" name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+        <enum value="0x9315" name="GL_MAX_FRAMEBUFFER_WIDTH"/>
+        <enum value="0x9316" name="GL_MAX_FRAMEBUFFER_HEIGHT"/>
+        <enum value="0x9317" name="GL_MAX_FRAMEBUFFER_LAYERS"/>
+        <enum value="0x9317" name="GL_MAX_FRAMEBUFFER_LAYERS_EXT"/>
+        <enum value="0x9317" name="GL_MAX_FRAMEBUFFER_LAYERS_OES"/>
+        <enum value="0x9318" name="GL_MAX_FRAMEBUFFER_SAMPLES"/>
+            <unused start="0x9319" end="0x9326" vendor="NV"/>
+        <enum value="0x9327" name="GL_RASTER_MULTISAMPLE_EXT"/>
+        <enum value="0x9328" name="GL_RASTER_SAMPLES_EXT"/>
+        <enum value="0x9329" name="GL_MAX_RASTER_SAMPLES_EXT"/>
+        <enum value="0x932A" name="GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT"/>
+        <enum value="0x932B" name="GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT"/>
+        <enum value="0x932C" name="GL_EFFECTIVE_RASTER_SAMPLES_EXT"/>
+        <enum value="0x932D" name="GL_DEPTH_SAMPLES_NV"/>
+        <enum value="0x932E" name="GL_STENCIL_SAMPLES_NV"/>
+        <enum value="0x932F" name="GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV"/>
+        <enum value="0x9330" name="GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV"/>
+        <enum value="0x9331" name="GL_COVERAGE_MODULATION_TABLE_NV"/>
+        <enum value="0x9332" name="GL_COVERAGE_MODULATION_NV"/>
+        <enum value="0x9333" name="GL_COVERAGE_MODULATION_TABLE_SIZE_NV"/>
+            <unused start="0x9334" end="0x9338" vendor="NV"/>
+        <enum value="0x9339" name="GL_WARP_SIZE_NV"/>
+        <enum value="0x933A" name="GL_WARPS_PER_SM_NV"/>
+        <enum value="0x933B" name="GL_SM_COUNT_NV"/>
+        <enum value="0x933C" name="GL_FILL_RECTANGLE_NV"/>
+        <enum value="0x933D" name="GL_SAMPLE_LOCATION_SUBPIXEL_BITS_ARB"/>
+        <enum value="0x933D" name="GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV"/>
+        <enum value="0x933E" name="GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_ARB"/>
+        <enum value="0x933E" name="GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV"/>
+        <enum value="0x933F" name="GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_ARB"/>
+        <enum value="0x933F" name="GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV"/>
+        <enum value="0x9340" name="GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_ARB"/>
+        <enum value="0x9340" name="GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV"/>
+        <enum value="0x9341" name="GL_PROGRAMMABLE_SAMPLE_LOCATION_ARB"/>
+        <enum value="0x9341" name="GL_PROGRAMMABLE_SAMPLE_LOCATION_NV"/>
+        <enum value="0x9342" name="GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_ARB"/>
+        <enum value="0x9342" name="GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV"/>
+        <enum value="0x9343" name="GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_ARB"/>
+        <enum value="0x9343" name="GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV"/>
+        <enum value="0x9344" name="GL_MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB"/>
+        <enum value="0x9345" name="GL_MAX_COMPUTE_VARIABLE_GROUP_SIZE_ARB"/>
+        <enum value="0x9346" name="GL_CONSERVATIVE_RASTERIZATION_NV"/>
+        <enum value="0x9347" name="GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV"/>
+        <enum value="0x9348" name="GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV"/>
+        <enum value="0x9349" name="GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV"/>
+        <enum value="0x934A" name="GL_LOCATION_COMPONENT"/>
+        <enum value="0x934B" name="GL_TRANSFORM_FEEDBACK_BUFFER_INDEX"/>
+        <enum value="0x934C" name="GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE"/>
+        <enum value="0x934D" name="GL_ALPHA_TO_COVERAGE_DITHER_DEFAULT_NV"/>
+        <enum value="0x934E" name="GL_ALPHA_TO_COVERAGE_DITHER_ENABLE_NV"/>
+        <enum value="0x934F" name="GL_ALPHA_TO_COVERAGE_DITHER_DISABLE_NV"/>
+        <enum value="0x9350" name="GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV"/>
+        <enum value="0x9351" name="GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV"/>
+        <enum value="0x9352" name="GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV"/>
+        <enum value="0x9353" name="GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV"/>
+        <enum value="0x9354" name="GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV"/>
+        <enum value="0x9355" name="GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV"/>
+        <enum value="0x9356" name="GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV"/>
+        <enum value="0x9357" name="GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV"/>
+        <enum value="0x9358" name="GL_VIEWPORT_SWIZZLE_X_NV"/>
+        <enum value="0x9359" name="GL_VIEWPORT_SWIZZLE_Y_NV"/>
+        <enum value="0x935A" name="GL_VIEWPORT_SWIZZLE_Z_NV"/>
+        <enum value="0x935B" name="GL_VIEWPORT_SWIZZLE_W_NV"/>
+        <enum value="0x935C" name="GL_CLIP_ORIGIN"/>
+        <enum value="0x935C" name="GL_CLIP_ORIGIN_EXT" alias="GL_CLIP_ORIGIN"/>
+        <enum value="0x935D" name="GL_CLIP_DEPTH_MODE"/>
+        <enum value="0x935D" name="GL_CLIP_DEPTH_MODE_EXT" alias="GL_CLIP_DEPTH_MODE"/>
+        <enum value="0x935E" name="GL_NEGATIVE_ONE_TO_ONE"/>
+        <enum value="0x935E" name="GL_NEGATIVE_ONE_TO_ONE_EXT" alias="GL_NEGATIVE_ONE_TO_ONE"/>
+        <enum value="0x935F" name="GL_ZERO_TO_ONE"/>
+        <enum value="0x935F" name="GL_ZERO_TO_ONE_EXT" alias="GL_ZERO_TO_ONE"/>
+            <unused start="0x9360" end="0x9364" vendor="NV"/>
+        <enum value="0x9365" name="GL_CLEAR_TEXTURE"/>
+        <enum value="0x9366" name="GL_TEXTURE_REDUCTION_MODE_ARB"/>
+        <enum value="0x9366" name="GL_TEXTURE_REDUCTION_MODE_EXT" alias="GL_TEXTURE_REDUCTION_MODE_ARB"/>
+        <enum value="0x9367" name="GL_WEIGHTED_AVERAGE_ARB"/>
+        <enum value="0x9367" name="GL_WEIGHTED_AVERAGE_EXT" alias="GL_WEIGHTED_AVERAGE_ARB"/>
+        <enum value="0x9368" name="GL_FONT_GLYPHS_AVAILABLE_NV"/>
+        <enum value="0x9369" name="GL_FONT_TARGET_UNAVAILABLE_NV"/>
+        <enum value="0x936A" name="GL_FONT_UNAVAILABLE_NV"/>
+        <enum value="0x936B" name="GL_FONT_UNINTELLIGIBLE_NV"/>
+        <enum value="0x936C" name="GL_STANDARD_FONT_FORMAT_NV"/>
+        <enum value="0x936D" name="GL_FRAGMENT_INPUT_NV"/>
+        <enum value="0x936E" name="GL_UNIFORM_BUFFER_UNIFIED_NV"/>
+        <enum value="0x936F" name="GL_UNIFORM_BUFFER_ADDRESS_NV"/>
+        <enum value="0x9370" name="GL_UNIFORM_BUFFER_LENGTH_NV"/>
+        <enum value="0x9371" name="GL_MULTISAMPLES_NV"/>
+        <enum value="0x9372" name="GL_SUPERSAMPLE_SCALE_X_NV"/>
+        <enum value="0x9373" name="GL_SUPERSAMPLE_SCALE_Y_NV"/>
+        <enum value="0x9374" name="GL_CONFORMANT_NV"/>
+            <unused start="0x9375" end="0x9378" vendor="NV"/>
+        <enum value="0x9379" name="GL_CONSERVATIVE_RASTER_DILATE_NV"/>
+        <enum value="0x937A" name="GL_CONSERVATIVE_RASTER_DILATE_RANGE_NV"/>
+        <enum value="0x937B" name="GL_CONSERVATIVE_RASTER_DILATE_GRANULARITY_NV"/>
+        <enum value="0x937C" name="GL_VIEWPORT_POSITION_W_SCALE_NV"/>
+        <enum value="0x937D" name="GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV"/>
+        <enum value="0x937E" name="GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV"/>
+        <enum value="0x937F" name="GL_REPRESENTATIVE_FRAGMENT_TEST_NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9380" end="0x939F" vendor="ARB">
+        <enum value="0x9380" name="GL_NUM_SAMPLE_COUNTS"/>
+        <enum value="0x9381" name="GL_MULTISAMPLE_LINE_WIDTH_RANGE_ARB"/>
+        <enum value="0x9381" name="GL_MULTISAMPLE_LINE_WIDTH_RANGE"/>
+        <enum value="0x9382" name="GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY_ARB"/>
+        <enum value="0x9382" name="GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY"/>
+        <enum value="0x9383" name="GL_VIEW_CLASS_EAC_R11"/>
+        <enum value="0x9384" name="GL_VIEW_CLASS_EAC_RG11"/>
+        <enum value="0x9385" name="GL_VIEW_CLASS_ETC2_RGB"/>
+        <enum value="0x9386" name="GL_VIEW_CLASS_ETC2_RGBA"/>
+        <enum value="0x9387" name="GL_VIEW_CLASS_ETC2_EAC_RGBA"/>
+        <enum value="0x9388" name="GL_VIEW_CLASS_ASTC_4x4_RGBA"/>
+        <enum value="0x9389" name="GL_VIEW_CLASS_ASTC_5x4_RGBA"/>
+        <enum value="0x938A" name="GL_VIEW_CLASS_ASTC_5x5_RGBA"/>
+        <enum value="0x938B" name="GL_VIEW_CLASS_ASTC_6x5_RGBA"/>
+        <enum value="0x938C" name="GL_VIEW_CLASS_ASTC_6x6_RGBA"/>
+        <enum value="0x938D" name="GL_VIEW_CLASS_ASTC_8x5_RGBA"/>
+        <enum value="0x938E" name="GL_VIEW_CLASS_ASTC_8x6_RGBA"/>
+        <enum value="0x938F" name="GL_VIEW_CLASS_ASTC_8x8_RGBA"/>
+        <enum value="0x9390" name="GL_VIEW_CLASS_ASTC_10x5_RGBA"/>
+        <enum value="0x9391" name="GL_VIEW_CLASS_ASTC_10x6_RGBA"/>
+        <enum value="0x9392" name="GL_VIEW_CLASS_ASTC_10x8_RGBA"/>
+        <enum value="0x9393" name="GL_VIEW_CLASS_ASTC_10x10_RGBA"/>
+        <enum value="0x9394" name="GL_VIEW_CLASS_ASTC_12x10_RGBA"/>
+        <enum value="0x9395" name="GL_VIEW_CLASS_ASTC_12x12_RGBA"/>
+            <unused start="0x9396" end="0x939F" vendor="ARB" comment="reserved for ASTC 3D interactions with ARB_ifq2"/>
+            <!-- <enum value="0x9396" name="GL_VIEW_CLASS_ASTC_3x3x3_RGBA"/> -->
+            <!-- <enum value="0x9397" name="GL_VIEW_CLASS_ASTC_4x3x3_RGBA"/> -->
+            <!-- <enum value="0x9398" name="GL_VIEW_CLASS_ASTC_4x4x3_RGBA"/> -->
+            <!-- <enum value="0x9399" name="GL_VIEW_CLASS_ASTC_4x4x4_RGBA"/> -->
+            <!-- <enum value="0x939A" name="GL_VIEW_CLASS_ASTC_5x4x4_RGBA"/> -->
+            <!-- <enum value="0x939B" name="GL_VIEW_CLASS_ASTC_5x5x4_RGBA"/> -->
+            <!-- <enum value="0x939C" name="GL_VIEW_CLASS_ASTC_5x5x5_RGBA"/> -->
+            <!-- <enum value="0x939D" name="GL_VIEW_CLASS_ASTC_6x5x5_RGBA"/> -->
+            <!-- <enum value="0x939E" name="GL_VIEW_CLASS_ASTC_6x6x5_RGBA"/> -->
+            <!-- <enum value="0x939F" name="GL_VIEW_CLASS_ASTC_6x6x6_RGBA"/> -->
+    </enums>
+
+    <enums namespace="GL" start="0x93A0" end="0x93AF" vendor="ANGLE" comment="Khronos bug 8100">
+        <enum value="0x93A0" name="GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE"/>
+        <enum value="0x93A1" name="GL_BGRA8_EXT"/>
+        <enum value="0x93A2" name="GL_TEXTURE_USAGE_ANGLE"/>
+        <enum value="0x93A3" name="GL_FRAMEBUFFER_ATTACHMENT_ANGLE"/>
+        <enum value="0x93A4" name="GL_PACK_REVERSE_ROW_ORDER_ANGLE"/>
+            <unused start="0x93A5" vendor="ANGLE"/>
+        <enum value="0x93A6" name="GL_PROGRAM_BINARY_ANGLE"/>
+            <unused start="0x93A7" end="0x93AF" vendor="ANGLE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x93B0" end="0x93EF" vendor="OES" comment="Khronos bug 8853">
+        <enum value="0x93B0" name="GL_COMPRESSED_RGBA_ASTC_4x4"/>
+        <enum value="0x93B0" name="GL_COMPRESSED_RGBA_ASTC_4x4_KHR"/>
+        <enum value="0x93B1" name="GL_COMPRESSED_RGBA_ASTC_5x4"/>
+        <enum value="0x93B1" name="GL_COMPRESSED_RGBA_ASTC_5x4_KHR"/>
+        <enum value="0x93B2" name="GL_COMPRESSED_RGBA_ASTC_5x5"/>
+        <enum value="0x93B2" name="GL_COMPRESSED_RGBA_ASTC_5x5_KHR"/>
+        <enum value="0x93B3" name="GL_COMPRESSED_RGBA_ASTC_6x5"/>
+        <enum value="0x93B3" name="GL_COMPRESSED_RGBA_ASTC_6x5_KHR"/>
+        <enum value="0x93B4" name="GL_COMPRESSED_RGBA_ASTC_6x6"/>
+        <enum value="0x93B4" name="GL_COMPRESSED_RGBA_ASTC_6x6_KHR"/>
+        <enum value="0x93B5" name="GL_COMPRESSED_RGBA_ASTC_8x5"/>
+        <enum value="0x93B5" name="GL_COMPRESSED_RGBA_ASTC_8x5_KHR"/>
+        <enum value="0x93B6" name="GL_COMPRESSED_RGBA_ASTC_8x6"/>
+        <enum value="0x93B6" name="GL_COMPRESSED_RGBA_ASTC_8x6_KHR"/>
+        <enum value="0x93B7" name="GL_COMPRESSED_RGBA_ASTC_8x8"/>
+        <enum value="0x93B7" name="GL_COMPRESSED_RGBA_ASTC_8x8_KHR"/>
+        <enum value="0x93B8" name="GL_COMPRESSED_RGBA_ASTC_10x5"/>
+        <enum value="0x93B8" name="GL_COMPRESSED_RGBA_ASTC_10x5_KHR"/>
+        <enum value="0x93B9" name="GL_COMPRESSED_RGBA_ASTC_10x6"/>
+        <enum value="0x93B9" name="GL_COMPRESSED_RGBA_ASTC_10x6_KHR"/>
+        <enum value="0x93BA" name="GL_COMPRESSED_RGBA_ASTC_10x8"/>
+        <enum value="0x93BA" name="GL_COMPRESSED_RGBA_ASTC_10x8_KHR"/>
+        <enum value="0x93BB" name="GL_COMPRESSED_RGBA_ASTC_10x10"/>
+        <enum value="0x93BB" name="GL_COMPRESSED_RGBA_ASTC_10x10_KHR"/>
+        <enum value="0x93BC" name="GL_COMPRESSED_RGBA_ASTC_12x10"/>
+        <enum value="0x93BC" name="GL_COMPRESSED_RGBA_ASTC_12x10_KHR"/>
+        <enum value="0x93BD" name="GL_COMPRESSED_RGBA_ASTC_12x12"/>
+        <enum value="0x93BD" name="GL_COMPRESSED_RGBA_ASTC_12x12_KHR"/>
+            <unused start="0x93BE" end="0x93BF" vendor="OES"/>
+        <enum value="0x93C0" name="GL_COMPRESSED_RGBA_ASTC_3x3x3_OES"/>
+        <enum value="0x93C1" name="GL_COMPRESSED_RGBA_ASTC_4x3x3_OES"/>
+        <enum value="0x93C2" name="GL_COMPRESSED_RGBA_ASTC_4x4x3_OES"/>
+        <enum value="0x93C3" name="GL_COMPRESSED_RGBA_ASTC_4x4x4_OES"/>
+        <enum value="0x93C4" name="GL_COMPRESSED_RGBA_ASTC_5x4x4_OES"/>
+        <enum value="0x93C5" name="GL_COMPRESSED_RGBA_ASTC_5x5x4_OES"/>
+        <enum value="0x93C6" name="GL_COMPRESSED_RGBA_ASTC_5x5x5_OES"/>
+        <enum value="0x93C7" name="GL_COMPRESSED_RGBA_ASTC_6x5x5_OES"/>
+        <enum value="0x93C8" name="GL_COMPRESSED_RGBA_ASTC_6x6x5_OES"/>
+        <enum value="0x93C9" name="GL_COMPRESSED_RGBA_ASTC_6x6x6_OES"/>
+            <unused start="0x93CA" end="0x93CF" vendor="OES"/>
+        <enum value="0x93D0" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4"/>
+        <enum value="0x93D0" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR"/>
+        <enum value="0x93D1" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4"/>
+        <enum value="0x93D1" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR"/>
+        <enum value="0x93D2" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5"/>
+        <enum value="0x93D2" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR"/>
+        <enum value="0x93D3" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5"/>
+        <enum value="0x93D3" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR"/>
+        <enum value="0x93D4" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6"/>
+        <enum value="0x93D4" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR"/>
+        <enum value="0x93D5" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5"/>
+        <enum value="0x93D5" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR"/>
+        <enum value="0x93D6" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6"/>
+        <enum value="0x93D6" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR"/>
+        <enum value="0x93D7" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8"/>
+        <enum value="0x93D7" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR"/>
+        <enum value="0x93D8" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5"/>
+        <enum value="0x93D8" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR"/>
+        <enum value="0x93D9" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6"/>
+        <enum value="0x93D9" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR"/>
+        <enum value="0x93DA" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8"/>
+        <enum value="0x93DA" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR"/>
+        <enum value="0x93DB" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10"/>
+        <enum value="0x93DB" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR"/>
+        <enum value="0x93DC" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10"/>
+        <enum value="0x93DC" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR"/>
+        <enum value="0x93DD" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12"/>
+        <enum value="0x93DD" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR"/>
+            <unused start="0x93DE" end="0x93DF" vendor="OES"/>
+        <enum value="0x93E0" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES"/>
+        <enum value="0x93E1" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES"/>
+        <enum value="0x93E2" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES"/>
+        <enum value="0x93E3" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES"/>
+        <enum value="0x93E4" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES"/>
+        <enum value="0x93E5" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES"/>
+        <enum value="0x93E6" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES"/>
+        <enum value="0x93E7" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES"/>
+        <enum value="0x93E8" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES"/>
+        <enum value="0x93E9" name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES"/>
+            <unused start="0x93EA" end="0x93EF" vendor="OES"/>
+    </enums>
+
+    <enums namespace="GL" start="0x93F0" end="0x94EF" vendor="APPLE" comment="Khronos bug 10233">
+        <enum value="0x93F0" name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG"/>
+        <enum value="0x93F1" name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG"/>
+            <unused start="0x93F2" end="0x94EF" vendor="APPLE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x94F0" end="0x950F" vendor="INTEL" comment="Khronos bug 11345">
+        <enum value="0x94F0" name="GL_PERFQUERY_COUNTER_EVENT_INTEL"/>
+        <enum value="0x94F1" name="GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL"/>
+        <enum value="0x94F2" name="GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL"/>
+        <enum value="0x94F3" name="GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL"/>
+        <enum value="0x94F4" name="GL_PERFQUERY_COUNTER_RAW_INTEL"/>
+        <enum value="0x94F5" name="GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL"/>
+            <unused start="0x94F6" end="0x94F7" vendor="INTEL"/>
+        <enum value="0x94F8" name="GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL"/>
+        <enum value="0x94F9" name="GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL"/>
+        <enum value="0x94FA" name="GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL"/>
+        <enum value="0x94FB" name="GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL"/>
+        <enum value="0x94FC" name="GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL"/>
+        <enum value="0x94FD" name="GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL"/>
+        <enum value="0x94FE" name="GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL"/>
+        <enum value="0x94FF" name="GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL"/>
+        <enum value="0x9500" name="GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL"/>
+            <unused start="0x9501" end="0x950F" vendor="INTEL"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9510" end="0x952F" vendor="Broadcom" comment="Khronos bug 12203">
+            <unused start="0x9510" end="0x952F" vendor="Broadcom"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9530" end="0x962F" vendor="NV" comment="Khronos bug 12977">
+        <enum value="0x9530" name="GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT"/>
+        <enum value="0x9531" name="GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT"/>
+            <unused start="0x9532" end="0x9535" vendor="NV"/>
+        <enum value="0x9536" name="GL_MAX_MESH_TOTAL_MEMORY_SIZE_NV"/>
+        <enum value="0x9537" name="GL_MAX_TASK_TOTAL_MEMORY_SIZE_NV"/>
+        <enum value="0x9538" name="GL_MAX_MESH_OUTPUT_VERTICES_NV"/>
+        <enum value="0x9539" name="GL_MAX_MESH_OUTPUT_PRIMITIVES_NV"/>
+        <enum value="0x953A" name="GL_MAX_TASK_OUTPUT_COUNT_NV"/>
+        <enum value="0x953B" name="GL_MAX_MESH_WORK_GROUP_SIZE_NV"/>
+        <enum value="0x953C" name="GL_MAX_TASK_WORK_GROUP_SIZE_NV"/>
+        <enum value="0x953D" name="GL_MAX_DRAW_MESH_TASKS_COUNT_NV"/>
+        <enum value="0x953E" name="GL_MESH_WORK_GROUP_SIZE_NV"/>
+        <enum value="0x953F" name="GL_TASK_WORK_GROUP_SIZE_NV"/>
+        <enum value="0x9540" name="GL_QUERY_RESOURCE_TYPE_VIDMEM_ALLOC_NV"/>
+            <unused start="0x9541" vendor="NV"/>
+        <enum value="0x9542" name="GL_QUERY_RESOURCE_MEMTYPE_VIDMEM_NV"/>
+        <enum value="0x9543" name="GL_MESH_OUTPUT_PER_PRIMITIVE_GRANULARITY_NV"/>
+        <enum value="0x9544" name="GL_QUERY_RESOURCE_SYS_RESERVED_NV"/>
+        <enum value="0x9545" name="GL_QUERY_RESOURCE_TEXTURE_NV"/>
+        <enum value="0x9546" name="GL_QUERY_RESOURCE_RENDERBUFFER_NV"/>
+        <enum value="0x9547" name="GL_QUERY_RESOURCE_BUFFEROBJECT_NV"/>
+        <enum value="0x9548" name="GL_PER_GPU_STORAGE_NV"/>
+        <enum value="0x9549" name="GL_MULTICAST_PROGRAMMABLE_SAMPLE_LOCATION_NV"/>
+            <unused start="0x954A" end="0x954C" vendor="NV"/>
+        <enum value="0x954D" name="GL_CONSERVATIVE_RASTER_MODE_NV"/>
+        <enum value="0x954E" name="GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV"/>
+        <enum value="0x954F" name="GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV"/>
+        <enum value="0x9550" name="GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_NV"/>
+        <enum value="0x9551" name="GL_SHADER_BINARY_FORMAT_SPIR_V"/>
+        <enum value="0x9551" name="GL_SHADER_BINARY_FORMAT_SPIR_V_ARB" alias="GL_SHADER_BINARY_FORMAT_SPIR_V"/>
+        <enum value="0x9552" name="GL_SPIR_V_BINARY"/>
+        <enum value="0x9552" name="GL_SPIR_V_BINARY_ARB" alias="GL_SPIR_V_BINARY"/>
+        <enum value="0x9553" name="GL_SPIR_V_EXTENSIONS"/>
+        <enum value="0x9554" name="GL_NUM_SPIR_V_EXTENSIONS"/>
+        <enum value="0x9555" name="GL_SCISSOR_TEST_EXCLUSIVE_NV"/>
+        <enum value="0x9556" name="GL_SCISSOR_BOX_EXCLUSIVE_NV"/>
+        <enum value="0x9557" name="GL_MAX_MESH_VIEWS_NV"/>
+        <enum value="0x9558" name="GL_RENDER_GPU_MASK_NV"/>
+        <enum value="0x9559" name="GL_MESH_SHADER_NV"/>
+        <enum value="0x955A" name="GL_TASK_SHADER_NV"/>
+        <enum value="0x955B" name="GL_SHADING_RATE_IMAGE_BINDING_NV"/>
+        <enum value="0x955C" name="GL_SHADING_RATE_IMAGE_TEXEL_WIDTH_NV"/>
+        <enum value="0x955D" name="GL_SHADING_RATE_IMAGE_TEXEL_HEIGHT_NV"/>
+        <enum value="0x955E" name="GL_SHADING_RATE_IMAGE_PALETTE_SIZE_NV"/>
+        <enum value="0x955F" name="GL_MAX_COARSE_FRAGMENT_SAMPLES_NV"/>
+            <unused start="0x9560" end="0x9562" vendor="NV"/>
+        <enum value="0x9563" name="GL_SHADING_RATE_IMAGE_NV"/>
+        <enum value="0x9564" name="GL_SHADING_RATE_NO_INVOCATIONS_NV"/>
+        <enum value="0x9565" name="GL_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV"/>
+        <enum value="0x9566" name="GL_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV"/>
+        <enum value="0x9567" name="GL_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV"/>
+        <enum value="0x9568" name="GL_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV"/>
+        <enum value="0x9569" name="GL_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV"/>
+        <enum value="0x956A" name="GL_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV"/>
+        <enum value="0x956B" name="GL_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV"/>
+        <enum value="0x956C" name="GL_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV"/>
+        <enum value="0x956D" name="GL_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV"/>
+        <enum value="0x956E" name="GL_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV"/>
+        <enum value="0x956F" name="GL_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV"/>
+            <unused start="0x9570" end="0x9578" vendor="NV"/>
+        <enum value="0x9579" name="GL_MESH_VERTICES_OUT_NV"/>
+        <enum value="0x957A" name="GL_MESH_PRIMITIVES_OUT_NV"/>
+        <enum value="0x957B" name="GL_MESH_OUTPUT_TYPE_NV"/>
+        <enum value="0x957C" name="GL_MESH_SUBROUTINE_NV"/>
+        <enum value="0x957D" name="GL_TASK_SUBROUTINE_NV"/>
+        <enum value="0x957E" name="GL_MESH_SUBROUTINE_UNIFORM_NV"/>
+        <enum value="0x957F" name="GL_TASK_SUBROUTINE_UNIFORM_NV"/>
+        <enum value="0x9580" name="GL_TEXTURE_TILING_EXT"/>
+        <enum value="0x9581" name="GL_DEDICATED_MEMORY_OBJECT_EXT"/>
+        <enum value="0x9582" name="GL_NUM_TILING_TYPES_EXT"/>
+        <enum value="0x9583" name="GL_TILING_TYPES_EXT"/>
+        <enum value="0x9584" name="GL_OPTIMAL_TILING_EXT"/>
+        <enum value="0x9585" name="GL_LINEAR_TILING_EXT"/>
+        <enum value="0x9586" name="GL_HANDLE_TYPE_OPAQUE_FD_EXT"/>
+        <enum value="0x9587" name="GL_HANDLE_TYPE_OPAQUE_WIN32_EXT"/>
+        <enum value="0x9588" name="GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT"/>
+        <enum value="0x9589" name="GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT"/>
+        <enum value="0x958A" name="GL_HANDLE_TYPE_D3D12_RESOURCE_EXT"/>
+        <enum value="0x958B" name="GL_HANDLE_TYPE_D3D11_IMAGE_EXT"/>
+        <enum value="0x958C" name="GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT"/>
+        <enum value="0x958D" name="GL_LAYOUT_GENERAL_EXT"/>
+        <enum value="0x958E" name="GL_LAYOUT_COLOR_ATTACHMENT_EXT"/>
+        <enum value="0x958F" name="GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT"/>
+        <enum value="0x9590" name="GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT"/>
+        <enum value="0x9591" name="GL_LAYOUT_SHADER_READ_ONLY_EXT"/>
+        <enum value="0x9592" name="GL_LAYOUT_TRANSFER_SRC_EXT"/>
+        <enum value="0x9593" name="GL_LAYOUT_TRANSFER_DST_EXT"/>
+        <enum value="0x9594" name="GL_HANDLE_TYPE_D3D12_FENCE_EXT"/>
+        <enum value="0x9595" name="GL_D3D12_FENCE_VALUE_EXT"/>
+        <enum value="0x9596" name="GL_NUM_DEVICE_UUIDS_EXT"/>
+        <enum value="0x9597" name="GL_DEVICE_UUID_EXT"/>
+        <enum value="0x9598" name="GL_DRIVER_UUID_EXT"/>
+        <enum value="0x9599" name="GL_DEVICE_LUID_EXT"/>
+        <enum value="0x959A" name="GL_DEVICE_NODE_MASK_EXT"/>
+        <enum value="0x959B" name="GL_PROTECTED_MEMORY_OBJECT_EXT"/>
+        <enum value="0x959C" name="GL_UNIFORM_BLOCK_REFERENCED_BY_MESH_SHADER_NV"/>
+        <enum value="0x959D" name="GL_UNIFORM_BLOCK_REFERENCED_BY_TASK_SHADER_NV"/>
+        <enum value="0x959E" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_MESH_SHADER_NV"/>
+        <enum value="0x959F" name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TASK_SHADER_NV"/>
+        <enum value="0x95A0" name="GL_REFERENCED_BY_MESH_SHADER_NV"/>
+        <enum value="0x95A1" name="GL_REFERENCED_BY_TASK_SHADER_NV"/>
+        <enum value="0x95A2" name="GL_MAX_MESH_WORK_GROUP_INVOCATIONS_NV"/>
+        <enum value="0x95A3" name="GL_MAX_TASK_WORK_GROUP_INVOCATIONS_NV"/>
+        <enum value="0x95A4" name="GL_ATTACHED_MEMORY_OBJECT_NV"/>
+        <enum value="0x95A5" name="GL_ATTACHED_MEMORY_OFFSET_NV"/>
+        <enum value="0x95A6" name="GL_MEMORY_ATTACHABLE_ALIGNMENT_NV"/>
+        <enum value="0x95A7" name="GL_MEMORY_ATTACHABLE_SIZE_NV"/>
+        <enum value="0x95A8" name="GL_MEMORY_ATTACHABLE_NV"/>
+        <enum value="0x95A9" name="GL_DETACHED_MEMORY_INCARNATION_NV"/>
+        <enum value="0x95AA" name="GL_DETACHED_TEXTURES_NV"/>
+        <enum value="0x95AB" name="GL_DETACHED_BUFFERS_NV"/>
+        <enum value="0x95AC" name="GL_MAX_DETACHED_TEXTURES_NV"/>
+        <enum value="0x95AD" name="GL_MAX_DETACHED_BUFFERS_NV"/>
+        <enum value="0x95AE" name="GL_SHADING_RATE_SAMPLE_ORDER_DEFAULT_NV"/>
+        <enum value="0x95AF" name="GL_SHADING_RATE_SAMPLE_ORDER_PIXEL_MAJOR_NV"/>
+        <enum value="0x95B0" name="GL_SHADING_RATE_SAMPLE_ORDER_SAMPLE_MAJOR_NV"/>
+        <unused start="0x9581" end="0x962F" vendor="NV"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9630" end="0x963F" vendor="Oculus" comment="Email from Cass Everitt">
+        <enum value="0x9630" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR"/>
+        <enum value="0x9631" name="GL_MAX_VIEWS_OVR"/>
+        <enum value="0x9632" name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR"/>
+        <enum value="0x9633" name="GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR"/>
+            <unused start="0x9634" end="0x963F" vendor="Oculus"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9640" end="0x964F" vendor="Mediatek" comment="Khronos bug 14294">
+        <enum value="0x9640" name="GL_GS_SHADER_BINARY_MTK"/>
+        <enum value="0x9641" name="GL_GS_PROGRAM_BINARY_MTK"/>
+            <unused start="0x9642" end="0x964F" vendor="Mediatek"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9650" end="0x968F" vendor="IMG" comment="Khronos bug 14977">
+        <enum value="0x9650" name="GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_FAST_SIZE_EXT"/>
+        <enum value="0x9651" name="GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_SIZE_EXT"/>
+        <enum value="0x9652" name="GL_FRAMEBUFFER_INCOMPLETE_INSUFFICIENT_SHADER_COMBINED_LOCAL_STORAGE_EXT"/>
+            <unused start="0x9653" end="0x968F" vendor="IMG"/>
+    </enums>
+
+    <enums namespace="GL" start="0x9690" end="0x969F" vendor="ANGLE" comment="Khronos bug 15423">
+            <unused start="0x9690" end="0x969F" vendor="ANGLE"/>
+    </enums>
+
+    <enums namespace="GL" start="0x96A0" end="0x96AF" vendor="Qualcomm" comment="contact Maurice Ribble">
+            <unused start="0x96A0" end="0x96A1" vendor="Qualcomm"/>
+        <enum value="0x96A2" name="GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM"/>
+        <enum value="0x96A3" name="GL_VALIDATE_SHADER_BINARY_QCOM"/>
+            <unused start="0x96A4" end="0x96AF" vendor="Qualcomm"/>
+    </enums>
+
+<!-- Enums reservable for future use. To reserve a new range, allocate one
+     or more multiples of 16 starting at the lowest available point in this
+     block and note it in a new <enums> block immediately above.
+
+     Please remember that new enumerant allocations must be obtained by
+     request to the Khronos API registrar (see comments at the top of this
+     file) File requests in the Khronos Bugzilla, OpenGL project, Registry
+     component. -->
+
+    <enums namespace="GL" start="0x96B0" end="99999" vendor="ARB" comment="RESERVED FOR FUTURE ALLOCATIONS BY KHRONOS">
+        <unused start="0x96B0" end="99999" comment="RESERVED"/>
+    </enums>
+
+<!-- Historical large block allocations, all unused except (in older days) by IBM -->
+    <enums namespace="GL" start="100000" end="100999" vendor="ARB" comment="GLU enums"/>
+    <enums namespace="GL" start="101000" end="101999" vendor="ARB" comment="Conformance test enums"/>
+    <enums namespace="GL" start="102000" end="102999" vendor="ARB" comment="Unused, unlikely to ever be used"/>
+
+    <enums namespace="GL" start="103000" end="103999" vendor="IBM" comment="IBM is out of the graphics hardware business. Most of this range will remain unused.">
+        <enum value="0x19262" name="GL_RASTER_POSITION_UNCLIPPED_IBM"/>
+        <enum value="103050" name="GL_CULL_VERTEX_IBM"/>
+        <enum value="103060" name="GL_ALL_STATIC_DATA_IBM"/>
+        <enum value="103061" name="GL_STATIC_VERTEX_ARRAY_IBM"/>
+        <enum value="103070" name="GL_VERTEX_ARRAY_LIST_IBM"/>
+        <enum value="103071" name="GL_NORMAL_ARRAY_LIST_IBM"/>
+        <enum value="103072" name="GL_COLOR_ARRAY_LIST_IBM"/>
+        <enum value="103073" name="GL_INDEX_ARRAY_LIST_IBM"/>
+        <enum value="103074" name="GL_TEXTURE_COORD_ARRAY_LIST_IBM"/>
+        <enum value="103075" name="GL_EDGE_FLAG_ARRAY_LIST_IBM"/>
+        <enum value="103076" name="GL_FOG_COORDINATE_ARRAY_LIST_IBM"/>
+        <enum value="103077" name="GL_SECONDARY_COLOR_ARRAY_LIST_IBM"/>
+        <enum value="103080" name="GL_VERTEX_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103081" name="GL_NORMAL_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103082" name="GL_COLOR_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103083" name="GL_INDEX_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103084" name="GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103085" name="GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103086" name="GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM"/>
+        <enum value="103087" name="GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM"/>
+    </enums>
+
+    <enums namespace="GL" start="104000" end="104999" vendor="NEC" comment="NEC may be out of the graphics hardware business?"/>
+    <enums namespace="GL" start="105000" end="105999" vendor="Compaq" comment="Compaq was acquired by HP"/>
+    <enums namespace="GL" start="106000" end="106999" vendor="KPC" comment="Kubota Pacific is out of business"/>
+    <enums namespace="GL" start="107000" end="107999" vendor="PGI" comment="Portland Graphics was acquired by Template Graphics, which is out of business">
+            <!-- lots of <unused> areas here which won't be computed yet -->
+        <enum value="0x1A1F8" name="GL_PREFER_DOUBLEBUFFER_HINT_PGI"/>
+        <enum value="0x1A1FD" name="GL_CONSERVE_MEMORY_HINT_PGI"/>
+        <enum value="0x1A1FE" name="GL_RECLAIM_MEMORY_HINT_PGI"/>
+        <enum value="0x1A202" name="GL_NATIVE_GRAPHICS_HANDLE_PGI"/>
+        <enum value="0x1A203" name="GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI"/>
+        <enum value="0x1A204" name="GL_NATIVE_GRAPHICS_END_HINT_PGI"/>
+        <enum value="0x1A20C" name="GL_ALWAYS_FAST_HINT_PGI"/>
+        <enum value="0x1A20D" name="GL_ALWAYS_SOFT_HINT_PGI"/>
+        <enum value="0x1A20E" name="GL_ALLOW_DRAW_OBJ_HINT_PGI"/>
+        <enum value="0x1A20F" name="GL_ALLOW_DRAW_WIN_HINT_PGI"/>
+        <enum value="0x1A210" name="GL_ALLOW_DRAW_FRG_HINT_PGI"/>
+        <enum value="0x1A211" name="GL_ALLOW_DRAW_MEM_HINT_PGI"/>
+        <enum value="0x1A216" name="GL_STRICT_DEPTHFUNC_HINT_PGI"/>
+        <enum value="0x1A217" name="GL_STRICT_LIGHTING_HINT_PGI"/>
+        <enum value="0x1A218" name="GL_STRICT_SCISSOR_HINT_PGI"/>
+        <enum value="0x1A219" name="GL_FULL_STIPPLE_HINT_PGI"/>
+        <enum value="0x1A220" name="GL_CLIP_NEAR_HINT_PGI"/>
+        <enum value="0x1A221" name="GL_CLIP_FAR_HINT_PGI"/>
+        <enum value="0x1A222" name="GL_WIDE_LINE_HINT_PGI"/>
+        <enum value="0x1A223" name="GL_BACK_NORMALS_HINT_PGI"/>
+        <enum value="0x1A22A" name="GL_VERTEX_DATA_HINT_PGI"/>
+        <enum value="0x1A22B" name="GL_VERTEX_CONSISTENT_HINT_PGI"/>
+        <enum value="0x1A22C" name="GL_MATERIAL_SIDE_HINT_PGI"/>
+        <enum value="0x1A22D" name="GL_MAX_VERTEX_HINT_PGI"/>
+    </enums>
+
+    <enums namespace="GL" start="108000" end="108999" vendor="ES" comment="Evans and Sutherland is out of the graphics hardware business"/>
+
+    <!-- SECTION: GL command definitions. -->
+    <commands namespace="GL">
+        <command>
+            <proto>void <name>glAccum</name></proto>
+            <param group="AccumOp"><ptype>GLenum</ptype> <name>op</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>value</name></param>
+            <glx type="render" opcode="137"/>
+        </command>
+        <command>
+            <proto>void <name>glAccumxOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLfixed</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glActiveProgramEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glActiveShaderProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glActiveShaderProgramEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glActiveStencilFaceEXT</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <glx type="render" opcode="4220"/>
+        </command>
+        <command>
+            <proto>void <name>glActiveTexture</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <glx type="render" opcode="197"/>
+        </command>
+        <command>
+            <proto>void <name>glActiveTextureARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <alias name="glActiveTexture"/>
+            <glx type="render" opcode="197"/>
+        </command>
+        <command>
+            <proto>void <name>glActiveVaryingNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFragmentOp1ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFragmentOp2ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFragmentOp3ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFunc</name></proto>
+            <param group="AlphaFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param><ptype>GLfloat</ptype> <name>ref</name></param>
+            <glx type="render" opcode="159"/>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFuncQCOM</name></proto>
+            <param><ptype>GLenum</ptype> <name>func</name></param>
+            <param><ptype>GLclampf</ptype> <name>ref</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFuncx</name></proto>
+            <param group="AlphaFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param><ptype>GLfixed</ptype> <name>ref</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaFuncxOES</name></proto>
+            <param group="AlphaFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>ref</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAlphaToCoverageDitherControlNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glApplyFramebufferAttachmentCMAAINTEL</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glApplyTextureEXT</name></proto>
+            <param group="LightTextureModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glAcquireKeyedMutexWin32EXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>key</name></param>
+            <param><ptype>GLuint</ptype> <name>timeout</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glAreProgramsResidentNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>programs</name></param>
+            <param group="Boolean" len="n"><ptype>GLboolean</ptype> *<name>residences</name></param>
+            <glx type="vendor" opcode="1293"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glAreTexturesResident</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="Boolean" len="n"><ptype>GLboolean</ptype> *<name>residences</name></param>
+            <glx type="single" opcode="143"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glAreTexturesResidentEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="Boolean" len="n"><ptype>GLboolean</ptype> *<name>residences</name></param>
+            <glx type="vendor" opcode="11"/>
+        </command>
+        <command>
+            <proto>void <name>glArrayElement</name></proto>
+            <param><ptype>GLint</ptype> <name>i</name></param>
+        </command>
+        <command>
+            <proto>void <name>glArrayElementEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>i</name></param>
+            <alias name="glArrayElement"/>
+        </command>
+        <command>
+            <proto>void <name>glArrayObjectATI</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ScalarType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAsyncMarkerSGIX</name></proto>
+            <param><ptype>GLuint</ptype> <name>marker</name></param>
+        </command>
+        <command>
+            <proto>void <name>glAttachObjectARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>containerObj</name></param>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+            <alias name="glAttachShader"/>
+        </command>
+        <command>
+            <proto>void <name>glAttachShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBegin</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="4"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginConditionalRender</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ConditionalRenderMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginConditionalRenderNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ConditionalRenderMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBeginConditionalRender"/>
+            <glx type="render" opcode="348"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginConditionalRenderNVX</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginFragmentShaderATI</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glBeginOcclusionQueryNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginPerfMonitorAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>monitor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginPerfQueryINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryHandle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginQuery</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <glx type="render" opcode="231"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginQueryARB</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glBeginQuery"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginQueryEXT</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginQueryIndexed</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBeginTransformFeedback</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>primitiveMode</name></param>
+            <glx type="render" opcode="357"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginTransformFeedbackEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>primitiveMode</name></param>
+            <alias name="glBeginTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginTransformFeedbackNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>primitiveMode</name></param>
+            <alias name="glBeginTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glBeginVertexShaderEXT</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glBeginVideoCaptureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindAttribLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindAttribLocationARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param>const <ptype>GLcharARB</ptype> *<name>name</name></param>
+            <alias name="glBindAttribLocation"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBuffer</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glBindBuffer"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferBase</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <glx type="render" opcode="356"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferBaseEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glBindBufferBase"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferBaseNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glBindBufferBase"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferOffsetEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferOffsetNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <alias name="glBindBufferOffsetEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferRange</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <glx type="render" opcode="355"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferRangeEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glBindBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBufferRangeNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glBindBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glBindBuffersBase</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindBuffersRange</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param len="count">const <ptype>GLintptr</ptype> *<name>offsets</name></param>
+            <param len="count">const <ptype>GLsizeiptr</ptype> *<name>sizes</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindFragDataLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindFragDataLocationEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+            <alias name="glBindFragDataLocation"/>
+        </command>
+        <command>
+            <proto>void <name>glBindFragDataLocationIndexed</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>colorNumber</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindFragDataLocationIndexedEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>colorNumber</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+            <alias name="glBindFragDataLocationIndexed"/>
+        </command>
+        <command>
+            <proto>void <name>glBindFragmentShaderATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindFramebuffer</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <glx type="render" opcode="236"/>
+        </command>
+        <command>
+            <proto>void <name>glBindFramebufferEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <glx type="render" opcode="4319"/>
+        </command>
+        <command>
+            <proto>void <name>glBindFramebufferOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindImageTexture</name></proto>
+            <param><ptype>GLuint</ptype> <name>unit</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>layered</name></param>
+            <param><ptype>GLint</ptype> <name>layer</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>format</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindImageTextureEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>layered</name></param>
+            <param><ptype>GLint</ptype> <name>layer</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+            <param><ptype>GLint</ptype> <name>format</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindImageTextures</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>textures</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glBindLightParameterEXT</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glBindMaterialParameterEXT</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindMultiTextureEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glBindParameterEXT</name></proto>
+            <param group="VertexShaderParameterEXT"><ptype>GLenum</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindProgramARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <glx type="render" opcode="4180"/>
+        </command>
+        <command>
+            <proto>void <name>glBindProgramNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glBindProgramARB"/>
+            <glx type="render" opcode="4180"/>
+        </command>
+        <command>
+            <proto>void <name>glBindProgramPipeline</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindProgramPipelineEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindRenderbuffer</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <glx type="render" opcode="235"/>
+        </command>
+        <command>
+            <proto>void <name>glBindRenderbufferEXT</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <glx type="render" opcode="4316"/>
+        </command>
+        <command>
+            <proto>void <name>glBindRenderbufferOES</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindSampler</name></proto>
+            <param><ptype>GLuint</ptype> <name>unit</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindSamplers</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>samplers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindShadingRateImageNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glBindTexGenParameterEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>unit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindTexture</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <glx type="render" opcode="4117"/>
+        </command>
+        <command>
+            <proto>void <name>glBindTextureEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <alias name="glBindTexture"/>
+            <glx type="render" opcode="4117"/>
+        </command>
+        <command>
+            <proto>void <name>glBindTextureUnit</name></proto>
+            <param><ptype>GLuint</ptype> <name>unit</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glBindTextureUnitParameterEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>unit</name></param>
+            <param group="VertexShaderTextureUnitParameter"><ptype>GLenum</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindTextures</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>textures</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindTransformFeedback</name></proto>
+            <param group="BindTransformFeedbackTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindTransformFeedbackNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexArray</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+            <glx type="render" opcode="350"/>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexArrayAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexArrayOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+            <alias name="glBindVertexArray"/>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexBuffers</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param len="count">const <ptype>GLintptr</ptype> *<name>offsets</name></param>
+            <param len="count">const <ptype>GLsizei</ptype> *<name>strides</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVertexShaderEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVideoCaptureStreamBufferNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>frame_region</name></param>
+            <param group="BufferOffsetARB"><ptype>GLintptrARB</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBindVideoCaptureStreamTextureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>frame_region</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3bEXT</name></proto>
+            <param><ptype>GLbyte</ptype> <name>bx</name></param>
+            <param><ptype>GLbyte</ptype> <name>by</name></param>
+            <param><ptype>GLbyte</ptype> <name>bz</name></param>
+            <vecequiv name="glBinormal3bvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3bvEXT</name></proto>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3dEXT</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>bx</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>by</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>bz</name></param>
+            <vecequiv name="glBinormal3dvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3dvEXT</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3fEXT</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>bx</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>by</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>bz</name></param>
+            <vecequiv name="glBinormal3fvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3fvEXT</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3iEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>bx</name></param>
+            <param><ptype>GLint</ptype> <name>by</name></param>
+            <param><ptype>GLint</ptype> <name>bz</name></param>
+            <vecequiv name="glBinormal3ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3ivEXT</name></proto>
+            <param len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3sEXT</name></proto>
+            <param><ptype>GLshort</ptype> <name>bx</name></param>
+            <param><ptype>GLshort</ptype> <name>by</name></param>
+            <param><ptype>GLshort</ptype> <name>bz</name></param>
+            <vecequiv name="glBinormal3svEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glBinormal3svEXT</name></proto>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBinormalPointerEXT</name></proto>
+            <param group="BinormalPointerTypeEXT"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBitmap</name></proto>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>xorig</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>yorig</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>xmove</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>ymove</name></param>
+            <param len="COMPSIZE(width,height)">const <ptype>GLubyte</ptype> *<name>bitmap</name></param>
+            <glx type="render" opcode="5"/>
+            <glx type="render" opcode="311" name="glBitmapPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glBitmapxOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLfixed</ptype> <name>xorig</name></param>
+            <param><ptype>GLfixed</ptype> <name>yorig</name></param>
+            <param><ptype>GLfixed</ptype> <name>xmove</name></param>
+            <param><ptype>GLfixed</ptype> <name>ymove</name></param>
+            <param len="COMPSIZE(width,height)">const <ptype>GLubyte</ptype> *<name>bitmap</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendBarrier</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glBlendBarrierKHR</name></proto>
+            <alias name="glBlendBarrier"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendBarrierNV</name></proto>
+            <alias name="glBlendBarrier"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendColor</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="4096"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendColorEXT</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>alpha</name></param>
+            <alias name="glBlendColor"/>
+            <glx type="render" opcode="4096"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendColorxOES</name></proto>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>red</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>green</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquation</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="4097"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationEXT</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBlendEquation"/>
+            <glx type="render" opcode="4097"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationIndexedAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBlendEquationi"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationOES</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparate</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <glx type="render" opcode="4228"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateEXT</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <alias name="glBlendEquationSeparate"/>
+            <glx type="render" opcode="4228"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateIndexedAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <alias name="glBlendEquationSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateOES</name></proto>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparatei</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateiARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <alias name="glBlendEquationSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <alias name="glBlendEquationSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationSeparateiOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeRGB</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>modeAlpha</name></param>
+            <alias name="glBlendEquationSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationi</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationiARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBlendEquationi"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBlendEquationi"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendEquationiOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendEquationModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glBlendEquationi"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFunc</name></proto>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactor</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactor</name></param>
+            <glx type="render" opcode="160"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncIndexedAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param><ptype>GLenum</ptype> <name>src</name></param>
+            <param><ptype>GLenum</ptype> <name>dst</name></param>
+            <alias name="glBlendFunci"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparate</name></proto>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorAlpha</name></param>
+            <glx type="render" opcode="4134"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateEXT</name></proto>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorAlpha</name></param>
+            <alias name="glBlendFuncSeparate"/>
+            <glx type="render" opcode="4134"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateINGR</name></proto>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>sfactorAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dfactorAlpha</name></param>
+            <alias name="glBlendFuncSeparate"/>
+            <glx type="render" opcode="4134"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateIndexedAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+            <alias name="glBlendFuncSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateOES</name></proto>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparatei</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateiARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+            <alias name="glBlendFuncSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+            <alias name="glBlendFuncSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFuncSeparateiOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstRGB</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>srcAlpha</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dstAlpha</name></param>
+            <alias name="glBlendFuncSeparatei"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFunci</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>src</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dst</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlendFunciARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>src</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dst</name></param>
+            <alias name="glBlendFunci"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFunciEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>src</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dst</name></param>
+            <alias name="glBlendFunci"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendFunciOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>buf</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>src</name></param>
+            <param group="BlendingFactor"><ptype>GLenum</ptype> <name>dst</name></param>
+            <alias name="glBlendFunci"/>
+        </command>
+        <command>
+            <proto>void <name>glBlendParameteriNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlitFramebuffer</name></proto>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param group="BlitFramebufferFilter"><ptype>GLenum</ptype> <name>filter</name></param>
+            <glx type="render" opcode="4330"/>
+        </command>
+        <command>
+            <proto>void <name>glBlitFramebufferANGLE</name></proto>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param group="BlitFramebufferFilter"><ptype>GLenum</ptype> <name>filter</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBlitFramebufferEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param group="BlitFramebufferFilter"><ptype>GLenum</ptype> <name>filter</name></param>
+            <alias name="glBlitFramebuffer"/>
+            <glx type="render" opcode="4330"/>
+        </command>
+        <command>
+            <proto>void <name>glBlitFramebufferNV</name></proto>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param group="BlitFramebufferFilter"><ptype>GLenum</ptype> <name>filter</name></param>
+            <alias name="glBlitFramebuffer"/>
+        </command>
+        <command>
+            <proto>void <name>glBlitNamedFramebuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>readFramebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>drawFramebuffer</name></param>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param group="BlitFramebufferFilter"><ptype>GLenum</ptype> <name>filter</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferAddressRangeNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>address</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferAttachMemoryNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferData</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferUsageARB"><ptype>GLenum</ptype> <name>usage</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferDataARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferSizeARB"><ptype>GLsizeiptrARB</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferUsageARB"><ptype>GLenum</ptype> <name>usage</name></param>
+            <alias name="glBufferData"/>
+        </command>
+        <command>
+            <proto>void <name>glBufferPageCommitmentARB</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferParameteriAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferStorage</name></proto>
+            <param group="BufferStorageTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferStorageEXT</name></proto>
+            <param group="BufferStorageTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <alias name="glBufferStorage"/>
+        </command>
+        <command>
+            <proto>void <name>glBufferStorageExternalEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLeglClientBufferEXT</ptype> <name>clientBuffer</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferStorageMemEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferSubData</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glBufferSubDataARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffsetARB"><ptype>GLintptrARB</ptype> <name>offset</name></param>
+            <param group="BufferSizeARB"><ptype>GLsizeiptrARB</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <alias name="glBufferSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glCallCommandListNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>list</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCallList</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <glx type="render" opcode="1"/>
+        </command>
+        <command>
+            <proto>void <name>glCallLists</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="ListNameType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(n,type)">const void *<name>lists</name></param>
+            <glx type="render" opcode="2"/>
+        </command>
+        <command>
+            <proto group="FramebufferStatus"><ptype>GLenum</ptype> <name>glCheckFramebufferStatus</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <glx type="vendor" opcode="1427"/>
+        </command>
+        <command>
+            <proto group="FramebufferStatus"><ptype>GLenum</ptype> <name>glCheckFramebufferStatusEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glCheckFramebufferStatus"/>
+            <glx type="vendor" opcode="1427"/>
+        </command>
+        <command>
+            <proto group="FramebufferStatus"><ptype>GLenum</ptype> <name>glCheckFramebufferStatusOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto group="FramebufferStatus"><ptype>GLenum</ptype> <name>glCheckNamedFramebufferStatus</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto group="FramebufferStatus"><ptype>GLenum</ptype> <name>glCheckNamedFramebufferStatusEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClampColor</name></proto>
+            <param group="ClampColorTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ClampColorModeARB"><ptype>GLenum</ptype> <name>clamp</name></param>
+            <glx type="render" opcode="234"/>
+        </command>
+        <command>
+            <proto>void <name>glClampColorARB</name></proto>
+            <param group="ClampColorTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ClampColorModeARB"><ptype>GLenum</ptype> <name>clamp</name></param>
+            <alias name="glClampColor"/>
+            <glx type="render" opcode="234"/>
+        </command>
+        <command>
+            <proto>void <name>glClear</name></proto>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <glx type="render" opcode="127"/>
+        </command>
+        <command>
+            <proto>void <name>glClearAccum</name></proto>
+            <param><ptype>GLfloat</ptype> <name>red</name></param>
+            <param><ptype>GLfloat</ptype> <name>green</name></param>
+            <param><ptype>GLfloat</ptype> <name>blue</name></param>
+            <param><ptype>GLfloat</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="128"/>
+        </command>
+        <command>
+            <proto>void <name>glClearAccumxOES</name></proto>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>red</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>green</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferData</name></proto>
+            <param group="BufferStorageTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferSubData</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferfi</name></proto>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param group="DrawBufferName"><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param><ptype>GLfloat</ptype> <name>depth</name></param>
+            <param><ptype>GLint</ptype> <name>stencil</name></param>
+            <glx type="render" opcode="360"/>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferfv</name></proto>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param group="DrawBufferName"><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param len="COMPSIZE(buffer)">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="361"/>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferiv</name></proto>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param group="DrawBufferName"><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param len="COMPSIZE(buffer)">const <ptype>GLint</ptype> *<name>value</name></param>
+            <glx type="render" opcode="362"/>
+        </command>
+        <command>
+            <proto>void <name>glClearBufferuiv</name></proto>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param group="DrawBufferName"><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param len="COMPSIZE(buffer)">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <glx type="render" opcode="363"/>
+        </command>
+        <command>
+            <proto>void <name>glClearColor</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="130"/>
+        </command>
+        <command>
+            <proto>void <name>glClearColorIiEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>red</name></param>
+            <param><ptype>GLint</ptype> <name>green</name></param>
+            <param><ptype>GLint</ptype> <name>blue</name></param>
+            <param><ptype>GLint</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="4292"/>
+        </command>
+        <command>
+            <proto>void <name>glClearColorIuiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>red</name></param>
+            <param><ptype>GLuint</ptype> <name>green</name></param>
+            <param><ptype>GLuint</ptype> <name>blue</name></param>
+            <param><ptype>GLuint</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="4293"/>
+        </command>
+        <command>
+            <proto>void <name>glClearColorx</name></proto>
+            <param><ptype>GLfixed</ptype> <name>red</name></param>
+            <param><ptype>GLfixed</ptype> <name>green</name></param>
+            <param><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearColorxOES</name></proto>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>red</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>green</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearDepth</name></proto>
+            <param><ptype>GLdouble</ptype> <name>depth</name></param>
+            <glx type="render" opcode="132"/>
+        </command>
+        <command>
+            <proto>void <name>glClearDepthdNV</name></proto>
+            <param><ptype>GLdouble</ptype> <name>depth</name></param>
+            <glx type="render" opcode="4284"/>
+        </command>
+        <command>
+            <proto>void <name>glClearDepthf</name></proto>
+            <param><ptype>GLfloat</ptype> <name>d</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearDepthfOES</name></proto>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>depth</name></param>
+            <glx type="render" opcode="4308"/>
+            <alias name="glClearDepthf"/>
+        </command>
+        <command>
+            <proto>void <name>glClearDepthx</name></proto>
+            <param><ptype>GLfixed</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearDepthxOES</name></proto>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearIndex</name></proto>
+            <param group="MaskedColorIndexValueF"><ptype>GLfloat</ptype> <name>c</name></param>
+            <glx type="render" opcode="129"/>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedBufferData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedBufferDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedBufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedBufferSubDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedFramebufferfi</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param><ptype>GLfloat</ptype> <name>depth</name></param>
+            <param><ptype>GLint</ptype> <name>stencil</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedFramebufferfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedFramebufferiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param>const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearNamedFramebufferuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="Buffer"><ptype>GLenum</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>drawbuffer</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearPixelLocalStorageuiEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>offset</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearStencil</name></proto>
+            <param group="StencilValue"><ptype>GLint</ptype> <name>s</name></param>
+            <glx type="render" opcode="131"/>
+        </command>
+        <command>
+            <proto>void <name>glClearTexImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearTexImageEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+            <alias name="glClearTexImage"/>
+        </command>
+        <command>
+            <proto>void <name>glClearTexSubImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClearTexSubImageEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type)">const void *<name>data</name></param>
+            <alias name="glClearTexSubImage"/>
+        </command>
+        <command>
+            <proto>void <name>glClientActiveTexture</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClientActiveTextureARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <alias name="glClientActiveTexture"/>
+        </command>
+        <command>
+            <proto>void <name>glClientActiveVertexStreamATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClientAttribDefaultEXT</name></proto>
+            <param group="ClientAttribMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto group="SyncStatus"><ptype>GLenum</ptype> <name>glClientWaitSync</name></proto>
+            <param group="sync"><ptype>GLsync</ptype> <name>sync</name></param>
+            <param group="SyncObjectMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <param><ptype>GLuint64</ptype> <name>timeout</name></param>
+        </command>
+        <command>
+            <proto group="SyncStatus"><ptype>GLenum</ptype> <name>glClientWaitSyncAPPLE</name></proto>
+            <param><ptype>GLsync</ptype> <name>sync</name></param>
+            <param group="SyncObjectMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <param><ptype>GLuint64</ptype> <name>timeout</name></param>
+            <alias name="glClientWaitSync"/>
+        </command>
+        <command>
+            <proto>void <name>glClipControl</name></proto>
+            <param group="ClipControlOrigin"><ptype>GLenum</ptype> <name>origin</name></param>
+            <param group="ClipControlDepth"><ptype>GLenum</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClipControlEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>origin</name></param>
+            <param><ptype>GLenum</ptype> <name>depth</name></param>
+            <alias name="glClipControl"/>
+        </command>
+        <command>
+            <proto>void <name>glClipPlane</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>equation</name></param>
+            <glx type="render" opcode="77"/>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanef</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>p</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>eqn</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanefIMG</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>p</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>eqn</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanefOES</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>equation</name></param>
+            <glx type="render" opcode="4312"/>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanex</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>equation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanexIMG</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>p</name></param>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>eqn</name></param>
+        </command>
+        <command>
+            <proto>void <name>glClipPlanexOES</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>equation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor3b</name></proto>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>red</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>green</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3bv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3bv</name></proto>
+            <param group="ColorB" len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="6"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3d</name></proto>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>red</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>green</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3dv</name></proto>
+            <param group="ColorD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="7"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3f</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor3fVertex3fvSUN</name></proto>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor3fv</name></proto>
+            <param group="ColorF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="8"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>red</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>green</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3hvNV</name></proto>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4244"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3i</name></proto>
+            <param group="ColorI"><ptype>GLint</ptype> <name>red</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>green</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3iv</name></proto>
+            <param group="ColorI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="9"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3s</name></proto>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>red</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>green</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3sv</name></proto>
+            <param group="ColorS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="10"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3ub</name></proto>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>red</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>green</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3ubv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3ubv</name></proto>
+            <param group="ColorUB" len="3">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="11"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3ui</name></proto>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>red</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>green</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3uiv</name></proto>
+            <param group="ColorUI" len="3">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="12"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3us</name></proto>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>red</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>green</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>blue</name></param>
+            <vecequiv name="glColor3usv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3usv</name></proto>
+            <param group="ColorUS" len="3">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="13"/>
+        </command>
+        <command>
+            <proto>void <name>glColor3xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>red</name></param>
+            <param><ptype>GLfixed</ptype> <name>green</name></param>
+            <param><ptype>GLfixed</ptype> <name>blue</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor3xvOES</name></proto>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>components</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4b</name></proto>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>red</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>green</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>blue</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4bv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4bv</name></proto>
+            <param group="ColorB" len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="14"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4d</name></proto>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>red</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>green</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>blue</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4dv</name></proto>
+            <param group="ColorD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="15"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4f</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4fNormal3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4fNormal3fVertex3fvSUN</name></proto>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4fv</name></proto>
+            <param group="ColorF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="16"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>red</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>green</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>blue</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4hvNV</name></proto>
+            <param group="Half16NV" len="4">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4245"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4i</name></proto>
+            <param group="ColorI"><ptype>GLint</ptype> <name>red</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>green</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>blue</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4iv</name></proto>
+            <param group="ColorI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="17"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4s</name></proto>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>red</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>green</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>blue</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4sv</name></proto>
+            <param group="ColorS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="18"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4ub</name></proto>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>red</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>green</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>blue</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4ubv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4ubVertex2fSUN</name></proto>
+            <param><ptype>GLubyte</ptype> <name>r</name></param>
+            <param><ptype>GLubyte</ptype> <name>g</name></param>
+            <param><ptype>GLubyte</ptype> <name>b</name></param>
+            <param><ptype>GLubyte</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4ubVertex2fvSUN</name></proto>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>c</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4ubVertex3fSUN</name></proto>
+            <param><ptype>GLubyte</ptype> <name>r</name></param>
+            <param><ptype>GLubyte</ptype> <name>g</name></param>
+            <param><ptype>GLubyte</ptype> <name>b</name></param>
+            <param><ptype>GLubyte</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4ubVertex3fvSUN</name></proto>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4ubv</name></proto>
+            <param group="ColorUB" len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="19"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4ui</name></proto>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>red</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>green</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>blue</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4uiv</name></proto>
+            <param group="ColorUI" len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="20"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4us</name></proto>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>red</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>green</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>blue</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>alpha</name></param>
+            <vecequiv name="glColor4usv"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4usv</name></proto>
+            <param group="ColorUS" len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="21"/>
+        </command>
+        <command>
+            <proto>void <name>glColor4x</name></proto>
+            <param><ptype>GLfixed</ptype> <name>red</name></param>
+            <param><ptype>GLfixed</ptype> <name>green</name></param>
+            <param><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>red</name></param>
+            <param><ptype>GLfixed</ptype> <name>green</name></param>
+            <param><ptype>GLfixed</ptype> <name>blue</name></param>
+            <param><ptype>GLfixed</ptype> <name>alpha</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColor4xvOES</name></proto>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>components</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorFormatNV</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorFragmentOp1ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMask</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorFragmentOp2ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMask</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorFragmentOp3ATI</name></proto>
+            <param group="FragmentOpATI"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMask</name></param>
+            <param><ptype>GLuint</ptype> <name>dstMod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2Mod</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3Rep</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3Mod</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorMask</name></proto>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>red</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>green</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>blue</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="134"/>
+        </command>
+        <command>
+            <proto>void <name>glColorMaskIndexedEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>r</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>g</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>b</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>a</name></param>
+            <alias name="glColorMaski"/>
+            <glx type="render" opcode="352"/>
+        </command>
+        <command>
+            <proto>void <name>glColorMaski</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>r</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>g</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>b</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>a</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorMaskiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>r</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>g</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>b</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>a</name></param>
+            <alias name="glColorMaski"/>
+        </command>
+        <command>
+            <proto>void <name>glColorMaskiOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>r</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>g</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>b</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>a</name></param>
+            <alias name="glColorMaski"/>
+        </command>
+        <command>
+            <proto>void <name>glColorMaterial</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="ColorMaterialParameter"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="78"/>
+        </command>
+        <command>
+            <proto>void <name>glColorP3ui</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorP3uiv</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorP4ui</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorP4uiv</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorPointer</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorPointerEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(size,type,stride,count)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorPointerListIBM</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorPointervINTEL</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="4">const void **<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glColorSubTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,count)">const void *<name>data</name></param>
+            <glx type="render" opcode="195"/>
+            <glx type="render" opcode="312" name="glColorSubTablePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glColorSubTableEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,count)">const void *<name>data</name></param>
+            <alias name="glColorSubTable"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>table</name></param>
+            <glx type="render" opcode="2053"/>
+            <glx type="render" opcode="313" name="glColorTablePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>table</name></param>
+            <alias name="glColorTable"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableParameterfv</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2054"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableParameterfvSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glColorTableParameterfv"/>
+            <glx type="render" opcode="2054"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableParameteriv</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2055"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableParameterivSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glColorTableParameteriv"/>
+            <glx type="render" opcode="2055"/>
+        </command>
+        <command>
+            <proto>void <name>glColorTableSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>table</name></param>
+            <alias name="glColorTable"/>
+            <glx type="render" opcode="2053"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerInputNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerRegisterNV"><ptype>GLenum</ptype> <name>input</name></param>
+            <param group="CombinerMappingNV"><ptype>GLenum</ptype> <name>mapping</name></param>
+            <param group="CombinerComponentUsageNV"><ptype>GLenum</ptype> <name>componentUsage</name></param>
+            <glx type="render" opcode="4140"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerOutputNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerRegisterNV"><ptype>GLenum</ptype> <name>abOutput</name></param>
+            <param group="CombinerRegisterNV"><ptype>GLenum</ptype> <name>cdOutput</name></param>
+            <param group="CombinerRegisterNV"><ptype>GLenum</ptype> <name>sumOutput</name></param>
+            <param group="CombinerScaleNV"><ptype>GLenum</ptype> <name>scale</name></param>
+            <param group="CombinerBiasNV"><ptype>GLenum</ptype> <name>bias</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>abDotProduct</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>cdDotProduct</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>muxSum</name></param>
+            <glx type="render" opcode="4141"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerParameterfNV</name></proto>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="4136"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerParameterfvNV</name></proto>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4137"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerParameteriNV</name></proto>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="4138"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerParameterivNV</name></proto>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4139"/>
+        </command>
+        <command>
+            <proto>void <name>glCombinerStageParameterfvNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCommandListSegmentsNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>list</name></param>
+            <param><ptype>GLuint</ptype> <name>segments</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompileCommandListNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>list</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompileShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompileShaderARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>shaderObj</name></param>
+            <alias name="glCompileShader"/>
+        </command>
+        <command>
+            <proto>void <name>glCompileShaderIncludeARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> *const*<name>path</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexImage3DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexSubImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexSubImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedMultiTexSubImage3DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="214"/>
+            <glx type="render" opcode="314" name="glCompressedTexImage1DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage1DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexImage1D"/>
+            <glx type="render" opcode="214"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="215"/>
+            <glx type="render" opcode="315" name="glCompressedTexImage2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage2DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexImage2D"/>
+            <glx type="render" opcode="215"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="216"/>
+            <glx type="render" opcode="316" name="glCompressedTexImage3DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage3DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexImage3D"/>
+            <glx type="render" opcode="216"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexImage3DOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="217"/>
+            <glx type="render" opcode="317" name="glCompressedTexSubImage1DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage1DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexSubImage1D"/>
+            <glx type="render" opcode="217"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="218"/>
+            <glx type="render" opcode="318" name="glCompressedTexSubImage2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage2DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexSubImage2D"/>
+            <glx type="render" opcode="218"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <glx type="render" opcode="219"/>
+            <glx type="render" opcode="319" name="glCompressedTexSubImage3DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage3DARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param group="CompressedTextureARB" len="imageSize">const void *<name>data</name></param>
+            <alias name="glCompressedTexSubImage3D"/>
+            <glx type="render" opcode="219"/>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTexSubImage3DOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureImage3DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage1D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage2D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage3D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCompressedTextureSubImage3DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>imageSize</name></param>
+            <param len="imageSize">const void *<name>bits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glConservativeRasterParameterfNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glConservativeRasterParameteriNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionFilter1D</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>image</name></param>
+            <glx type="render" opcode="4101"/>
+            <glx type="render" opcode="320" name="glConvolutionFilter1DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionFilter1DEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>image</name></param>
+            <alias name="glConvolutionFilter1D"/>
+            <glx type="render" opcode="4101"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionFilter2D</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>image</name></param>
+            <glx type="render" opcode="4102"/>
+            <glx type="render" opcode="321" name="glConvolutionFilter2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionFilter2DEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>image</name></param>
+            <alias name="glConvolutionFilter2D"/>
+            <glx type="render" opcode="4102"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterf</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>params</name></param>
+            <glx type="render" opcode="4103"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterfEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>params</name></param>
+            <alias name="glConvolutionParameterf"/>
+            <glx type="render" opcode="4103"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterfv</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4104"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterfvEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glConvolutionParameterfv"/>
+            <glx type="render" opcode="4104"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameteri</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>params</name></param>
+            <glx type="render" opcode="4105"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameteriEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>params</name></param>
+            <alias name="glConvolutionParameteri"/>
+            <glx type="render" opcode="4105"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameteriv</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4106"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterivEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glConvolutionParameteriv"/>
+            <glx type="render" opcode="4106"/>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterxOES</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glConvolutionParameterxvOES</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyBufferSubData</name></proto>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>readTarget</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>writeTarget</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>readOffset</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>writeOffset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <glx type="single" opcode="221"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyBufferSubDataNV</name></proto>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>readTarget</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>writeTarget</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>readOffset</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>writeOffset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glCopyBufferSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyColorSubTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>start</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <glx type="render" opcode="196"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyColorSubTableEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>start</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <alias name="glCopyColorSubTable"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyColorTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <glx type="render" opcode="2056"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyColorTableSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <alias name="glCopyColorTable"/>
+            <glx type="render" opcode="2056"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyConvolutionFilter1D</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <glx type="render" opcode="4107"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyConvolutionFilter1DEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <alias name="glCopyConvolutionFilter1D"/>
+            <glx type="render" opcode="4107"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyConvolutionFilter2D</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="4108"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyConvolutionFilter2DEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glCopyConvolutionFilter2D"/>
+            <glx type="render" opcode="4108"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyImageSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srcY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcWidth</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcHeight</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcDepth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyImageSubDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srcY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcWidth</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcHeight</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcDepth</name></param>
+            <alias name="glCopyImageSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyImageSubDataNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srcY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <glx type="render" opcode="4291"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyImageSubDataOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srcY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param group="CopyBufferSubDataTarget"><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcWidth</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcHeight</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcDepth</name></param>
+            <alias name="glCopyImageSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyMultiTexImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyMultiTexImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyMultiTexSubImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyMultiTexSubImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyMultiTexSubImage3DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyNamedBufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>readBuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>writeBuffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>readOffset</name></param>
+            <param><ptype>GLintptr</ptype> <name>writeOffset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>resultPath</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>srcPath</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyPixels</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelCopyType"><ptype>GLenum</ptype> <name>type</name></param>
+            <glx type="render" opcode="172"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <glx type="render" opcode="4119"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexImage1DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <alias name="glCopyTexImage1D"/>
+            <glx type="render" opcode="4119"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <glx type="render" opcode="4120"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexImage2DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <alias name="glCopyTexImage2D"/>
+            <glx type="render" opcode="4120"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <glx type="render" opcode="4121"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage1DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <alias name="glCopyTexSubImage1D"/>
+            <glx type="render" opcode="4121"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="4122"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage2DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glCopyTexSubImage2D"/>
+            <glx type="render" opcode="4122"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="4123"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage3DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glCopyTexSubImage3D"/>
+            <glx type="render" opcode="4123"/>
+        </command>
+        <command>
+            <proto>void <name>glCopyTexSubImage3DOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureLevelsAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>destinationTexture</name></param>
+            <param><ptype>GLuint</ptype> <name>sourceTexture</name></param>
+            <param><ptype>GLint</ptype> <name>sourceBaseLevel</name></param>
+            <param><ptype>GLsizei</ptype> <name>sourceLevelCount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage1D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage2D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage3D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCopyTextureSubImage3DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverFillPathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param group="PathCoverMode"><ptype>GLenum</ptype> <name>coverMode</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(numPaths,transformType)">const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverFillPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathCoverMode"><ptype>GLenum</ptype> <name>coverMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverStrokePathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param group="PathCoverMode"><ptype>GLenum</ptype> <name>coverMode</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(numPaths,transformType)">const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverStrokePathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathCoverMode"><ptype>GLenum</ptype> <name>coverMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverageMaskNV</name></proto>
+            <param><ptype>GLboolean</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverageModulationNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>components</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverageModulationTableNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCoverageOperationNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>operation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateBuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>buffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateCommandListsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>lists</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateFramebuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>framebuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateMemoryObjectsEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param><ptype>GLuint</ptype> *<name>memoryObjects</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreatePerfQueryINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryId</name></param>
+            <param><ptype>GLuint</ptype> *<name>queryHandle</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glCreateProgram</name></proto>
+        </command>
+        <command>
+            <proto group="handleARB"><ptype>GLhandleARB</ptype> <name>glCreateProgramObjectARB</name></proto>
+            <alias name="glCreateProgram"/>
+        </command>
+        <command>
+            <proto>void <name>glCreateProgramPipelines</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>pipelines</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateQueries</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateRenderbuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateSamplers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>samplers</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glCreateShader</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>type</name></param>
+        </command>
+        <command>
+            <proto group="handleARB"><ptype>GLhandleARB</ptype> <name>glCreateShaderObjectARB</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shaderType</name></param>
+            <alias name="glCreateShader"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glCreateShaderProgramEXT</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>string</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glCreateShaderProgramv</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> *const*<name>strings</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glCreateShaderProgramvEXT</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> **<name>strings</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateStatesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>states</name></param>
+        </command>
+        <command>
+            <proto group="sync"><ptype>GLsync</ptype> <name>glCreateSyncFromCLeventARB</name></proto>
+            <param group="cl_context"><ptype>struct _cl_context</ptype> *<name>context</name></param>
+            <param group="cl_event"><ptype>struct _cl_event</ptype> *<name>event</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateTextures</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>textures</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateTransformFeedbacks</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCreateVertexArrays</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>arrays</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCullFace</name></proto>
+            <param group="CullFaceMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="79"/>
+        </command>
+        <command>
+            <proto>void <name>glCullParameterdvEXT</name></proto>
+            <param group="CullParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCullParameterfvEXT</name></proto>
+            <param group="CullParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glCurrentPaletteMatrixARB</name></proto>
+            <param><ptype>GLint</ptype> <name>index</name></param>
+            <glx type="render" opcode="4329"/>
+        </command>
+        <command>
+            <proto>void <name>glCurrentPaletteMatrixOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>matrixpaletteindex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageCallback</name></proto>
+            <param><ptype>GLDEBUGPROC</ptype> <name>callback</name></param>
+            <param>const void *<name>userParam</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageCallbackAMD</name></proto>
+            <param><ptype>GLDEBUGPROCAMD</ptype> <name>callback</name></param>
+            <param>void *<name>userParam</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageCallbackARB</name></proto>
+            <param><ptype>GLDEBUGPROCARB</ptype> <name>callback</name></param>
+            <param len="COMPSIZE(callback)">const void *<name>userParam</name></param>
+            <alias name="glDebugMessageCallback"/>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageCallbackKHR</name></proto>
+            <param><ptype>GLDEBUGPROCKHR</ptype> <name>callback</name></param>
+            <param>const void *<name>userParam</name></param>
+            <alias name="glDebugMessageCallback"/>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageControl</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>enabled</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageControlARB</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>enabled</name></param>
+            <alias name="glDebugMessageControl"/>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageControlKHR</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <param><ptype>GLboolean</ptype> <name>enabled</name></param>
+            <alias name="glDebugMessageControl"/>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageEnableAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>category</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>enabled</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageInsert</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="COMPSIZE(buf,length)">const <ptype>GLchar</ptype> *<name>buf</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageInsertAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>category</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">const <ptype>GLchar</ptype> *<name>buf</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageInsertARB</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">const <ptype>GLchar</ptype> *<name>buf</name></param>
+            <alias name="glDebugMessageInsert"/>
+        </command>
+        <command>
+            <proto>void <name>glDebugMessageInsertKHR</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param group="DebugType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="DebugSeverity"><ptype>GLenum</ptype> <name>severity</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>buf</name></param>
+            <alias name="glDebugMessageInsert"/>
+        </command>
+        <command>
+            <proto>void <name>glDeformSGIX</name></proto>
+            <param group="FfdMaskSGIX"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <glx type="render" opcode="2075"/>
+        </command>
+        <command>
+            <proto>void <name>glDeformationMap3dSGIX</name></proto>
+            <param group="FfdTargetSGIX"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>w1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>w2</name></param>
+            <param><ptype>GLint</ptype> <name>wstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>worder</name></param>
+            <param group="CoordD" len="COMPSIZE(target,ustride,uorder,vstride,vorder,wstride,worder)">const <ptype>GLdouble</ptype> *<name>points</name></param>
+            <glx type="render" opcode="2073"/>
+        </command>
+        <command>
+            <proto>void <name>glDeformationMap3fSGIX</name></proto>
+            <param group="FfdTargetSGIX"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>w1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>w2</name></param>
+            <param><ptype>GLint</ptype> <name>wstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>worder</name></param>
+            <param group="CoordF" len="COMPSIZE(target,ustride,uorder,vstride,vorder,wstride,worder)">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="2074"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteAsyncMarkersSGIX</name></proto>
+            <param><ptype>GLuint</ptype> <name>marker</name></param>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteBuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteBuffersARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <alias name="glDeleteBuffers"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteCommandListsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>lists</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFencesAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="FenceNV" len="n">const <ptype>GLuint</ptype> *<name>fences</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFencesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="FenceNV" len="n">const <ptype>GLuint</ptype> *<name>fences</name></param>
+            <glx type="vendor" opcode="1276"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFragmentShaderATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFramebuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>framebuffers</name></param>
+            <glx type="render" opcode="4320"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFramebuffersEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>framebuffers</name></param>
+            <alias name="glDeleteFramebuffers"/>
+            <glx type="render" opcode="4320"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteFramebuffersOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>framebuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteLists</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+            <glx type="single" opcode="103"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteMemoryObjectsEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>memoryObjects</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteNamedStringARB</name></proto>
+            <param><ptype>GLint</ptype> <name>namelen</name></param>
+            <param len="namelen">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteNamesAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>num</name></param>
+            <param len="num">const <ptype>GLuint</ptype> *<name>names</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteObjectARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteOcclusionQueriesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeletePathsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeletePerfMonitorsAMD</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>monitors</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeletePerfQueryINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryHandle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <glx type="single" opcode="202"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteProgramPipelines</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>pipelines</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteProgramPipelinesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>pipelines</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteProgramsARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>programs</name></param>
+            <glx type="vendor" opcode="1294"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteProgramsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>programs</name></param>
+            <alias name="glDeleteProgramsARB"/>
+            <glx type="vendor" opcode="1294"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteQueries</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <glx type="single" opcode="161"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteQueriesARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <alias name="glDeleteQueries"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteQueriesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteQueryResourceTagNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLint</ptype> *<name>tagIds</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteRenderbuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+            <glx type="render" opcode="4317"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteRenderbuffersEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+            <alias name="glDeleteRenderbuffers"/>
+            <glx type="render" opcode="4317"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteRenderbuffersOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteSamplers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>samplers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteSemaphoresEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>semaphores</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <glx type="single" opcode="195"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteStatesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>states</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteSync</name></proto>
+            <param group="sync"><ptype>GLsync</ptype> <name>sync</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteSyncAPPLE</name></proto>
+            <param><ptype>GLsync</ptype> <name>sync</name></param>
+            <alias name="glDeleteSync"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteTextures</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <glx type="single" opcode="144"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteTexturesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <glx type="vendor" opcode="12"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteTransformFeedbacks</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDeleteTransformFeedbacksNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>ids</name></param>
+            <alias name="glDeleteTransformFeedbacks"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteVertexArrays</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>arrays</name></param>
+            <glx type="render" opcode="351"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteVertexArraysAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>arrays</name></param>
+            <alias name="glDeleteVertexArrays"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteVertexArraysOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>arrays</name></param>
+            <alias name="glDeleteVertexArrays"/>
+        </command>
+        <command>
+            <proto>void <name>glDeleteVertexShaderEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthBoundsEXT</name></proto>
+            <param group="ClampedFloat64"><ptype>GLclampd</ptype> <name>zmin</name></param>
+            <param group="ClampedFloat64"><ptype>GLclampd</ptype> <name>zmax</name></param>
+            <glx type="render" opcode="4229"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthBoundsdNV</name></proto>
+            <param><ptype>GLdouble</ptype> <name>zmin</name></param>
+            <param><ptype>GLdouble</ptype> <name>zmax</name></param>
+            <glx type="render" opcode="4285"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthFunc</name></proto>
+            <param group="DepthFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <glx type="render" opcode="164"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthMask</name></proto>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>flag</name></param>
+            <glx type="render" opcode="135"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthRange</name></proto>
+            <param><ptype>GLdouble</ptype> <name>n</name></param>
+            <param><ptype>GLdouble</ptype> <name>f</name></param>
+            <glx type="render" opcode="174"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeArrayfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeArrayfvOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeArrayv</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeIndexed</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>n</name></param>
+            <param><ptype>GLdouble</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeIndexedfNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangeIndexedfOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangedNV</name></proto>
+            <param><ptype>GLdouble</ptype> <name>zNear</name></param>
+            <param><ptype>GLdouble</ptype> <name>zFar</name></param>
+            <glx type="render" opcode="4283"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangef</name></proto>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangefOES</name></proto>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>n</name></param>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>f</name></param>
+            <glx type="render" opcode="4309"/>
+            <alias name="glDepthRangef"/>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangex</name></proto>
+            <param><ptype>GLfixed</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDepthRangexOES</name></proto>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>n</name></param>
+            <param group="ClampedFixed"><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDetachObjectARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>containerObj</name></param>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>attachedObj</name></param>
+            <alias name="glDetachShader"/>
+        </command>
+        <command>
+            <proto>void <name>glDetachShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDetailTexFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n*2">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="2051"/>
+        </command>
+        <command>
+            <proto>void <name>glDisable</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>cap</name></param>
+            <glx type="render" opcode="138"/>
+        </command>
+        <command>
+            <proto>void <name>glDisableClientState</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableClientStateIndexedEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableClientStateiEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableDriverControlQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>driverControl</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableIndexedEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glDisablei"/>
+            <glx type="render" opcode="354"/>
+        </command>
+        <command>
+            <proto>void <name>glDisableVariantClientStateEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexArrayAttrib</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexArrayAttribEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexArrayEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexAttribAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexAttribArray</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableVertexAttribArrayARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glDisableVertexAttribArray"/>
+        </command>
+        <command>
+            <proto>void <name>glDisablei</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDisableiEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glDisablei"/>
+        </command>
+        <command>
+            <proto>void <name>glDisableiNV</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glDisablei"/>
+        </command>
+        <command>
+            <proto>void <name>glDisableiOES</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glDisablei"/>
+        </command>
+        <command>
+            <proto>void <name>glDiscardFramebufferEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numAttachments</name></param>
+            <param len="numAttachments">const <ptype>GLenum</ptype> *<name>attachments</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDispatchCompute</name></proto>
+            <param><ptype>GLuint</ptype> <name>num_groups_x</name></param>
+            <param><ptype>GLuint</ptype> <name>num_groups_y</name></param>
+            <param><ptype>GLuint</ptype> <name>num_groups_z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDispatchComputeGroupSizeARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>num_groups_x</name></param>
+            <param><ptype>GLuint</ptype> <name>num_groups_y</name></param>
+            <param><ptype>GLuint</ptype> <name>num_groups_z</name></param>
+            <param><ptype>GLuint</ptype> <name>group_size_x</name></param>
+            <param><ptype>GLuint</ptype> <name>group_size_y</name></param>
+            <param><ptype>GLuint</ptype> <name>group_size_z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDispatchComputeIndirect</name></proto>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>indirect</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawArrays</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <glx type="render" opcode="193"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <alias name="glDrawArrays"/>
+            <glx type="render" opcode="4116"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysIndirect</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstanced</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstancedANGLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawArraysInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstancedARB</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawArraysInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstancedBaseInstance</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstancedBaseInstanceEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+            <alias name="glDrawArraysInstancedBaseInstance"/>
+        </command>
+        <command comment="primcount should be renamed to instanceCount for OpenGL ES">
+            <proto>void <name>glDrawArraysInstancedEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawArraysInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawArraysInstancedNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawArraysInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffer</name></proto>
+            <param group="DrawBufferMode"><ptype>GLenum</ptype> <name>buf</name></param>
+            <glx type="render" opcode="126"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="DrawBufferModeATI" len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+            <glx type="render" opcode="233"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffersARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="DrawBufferModeATI" len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+            <alias name="glDrawBuffers"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffersATI</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="DrawBufferModeATI" len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+            <alias name="glDrawBuffers"/>
+            <glx type="render" opcode="233"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffersEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+            <alias name="glDrawBuffers"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffersIndexedEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLenum</ptype> *<name>location</name></param>
+            <param len="n">const <ptype>GLint</ptype> *<name>indices</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawBuffersNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawCommandsAddressNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>primitiveMode</name></param>
+            <param>const <ptype>GLuint64</ptype> *<name>indirects</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>sizes</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawCommandsNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>primitiveMode</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param>const <ptype>GLintptr</ptype> *<name>indirects</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>sizes</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawCommandsStatesAddressNV</name></proto>
+            <param>const <ptype>GLuint64</ptype> *<name>indirects</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>sizes</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>states</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>fbos</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawCommandsStatesNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param>const <ptype>GLintptr</ptype> *<name>indirects</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>sizes</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>states</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>fbos</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementArrayAPPLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementArrayATI</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElements</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsBaseVertex</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsBaseVertexEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawElementsBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsBaseVertexOES</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawElementsBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsIndirect</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstanced</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedANGLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawElementsInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedARB</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawElementsInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseInstance</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="count">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseInstanceEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="count">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+            <alias name="glDrawElementsInstancedBaseInstance"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseVertex</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseVertexBaseInstance</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="count">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseVertexBaseInstanceEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="count">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <param><ptype>GLuint</ptype> <name>baseinstance</name></param>
+            <alias name="glDrawElementsInstancedBaseVertexBaseInstance"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseVertexEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawElementsInstancedBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedBaseVertexOES</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawElementsInstancedBaseVertex"/>
+        </command>
+        <command comment="primcount should be renamed to instanceCount for OpenGL ES">
+            <proto>void <name>glDrawElementsInstancedEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawElementsInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawElementsInstancedNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glDrawElementsInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawMeshArraysSUN</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawMeshTasksNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawMeshTasksIndirectNV</name></proto>
+            <param><ptype>GLintptr</ptype> <name>indirect</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawPixels</name></proto>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="173"/>
+            <glx type="render" opcode="322" name="glDrawPixelsPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementArrayAPPLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementArrayATI</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElements</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementsBaseVertex</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementsBaseVertexEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawRangeElementsBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementsBaseVertexOES</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <param><ptype>GLint</ptype> <name>basevertex</name></param>
+            <alias name="glDrawRangeElementsBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawRangeElementsEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(count,type)">const void *<name>indices</name></param>
+            <alias name="glDrawRangeElements"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexfOES</name></proto>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>width</name></param>
+            <param><ptype>GLfloat</ptype> <name>height</name></param>
+            <vecequiv name="glDrawTexfvOES"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexfvOES</name></proto>
+            <param len="5">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexiOES</name></proto>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>width</name></param>
+            <param><ptype>GLint</ptype> <name>height</name></param>
+            <vecequiv name="glDrawTexivOES"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexivOES</name></proto>
+            <param len="5">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexsOES</name></proto>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <param><ptype>GLshort</ptype> <name>width</name></param>
+            <param><ptype>GLshort</ptype> <name>height</name></param>
+            <vecequiv name="glDrawTexsvOES"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexsvOES</name></proto>
+            <param len="5">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTextureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param><ptype>GLfloat</ptype> <name>x0</name></param>
+            <param><ptype>GLfloat</ptype> <name>y0</name></param>
+            <param><ptype>GLfloat</ptype> <name>x1</name></param>
+            <param><ptype>GLfloat</ptype> <name>y1</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>s0</name></param>
+            <param><ptype>GLfloat</ptype> <name>t0</name></param>
+            <param><ptype>GLfloat</ptype> <name>s1</name></param>
+            <param><ptype>GLfloat</ptype> <name>t1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+            <param><ptype>GLfixed</ptype> <name>width</name></param>
+            <param><ptype>GLfixed</ptype> <name>height</name></param>
+            <vecequiv name="glDrawTexxvOES"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTexxvOES</name></proto>
+            <param len="5">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedback</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glDrawTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackInstanced</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackInstancedEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+            <alias name="glDrawTransformFeedbackInstanced"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glDrawTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackStream</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawTransformFeedbackStreamInstanced</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLsizei</ptype> <name>instancecount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEGLImageTargetRenderbufferStorageOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLeglImageOES</ptype> <name>image</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEGLImageTargetTexStorageEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLeglImageOES</ptype> <name>image</name></param>
+            <param>const <ptype>GLint</ptype>* <name>attrib_list</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEGLImageTargetTexture2DOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLeglImageOES</ptype> <name>image</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEGLImageTargetTextureStorageEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLeglImageOES</ptype> <name>image</name></param>
+            <param>const <ptype>GLint</ptype>* <name>attrib_list</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlag</name></proto>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>flag</name></param>
+            <vecequiv name="glEdgeFlagv"/>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlagFormatNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlagPointer</name></proto>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlagPointerEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean" len="COMPSIZE(stride,count)">const <ptype>GLboolean</ptype> *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlagPointerListIBM</name></proto>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param group="BooleanPointer" len="COMPSIZE(stride)">const <ptype>GLboolean</ptype> **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEdgeFlagv</name></proto>
+            <param group="Boolean" len="1">const <ptype>GLboolean</ptype> *<name>flag</name></param>
+            <glx type="render" opcode="22"/>
+        </command>
+        <command>
+            <proto>void <name>glElementPointerAPPLE</name></proto>
+            <param group="ElementPointerTypeATI"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(type)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glElementPointerATI</name></proto>
+            <param group="ElementPointerTypeATI"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(type)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnable</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>cap</name></param>
+            <glx type="render" opcode="139"/>
+        </command>
+        <command>
+            <proto>void <name>glEnableClientState</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableClientStateIndexedEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableClientStateiEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableDriverControlQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>driverControl</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableIndexedEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glEnablei"/>
+            <glx type="render" opcode="353"/>
+        </command>
+        <command>
+            <proto>void <name>glEnableVariantClientStateEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexArrayAttrib</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexArrayAttribEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexArrayEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexAttribAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexAttribArray</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableVertexAttribArrayARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glEnableVertexAttribArray"/>
+        </command>
+        <command>
+            <proto>void <name>glEnablei</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEnableiEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glEnablei"/>
+        </command>
+        <command>
+            <proto>void <name>glEnableiNV</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glEnablei"/>
+        </command>
+        <command>
+            <proto>void <name>glEnableiOES</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glEnablei"/>
+        </command>
+        <command>
+            <proto>void <name>glEnd</name></proto>
+            <glx type="render" opcode="23"/>
+        </command>
+        <command>
+            <proto>void <name>glEndConditionalRender</name></proto>
+            <glx type="render" opcode="349"/>
+        </command>
+        <command>
+            <proto>void <name>glEndConditionalRenderNV</name></proto>
+            <alias name="glEndConditionalRender"/>
+        </command>
+        <command>
+            <proto>void <name>glEndConditionalRenderNVX</name></proto>
+            <alias name="glEndConditionalRender"/>
+        </command>
+        <command>
+            <proto>void <name>glEndFragmentShaderATI</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glEndList</name></proto>
+            <glx type="single" opcode="102"/>
+        </command>
+        <command>
+            <proto>void <name>glEndOcclusionQueryNV</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glEndPerfMonitorAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>monitor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEndPerfQueryINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryHandle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEndQuery</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <glx type="render" opcode="232"/>
+        </command>
+        <command>
+            <proto>void <name>glEndQueryARB</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glEndQuery"/>
+        </command>
+        <command>
+            <proto>void <name>glEndQueryEXT</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEndQueryIndexed</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEndTilingQCOM</name></proto>
+            <param group="BufferBitQCOM"><ptype>GLbitfield</ptype> <name>preserveMask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEndTransformFeedback</name></proto>
+            <glx type="render" opcode="358"/>
+        </command>
+        <command>
+            <proto>void <name>glEndTransformFeedbackEXT</name></proto>
+            <alias name="glEndTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glEndTransformFeedbackNV</name></proto>
+            <alias name="glEndTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glEndVertexShaderEXT</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glEndVideoCaptureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u</name></param>
+            <vecequiv name="glEvalCoord1dv"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1dv</name></proto>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>u</name></param>
+            <glx type="render" opcode="151"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u</name></param>
+            <vecequiv name="glEvalCoord1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1fv</name></proto>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>u</name></param>
+            <glx type="render" opcode="152"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>u</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord1xvOES</name></proto>
+            <param len="1">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v</name></param>
+            <vecequiv name="glEvalCoord2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2dv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>u</name></param>
+            <glx type="render" opcode="153"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v</name></param>
+            <vecequiv name="glEvalCoord2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2fv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>u</name></param>
+            <glx type="render" opcode="154"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>u</name></param>
+            <param><ptype>GLfixed</ptype> <name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalCoord2xvOES</name></proto>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalMapsNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="EvalMapsModeNV"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glEvalMesh1</name></proto>
+            <param group="MeshMode1"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>i1</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>i2</name></param>
+            <glx type="render" opcode="155"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalMesh2</name></proto>
+            <param group="MeshMode2"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>i1</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>i2</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>j1</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>j2</name></param>
+            <glx type="render" opcode="157"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalPoint1</name></proto>
+            <param><ptype>GLint</ptype> <name>i</name></param>
+            <glx type="render" opcode="156"/>
+        </command>
+        <command>
+            <proto>void <name>glEvalPoint2</name></proto>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>i</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>j</name></param>
+            <glx type="render" opcode="158"/>
+        </command>
+        <command>
+            <proto>void <name>glEvaluateDepthValuesARB</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glExecuteProgramNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4181"/>
+        </command>
+        <command>
+            <proto>void <name>glExtGetBufferPointervQCOM</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param>void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetBuffersQCOM</name></proto>
+            <param len="maxBuffers"><ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param><ptype>GLint</ptype> <name>maxBuffers</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numBuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetFramebuffersQCOM</name></proto>
+            <param len="maxFramebuffers"><ptype>GLuint</ptype> *<name>framebuffers</name></param>
+            <param><ptype>GLint</ptype> <name>maxFramebuffers</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numFramebuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetProgramBinarySourceQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLchar</ptype> *<name>source</name></param>
+            <param><ptype>GLint</ptype> *<name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetProgramsQCOM</name></proto>
+            <param len="maxPrograms"><ptype>GLuint</ptype> *<name>programs</name></param>
+            <param><ptype>GLint</ptype> <name>maxPrograms</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numPrograms</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetRenderbuffersQCOM</name></proto>
+            <param len="maxRenderbuffers"><ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+            <param><ptype>GLint</ptype> <name>maxRenderbuffers</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numRenderbuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetShadersQCOM</name></proto>
+            <param len="maxShaders"><ptype>GLuint</ptype> *<name>shaders</name></param>
+            <param><ptype>GLint</ptype> <name>maxShaders</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numShaders</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetTexLevelParameterivQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>face</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetTexSubImageQCOM</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>void *<name>texels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtGetTexturesQCOM</name></proto>
+            <param><ptype>GLuint</ptype> *<name>textures</name></param>
+            <param><ptype>GLint</ptype> <name>maxTextures</name></param>
+            <param><ptype>GLint</ptype> *<name>numTextures</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glExtIsProgramBinaryQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtTexObjectStateOverrideiQCOM</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glExtractComponentEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>src</name></param>
+            <param><ptype>GLuint</ptype> <name>num</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFeedbackBuffer</name></proto>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param group="FeedbackType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="FeedbackElement" len="size"><ptype>GLfloat</ptype> *<name>buffer</name></param>
+            <glx type="single" opcode="105"/>
+        </command>
+        <command>
+            <proto>void <name>glFeedbackBufferxOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="n">const <ptype>GLfixed</ptype> *<name>buffer</name></param>
+        </command>
+        <command>
+            <proto group="sync"><ptype>GLsync</ptype> <name>glFenceSync</name></proto>
+            <param group="SyncCondition"><ptype>GLenum</ptype> <name>condition</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLsync</ptype> <name>glFenceSyncAPPLE</name></proto>
+            <param group="SyncCondition"><ptype>GLenum</ptype> <name>condition</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <alias name="glFenceSync"/>
+        </command>
+        <command>
+            <proto>void <name>glFinalCombinerInputNV</name></proto>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerRegisterNV"><ptype>GLenum</ptype> <name>input</name></param>
+            <param group="CombinerMappingNV"><ptype>GLenum</ptype> <name>mapping</name></param>
+            <param group="CombinerComponentUsageNV"><ptype>GLenum</ptype> <name>componentUsage</name></param>
+            <glx type="render" opcode="4142"/>
+        </command>
+        <command>
+            <proto>void <name>glFinish</name></proto>
+            <glx type="single" opcode="108"/>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glFinishAsyncSGIX</name></proto>
+            <param len="1"><ptype>GLuint</ptype> *<name>markerp</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFinishFenceAPPLE</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFinishFenceNV</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+            <glx type="vendor" opcode="1312"/>
+        </command>
+        <command>
+            <proto>void <name>glFinishObjectAPPLE</name></proto>
+            <param group="ObjectTypeAPPLE"><ptype>GLenum</ptype> <name>object</name></param>
+            <param><ptype>GLint</ptype> <name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFinishTextureSUNX</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glFlush</name></proto>
+            <glx type="single" opcode="142"/>
+        </command>
+        <command>
+            <proto>void <name>glFlushMappedBufferRange</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushMappedBufferRangeAPPLE</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glFlushMappedBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glFlushMappedBufferRangeEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>length</name></param>
+            <alias name="glFlushMappedBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glFlushMappedNamedBufferRange</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushMappedNamedBufferRangeEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushPixelDataRangeNV</name></proto>
+            <param group="PixelDataRangeTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushRasterSGIX</name></proto>
+            <glx type="vendor" opcode="4105"/>
+        </command>
+        <command>
+            <proto>void <name>glFlushStaticDataIBM</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushVertexArrayRangeAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFlushVertexArrayRangeNV</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordFormatNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordPointer</name></proto>
+            <param group="FogPointerTypeEXT"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordPointerEXT</name></proto>
+            <param group="FogPointerTypeEXT"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+            <alias name="glFogCoordPointer"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordPointerListIBM</name></proto>
+            <param group="FogPointerTypeIBM"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordd</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>coord</name></param>
+            <vecequiv name="glFogCoorddv"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoorddEXT</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>coord</name></param>
+            <alias name="glFogCoordd"/>
+            <vecequiv name="glFogCoorddvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoorddv</name></proto>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>coord</name></param>
+            <glx type="render" opcode="4125"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoorddvEXT</name></proto>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>coord</name></param>
+            <alias name="glFogCoorddv"/>
+            <glx type="render" opcode="4125"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordf</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>coord</name></param>
+            <vecequiv name="glFogCoordfv"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordfEXT</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>coord</name></param>
+            <alias name="glFogCoordf"/>
+            <vecequiv name="glFogCoordfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordfv</name></proto>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>coord</name></param>
+            <glx type="render" opcode="4124"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordfvEXT</name></proto>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>coord</name></param>
+            <alias name="glFogCoordfv"/>
+            <glx type="render" opcode="4124"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordhNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>fog</name></param>
+            <vecequiv name="glFogCoordhvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glFogCoordhvNV</name></proto>
+            <param group="Half16NV" len="1">const <ptype>GLhalfNV</ptype> *<name>fog</name></param>
+            <glx type="render" opcode="4254"/>
+        </command>
+        <command>
+            <proto>void <name>glFogFuncSGIS</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n*2">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="2067"/>
+        </command>
+        <command>
+            <proto>void <name>glFogf</name></proto>
+            <param group="FogParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="80"/>
+        </command>
+        <command>
+            <proto>void <name>glFogfv</name></proto>
+            <param group="FogParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="81"/>
+        </command>
+        <command>
+            <proto>void <name>glFogi</name></proto>
+            <param group="FogParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="82"/>
+        </command>
+        <command>
+            <proto>void <name>glFogiv</name></proto>
+            <param group="FogParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="83"/>
+        </command>
+        <command>
+            <proto>void <name>glFogx</name></proto>
+            <param group="FogPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogxOES</name></proto>
+            <param group="FogPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogxv</name></proto>
+            <param group="FogPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFogxvOES</name></proto>
+            <param group="FogPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentColorMaterialSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentCoverageColorNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightModelfSGIX</name></proto>
+            <param group="FragmentLightModelParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightModelfvSGIX</name></proto>
+            <param group="FragmentLightModelParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightModeliSGIX</name></proto>
+            <param group="FragmentLightModelParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightModelivSGIX</name></proto>
+            <param group="FragmentLightModelParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightfSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightfvSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightiSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentLightivSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentMaterialfSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentMaterialfvSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentMaterialiSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFragmentMaterialivSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFrameTerminatorGREMEDY</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glFrameZoomSGIX</name></proto>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>factor</name></param>
+            <glx type="render" opcode="2072"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferDrawBufferEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="DrawBufferMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferDrawBuffersEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="DrawBufferMode" len="n">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferFetchBarrierEXT</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferFetchBarrierQCOM</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferFoveationConfigQCOM</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>numLayers</name></param>
+            <param><ptype>GLuint</ptype> <name>focalPointsPerLayer</name></param>
+            <param><ptype>GLuint</ptype> <name>requestedFeatures</name></param>
+            <param len="1"><ptype>GLuint</ptype> *<name>providedFeatures</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferFoveationParametersQCOM</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>layer</name></param>
+            <param><ptype>GLuint</ptype> <name>focalPoint</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>focalX</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>focalY</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>gainX</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>gainY</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>foveaArea</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferParameteri</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferPixelLocalStorageSizeEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferReadBufferEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="ReadBufferMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferRenderbuffer</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>renderbuffertarget</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <glx type="render" opcode="4324"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferRenderbufferEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>renderbuffertarget</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <alias name="glFramebufferRenderbuffer"/>
+            <glx type="render" opcode="4324"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferRenderbufferOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>renderbuffertarget</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferSampleLocationsfvARB</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferSampleLocationsfvNV</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferSamplePositionsfvAMD</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>numsamples</name></param>
+            <param><ptype>GLuint</ptype> <name>pixelindex</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture1D</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <glx type="render" opcode="4321"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture1DEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <alias name="glFramebufferTexture1D"/>
+            <glx type="render" opcode="4321"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2D</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <glx type="render" opcode="4322"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2DEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <alias name="glFramebufferTexture2D"/>
+            <glx type="render" opcode="4322"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2DDownsampleIMG</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xscale</name></param>
+            <param><ptype>GLint</ptype> <name>yscale</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2DMultisampleEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2DMultisampleIMG</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture2DOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture3D</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <glx type="render" opcode="4323"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture3DEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <alias name="glFramebufferTexture3D"/>
+            <glx type="render" opcode="4323"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTexture3DOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureARB</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <alias name="glFramebufferTexture"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <alias name="glFramebufferTexture"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureFaceARB</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>face</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureFaceEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>face</name></param>
+            <alias name="glFramebufferTextureFaceARB"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureLayer</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>layer</name></param>
+            <glx type="render" opcode="237"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureLayerARB</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>layer</name></param>
+            <alias name="glFramebufferTextureLayer"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureLayerEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>layer</name></param>
+            <alias name="glFramebufferTextureLayer"/>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureLayerDownsampleIMG</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>layer</name></param>
+            <param><ptype>GLint</ptype> <name>xscale</name></param>
+            <param><ptype>GLint</ptype> <name>yscale</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureMultisampleMultiviewOVR</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLint</ptype> <name>baseViewIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>numViews</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureMultiviewOVR</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>baseViewIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>numViews</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFramebufferTextureOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <alias name="glFramebufferTexture"/>
+        </command>
+        <command>
+            <proto>void <name>glFreeObjectBufferATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFrontFace</name></proto>
+            <param group="FrontFaceDirection"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="84"/>
+        </command>
+        <command>
+            <proto>void <name>glFrustum</name></proto>
+            <param><ptype>GLdouble</ptype> <name>left</name></param>
+            <param><ptype>GLdouble</ptype> <name>right</name></param>
+            <param><ptype>GLdouble</ptype> <name>bottom</name></param>
+            <param><ptype>GLdouble</ptype> <name>top</name></param>
+            <param><ptype>GLdouble</ptype> <name>zNear</name></param>
+            <param><ptype>GLdouble</ptype> <name>zFar</name></param>
+            <glx type="render" opcode="175"/>
+        </command>
+        <command>
+            <proto>void <name>glFrustumf</name></proto>
+            <param><ptype>GLfloat</ptype> <name>l</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFrustumfOES</name></proto>
+            <param><ptype>GLfloat</ptype> <name>l</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+            <glx type="render" opcode="4310"/>
+        </command>
+        <command>
+            <proto>void <name>glFrustumx</name></proto>
+            <param><ptype>GLfixed</ptype> <name>l</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>b</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glFrustumxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>l</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>b</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGenAsyncMarkersSGIX</name></proto>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenBuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>buffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenBuffersARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>buffers</name></param>
+            <alias name="glGenBuffers"/>
+        </command>
+        <command>
+            <proto>void <name>glGenFencesAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="FenceNV" len="n"><ptype>GLuint</ptype> *<name>fences</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenFencesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="FenceNV" len="n"><ptype>GLuint</ptype> *<name>fences</name></param>
+            <glx type="vendor" opcode="1277"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGenFragmentShadersATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenFramebuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>framebuffers</name></param>
+            <glx type="vendor" opcode="1426"/>
+        </command>
+        <command>
+            <proto>void <name>glGenFramebuffersEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>framebuffers</name></param>
+            <alias name="glGenFramebuffers"/>
+            <glx type="vendor" opcode="1426"/>
+        </command>
+        <command>
+            <proto>void <name>glGenFramebuffersOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>framebuffers</name></param>
+        </command>
+        <command>
+            <proto group="List"><ptype>GLuint</ptype> <name>glGenLists</name></proto>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+            <glx type="single" opcode="104"/>
+        </command>
+        <command>
+            <proto>void <name>glGenNamesAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>num</name></param>
+            <param len="num"><ptype>GLuint</ptype> *<name>names</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenOcclusionQueriesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto group="Path"><ptype>GLuint</ptype> <name>glGenPathsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenPerfMonitorsAMD</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>monitors</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenProgramPipelines</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>pipelines</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenProgramPipelinesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>pipelines</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenProgramsARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>programs</name></param>
+            <glx type="vendor" opcode="1295"/>
+        </command>
+        <command>
+            <proto>void <name>glGenProgramsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>programs</name></param>
+            <alias name="glGenProgramsARB"/>
+            <glx type="vendor" opcode="1295"/>
+        </command>
+        <command>
+            <proto>void <name>glGenQueries</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <glx type="single" opcode="162"/>
+        </command>
+        <command>
+            <proto>void <name>glGenQueriesARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <alias name="glGenQueries"/>
+        </command>
+        <command>
+            <proto>void <name>glGenQueriesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenQueryResourceTagNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLint</ptype> *<name>tagIds</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenRenderbuffers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+            <glx type="vendor" opcode="1423"/>
+        </command>
+        <command>
+            <proto>void <name>glGenRenderbuffersEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+            <alias name="glGenRenderbuffers"/>
+            <glx type="vendor" opcode="1423"/>
+        </command>
+        <command>
+            <proto>void <name>glGenRenderbuffersOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>renderbuffers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenSamplers</name></proto>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count"><ptype>GLuint</ptype> *<name>samplers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenSemaphoresEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>semaphores</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGenSymbolsEXT</name></proto>
+            <param group="DataTypeEXT"><ptype>GLenum</ptype> <name>datatype</name></param>
+            <param group="VertexShaderStorageTypeEXT"><ptype>GLenum</ptype> <name>storagetype</name></param>
+            <param group="ParameterRangeEXT"><ptype>GLenum</ptype> <name>range</name></param>
+            <param><ptype>GLuint</ptype> <name>components</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenTextures</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n"><ptype>GLuint</ptype> *<name>textures</name></param>
+            <glx type="single" opcode="145"/>
+        </command>
+        <command>
+            <proto>void <name>glGenTexturesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n"><ptype>GLuint</ptype> *<name>textures</name></param>
+            <glx type="vendor" opcode="13"/>
+        </command>
+        <command>
+            <proto>void <name>glGenTransformFeedbacks</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenTransformFeedbacksNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <alias name="glGenTransformFeedbacks"/>
+        </command>
+        <command>
+            <proto>void <name>glGenVertexArrays</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>arrays</name></param>
+            <glx type="single" opcode="206"/>
+        </command>
+        <command>
+            <proto>void <name>glGenVertexArraysAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>arrays</name></param>
+            <alias name="glGenVertexArrays"/>
+        </command>
+        <command>
+            <proto>void <name>glGenVertexArraysOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n"><ptype>GLuint</ptype> *<name>arrays</name></param>
+            <alias name="glGenVertexArrays"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGenVertexShadersEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>range</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenerateMipmap</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <glx type="render" opcode="4325"/>
+        </command>
+        <command>
+            <proto>void <name>glGenerateMipmapEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glGenerateMipmap"/>
+            <glx type="render" opcode="4325"/>
+        </command>
+        <command>
+            <proto>void <name>glGenerateMipmapOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenerateMultiTexMipmapEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenerateTextureMipmap</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGenerateTextureMipmapEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveAtomicCounterBufferiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>bufferIndex</name></param>
+            <param group="AtomicCounterBufferPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveAttrib</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>size</name></param>
+            <param group="AttributeType" len="1"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveAttribARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxLength</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>size</name></param>
+            <param len="1" group="AttributeType"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="maxLength"><ptype>GLcharARB</ptype> *<name>name</name></param>
+            <alias name="glGetActiveAttrib"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveSubroutineName</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufsize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufsize"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveSubroutineUniformName</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufsize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufsize"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveSubroutineUniformiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="SubroutineParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniform</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>size</name></param>
+            <param len="1" group="AttributeType"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniformARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxLength</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>size</name></param>
+            <param len="1" group="AttributeType"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="maxLength"><ptype>GLcharARB</ptype> *<name>name</name></param>
+            <alias name="glGetActiveUniform"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniformBlockName</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>uniformBlockIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>uniformBlockName</name></param>
+            <glx type="single" opcode="220"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniformBlockiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>uniformBlockIndex</name></param>
+            <param group="UniformBlockPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(program,uniformBlockIndex,pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="219"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniformName</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>uniformIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>uniformName</name></param>
+            <glx type="single" opcode="217"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveUniformsiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>uniformCount</name></param>
+            <param len="uniformCount">const <ptype>GLuint</ptype> *<name>uniformIndices</name></param>
+            <param group="UniformPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(uniformCount,pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="216"/>
+        </command>
+        <command>
+            <proto>void <name>glGetActiveVaryingNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>size</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="COMPSIZE(program,index,bufSize)"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetArrayObjectfvATI</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetArrayObjectivATI</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>array</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command comment="Could be an alias of glGetAttachedShaders except that GLhandleARB is different on MacOS X">
+            <proto>void <name>glGetAttachedObjectsARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>containerObj</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxCount</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="handleARB" len="maxCount"><ptype>GLhandleARB</ptype> *<name>obj</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetAttachedShaders</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxCount</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>count</name></param>
+            <param len="maxCount"><ptype>GLuint</ptype> *<name>shaders</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetAttribLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetAttribLocationARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param>const <ptype>GLcharARB</ptype> *<name>name</name></param>
+            <alias name="glGetAttribLocation"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBooleanIndexedvEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean" len="COMPSIZE(target)"><ptype>GLboolean</ptype> *<name>data</name></param>
+            <alias name="glGetBooleani_v"/>
+            <glx type="single" opcode="210"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBooleani_v</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Boolean" len="COMPSIZE(target)"><ptype>GLboolean</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBooleanv</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="Boolean" len="COMPSIZE(pname)"><ptype>GLboolean</ptype> *<name>data</name></param>
+            <glx type="single" opcode="112"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferParameteri64v</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferParameteriv</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferParameterivARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetBufferParameteriv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferParameterui64vNV</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferPointerv</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPointerNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferPointervARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPointerNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+            <alias name="glGetBufferPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferPointervOES</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferPointerNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>void **<name>params</name></param>
+            <alias name="glGetBufferPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferSubData</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetBufferSubDataARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffsetARB"><ptype>GLintptrARB</ptype> <name>offset</name></param>
+            <param group="BufferSizeARB"><ptype>GLsizeiptrARB</ptype> <name>size</name></param>
+            <param len="size">void *<name>data</name></param>
+            <alias name="glGetBufferSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glGetClipPlane</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>equation</name></param>
+            <glx type="single" opcode="113"/>
+        </command>
+        <command>
+            <proto>void <name>glGetClipPlanef</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>equation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetClipPlanefOES</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>equation</name></param>
+            <glx type="vendor" opcode="1421"/>
+        </command>
+        <command>
+            <proto>void <name>glGetClipPlanex</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4"><ptype>GLfixed</ptype> *<name>equation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetClipPlanexOES</name></proto>
+            <param group="ClipPlaneName"><ptype>GLenum</ptype> <name>plane</name></param>
+            <param len="4"><ptype>GLfixed</ptype> *<name>equation</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>table</name></param>
+            <glx type="single" opcode="147"/>
+            <glx type="render" opcode="334" name="glGetColorTablePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>data</name></param>
+            <alias name="glGetColorTable"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameterfv</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="148"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameterfvEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetColorTableParameterfv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameterfvSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="4099"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameteriv</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="149"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameterivEXT</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetColorTableParameteriv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableParameterivSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetColorTableParameterPNameSGI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="4100"/>
+        </command>
+        <command>
+            <proto>void <name>glGetColorTableSGI</name></proto>
+            <param group="ColorTableTargetSGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>table</name></param>
+            <glx type="vendor" opcode="4098"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCombinerInputParameterfvNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1270"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCombinerInputParameterivNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1271"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCombinerOutputParameterfvNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1272"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCombinerOutputParameterivNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerPortionNV"><ptype>GLenum</ptype> <name>portion</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1273"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCombinerStageParameterfvNV</name></proto>
+            <param group="CombinerStageNV"><ptype>GLenum</ptype> <name>stage</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetCommandHeaderNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>tokenID</name></param>
+            <param><ptype>GLuint</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedMultiTexImageEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>lod</name></param>
+            <param len="COMPSIZE(target,lod)">void *<name>img</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedTexImage</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CompressedTextureARB" len="COMPSIZE(target,level)">void *<name>img</name></param>
+            <glx type="single" opcode="160"/>
+            <glx type="render" opcode="335" name="glGetCompressedTexImagePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedTexImageARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CompressedTextureARB" len="COMPSIZE(target,level)">void *<name>img</name></param>
+            <alias name="glGetCompressedTexImage"/>
+            <glx type="single" opcode="160"/>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedTextureImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedTextureImageEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>lod</name></param>
+            <param len="COMPSIZE(target,lod)">void *<name>img</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetCompressedTextureSubImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionFilter</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>image</name></param>
+            <glx type="single" opcode="150"/>
+            <glx type="render" opcode="336" name="glGetConvolutionFilterPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionFilterEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>image</name></param>
+            <glx type="vendor" opcode="1"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionParameterfv</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="151"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionParameterfvEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="2"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionParameteriv</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="152"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionParameterivEXT</name></proto>
+            <param group="ConvolutionTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ConvolutionParameterEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="3"/>
+        </command>
+        <command>
+            <proto>void <name>glGetConvolutionParameterxvOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetCoverageModulationTableNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>bufsize</name></param>
+            <param><ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetDebugMessageLog</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="count" group="DebugSource"><ptype>GLenum</ptype> *<name>sources</name></param>
+            <param len="count" group="DebugType"><ptype>GLenum</ptype> *<name>types</name></param>
+            <param len="count"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <param len="count" group="DebugSeverity"><ptype>GLenum</ptype> *<name>severities</name></param>
+            <param len="count"><ptype>GLsizei</ptype> *<name>lengths</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>messageLog</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetDebugMessageLogAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufsize</name></param>
+            <param len="count"><ptype>GLenum</ptype> *<name>categories</name></param>
+            <param len="count" group="DebugSeverity"><ptype>GLuint</ptype> *<name>severities</name></param>
+            <param len="count"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <param len="count"><ptype>GLsizei</ptype> *<name>lengths</name></param>
+            <param len="bufsize"><ptype>GLchar</ptype> *<name>message</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetDebugMessageLogARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="count" group="DebugSource"><ptype>GLenum</ptype> *<name>sources</name></param>
+            <param len="count" group="DebugType"><ptype>GLenum</ptype> *<name>types</name></param>
+            <param len="count"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <param len="count" group="DebugSeverity"><ptype>GLenum</ptype> *<name>severities</name></param>
+            <param len="count"><ptype>GLsizei</ptype> *<name>lengths</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>messageLog</name></param>
+            <alias name="glGetDebugMessageLog"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetDebugMessageLogKHR</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="count" group="DebugSource"><ptype>GLenum</ptype> *<name>sources</name></param>
+            <param len="count" group="DebugType"><ptype>GLenum</ptype> *<name>types</name></param>
+            <param len="count"><ptype>GLuint</ptype> *<name>ids</name></param>
+            <param len="count" group="DebugSeverity"><ptype>GLenum</ptype> *<name>severities</name></param>
+            <param len="count"><ptype>GLsizei</ptype> *<name>lengths</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>messageLog</name></param>
+            <alias name="glGetDebugMessageLog"/>
+        </command>
+        <command>
+            <proto>void <name>glGetDetailTexFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="vendor" opcode="4096"/>
+        </command>
+        <command>
+            <proto>void <name>glGetDoubleIndexedvEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLdouble</ptype> *<name>data</name></param>
+            <alias name="glGetDoublei_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetDoublei_v</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLdouble</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetDoublei_vEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <alias name="glGetDoublei_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetDoublev</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>data</name></param>
+            <glx type="single" opcode="114"/>
+        </command>
+        <command>
+            <proto>void <name>glGetDriverControlStringQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>driverControl</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>driverControlString</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetDriverControlsQCOM</name></proto>
+            <param><ptype>GLint</ptype> *<name>num</name></param>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param len="size"><ptype>GLuint</ptype> *<name>driverControls</name></param>
+        </command>
+        <command>
+            <proto group="ErrorCode"><ptype>GLenum</ptype> <name>glGetError</name></proto>
+            <glx type="single" opcode="115"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFenceivNV</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+            <param group="FenceParameterNameNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1280"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFinalCombinerInputParameterfvNV</name></proto>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1274"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFinalCombinerInputParameterivNV</name></proto>
+            <param group="CombinerVariableNV"><ptype>GLenum</ptype> <name>variable</name></param>
+            <param group="CombinerParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1275"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFirstPerfQueryIdINTEL</name></proto>
+            <param><ptype>GLuint</ptype> *<name>queryId</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFixedv</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFixedvOES</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFloatIndexedvEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>data</name></param>
+            <alias name="glGetFloati_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFloati_v</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFloati_vEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetFloati_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFloati_vNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>data</name></param>
+            <alias name="glGetFloati_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFloati_vOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>data</name></param>
+            <alias name="glGetFloati_v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFloatv</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>data</name></param>
+            <glx type="single" opcode="116"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFogFuncSGIS</name></proto>
+            <param len="COMPSIZE()"><ptype>GLfloat</ptype> *<name>points</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetFragDataIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetFragDataIndexEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+            <alias name="glGetFragDataIndex"/>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetFragDataLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetFragDataLocationEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+            <alias name="glGetFragDataLocation"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFragmentLightfvSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFragmentLightivSGIX</name></proto>
+            <param group="FragmentLightNameSGIX"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="FragmentLightParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFragmentMaterialfvSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFragmentMaterialivSGIX</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferAttachmentParameteriv</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1428"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferAttachmentParameterivEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetFramebufferAttachmentParameteriv"/>
+            <glx type="vendor" opcode="1428"/>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferAttachmentParameterivOES</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferParameterfvAMD</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>numsamples</name></param>
+            <param><ptype>GLuint</ptype> <name>pixelindex</name></param>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param><ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferParameteriv</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetFramebufferParameterivEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="GetFramebufferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLsizei</ptype> <name>glGetFramebufferPixelLocalStorageSizeEXT</name></proto>
+            <param group="FramebufferTarget"><ptype>GLuint</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto group="GraphicsResetStatus"><ptype>GLenum</ptype> <name>glGetGraphicsResetStatus</name></proto>
+        </command>
+        <command>
+            <proto group="GraphicsResetStatus"><ptype>GLenum</ptype> <name>glGetGraphicsResetStatusARB</name></proto>
+        </command>
+        <command>
+            <proto group="GraphicsResetStatus"><ptype>GLenum</ptype> <name>glGetGraphicsResetStatusEXT</name></proto>
+            <alias name="glGetGraphicsResetStatus"/>
+        </command>
+        <command>
+            <proto group="GraphicsResetStatus"><ptype>GLenum</ptype> <name>glGetGraphicsResetStatusKHR</name></proto>
+            <alias name="glGetGraphicsResetStatus"/>
+        </command>
+        <command>
+            <proto group="handleARB"><ptype>GLhandleARB</ptype> <name>glGetHandleARB</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogram</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>values</name></param>
+            <glx type="single" opcode="154"/>
+            <glx type="render" opcode="337" name="glGetHistogramPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramEXT</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>values</name></param>
+            <glx type="vendor" opcode="5"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramParameterfv</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetHistogramParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="155"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramParameterfvEXT</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetHistogramParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="6"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramParameteriv</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetHistogramParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="156"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramParameterivEXT</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetHistogramParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="7"/>
+        </command>
+        <command>
+            <proto>void <name>glGetHistogramParameterxvOES</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetHistogramParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetImageHandleARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLboolean</ptype> <name>layered</name></param>
+            <param><ptype>GLint</ptype> <name>layer</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetImageHandleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>layered</name></param>
+            <param><ptype>GLint</ptype> <name>layer</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetImageTransformParameterfvHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetImageTransformParameterivHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInfoLogARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxLength</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="maxLength"><ptype>GLcharARB</ptype> *<name>infoLog</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetInstrumentsSGIX</name></proto>
+            <glx type="vendor" opcode="4102"/>
+        </command>
+        <command>
+            <proto>void <name>glGetInteger64i_v</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLint64</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInteger64v</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInteger64vAPPLE</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint64</ptype> *<name>params</name></param>
+            <alias name="glGetInteger64v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegerIndexedvEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLint</ptype> *<name>data</name></param>
+            <alias name="glGetIntegeri_v"/>
+            <glx type="single" opcode="211"/>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegeri_v</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLint</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegeri_vEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegerui64i_vNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>value</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(value)"><ptype>GLuint64EXT</ptype> *<name>result</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegerui64vNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(value)"><ptype>GLuint64EXT</ptype> *<name>result</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetIntegerv</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>data</name></param>
+            <glx type="single" opcode="117"/>
+        </command>
+        <command>
+            <proto>void <name>glGetInternalformatSampleivNV</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormatPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInternalformati64v</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="InternalFormatPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInternalformativ</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="InternalFormatPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInvariantBooleanvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param group="Boolean" len="COMPSIZE(id)"><ptype>GLboolean</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInvariantFloatvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLfloat</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetInvariantIntegervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLint</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLightfv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="118"/>
+        </command>
+        <command>
+            <proto>void <name>glGetLightiv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="119"/>
+        </command>
+        <command>
+            <proto>void <name>glGetLightxOES</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLightxv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLightxvOES</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetListParameterfvSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetListParameterivSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLocalConstantBooleanvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param group="Boolean" len="COMPSIZE(id)"><ptype>GLboolean</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLocalConstantFloatvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLfloat</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetLocalConstantIntegervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLint</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapAttribParameterfvNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="MapAttribParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapAttribParameterivNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="MapAttribParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapControlPointsNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="MapTypeNV"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>ustride</name></param>
+            <param><ptype>GLsizei</ptype> <name>vstride</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>packed</name></param>
+            <param len="COMPSIZE(target)">void *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapParameterfvNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(target,pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapParameterivNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(target,pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMapdv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param len="COMPSIZE(target,query)"><ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="single" opcode="120"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMapfv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param len="COMPSIZE(target,query)"><ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="single" opcode="121"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMapiv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param len="COMPSIZE(target,query)"><ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="single" opcode="122"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMapxvOES</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param len="COMPSIZE(query)"><ptype>GLfixed</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMaterialfv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="123"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMaterialiv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="124"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMaterialxOES</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMaterialxv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMaterialxvOES</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMemoryObjectDetachedResourcesuivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMemoryObjectParameterivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memoryObject</name></param>
+            <param group="MemoryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmax</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>values</name></param>
+            <glx type="single" opcode="157"/>
+            <glx type="render" opcode="338" name="glGetMinmaxPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmaxEXT</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>values</name></param>
+            <glx type="vendor" opcode="8"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmaxParameterfv</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMinmaxParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="158"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmaxParameterfvEXT</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMinmaxParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="9"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmaxParameteriv</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMinmaxParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="159"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMinmaxParameterivEXT</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetMinmaxParameterPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="10"/>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexEnvfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexEnvivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexGendvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexGenfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexGenivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexImageEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,level,format,type)">void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexLevelParameterfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexLevelParameterivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexParameterIivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexParameterIuivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexParameterfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultiTexParameterivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultisamplefv</name></proto>
+            <param group="GetMultisamplePNameNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>val</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetMultisamplefvNV</name></proto>
+            <param group="GetMultisamplePNameNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2"><ptype>GLfloat</ptype> *<name>val</name></param>
+            <alias name="glGetMultisamplefv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferParameteri64v</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferParameterivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferParameterui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferPointerv</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferPointervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="VertexBufferObjectParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param>void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedBufferSubDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="COMPSIZE(size)">void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedFramebufferParameterfvAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>numsamples</name></param>
+            <param><ptype>GLuint</ptype> <name>pixelindex</name></param>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param><ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedFramebufferAttachmentParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedFramebufferAttachmentParameterivEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="FramebufferAttachmentParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedFramebufferParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="GetFramebufferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedFramebufferParameterivEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="GetFramebufferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramLocalParameterIivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramLocalParameterIuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramLocalParameterdvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramLocalParameterfvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramStringEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramStringProperty"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(program,pname)">void *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedProgramivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedRenderbufferParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param group="RenderbufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedRenderbufferParameterivEXT</name></proto>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param group="RenderbufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedStringARB</name></proto>
+            <param><ptype>GLint</ptype> <name>namelen</name></param>
+            <param len="namelen">const <ptype>GLchar</ptype> *<name>name</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>stringlen</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNamedStringivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>namelen</name></param>
+            <param len="namelen">const <ptype>GLchar</ptype> *<name>name</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetNextPerfQueryIdINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryId</name></param>
+            <param><ptype>GLuint</ptype> *<name>nextQueryId</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectBufferfvATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectBufferivATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectLabel</name></proto>
+            <param><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectLabelEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>object</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectLabelKHR</name></proto>
+            <param><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>label</name></param>
+            <alias name="glGetObjectLabel"/>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectParameterfvARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectParameterivAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>objectType</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectParameterivARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectPtrLabel</name></proto>
+            <param>const void *<name>ptr</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetObjectPtrLabelKHR</name></proto>
+            <param>const void *<name>ptr</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>label</name></param>
+            <alias name="glGetObjectPtrLabel"/>
+        </command>
+        <command>
+            <proto>void <name>glGetOcclusionQueryivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="OcclusionQueryParameterNameNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetOcclusionQueryuivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="OcclusionQueryParameterNameNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathColorGenfvNV</name></proto>
+            <param group="PathColor"><ptype>GLenum</ptype> <name>color</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathColorGenivNV</name></proto>
+            <param group="PathColor"><ptype>GLenum</ptype> <name>color</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathCommandsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathCommand" len="COMPSIZE(path)"><ptype>GLubyte</ptype> *<name>commands</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathCoordsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param len="COMPSIZE(path)"><ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathDashArrayNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param len="COMPSIZE(path)"><ptype>GLfloat</ptype> *<name>dashArray</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLfloat</ptype> <name>glGetPathLengthNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>startSegment</name></param>
+            <param><ptype>GLsizei</ptype> <name>numSegments</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathMetricRangeNV</name></proto>
+            <param group="PathMetricMask"><ptype>GLbitfield</ptype> <name>metricQueryMask</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>firstPathName</name></param>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(metricQueryMask,numPaths,stride)"><ptype>GLfloat</ptype> *<name>metrics</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathMetricsNV</name></proto>
+            <param group="PathMetricMask"><ptype>GLbitfield</ptype> <name>metricQueryMask</name></param>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(metricQueryMask,numPaths,stride)"><ptype>GLfloat</ptype> *<name>metrics</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathParameterfvNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathParameterivNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathSpacingNV</name></proto>
+            <param group="PathListMode"><ptype>GLenum</ptype> <name>pathListMode</name></param>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param><ptype>GLfloat</ptype> <name>advanceScale</name></param>
+            <param><ptype>GLfloat</ptype> <name>kerningScale</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(pathListMode,numPaths)"><ptype>GLfloat</ptype> *<name>returnedSpacing</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathTexGenfvNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texCoordSet</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPathTexGenivNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texCoordSet</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfCounterInfoINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryId</name></param>
+            <param><ptype>GLuint</ptype> <name>counterId</name></param>
+            <param><ptype>GLuint</ptype> <name>counterNameLength</name></param>
+            <param><ptype>GLchar</ptype> *<name>counterName</name></param>
+            <param><ptype>GLuint</ptype> <name>counterDescLength</name></param>
+            <param><ptype>GLchar</ptype> *<name>counterDesc</name></param>
+            <param><ptype>GLuint</ptype> *<name>counterOffset</name></param>
+            <param><ptype>GLuint</ptype> *<name>counterDataSize</name></param>
+            <param><ptype>GLuint</ptype> *<name>counterTypeEnum</name></param>
+            <param><ptype>GLuint</ptype> *<name>counterDataTypeEnum</name></param>
+            <param><ptype>GLuint64</ptype> *<name>rawCounterMaxValue</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorCounterDataAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>monitor</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>dataSize</name></param>
+            <param len="dataSize"><ptype>GLuint</ptype> *<name>data</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>bytesWritten</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorCounterInfoAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>group</name></param>
+            <param><ptype>GLuint</ptype> <name>counter</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorCounterStringAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>group</name></param>
+            <param><ptype>GLuint</ptype> <name>counter</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>counterString</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorCountersAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>group</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>numCounters</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>maxActiveCounters</name></param>
+            <param><ptype>GLsizei</ptype> <name>counterSize</name></param>
+            <param len="counterSize"><ptype>GLuint</ptype> *<name>counters</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorGroupStringAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>group</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>groupString</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfMonitorGroupsAMD</name></proto>
+            <param len="1"><ptype>GLint</ptype> *<name>numGroups</name></param>
+            <param><ptype>GLsizei</ptype> <name>groupsSize</name></param>
+            <param len="groupsSize"><ptype>GLuint</ptype> *<name>groups</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfQueryDataINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryHandle</name></param>
+            <param><ptype>GLuint</ptype> <name>flags</name></param>
+            <param><ptype>GLsizei</ptype> <name>dataSize</name></param>
+            <param>void *<name>data</name></param>
+            <param><ptype>GLuint</ptype> *<name>bytesWritten</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfQueryIdByNameINTEL</name></proto>
+            <param><ptype>GLchar</ptype> *<name>queryName</name></param>
+            <param><ptype>GLuint</ptype> *<name>queryId</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPerfQueryInfoINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>queryId</name></param>
+            <param><ptype>GLuint</ptype> <name>queryNameLength</name></param>
+            <param><ptype>GLchar</ptype> *<name>queryName</name></param>
+            <param><ptype>GLuint</ptype> *<name>dataSize</name></param>
+            <param><ptype>GLuint</ptype> *<name>noCounters</name></param>
+            <param><ptype>GLuint</ptype> *<name>noInstances</name></param>
+            <param><ptype>GLuint</ptype> *<name>capsMask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelMapfv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param len="COMPSIZE(map)"><ptype>GLfloat</ptype> *<name>values</name></param>
+            <glx type="single" opcode="125"/>
+            <glx type="render" opcode="339" name="glGetPixelMapfvPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelMapuiv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param len="COMPSIZE(map)"><ptype>GLuint</ptype> *<name>values</name></param>
+            <glx type="single" opcode="126"/>
+            <glx type="render" opcode="340" name="glGetPixelMapuivPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelMapusv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param len="COMPSIZE(map)"><ptype>GLushort</ptype> *<name>values</name></param>
+            <glx type="single" opcode="127"/>
+            <glx type="render" opcode="341" name="glGetPixelMapusvPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelMapxv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size"><ptype>GLfixed</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelTexGenParameterfvSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelTexGenParameterivSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelTransformParameterfvEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="2051"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPixelTransformParameterivEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="2052"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPointerIndexedvEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">void **<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPointeri_vEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetPointerv</name></proto>
+            <param group="GetPointervPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+            <glx type="single" opcode="208"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPointervEXT</name></proto>
+            <param group="GetPointervPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+            <alias name="glGetPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPointervKHR</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>void **<name>params</name></param>
+            <alias name="glGetPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetPolygonStipple</name></proto>
+            <param len="COMPSIZE()"><ptype>GLubyte</ptype> *<name>mask</name></param>
+            <glx type="single" opcode="128"/>
+            <glx type="render" opcode="342" name="glGetPolygonStipplePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramBinary</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>binaryFormat</name></param>
+            <param len="bufSize">void *<name>binary</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramBinaryOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>binaryFormat</name></param>
+            <param len="bufSize">void *<name>binary</name></param>
+            <alias name="glGetProgramBinary"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramEnvParameterIivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramEnvParameterIuivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramEnvParameterdvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramEnvParameterfvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramInfoLog</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>infoLog</name></param>
+            <glx type="single" opcode="201"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramInterfaceiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param group="ProgramInterfacePName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramLocalParameterIivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramLocalParameterIuivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramLocalParameterdvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramLocalParameterfvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramNamedParameterdvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1311"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramNamedParameterfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1310"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramParameterdvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1297"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramParameterfvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1296"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramPipelineInfoLog</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>infoLog</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramPipelineInfoLogEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>infoLog</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramPipelineiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param group="PipelineParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramPipelineivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param group="PipelineParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetProgramResourceIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetProgramResourceLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetProgramResourceLocationIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetProgramResourceLocationIndexEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramResourceName</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramResourcefvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>propCount</name></param>
+            <param>const <ptype>GLenum</ptype> *<name>props</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramResourceiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramInterface"><ptype>GLenum</ptype> <name>programInterface</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>propCount</name></param>
+            <param len="propCount">const <ptype>GLenum</ptype> *<name>props</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramStageiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param group="ProgramStagePName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramStringARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramStringPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(target,pname)">void *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramStringNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="ProgramCharacterNV" len="COMPSIZE(id,pname)"><ptype>GLubyte</ptype> *<name>program</name></param>
+            <glx type="vendor" opcode="1299"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramSubroutineParameteruivNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLuint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="199"/>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramivARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetProgramivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1298"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryBufferObjecti64v</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryBufferObjectiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryBufferObjectui64v</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryBufferObjectuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryIndexediv</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="QueryParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjecti64v</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjecti64vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1328"/>
+            <alias name="glGetQueryObjecti64v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="165"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetQueryObjectiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetQueryObjectiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectui64v</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectui64vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1329"/>
+            <alias name="glGetQueryObjectui64v"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="166"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectuivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetQueryObjectuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryObjectuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryiv</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="QueryParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="164"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryivARB</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="QueryParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetQueryiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetQueryivEXT</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="QueryParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetRenderbufferParameteriv</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="RenderbufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1424"/>
+        </command>
+        <command>
+            <proto>void <name>glGetRenderbufferParameterivEXT</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="RenderbufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetRenderbufferParameteriv"/>
+            <glx type="vendor" opcode="1424"/>
+        </command>
+        <command>
+            <proto>void <name>glGetRenderbufferParameterivOES</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="RenderbufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetSamplerParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIivOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetSamplerParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetSamplerParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterIuivOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetSamplerParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameterfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSamplerParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSemaphoreParameterui64vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param group="SemaphoreParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSeparableFilter</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>row</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>column</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>span</name></param>
+            <glx type="single" opcode="153"/>
+            <glx type="render" opcode="343" name="glGetSeparableFilterPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetSeparableFilterEXT</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>row</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>column</name></param>
+            <param len="COMPSIZE(target,format,type)">void *<name>span</name></param>
+            <glx type="vendor" opcode="4"/>
+        </command>
+        <command>
+            <proto>void <name>glGetShaderInfoLog</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>infoLog</name></param>
+            <glx type="single" opcode="200"/>
+        </command>
+        <command>
+            <proto>void <name>glGetShaderPrecisionFormat</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param group="PrecisionType"><ptype>GLenum</ptype> <name>precisiontype</name></param>
+            <param len="2"><ptype>GLint</ptype> *<name>range</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>precision</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetShaderSource</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>source</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetShaderSourceARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>obj</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxLength</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="maxLength"><ptype>GLcharARB</ptype> *<name>source</name></param>
+            <alias name="glGetShaderSource"/>
+        </command>
+        <command>
+            <proto>void <name>glGetShaderiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param group="ShaderParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="198"/>
+        </command>
+        <command>
+            <proto>void <name>glGetShadingRateImagePaletteNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>viewport</name></param>
+            <param><ptype>GLuint</ptype> <name>entry</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>rate</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetShadingRateSampleLocationivNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>rate</name></param>
+            <param><ptype>GLuint</ptype> <name>samples</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3"><ptype>GLint</ptype> *<name>location</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSharpenTexFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="vendor" opcode="4097"/>
+        </command>
+        <command>
+            <proto><ptype>GLushort</ptype> <name>glGetStageIndexNV</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+        </command>
+        <command>
+            <proto group="String">const <ptype>GLubyte</ptype> *<name>glGetString</name></proto>
+            <param group="StringName"><ptype>GLenum</ptype> <name>name</name></param>
+            <glx type="single" opcode="129"/>
+        </command>
+        <command>
+            <proto group="String">const <ptype>GLubyte</ptype> *<name>glGetStringi</name></proto>
+            <param group="StringName"><ptype>GLenum</ptype> <name>name</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <glx type="single" opcode="214"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetSubroutineIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetSubroutineUniformLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSynciv</name></proto>
+            <param group="sync"><ptype>GLsync</ptype> <name>sync</name></param>
+            <param group="SyncParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetSyncivAPPLE</name></proto>
+            <param><ptype>GLsync</ptype> <name>sync</name></param>
+            <param group="SyncParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>values</name></param>
+            <alias name="glGetSynciv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexBumpParameterfvATI</name></proto>
+            <param group="GetTexBumpParameterATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexBumpParameterivATI</name></proto>
+            <param group="GetTexBumpParameterATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexEnvfv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="130"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexEnviv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="131"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexEnvxv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexEnvxvOES</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexFilterFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureFilterSGIS"><ptype>GLenum</ptype> <name>filter</name></param>
+            <param len="COMPSIZE(target,filter)"><ptype>GLfloat</ptype> *<name>weights</name></param>
+            <glx type="vendor" opcode="4101"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGendv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <glx type="single" opcode="132"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGenfv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="133"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGenfvOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGeniv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="134"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGenivOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexGenxvOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexImage</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,level,format,type)">void *<name>pixels</name></param>
+            <glx type="single" opcode="135"/>
+            <glx type="render" opcode="344" name="glGetTexImagePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexLevelParameterfv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="138"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexLevelParameteriv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="139"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexLevelParameterxvOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIiv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="203"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIivEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetTexParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIivOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetTexParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIuiv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="204"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIuivEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetTexParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterIuivOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetTexParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterPointervAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterfv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="single" opcode="136"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameteriv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="single" opcode="137"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterxv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTexParameterxvOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureHandleARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureHandleIMG</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <alias name="glGetTextureHandleARB"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureHandleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureImageEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,level,format,type)">void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureLevelParameterfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureLevelParameterfvEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureLevelParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureLevelParameterivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterIiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterIivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterIuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterIuivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterfvEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureParameterivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureSamplerHandleARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureSamplerHandleIMG</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <alias name="glGetTextureSamplerHandleARB"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint64</ptype> <name>glGetTextureSamplerHandleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTextureSubImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTrackMatrixivNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>address</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1300"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbackVarying</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>size</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>name</name></param>
+            <glx type="single" opcode="213"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbackVaryingEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>size</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>type</name></param>
+            <param len="bufSize"><ptype>GLchar</ptype> *<name>name</name></param>
+            <alias name="glGetTransformFeedbackVarying"/>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbackVaryingNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>location</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbacki64_v</name></proto>
+            <param><ptype>GLuint</ptype> <name>xfb</name></param>
+            <param group="TransformFeedbackPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint64</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbacki_v</name></proto>
+            <param><ptype>GLuint</ptype> <name>xfb</name></param>
+            <param group="TransformFeedbackPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTransformFeedbackiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>xfb</name></param>
+            <param group="TransformFeedbackPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetTranslatedShaderSourceANGLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufsize</name></param>
+            <param len="1"><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param><ptype>GLchar</ptype> *<name>source</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glGetUniformBlockIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param len="COMPSIZE()">const <ptype>GLchar</ptype> *<name>uniformBlockName</name></param>
+            <glx type="single" opcode="218"/>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetUniformBufferSizeEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformIndices</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>uniformCount</name></param>
+            <param len="COMPSIZE(uniformCount)">const <ptype>GLchar</ptype> *const*<name>uniformNames</name></param>
+            <param len="COMPSIZE(uniformCount)"><ptype>GLuint</ptype> *<name>uniformIndices</name></param>
+            <glx type="single" opcode="215"/>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetUniformLocation</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetUniformLocationARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param>const <ptype>GLcharARB</ptype> *<name>name</name></param>
+            <alias name="glGetUniformLocation"/>
+        </command>
+        <command>
+            <proto group="BufferOffset"><ptype>GLintptr</ptype> <name>glGetUniformOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformSubroutineuiv</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="1"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformdv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformfvARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(programObj,location)"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetUniformfv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformi64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformi64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformivARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(programObj,location)"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetUniformiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUniformuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param len="COMPSIZE(program,location)"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetUniformuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetUnsignedBytevEXT</name></proto>
+            <param group="GetPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLubyte</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetUnsignedBytei_vEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="COMPSIZE(target)"><ptype>GLubyte</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantArrayObjectfvATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantArrayObjectivATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantBooleanvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param group="Boolean" len="COMPSIZE(id)"><ptype>GLboolean</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantFloatvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLfloat</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantIntegervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)"><ptype>GLint</ptype> *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVariantPointervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="GetVariantValueEXT"><ptype>GLenum</ptype> <name>value</name></param>
+            <param len="COMPSIZE(id)">void **<name>data</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glGetVaryingLocationNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayIndexed64iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint64</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayIndexediv</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayIntegeri_vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayIntegervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayPointeri_vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>void **<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayPointervEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexArrayiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param group="VertexArrayPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribArrayObjectfvATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribArrayObjectivATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="ArrayObjectPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribIiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribIivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribIuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribIuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribLdv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribLdvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribLdv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribLi64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribLui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribLui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribPointerv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>pointer</name></param>
+            <glx type="single" opcode="209"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribPointervARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>pointer</name></param>
+            <alias name="glGetVertexAttribPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribPointervNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">void **<name>pointer</name></param>
+            <alias name="glGetVertexAttribPointerv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribdv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1301"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribdvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribdv"/>
+            <glx type="vendor" opcode="1301"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribdvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLdouble</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribdv"/>
+            <glx type="vendor" opcode="1301"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1302"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribfvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribfv"/>
+            <glx type="vendor" opcode="1302"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribfv"/>
+            <glx type="vendor" opcode="1302"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="vendor" opcode="1303"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPropertyARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="4"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribiv"/>
+            <glx type="vendor" opcode="1303"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVertexAttribivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetVertexAttribiv"/>
+            <glx type="vendor" opcode="1303"/>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoCaptureStreamdvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoCaptureStreamfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoCaptureStreamivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoCaptureivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoi64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideoui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint64EXT</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetVideouivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnColorTable</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>table</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnColorTableARB</name></proto>
+            <param group="ColorTableTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>table</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnCompressedTexImage</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>lod</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnCompressedTexImageARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>lod</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>img</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnConvolutionFilter</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>image</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnConvolutionFilterARB</name></proto>
+            <param group="ConvolutionTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>image</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnHistogram</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnHistogramARB</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapdv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapdvARB</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapfv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapfvARB</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapiv</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMapivARB</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapQuery"><ptype>GLenum</ptype> <name>query</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMinmax</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param>void *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnMinmaxARB</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>reset</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapfv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapfvARB</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapuiv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLuint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapuivARB</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLuint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapusv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLushort</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPixelMapusvARB</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLushort</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPolygonStipple</name></proto>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLubyte</ptype> *<name>pattern</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnPolygonStippleARB</name></proto>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLubyte</ptype> *<name>pattern</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnSeparableFilter</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>rowBufSize</name></param>
+            <param>void *<name>row</name></param>
+            <param><ptype>GLsizei</ptype> <name>columnBufSize</name></param>
+            <param>void *<name>column</name></param>
+            <param>void *<name>span</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnSeparableFilterARB</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>rowBufSize</name></param>
+            <param len="rowBufSize">void *<name>row</name></param>
+            <param><ptype>GLsizei</ptype> <name>columnBufSize</name></param>
+            <param len="columnBufSize">void *<name>column</name></param>
+            <param len="0">void *<name>span</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnTexImage</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnTexImageARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>img</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformdv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformdvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformfvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformfvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetnUniformfv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformfvKHR</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glGetnUniformfv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformi64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetnUniformiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformivKHR</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glGetnUniformiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformuivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGetnUniformuivKHR</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize"><ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glGetnUniformuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactorbSUN</name></proto>
+            <param><ptype>GLbyte</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactordSUN</name></proto>
+            <param><ptype>GLdouble</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactorfSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactoriSUN</name></proto>
+            <param><ptype>GLint</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactorsSUN</name></proto>
+            <param><ptype>GLshort</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactorubSUN</name></proto>
+            <param><ptype>GLubyte</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactoruiSUN</name></proto>
+            <param><ptype>GLuint</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glGlobalAlphaFactorusSUN</name></proto>
+            <param><ptype>GLushort</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glHint</name></proto>
+            <param group="HintTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="HintMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="85"/>
+        </command>
+        <command>
+            <proto>void <name>glHintPGI</name></proto>
+            <param group="HintTargetPGI"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glHistogram</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>sink</name></param>
+            <glx type="render" opcode="4110"/>
+        </command>
+        <command>
+            <proto>void <name>glHistogramEXT</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>sink</name></param>
+            <alias name="glHistogram"/>
+            <glx type="render" opcode="4110"/>
+        </command>
+        <command>
+            <proto>void <name>glIglooInterfaceSGIX</name></proto>
+            <param group="IglooFunctionSelectSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="IglooParameterSGIX" len="COMPSIZE(pname)">const void *<name>params</name></param>
+            <glx type="render" opcode="200"/>
+        </command>
+        <command>
+            <proto>void <name>glImageTransformParameterfHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImageTransformParameterfvHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImageTransformParameteriHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImageTransformParameterivHP</name></proto>
+            <param group="ImageTransformTargetHP"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ImageTransformPNameHP"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportMemoryFdEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>size</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param><ptype>GLint</ptype> <name>fd</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportMemoryWin32HandleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>size</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param>void *<name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportMemoryWin32NameEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>size</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param>const void *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportSemaphoreFdEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param><ptype>GLint</ptype> <name>fd</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportSemaphoreWin32HandleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param>void *<name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glImportSemaphoreWin32NameEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param group="ExternalHandleType"><ptype>GLenum</ptype> <name>handleType</name></param>
+            <param>const void *<name>name</name></param>
+        </command>
+        <command>
+            <proto group="sync"><ptype>GLsync</ptype> <name>glImportSyncEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>external_sync_type</name></param>
+            <param><ptype>GLintptr</ptype> <name>external_sync</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexFormatNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexFuncEXT</name></proto>
+            <param group="IndexFunctionEXT"><ptype>GLenum</ptype> <name>func</name></param>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>ref</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexMask</name></proto>
+            <param group="MaskedColorIndexValueI"><ptype>GLuint</ptype> <name>mask</name></param>
+            <glx type="render" opcode="136"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexMaterialEXT</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="IndexMaterialParameterEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexPointer</name></proto>
+            <param group="IndexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexPointerEXT</name></proto>
+            <param group="IndexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(type,stride,count)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexPointerListIBM</name></proto>
+            <param group="IndexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexd</name></proto>
+            <param group="ColorIndexValueD"><ptype>GLdouble</ptype> <name>c</name></param>
+            <vecequiv name="glIndexdv"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexdv</name></proto>
+            <param group="ColorIndexValueD" len="1">const <ptype>GLdouble</ptype> *<name>c</name></param>
+            <glx type="render" opcode="24"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexf</name></proto>
+            <param group="ColorIndexValueF"><ptype>GLfloat</ptype> <name>c</name></param>
+            <vecequiv name="glIndexfv"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexfv</name></proto>
+            <param group="ColorIndexValueF" len="1">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <glx type="render" opcode="25"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexi</name></proto>
+            <param group="ColorIndexValueI"><ptype>GLint</ptype> <name>c</name></param>
+            <vecequiv name="glIndexiv"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexiv</name></proto>
+            <param group="ColorIndexValueI" len="1">const <ptype>GLint</ptype> *<name>c</name></param>
+            <glx type="render" opcode="26"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexs</name></proto>
+            <param group="ColorIndexValueS"><ptype>GLshort</ptype> <name>c</name></param>
+            <vecequiv name="glIndexsv"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexsv</name></proto>
+            <param group="ColorIndexValueS" len="1">const <ptype>GLshort</ptype> *<name>c</name></param>
+            <glx type="render" opcode="27"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexub</name></proto>
+            <param group="ColorIndexValueUB"><ptype>GLubyte</ptype> <name>c</name></param>
+            <vecequiv name="glIndexubv"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexubv</name></proto>
+            <param group="ColorIndexValueUB" len="1">const <ptype>GLubyte</ptype> *<name>c</name></param>
+            <glx type="render" opcode="194"/>
+        </command>
+        <command>
+            <proto>void <name>glIndexxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>component</name></param>
+        </command>
+        <command>
+            <proto>void <name>glIndexxvOES</name></proto>
+            <param len="1">const <ptype>GLfixed</ptype> *<name>component</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInitNames</name></proto>
+            <glx type="render" opcode="121"/>
+        </command>
+        <command>
+            <proto>void <name>glInsertComponentEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>src</name></param>
+            <param><ptype>GLuint</ptype> <name>num</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInsertEventMarkerEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>marker</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInstrumentsBufferSGIX</name></proto>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param len="size"><ptype>GLint</ptype> *<name>buffer</name></param>
+            <glx type="vendor" opcode="4103"/>
+        </command>
+        <command>
+            <proto>void <name>glInterleavedArrays</name></proto>
+            <param group="InterleavedArrayFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(format,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInterpolatePathsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>resultPath</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathA</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathB</name></param>
+            <param><ptype>GLfloat</ptype> <name>weight</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateBufferData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateBufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateFramebuffer</name></proto>
+            <param group="FramebufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numAttachments</name></param>
+            <param len="numAttachments">const <ptype>GLenum</ptype> *<name>attachments</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateNamedFramebufferData</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>numAttachments</name></param>
+            <param group="FramebufferAttachment">const <ptype>GLenum</ptype> *<name>attachments</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateNamedFramebufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>numAttachments</name></param>
+            <param group="FramebufferAttachment">const <ptype>GLenum</ptype> *<name>attachments</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateSubFramebuffer</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numAttachments</name></param>
+            <param len="numAttachments" group="FramebufferAttachment">const <ptype>GLenum</ptype> *<name>attachments</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateTexImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glInvalidateTexSubImage</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsAsyncMarkerSGIX</name></proto>
+            <param><ptype>GLuint</ptype> <name>marker</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsBufferARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glIsBuffer"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsBufferResidentNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsCommandListNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>list</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnabled</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>cap</name></param>
+            <glx type="single" opcode="140"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnabledIndexedEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glIsEnabledi"/>
+            <glx type="single" opcode="212"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnabledi</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnablediEXT</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glIsEnabledi"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnablediNV</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glIsEnabledi"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsEnablediOES</name></proto>
+            <param group="EnableCap"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <alias name="glIsEnabledi"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsFenceAPPLE</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsFenceNV</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+            <glx type="vendor" opcode="1278"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsFramebuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <glx type="vendor" opcode="1425"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsFramebufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <alias name="glIsFramebuffer"/>
+            <glx type="vendor" opcode="1425"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsFramebufferOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsImageHandleResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsImageHandleResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsList</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <glx type="single" opcode="141"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsMemoryObjectEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memoryObject</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsNameAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsNamedBufferResidentNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsNamedStringARB</name></proto>
+            <param><ptype>GLint</ptype> <name>namelen</name></param>
+            <param len="namelen">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsObjectBufferATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsOcclusionQueryNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsPointInFillPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsPointInStrokePathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <glx type="single" opcode="197"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsProgramARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <glx type="vendor" opcode="1304"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsProgramNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glIsProgramARB"/>
+            <glx type="vendor" opcode="1304"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsProgramPipeline</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsProgramPipelineEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsQuery</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <glx type="single" opcode="163"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsQueryARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glIsQuery"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsQueryEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsRenderbuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <glx type="vendor" opcode="1422"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsRenderbufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <alias name="glIsRenderbuffer"/>
+            <glx type="vendor" opcode="1422"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsRenderbufferOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsSemaphoreEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsSampler</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <glx type="single" opcode="196"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsStateNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>state</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsSync</name></proto>
+            <param group="sync"><ptype>GLsync</ptype> <name>sync</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsSyncAPPLE</name></proto>
+            <param><ptype>GLsync</ptype> <name>sync</name></param>
+            <alias name="glIsSync"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsTexture</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <glx type="single" opcode="146"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsTextureEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <glx type="vendor" opcode="14"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsTextureHandleResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsTextureHandleResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsTransformFeedback</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsTransformFeedbackNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <alias name="glIsTransformFeedback"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsVariantEnabledEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="VariantCapEXT"><ptype>GLenum</ptype> <name>cap</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsVertexArray</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+            <glx type="single" opcode="207"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsVertexArrayAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+            <alias name="glIsVertexArray"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glIsVertexArrayOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>array</name></param>
+            <alias name="glIsVertexArray"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glIsVertexAttribEnabledAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLGPUCopyImageSubDataNVX</name></proto>
+            <param><ptype>GLuint</ptype> <name>sourceGpu</name></param>
+            <param><ptype>GLbitfield</ptype> <name>destinationGpuMask</name></param>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srxY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLGPUInterlockNVX</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glLGPUNamedBufferSubDataNVX</name></proto>
+            <param><ptype>GLbitfield</ptype> <name>gpuMask</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLabelObjectEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>object</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightEnviSGIX</name></proto>
+            <param group="LightEnvParameterSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightModelf</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="90"/>
+        </command>
+        <command>
+            <proto>void <name>glLightModelfv</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="91"/>
+        </command>
+        <command>
+            <proto>void <name>glLightModeli</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="92"/>
+        </command>
+        <command>
+            <proto>void <name>glLightModeliv</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="93"/>
+        </command>
+        <command>
+            <proto>void <name>glLightModelx</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightModelxOES</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightModelxv</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightModelxvOES</name></proto>
+            <param group="LightModelParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightf</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="86"/>
+        </command>
+        <command>
+            <proto>void <name>glLightfv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="87"/>
+        </command>
+        <command>
+            <proto>void <name>glLighti</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="88"/>
+        </command>
+        <command>
+            <proto>void <name>glLightiv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="89"/>
+        </command>
+        <command>
+            <proto>void <name>glLightx</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightxOES</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightxv</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLightxvOES</name></proto>
+            <param group="LightName"><ptype>GLenum</ptype> <name>light</name></param>
+            <param group="LightParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLineStipple</name></proto>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>factor</name></param>
+            <param group="LineStipple"><ptype>GLushort</ptype> <name>pattern</name></param>
+            <glx type="render" opcode="94"/>
+        </command>
+        <command>
+            <proto>void <name>glLineWidth</name></proto>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>width</name></param>
+            <glx type="render" opcode="95"/>
+        </command>
+        <command>
+            <proto>void <name>glLineWidthx</name></proto>
+            <param><ptype>GLfixed</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLineWidthxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLinkProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLinkProgramARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <alias name="glLinkProgram"/>
+        </command>
+        <command>
+            <proto>void <name>glListBase</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>base</name></param>
+            <glx type="render" opcode="3"/>
+        </command>
+        <command>
+            <proto>void <name>glListDrawCommandsStatesClientNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>list</name></param>
+            <param><ptype>GLuint</ptype> <name>segment</name></param>
+            <param>const void **<name>indirects</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>sizes</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>states</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>fbos</name></param>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glListParameterfSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="2078"/>
+        </command>
+        <command>
+            <proto>void <name>glListParameterfvSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2079"/>
+        </command>
+        <command>
+            <proto>void <name>glListParameteriSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="2080"/>
+        </command>
+        <command>
+            <proto>void <name>glListParameterivSGIX</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2081"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadIdentity</name></proto>
+            <glx type="render" opcode="176"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadIdentityDeformationMapSGIX</name></proto>
+            <param group="FfdMaskSGIX"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <glx type="render" opcode="2076"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadMatrixd</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+            <glx type="render" opcode="178"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadMatrixf</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+            <glx type="render" opcode="177"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadMatrixx</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLoadMatrixxOES</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLoadName</name></proto>
+            <param group="SelectName"><ptype>GLuint</ptype> <name>name</name></param>
+            <glx type="render" opcode="122"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadPaletteFromModelViewMatrixOES</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glLoadProgramNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="len">const <ptype>GLubyte</ptype> *<name>program</name></param>
+            <glx type="render" opcode="4183"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadTransposeMatrixd</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLoadTransposeMatrixdARB</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+            <alias name="glLoadTransposeMatrixd"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadTransposeMatrixf</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLoadTransposeMatrixfARB</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+            <alias name="glLoadTransposeMatrixf"/>
+        </command>
+        <command>
+            <proto>void <name>glLoadTransposeMatrixxOES</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLockArraysEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glLogicOp</name></proto>
+            <param group="LogicOp"><ptype>GLenum</ptype> <name>opcode</name></param>
+            <glx type="render" opcode="161"/>
+        </command>
+        <command>
+            <proto>void <name>glMakeBufferNonResidentNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeBufferResidentNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeImageHandleNonResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeImageHandleNonResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeImageHandleResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+            <param><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeImageHandleResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+            <param><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeNamedBufferNonResidentNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeNamedBufferResidentNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeTextureHandleNonResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeTextureHandleNonResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeTextureHandleResidentARB</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMakeTextureHandleResidentNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>handle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMap1d</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>order</name></param>
+            <param group="CoordD" len="COMPSIZE(target,stride,order)">const <ptype>GLdouble</ptype> *<name>points</name></param>
+            <glx type="render" opcode="143"/>
+        </command>
+        <command>
+            <proto>void <name>glMap1f</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>order</name></param>
+            <param group="CoordF" len="COMPSIZE(target,stride,order)">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="144"/>
+        </command>
+        <command>
+            <proto>void <name>glMap1xOES</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLfixed</ptype> <name>u1</name></param>
+            <param><ptype>GLfixed</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param><ptype>GLint</ptype> <name>order</name></param>
+            <param><ptype>GLfixed</ptype> <name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMap2d</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordD" len="COMPSIZE(target,ustride,uorder,vstride,vorder)">const <ptype>GLdouble</ptype> *<name>points</name></param>
+            <glx type="render" opcode="145"/>
+        </command>
+        <command>
+            <proto>void <name>glMap2f</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordF" len="COMPSIZE(target,ustride,uorder,vstride,vorder)">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="146"/>
+        </command>
+        <command>
+            <proto>void <name>glMap2xOES</name></proto>
+            <param group="MapTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLfixed</ptype> <name>u1</name></param>
+            <param><ptype>GLfixed</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param><ptype>GLint</ptype> <name>uorder</name></param>
+            <param><ptype>GLfixed</ptype> <name>v1</name></param>
+            <param><ptype>GLfixed</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param><ptype>GLint</ptype> <name>vorder</name></param>
+            <param><ptype>GLfixed</ptype> <name>points</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapBuffer</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapBufferARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+            <alias name="glMapBuffer"/>
+        </command>
+        <command>
+            <proto>void *<name>glMapBufferOES</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+            <alias name="glMapBuffer"/>
+        </command>
+        <command>
+            <proto>void *<name>glMapBufferRange</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+            <param group="MapBufferAccessMask"><ptype>GLbitfield</ptype> <name>access</name></param>
+            <glx type="single" opcode="205"/>
+        </command>
+        <command>
+            <proto>void *<name>glMapBufferRangeEXT</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>length</name></param>
+            <param group="MapBufferAccessMask"><ptype>GLbitfield</ptype> <name>access</name></param>
+            <alias name="glMapBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glMapControlPointsNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="MapTypeNV"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>ustride</name></param>
+            <param><ptype>GLsizei</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>packed</name></param>
+            <param len="COMPSIZE(target,uorder,vorder)">const void *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid1d</name></proto>
+            <param><ptype>GLint</ptype> <name>un</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <glx type="render" opcode="147"/>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid1f</name></proto>
+            <param><ptype>GLint</ptype> <name>un</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <glx type="render" opcode="148"/>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid1xOES</name></proto>
+            <param><ptype>GLint</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>u1</name></param>
+            <param><ptype>GLfixed</ptype> <name>u2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid2d</name></proto>
+            <param><ptype>GLint</ptype> <name>un</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>vn</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v2</name></param>
+            <glx type="render" opcode="149"/>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid2f</name></proto>
+            <param><ptype>GLint</ptype> <name>un</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>vn</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v2</name></param>
+            <glx type="render" opcode="150"/>
+        </command>
+        <command>
+            <proto>void <name>glMapGrid2xOES</name></proto>
+            <param><ptype>GLint</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>u1</name></param>
+            <param><ptype>GLfixed</ptype> <name>u2</name></param>
+            <param><ptype>GLfixed</ptype> <name>v1</name></param>
+            <param><ptype>GLfixed</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapNamedBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapNamedBufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferAccessARB"><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapNamedBufferRange</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>length</name></param>
+            <param group="MapBufferAccessMask"><ptype>GLbitfield</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapNamedBufferRangeEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>length</name></param>
+            <param group="MapBufferAccessMask"><ptype>GLbitfield</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapObjectBufferATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapParameterfvNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(target,pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapParameterivNV</name></proto>
+            <param group="EvalTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="MapParameterNV"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(target,pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void *<name>glMapTexture2DINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLbitfield</ptype> <name>access</name></param>
+            <param len="1"><ptype>GLint</ptype> *<name>stride</name></param>
+            <param len="1"><ptype>GLenum</ptype> *<name>layout</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapVertexAttrib1dAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>size</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>order</name></param>
+            <param group="CoordD" len="COMPSIZE(size,stride,order)">const <ptype>GLdouble</ptype> *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapVertexAttrib1fAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>size</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>order</name></param>
+            <param group="CoordF" len="COMPSIZE(size,stride,order)">const <ptype>GLfloat</ptype> *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapVertexAttrib2dAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>size</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordD" len="COMPSIZE(size,ustride,uorder,vstride,vorder)">const <ptype>GLdouble</ptype> *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMapVertexAttrib2fAPPLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>size</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>u2</name></param>
+            <param><ptype>GLint</ptype> <name>ustride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>uorder</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>vstride</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>vorder</name></param>
+            <param group="CoordF" len="COMPSIZE(size,ustride,uorder,vstride,vorder)">const <ptype>GLfloat</ptype> *<name>points</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaterialf</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="96"/>
+        </command>
+        <command>
+            <proto>void <name>glMaterialfv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="97"/>
+        </command>
+        <command>
+            <proto>void <name>glMateriali</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="98"/>
+        </command>
+        <command>
+            <proto>void <name>glMaterialiv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="99"/>
+        </command>
+        <command>
+            <proto>void <name>glMaterialx</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaterialxOES</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaterialxv</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaterialxvOES</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixFrustumEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLdouble</ptype> <name>left</name></param>
+            <param><ptype>GLdouble</ptype> <name>right</name></param>
+            <param><ptype>GLdouble</ptype> <name>bottom</name></param>
+            <param><ptype>GLdouble</ptype> <name>top</name></param>
+            <param><ptype>GLdouble</ptype> <name>zNear</name></param>
+            <param><ptype>GLdouble</ptype> <name>zFar</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixIndexPointerARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="MatrixIndexPointerTypeARB"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixIndexPointerOES</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="MatrixIndexPointerTypeARB"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixIndexubvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLubyte</ptype> *<name>indices</name></param>
+            <glx type="render" opcode="4326"/>
+        </command>
+        <command>
+            <proto>void <name>glMatrixIndexuivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLuint</ptype> *<name>indices</name></param>
+            <glx type="render" opcode="4328"/>
+        </command>
+        <command>
+            <proto>void <name>glMatrixIndexusvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLushort</ptype> *<name>indices</name></param>
+            <glx type="render" opcode="4327"/>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoad3x2fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoad3x3fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoadIdentityEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoadTranspose3x3fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoadTransposedEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoadTransposefEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoaddEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixLoadfEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMode</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="179"/>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMult3x2fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMult3x3fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMultTranspose3x3fNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>matrixMode</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMultTransposedEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMultTransposefEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMultdEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixMultfEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixOrthoEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLdouble</ptype> <name>left</name></param>
+            <param><ptype>GLdouble</ptype> <name>right</name></param>
+            <param><ptype>GLdouble</ptype> <name>bottom</name></param>
+            <param><ptype>GLdouble</ptype> <name>top</name></param>
+            <param><ptype>GLdouble</ptype> <name>zNear</name></param>
+            <param><ptype>GLdouble</ptype> <name>zFar</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixPopEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixPushEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixRotatedEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLdouble</ptype> <name>angle</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixRotatefEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLfloat</ptype> <name>angle</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixScaledEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixScalefEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixTranslatedEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMatrixTranslatefEXT</name></proto>
+            <param group="MatrixMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaxShaderCompilerThreadsKHR</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMaxShaderCompilerThreadsARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>count</name></param>
+            <alias name="glMaxShaderCompilerThreadsKHR"/>
+        </command>
+        <command>
+            <proto>void <name>glMemoryBarrier</name></proto>
+            <param group="MemoryBarrierMask"><ptype>GLbitfield</ptype> <name>barriers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMemoryBarrierByRegion</name></proto>
+            <param group="MemoryBarrierMask"><ptype>GLbitfield</ptype> <name>barriers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMemoryBarrierEXT</name></proto>
+            <param group="MemoryBarrierMask"><ptype>GLbitfield</ptype> <name>barriers</name></param>
+            <alias name="glMemoryBarrier"/>
+        </command>
+        <command>
+            <proto>void <name>glMemoryObjectParameterivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memoryObject</name></param>
+            <param group="MemoryObjectParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMinSampleShading</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMinSampleShadingARB</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>value</name></param>
+            <alias name="glMinSampleShading"/>
+        </command>
+        <command>
+            <proto>void <name>glMinSampleShadingOES</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>value</name></param>
+            <alias name="glMinSampleShading"/>
+        </command>
+        <command>
+            <proto>void <name>glMinmax</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>sink</name></param>
+            <glx type="render" opcode="4111"/>
+        </command>
+        <command>
+            <proto>void <name>glMinmaxEXT</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>sink</name></param>
+            <alias name="glMinmax"/>
+            <glx type="render" opcode="4111"/>
+        </command>
+        <command>
+            <proto>void <name>glMultMatrixd</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+            <glx type="render" opcode="181"/>
+        </command>
+        <command>
+            <proto>void <name>glMultMatrixf</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+            <glx type="render" opcode="180"/>
+        </command>
+        <command>
+            <proto>void <name>glMultMatrixx</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultMatrixxOES</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultTransposeMatrixd</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultTransposeMatrixdARB</name></proto>
+            <param len="16">const <ptype>GLdouble</ptype> *<name>m</name></param>
+            <alias name="glMultTransposeMatrixd"/>
+        </command>
+        <command>
+            <proto>void <name>glMultTransposeMatrixf</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultTransposeMatrixfARB</name></proto>
+            <param len="16">const <ptype>GLfloat</ptype> *<name>m</name></param>
+            <alias name="glMultTransposeMatrixf"/>
+        </command>
+        <command>
+            <proto>void <name>glMultTransposeMatrixxOES</name></proto>
+            <param len="16">const <ptype>GLfixed</ptype> *<name>m</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArrays</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>first</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLint</ptype> *<name>first</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glMultiDrawArrays"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirect</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(drawcount,stride)">const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectAMD</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawArraysIndirect"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectBindlessCountNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxDrawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLint</ptype> <name>vertexBufferCount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectBindlessNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLint</ptype> <name>vertexBufferCount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectCount</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLintptr</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxdrawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectCountARB</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLintptr</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxdrawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawArraysIndirectCount"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawArraysIndirectEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(drawcount,stride)">const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawArraysIndirect"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementArrayAPPLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="primcount">const <ptype>GLint</ptype> *<name>first</name></param>
+            <param len="primcount">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElements</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(drawcount)">const void *const*<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsBaseVertex</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(drawcount)">const void *const*<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLint</ptype> *<name>basevertex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsBaseVertexEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(drawcount)">const void *const*<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <param len="COMPSIZE(drawcount)">const <ptype>GLint</ptype> *<name>basevertex</name></param>
+            <alias name="glMultiDrawElementsBaseVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(primcount)">const void *const*<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <alias name="glMultiDrawElements"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirect</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(drawcount,stride)">const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectAMD</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawElementsIndirect"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectBindlessCountNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxDrawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLint</ptype> <name>vertexBufferCount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectBindlessNV</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawCount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLint</ptype> <name>vertexBufferCount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectCount</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLintptr</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxdrawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectCountARB</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>indirect</name></param>
+            <param><ptype>GLintptr</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxdrawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawElementsIndirectCount"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawElementsIndirectEXT</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(drawcount,stride)">const void *<name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <alias name="glMultiDrawElementsIndirect"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawMeshTasksIndirectNV</name></proto>
+            <param><ptype>GLintptr</ptype> <name>indirect</name></param>
+            <param><ptype>GLsizei</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawMeshTasksIndirectCountNV</name></proto>
+            <param><ptype>GLintptr</ptype> <name>indirect</name></param>
+            <param><ptype>GLintptr</ptype> <name>drawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>maxdrawcount</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiDrawRangeElementArrayAPPLE</name></proto>
+            <param group="PrimitiveType"><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLuint</ptype> <name>end</name></param>
+            <param len="primcount">const <ptype>GLint</ptype> *<name>first</name></param>
+            <param len="primcount">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiModeDrawArraysIBM</name></proto>
+            <param group="PrimitiveType" len="COMPSIZE(primcount)">const <ptype>GLenum</ptype> *<name>mode</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLint</ptype> *<name>first</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <param><ptype>GLint</ptype> <name>modestride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiModeDrawElementsIBM</name></proto>
+            <param group="PrimitiveType" len="COMPSIZE(primcount)">const <ptype>GLenum</ptype> *<name>mode</name></param>
+            <param len="COMPSIZE(primcount)">const <ptype>GLsizei</ptype> *<name>count</name></param>
+            <param group="DrawElementsType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(primcount)">const void *const*<name>indices</name></param>
+            <param><ptype>GLsizei</ptype> <name>primcount</name></param>
+            <param><ptype>GLint</ptype> <name>modestride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexBufferEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1bOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1bvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="1">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1d</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1dv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1dARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1dv"/>
+            <alias name="glMultiTexCoord1d"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1dv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="198"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1dvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord1dv"/>
+            <glx type="render" opcode="198"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1f</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1fARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1fv"/>
+            <alias name="glMultiTexCoord1f"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1fv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="199"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1fvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord1fv"/>
+            <glx type="render" opcode="199"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1hNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1hvNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV" len="1">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4250"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1i</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1iARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1iv"/>
+            <alias name="glMultiTexCoord1i"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1iv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="1">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="200"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1ivARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="1">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord1iv"/>
+            <glx type="render" opcode="200"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1s</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1sv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1sARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <vecequiv name="glMultiTexCoord1sv"/>
+            <alias name="glMultiTexCoord1s"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1sv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="201"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1svARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord1sv"/>
+            <glx type="render" opcode="201"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1xOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord1xvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="1">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2bOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2bvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="2">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2d</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2dARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2dv"/>
+            <alias name="glMultiTexCoord2d"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2dv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="202"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2dvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord2dv"/>
+            <glx type="render" opcode="202"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2f</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2fARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2fv"/>
+            <alias name="glMultiTexCoord2f"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2fv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="203"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2fvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord2fv"/>
+            <glx type="render" opcode="203"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2hNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2hvNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV" len="2">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4251"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2i</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2iARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2iv"/>
+            <alias name="glMultiTexCoord2i"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2iv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="204"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2ivARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord2iv"/>
+            <glx type="render" opcode="204"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2s</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2sARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <vecequiv name="glMultiTexCoord2sv"/>
+            <alias name="glMultiTexCoord2s"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2sv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="205"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2svARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord2sv"/>
+            <glx type="render" opcode="205"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2xOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord2xvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3bOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+            <param><ptype>GLbyte</ptype> <name>r</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3bvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3d</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3dARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3dv"/>
+            <alias name="glMultiTexCoord3d"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3dv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="206"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3dvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord3dv"/>
+            <glx type="render" opcode="206"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3f</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3fARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3fv"/>
+            <alias name="glMultiTexCoord3f"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3fv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="207"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3fvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord3fv"/>
+            <glx type="render" opcode="207"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3hNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3hvNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4252"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3i</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3iARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3iv"/>
+            <alias name="glMultiTexCoord3i"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3iv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="208"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3ivARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord3iv"/>
+            <glx type="render" opcode="208"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3s</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3sARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <vecequiv name="glMultiTexCoord3sv"/>
+            <alias name="glMultiTexCoord3s"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3sv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="209"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3svARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord3sv"/>
+            <glx type="render" opcode="209"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3xOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord3xvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4bOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+            <param><ptype>GLbyte</ptype> <name>r</name></param>
+            <param><ptype>GLbyte</ptype> <name>q</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4bvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4d</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4dARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4dv"/>
+            <alias name="glMultiTexCoord4d"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4dv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="210"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4dvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord4dv"/>
+            <glx type="render" opcode="210"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4f</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4fARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4fv"/>
+            <alias name="glMultiTexCoord4f"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4fv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="211"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4fvARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord4fv"/>
+            <glx type="render" opcode="211"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4hNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>r</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4hvNV</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="Half16NV" len="4">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4253"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4i</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4iARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4iv"/>
+            <alias name="glMultiTexCoord4i"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4iv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="212"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4ivARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord4iv"/>
+            <glx type="render" opcode="212"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4s</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4sARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>q</name></param>
+            <vecequiv name="glMultiTexCoord4sv"/>
+            <alias name="glMultiTexCoord4s"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4sv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="213"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4svARB</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glMultiTexCoord4sv"/>
+            <glx type="render" opcode="213"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4x</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>q</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4xOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>q</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoord4xvOES</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP1ui</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP1uiv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP2ui</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP2uiv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP3ui</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP3uiv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP4ui</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordP4uiv</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texture</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexCoordPointerEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexEnvfEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexEnvfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexEnvfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexEnviEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexEnvivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexEnvivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGendEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLdouble</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexGendvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGendvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGenfEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexGenfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGenfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGeniEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexGenivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexGenivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexImage3DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameterIivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameterIuivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameterfEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexParameterfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameterfvEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameteriEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <vecequiv name="glMultiTexParameterivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexParameterivEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexRenderbufferEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexSubImage1DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexSubImage2DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMultiTexSubImage3DEXT</name></proto>
+            <param group="TextureUnit"><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastBarrierNV</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glMulticastBlitFramebufferNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcGpu</name></param>
+            <param><ptype>GLuint</ptype> <name>dstGpu</name></param>
+            <param><ptype>GLint</ptype> <name>srcX0</name></param>
+            <param><ptype>GLint</ptype> <name>srcY0</name></param>
+            <param><ptype>GLint</ptype> <name>srcX1</name></param>
+            <param><ptype>GLint</ptype> <name>srcY1</name></param>
+            <param><ptype>GLint</ptype> <name>dstX0</name></param>
+            <param><ptype>GLint</ptype> <name>dstY0</name></param>
+            <param><ptype>GLint</ptype> <name>dstX1</name></param>
+            <param><ptype>GLint</ptype> <name>dstY1</name></param>
+            <param group="ClearBufferMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <param><ptype>GLenum</ptype> <name>filter</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastBufferSubDataNV</name></proto>
+            <param><ptype>GLbitfield</ptype> <name>gpuMask</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param>const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastCopyBufferSubDataNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>readGpu</name></param>
+            <param><ptype>GLbitfield</ptype> <name>writeGpuMask</name></param>
+            <param><ptype>GLuint</ptype> <name>readBuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>writeBuffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>readOffset</name></param>
+            <param><ptype>GLintptr</ptype> <name>writeOffset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastCopyImageSubDataNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>srcGpu</name></param>
+            <param><ptype>GLbitfield</ptype> <name>dstGpuMask</name></param>
+            <param><ptype>GLuint</ptype> <name>srcName</name></param>
+            <param><ptype>GLenum</ptype> <name>srcTarget</name></param>
+            <param><ptype>GLint</ptype> <name>srcLevel</name></param>
+            <param><ptype>GLint</ptype> <name>srcX</name></param>
+            <param><ptype>GLint</ptype> <name>srcY</name></param>
+            <param><ptype>GLint</ptype> <name>srcZ</name></param>
+            <param><ptype>GLuint</ptype> <name>dstName</name></param>
+            <param><ptype>GLenum</ptype> <name>dstTarget</name></param>
+            <param><ptype>GLint</ptype> <name>dstLevel</name></param>
+            <param><ptype>GLint</ptype> <name>dstX</name></param>
+            <param><ptype>GLint</ptype> <name>dstY</name></param>
+            <param><ptype>GLint</ptype> <name>dstZ</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcWidth</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcHeight</name></param>
+            <param><ptype>GLsizei</ptype> <name>srcDepth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastFramebufferSampleLocationsfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>gpu</name></param>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastGetQueryObjecti64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>gpu</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastGetQueryObjectivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>gpu</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastGetQueryObjectui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>gpu</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastGetQueryObjectuivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>gpu</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glMulticastWaitSyncNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>signalGpu</name></param>
+            <param><ptype>GLbitfield</ptype> <name>waitGpuMask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferAttachMemoryNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param>const void *<name>data</name></param>
+            <param group="VertexBufferObjectUsage"><ptype>GLenum</ptype> <name>usage</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="COMPSIZE(size)">const void *<name>data</name></param>
+            <param group="VertexBufferObjectUsage"><ptype>GLenum</ptype> <name>usage</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferPageCommitmentARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferPageCommitmentEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferStorage</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferStorageExternalEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLeglClientBufferEXT</ptype> <name>clientBuffer</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferStorageEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="size">const void *<name>data</name></param>
+            <param group="BufferStorageMask"><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <alias name="glNamedBufferStorage"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferStorageMemEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferSubData</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="COMPSIZE(size)">const void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedBufferSubDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <param len="COMPSIZE(size)">const void *<name>data</name></param>
+            <alias name="glNamedBufferSubData"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedCopyBufferSubDataEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>readBuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>writeBuffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>readOffset</name></param>
+            <param><ptype>GLintptr</ptype> <name>writeOffset</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferDrawBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="ColorBuffer"><ptype>GLenum</ptype> <name>buf</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferDrawBuffers</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="ColorBuffer">const <ptype>GLenum</ptype> *<name>bufs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferParameteri</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferParameteriEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferReadBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="ColorBuffer"><ptype>GLenum</ptype> <name>src</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferRenderbuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>renderbuffertarget</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferRenderbufferEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>renderbuffertarget</name></param>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferSampleLocationsfvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferSampleLocationsfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>start</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTexture</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferSamplePositionsfvAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param><ptype>GLuint</ptype> <name>numsamples</name></param>
+            <param><ptype>GLuint</ptype> <name>pixelindex</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTexture1DEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTexture2DEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTexture3DEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>textarget</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTextureEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTextureFaceEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>face</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTextureLayer</name></proto>
+            <param><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>layer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedFramebufferTextureLayerEXT</name></proto>
+            <param group="Framebuffer"><ptype>GLuint</ptype> <name>framebuffer</name></param>
+            <param group="FramebufferAttachment"><ptype>GLenum</ptype> <name>attachment</name></param>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>layer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameter4dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glNamedProgramLocalParameter4dvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameter4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameter4fEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glNamedProgramLocalParameter4fvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameter4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameterI4iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glNamedProgramLocalParameterI4ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameterI4ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameterI4uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <param><ptype>GLuint</ptype> <name>w</name></param>
+            <vecequiv name="glNamedProgramLocalParameterI4uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameterI4uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParameters4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParametersI4ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramLocalParametersI4uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedProgramStringEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="len">const void *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorage</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorageEXT</name></proto>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorageMultisample</name></proto>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorageMultisampleAdvancedAMD</name></proto>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLsizei</ptype> <name>storageSamples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorageMultisampleCoverageEXT</name></proto>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedRenderbufferStorageMultisampleEXT</name></proto>
+            <param group="Renderbuffer"><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNamedStringARB</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>namelen</name></param>
+            <param len="namelen">const <ptype>GLchar</ptype> *<name>name</name></param>
+            <param><ptype>GLint</ptype> <name>stringlen</name></param>
+            <param len="stringlen">const <ptype>GLchar</ptype> *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNewList</name></proto>
+            <param group="List"><ptype>GLuint</ptype> <name>list</name></param>
+            <param group="ListMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="single" opcode="101"/>
+        </command>
+        <command>
+            <proto><ptype>GLuint</ptype> <name>glNewObjectBufferATI</name></proto>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param len="size">const void *<name>pointer</name></param>
+            <param group="ArrayObjectUsageATI"><ptype>GLenum</ptype> <name>usage</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormal3b</name></proto>
+            <param><ptype>GLbyte</ptype> <name>nx</name></param>
+            <param><ptype>GLbyte</ptype> <name>ny</name></param>
+            <param><ptype>GLbyte</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3bv"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3bv</name></proto>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="28"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>nx</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>ny</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3dv</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="29"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormal3fVertex3fvSUN</name></proto>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormal3fv</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="30"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>nx</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>ny</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3hvNV</name></proto>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4243"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3i</name></proto>
+            <param><ptype>GLint</ptype> <name>nx</name></param>
+            <param><ptype>GLint</ptype> <name>ny</name></param>
+            <param><ptype>GLint</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3iv</name></proto>
+            <param len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="31"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3s</name></proto>
+            <param><ptype>GLshort</ptype> <name>nx</name></param>
+            <param><ptype>GLshort</ptype> <name>ny</name></param>
+            <param><ptype>GLshort</ptype> <name>nz</name></param>
+            <vecequiv name="glNormal3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3sv</name></proto>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="32"/>
+        </command>
+        <command>
+            <proto>void <name>glNormal3x</name></proto>
+            <param><ptype>GLfixed</ptype> <name>nx</name></param>
+            <param><ptype>GLfixed</ptype> <name>ny</name></param>
+            <param><ptype>GLfixed</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormal3xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>nx</name></param>
+            <param><ptype>GLfixed</ptype> <name>ny</name></param>
+            <param><ptype>GLfixed</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormal3xvOES</name></proto>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalFormatNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalP3ui</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalP3uiv</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalPointer</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalPointerEXT</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(type,stride,count)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalPointerListIBM</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalPointervINTEL</name></proto>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="4">const void **<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3bATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLbyte</ptype> <name>nx</name></param>
+            <param><ptype>GLbyte</ptype> <name>ny</name></param>
+            <param><ptype>GLbyte</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3bvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3dATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLdouble</ptype> <name>nx</name></param>
+            <param><ptype>GLdouble</ptype> <name>ny</name></param>
+            <param><ptype>GLdouble</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3dvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3fATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3fvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3iATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLint</ptype> <name>nx</name></param>
+            <param><ptype>GLint</ptype> <name>ny</name></param>
+            <param><ptype>GLint</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3ivATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3sATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLshort</ptype> <name>nx</name></param>
+            <param><ptype>GLshort</ptype> <name>ny</name></param>
+            <param><ptype>GLshort</ptype> <name>nz</name></param>
+        </command>
+        <command>
+            <proto>void <name>glNormalStream3svATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glObjectLabel</name></proto>
+            <param group="ObjectIdentifier"><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="COMPSIZE(label,length)">const <ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glObjectLabelKHR</name></proto>
+            <param group="ObjectIdentifier"><ptype>GLenum</ptype> <name>identifier</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>label</name></param>
+            <alias name="glObjectLabel"/>
+        </command>
+        <command>
+            <proto>void <name>glObjectPtrLabel</name></proto>
+            <param>const void *<name>ptr</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="COMPSIZE(label,length)">const <ptype>GLchar</ptype> *<name>label</name></param>
+        </command>
+        <command>
+            <proto>void <name>glObjectPtrLabelKHR</name></proto>
+            <param>const void *<name>ptr</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>label</name></param>
+            <alias name="glObjectPtrLabel"/>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glObjectPurgeableAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>objectType</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLenum</ptype> <name>option</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glObjectUnpurgeableAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>objectType</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+            <param><ptype>GLenum</ptype> <name>option</name></param>
+        </command>
+        <command>
+            <proto>void <name>glOrtho</name></proto>
+            <param><ptype>GLdouble</ptype> <name>left</name></param>
+            <param><ptype>GLdouble</ptype> <name>right</name></param>
+            <param><ptype>GLdouble</ptype> <name>bottom</name></param>
+            <param><ptype>GLdouble</ptype> <name>top</name></param>
+            <param><ptype>GLdouble</ptype> <name>zNear</name></param>
+            <param><ptype>GLdouble</ptype> <name>zFar</name></param>
+            <glx type="render" opcode="182"/>
+        </command>
+        <command>
+            <proto>void <name>glOrthof</name></proto>
+            <param><ptype>GLfloat</ptype> <name>l</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glOrthofOES</name></proto>
+            <param><ptype>GLfloat</ptype> <name>l</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>n</name></param>
+            <param><ptype>GLfloat</ptype> <name>f</name></param>
+            <glx type="render" opcode="4311"/>
+        </command>
+        <command>
+            <proto>void <name>glOrthox</name></proto>
+            <param><ptype>GLfixed</ptype> <name>l</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>b</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glOrthoxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>l</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>b</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>n</name></param>
+            <param><ptype>GLfixed</ptype> <name>f</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPNTrianglesfATI</name></proto>
+            <param group="PNTrianglesPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPNTrianglesiATI</name></proto>
+            <param group="PNTrianglesPNameATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPassTexCoordATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>coord</name></param>
+            <param group="SwizzleOpATI"><ptype>GLenum</ptype> <name>swizzle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPassThrough</name></proto>
+            <param group="FeedbackElement"><ptype>GLfloat</ptype> <name>token</name></param>
+            <glx type="render" opcode="123"/>
+        </command>
+        <command>
+            <proto>void <name>glPassThroughxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>token</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPatchParameterfv</name></proto>
+            <param group="PatchParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPatchParameteri</name></proto>
+            <param group="PatchParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPatchParameteriEXT</name></proto>
+            <param group="PatchParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+            <alias name="glPatchParameteri"/>
+        </command>
+        <command>
+            <proto>void <name>glPatchParameteriOES</name></proto>
+            <param group="PatchParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+            <alias name="glPatchParameteri"/>
+        </command>
+        <command>
+            <proto>void <name>glPathColorGenNV</name></proto>
+            <param group="PathColor"><ptype>GLenum</ptype> <name>color</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>genMode</name></param>
+            <param group="PathColorFormat"><ptype>GLenum</ptype> <name>colorFormat</name></param>
+            <param len="COMPSIZE(genMode,colorFormat)">const <ptype>GLfloat</ptype> *<name>coeffs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathCommandsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCommands</name></param>
+            <param group="PathCommand" len="numCommands">const <ptype>GLubyte</ptype> *<name>commands</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCoords</name></param>
+            <param group="PathCoordType"><ptype>GLenum</ptype> <name>coordType</name></param>
+            <param len="COMPSIZE(numCoords,coordType)">const void *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathCoordsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCoords</name></param>
+            <param group="PathCoordType"><ptype>GLenum</ptype> <name>coordType</name></param>
+            <param len="COMPSIZE(numCoords,coordType)">const void *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathCoverDepthFuncNV</name></proto>
+            <param group="DepthFunction"><ptype>GLenum</ptype> <name>func</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathDashArrayNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>dashCount</name></param>
+            <param len="dashCount">const <ptype>GLfloat</ptype> *<name>dashArray</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathFogGenNV</name></proto>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>genMode</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glPathGlyphIndexArrayNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>firstPathName</name></param>
+            <param><ptype>GLenum</ptype> <name>fontTarget</name></param>
+            <param>const void *<name>fontName</name></param>
+            <param group="PathFontStyle"><ptype>GLbitfield</ptype> <name>fontStyle</name></param>
+            <param><ptype>GLuint</ptype> <name>firstGlyphIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>numGlyphs</name></param>
+            <param><ptype>GLuint</ptype> <name>pathParameterTemplate</name></param>
+            <param><ptype>GLfloat</ptype> <name>emScale</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glPathGlyphIndexRangeNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>fontTarget</name></param>
+            <param>const void *<name>fontName</name></param>
+            <param group="PathFontStyle"><ptype>GLbitfield</ptype> <name>fontStyle</name></param>
+            <param><ptype>GLuint</ptype> <name>pathParameterTemplate</name></param>
+            <param><ptype>GLfloat</ptype> <name>emScale</name></param>
+            <param><ptype>GLuint</ptype> <name>baseAndCount</name>[2]</param>
+        </command>
+        <command>
+            <proto>void <name>glPathGlyphRangeNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>firstPathName</name></param>
+            <param group="PathFontTarget"><ptype>GLenum</ptype> <name>fontTarget</name></param>
+            <param len="COMPSIZE(fontTarget,fontName)">const void *<name>fontName</name></param>
+            <param group="PathFontStyle"><ptype>GLbitfield</ptype> <name>fontStyle</name></param>
+            <param><ptype>GLuint</ptype> <name>firstGlyph</name></param>
+            <param><ptype>GLsizei</ptype> <name>numGlyphs</name></param>
+            <param group="PathHandleMissingGlyphs"><ptype>GLenum</ptype> <name>handleMissingGlyphs</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathParameterTemplate</name></param>
+            <param><ptype>GLfloat</ptype> <name>emScale</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathGlyphsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>firstPathName</name></param>
+            <param group="PathFontTarget"><ptype>GLenum</ptype> <name>fontTarget</name></param>
+            <param len="COMPSIZE(fontTarget,fontName)">const void *<name>fontName</name></param>
+            <param group="PathFontStyle"><ptype>GLbitfield</ptype> <name>fontStyle</name></param>
+            <param><ptype>GLsizei</ptype> <name>numGlyphs</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(numGlyphs,type,charcodes)">const void *<name>charcodes</name></param>
+            <param group="PathHandleMissingGlyphs"><ptype>GLenum</ptype> <name>handleMissingGlyphs</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathParameterTemplate</name></param>
+            <param><ptype>GLfloat</ptype> <name>emScale</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glPathMemoryGlyphIndexArrayNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>firstPathName</name></param>
+            <param><ptype>GLenum</ptype> <name>fontTarget</name></param>
+            <param><ptype>GLsizeiptr</ptype> <name>fontSize</name></param>
+            <param>const void *<name>fontData</name></param>
+            <param><ptype>GLsizei</ptype> <name>faceIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>firstGlyphIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>numGlyphs</name></param>
+            <param><ptype>GLuint</ptype> <name>pathParameterTemplate</name></param>
+            <param><ptype>GLfloat</ptype> <name>emScale</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathParameterfNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathParameterfvNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathParameteriNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathParameterivNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathStencilDepthOffsetNV</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+            <param><ptype>GLfloat</ptype> <name>units</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathStencilFuncNV</name></proto>
+            <param group="StencilFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param group="ClampedStencilValue"><ptype>GLint</ptype> <name>ref</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathStringNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathStringFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">const void *<name>pathString</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathSubCommandsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>commandStart</name></param>
+            <param><ptype>GLsizei</ptype> <name>commandsToDelete</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCommands</name></param>
+            <param group="PathCommand" len="numCommands">const <ptype>GLubyte</ptype> *<name>commands</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCoords</name></param>
+            <param group="PathCoordType"><ptype>GLenum</ptype> <name>coordType</name></param>
+            <param len="COMPSIZE(numCoords,coordType)">const void *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathSubCoordsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>coordStart</name></param>
+            <param><ptype>GLsizei</ptype> <name>numCoords</name></param>
+            <param group="PathCoordType"><ptype>GLenum</ptype> <name>coordType</name></param>
+            <param len="COMPSIZE(numCoords,coordType)">const void *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPathTexGenNV</name></proto>
+            <param group="PathColor"><ptype>GLenum</ptype> <name>texCoordSet</name></param>
+            <param group="PathGenMode"><ptype>GLenum</ptype> <name>genMode</name></param>
+            <param><ptype>GLint</ptype> <name>components</name></param>
+            <param len="COMPSIZE(genMode,components)">const <ptype>GLfloat</ptype> *<name>coeffs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPauseTransformFeedback</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glPauseTransformFeedbackNV</name></proto>
+            <alias name="glPauseTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelDataRangeNV</name></proto>
+            <param group="PixelDataRangeTargetNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelMapfv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param group="CheckedInt32"><ptype>GLsizei</ptype> <name>mapsize</name></param>
+            <param len="mapsize">const <ptype>GLfloat</ptype> *<name>values</name></param>
+            <glx type="render" opcode="168"/>
+            <glx type="render" opcode="323" name="glPixelMapfvPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelMapuiv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param group="CheckedInt32"><ptype>GLsizei</ptype> <name>mapsize</name></param>
+            <param len="mapsize">const <ptype>GLuint</ptype> *<name>values</name></param>
+            <glx type="render" opcode="169"/>
+            <glx type="render" opcode="324" name="glPixelMapuivPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelMapusv</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param group="CheckedInt32"><ptype>GLsizei</ptype> <name>mapsize</name></param>
+            <param len="mapsize">const <ptype>GLushort</ptype> *<name>values</name></param>
+            <glx type="render" opcode="170"/>
+            <glx type="render" opcode="325" name="glPixelMapusvPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelMapx</name></proto>
+            <param group="PixelMap"><ptype>GLenum</ptype> <name>map</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLfixed</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelStoref</name></proto>
+            <param group="PixelStoreParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="single" opcode="109"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelStorei</name></proto>
+            <param group="PixelStoreParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="single" opcode="110"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelStorex</name></proto>
+            <param group="PixelStoreParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTexGenParameterfSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTexGenParameterfvSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTexGenParameteriSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTexGenParameterivSGIS</name></proto>
+            <param group="PixelTexGenParameterNameSGIS"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTexGenSGIX</name></proto>
+            <param group="PixelTexGenModeSGIX"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="2059"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransferf</name></proto>
+            <param group="PixelTransferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="166"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransferi</name></proto>
+            <param group="PixelTransferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="167"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransferxOES</name></proto>
+            <param group="PixelTransferParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransformParameterfEXT</name></proto>
+            <param group="PixelTransformTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelTransformPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="16385"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransformParameterfvEXT</name></proto>
+            <param group="PixelTransformTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelTransformPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransformParameteriEXT</name></proto>
+            <param group="PixelTransformTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelTransformPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="16386"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelTransformParameterivEXT</name></proto>
+            <param group="PixelTransformTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="PixelTransformPNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="1">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPixelZoom</name></proto>
+            <param><ptype>GLfloat</ptype> <name>xfactor</name></param>
+            <param><ptype>GLfloat</ptype> <name>yfactor</name></param>
+            <glx type="render" opcode="165"/>
+        </command>
+        <command>
+            <proto>void <name>glPixelZoomxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>xfactor</name></param>
+            <param><ptype>GLfixed</ptype> <name>yfactor</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glPointAlongPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLsizei</ptype> <name>startSegment</name></param>
+            <param><ptype>GLsizei</ptype> <name>numSegments</name></param>
+            <param><ptype>GLfloat</ptype> <name>distance</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>x</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>y</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>tangentX</name></param>
+            <param len="1"><ptype>GLfloat</ptype> *<name>tangentY</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterf</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="2065"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfARB</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <alias name="glPointParameterf"/>
+            <glx type="render" opcode="2065"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfEXT</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <alias name="glPointParameterf"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfSGIS</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <alias name="glPointParameterf"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfv</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2066"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfvARB</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glPointParameterfv"/>
+            <glx type="render" opcode="2066"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfvEXT</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glPointParameterfv"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterfvSGIS</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <alias name="glPointParameterfv"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameteri</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="4221"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameteriNV</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+            <alias name="glPointParameteri"/>
+            <glx type="render" opcode="4221"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameteriv</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4222"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterivNV</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glPointParameteriv"/>
+            <glx type="render" opcode="4222"/>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterx</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterxOES</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterxv</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointParameterxvOES</name></proto>
+            <param group="PointParameterNameARB"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointSize</name></proto>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>size</name></param>
+            <glx type="render" opcode="100"/>
+        </command>
+        <command>
+            <proto>void <name>glPointSizePointerOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointSizex</name></proto>
+            <param><ptype>GLfixed</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPointSizexOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glPollAsyncSGIX</name></proto>
+            <param len="1"><ptype>GLuint</ptype> *<name>markerp</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glPollInstrumentsSGIX</name></proto>
+            <param len="1"><ptype>GLint</ptype> *<name>marker_p</name></param>
+            <glx type="vendor" opcode="4104"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonMode</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="PolygonMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="101"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonModeNV</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="PolygonMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glPolygonMode"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffset</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+            <param><ptype>GLfloat</ptype> <name>units</name></param>
+            <glx type="render" opcode="192"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffsetClamp</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+            <param><ptype>GLfloat</ptype> <name>units</name></param>
+            <param><ptype>GLfloat</ptype> <name>clamp</name></param>
+            <glx type="render" opcode="4225"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffsetClampEXT</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+            <param><ptype>GLfloat</ptype> <name>units</name></param>
+            <param><ptype>GLfloat</ptype> <name>clamp</name></param>
+            <alias name="glPolygonOffsetClamp"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffsetEXT</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+            <param><ptype>GLfloat</ptype> <name>bias</name></param>
+            <glx type="render" opcode="4098"/>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffsetx</name></proto>
+            <param><ptype>GLfixed</ptype> <name>factor</name></param>
+            <param><ptype>GLfixed</ptype> <name>units</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPolygonOffsetxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>factor</name></param>
+            <param><ptype>GLfixed</ptype> <name>units</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPolygonStipple</name></proto>
+            <param len="COMPSIZE()">const <ptype>GLubyte</ptype> *<name>mask</name></param>
+            <glx type="render" opcode="102"/>
+            <glx type="render" opcode="326" name="glPolygonStipplePBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glPopAttrib</name></proto>
+            <glx type="render" opcode="141"/>
+        </command>
+        <command>
+            <proto>void <name>glPopClientAttrib</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glPopDebugGroup</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glPopDebugGroupKHR</name></proto>
+            <alias name="glPopDebugGroup"/>
+        </command>
+        <command>
+            <proto>void <name>glPopGroupMarkerEXT</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glPopMatrix</name></proto>
+            <glx type="render" opcode="183"/>
+        </command>
+        <command>
+            <proto>void <name>glPopName</name></proto>
+            <glx type="render" opcode="124"/>
+        </command>
+        <command>
+            <proto>void <name>glPresentFrameDualFillNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>minPresentTime</name></param>
+            <param><ptype>GLuint</ptype> <name>beginPresentTimeId</name></param>
+            <param><ptype>GLuint</ptype> <name>presentDurationId</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLenum</ptype> <name>target0</name></param>
+            <param><ptype>GLuint</ptype> <name>fill0</name></param>
+            <param><ptype>GLenum</ptype> <name>target1</name></param>
+            <param><ptype>GLuint</ptype> <name>fill1</name></param>
+            <param><ptype>GLenum</ptype> <name>target2</name></param>
+            <param><ptype>GLuint</ptype> <name>fill2</name></param>
+            <param><ptype>GLenum</ptype> <name>target3</name></param>
+            <param><ptype>GLuint</ptype> <name>fill3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPresentFrameKeyedNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_slot</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>minPresentTime</name></param>
+            <param><ptype>GLuint</ptype> <name>beginPresentTimeId</name></param>
+            <param><ptype>GLuint</ptype> <name>presentDurationId</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLenum</ptype> <name>target0</name></param>
+            <param><ptype>GLuint</ptype> <name>fill0</name></param>
+            <param><ptype>GLuint</ptype> <name>key0</name></param>
+            <param><ptype>GLenum</ptype> <name>target1</name></param>
+            <param><ptype>GLuint</ptype> <name>fill1</name></param>
+            <param><ptype>GLuint</ptype> <name>key1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveBoundingBox</name></proto>
+            <param><ptype>GLfloat</ptype> <name>minX</name></param>
+            <param><ptype>GLfloat</ptype> <name>minY</name></param>
+            <param><ptype>GLfloat</ptype> <name>minZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>minW</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxX</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxY</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxW</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveBoundingBoxARB</name></proto>
+            <param><ptype>GLfloat</ptype> <name>minX</name></param>
+            <param><ptype>GLfloat</ptype> <name>minY</name></param>
+            <param><ptype>GLfloat</ptype> <name>minZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>minW</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxX</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxY</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxW</name></param>
+            <alias name="glPrimitiveBoundingBox"/>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveBoundingBoxEXT</name></proto>
+            <param><ptype>GLfloat</ptype> <name>minX</name></param>
+            <param><ptype>GLfloat</ptype> <name>minY</name></param>
+            <param><ptype>GLfloat</ptype> <name>minZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>minW</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxX</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxY</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxW</name></param>
+            <alias name="glPrimitiveBoundingBox"/>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveBoundingBoxOES</name></proto>
+            <param><ptype>GLfloat</ptype> <name>minX</name></param>
+            <param><ptype>GLfloat</ptype> <name>minY</name></param>
+            <param><ptype>GLfloat</ptype> <name>minZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>minW</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxX</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxY</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxZ</name></param>
+            <param><ptype>GLfloat</ptype> <name>maxW</name></param>
+            <alias name="glPrimitiveBoundingBox"/>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveRestartIndex</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveRestartIndexNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <glx type="render" opcode="365"/>
+        </command>
+        <command>
+            <proto>void <name>glPrimitiveRestartNV</name></proto>
+            <glx type="render" opcode="364"/>
+        </command>
+        <command>
+            <proto>void <name>glPrioritizeTextures</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param len="n">const <ptype>GLfloat</ptype> *<name>priorities</name></param>
+            <glx type="render" opcode="4118"/>
+        </command>
+        <command>
+            <proto>void <name>glPrioritizeTexturesEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Texture" len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="ClampedFloat32" len="n">const <ptype>GLclampf</ptype> *<name>priorities</name></param>
+            <alias name="glPrioritizeTextures"/>
+            <glx type="render" opcode="4118"/>
+        </command>
+        <command>
+            <proto>void <name>glPrioritizeTexturesxOES</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="ClampedFixed" len="n">const <ptype>GLfixed</ptype> *<name>priorities</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramBinary</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLenum</ptype> <name>binaryFormat</name></param>
+            <param len="length">const void *<name>binary</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramBinaryOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLenum</ptype> <name>binaryFormat</name></param>
+            <param len="length">const void *<name>binary</name></param>
+            <param><ptype>GLint</ptype> <name>length</name></param>
+            <alias name="glProgramBinary"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramBufferParametersIivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>wordIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramBufferParametersIuivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>wordIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramBufferParametersfvNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>wordIndex</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameter4dARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glProgramEnvParameter4dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameter4dvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameter4fARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glProgramEnvParameter4fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameter4fvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameterI4iNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glProgramEnvParameterI4ivNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameterI4ivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameterI4uiNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <param><ptype>GLuint</ptype> <name>w</name></param>
+            <vecequiv name="glProgramEnvParameterI4uivNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameterI4uivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParameters4fvEXT</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4281"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParametersI4ivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramEnvParametersI4uivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameter4dARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glProgramLocalParameter4dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameter4dvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameter4fARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glProgramLocalParameter4fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameter4fvARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameterI4iNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glProgramLocalParameterI4ivNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameterI4ivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameterI4uiNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <param><ptype>GLuint</ptype> <name>w</name></param>
+            <vecequiv name="glProgramLocalParameterI4uivNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameterI4uivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParameters4fvEXT</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="4282"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParametersI4ivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramLocalParametersI4uivNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramNamedParameter4dNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glProgramNamedParameter4dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramNamedParameter4dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4219"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramNamedParameter4fNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glProgramNamedParameter4fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramNamedParameter4fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="1">const <ptype>GLubyte</ptype> *<name>name</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4218"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameter4dNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glProgramParameter4dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameter4dvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4185"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameter4fNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glProgramParameter4fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameter4fvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4184"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameteri</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramParameterPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameteriARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramParameterPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+            <alias name="glProgramParameteri"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameteriEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param group="ProgramParameterPName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>value</name></param>
+            <alias name="glProgramParameteri"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameters4dvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4187"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramParameters4fvNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4186"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramPathFragmentInputGenNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLenum</ptype> <name>genMode</name></param>
+            <param><ptype>GLint</ptype> <name>components</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>coeffs</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramStringARB</name></proto>
+            <param group="ProgramTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="ProgramFormatARB"><ptype>GLenum</ptype> <name>format</name></param>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="len">const void *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramSubroutineParametersuivNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1d</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1f</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1fEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <alias name="glProgramUniform1f"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1i</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1i64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1i64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <alias name="glProgramUniform1i"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <alias name="glProgramUniform1ui"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform1uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform1uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2d</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>v0</name></param>
+            <param><ptype>GLdouble</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2f</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2fEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <alias name="glProgramUniform2f"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2i</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2i64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2i64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <alias name="glProgramUniform2i"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <alias name="glProgramUniform2ui"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform2uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform2uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3d</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>v0</name></param>
+            <param><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param><ptype>GLdouble</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3f</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3fEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <alias name="glProgramUniform3f"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3i</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3i64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+            <param><ptype>GLint64</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3i64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <alias name="glProgramUniform3i"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+            <param><ptype>GLuint64</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <alias name="glProgramUniform3ui"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform3uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4d</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>v0</name></param>
+            <param><ptype>GLdouble</ptype> <name>v1</name></param>
+            <param><ptype>GLdouble</ptype> <name>v2</name></param>
+            <param><ptype>GLdouble</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4f</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLfloat</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4fEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLfloat</ptype> <name>v3</name></param>
+            <alias name="glProgramUniform4f"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4i</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4i64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+            <param><ptype>GLint64</ptype> <name>z</name></param>
+            <param><ptype>GLint64</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4i64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>v3</name></param>
+            <alias name="glProgramUniform4i"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <param><ptype>GLuint</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+            <param><ptype>GLuint64</ptype> <name>z</name></param>
+            <param><ptype>GLuint64</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <param><ptype>GLuint</ptype> <name>v3</name></param>
+            <alias name="glProgramUniform4ui"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniform4uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glProgramUniform4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64IMG</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+            <alias name="glProgramUniformHandleui64ARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64vIMG</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>values</name></param>
+            <alias name="glProgramUniformHandleui64vARB"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformHandleui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x3dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x3fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x3fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix2x3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x4fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix2x4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix2x4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x2dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x2fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x2fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix3x2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x4fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix3x4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix3x4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x2dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x2fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x2fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix4x2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x3dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x3fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformMatrix4x3fvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glProgramUniformMatrix4x3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramUniformui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProgramVertexLimitNV</name></proto>
+            <param group="ProgramTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>limit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProvokingVertex</name></proto>
+            <param group="VertexProvokingMode"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glProvokingVertexEXT</name></proto>
+            <param group="VertexProvokingMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <alias name="glProvokingVertex"/>
+        </command>
+        <command>
+            <proto>void <name>glPushAttrib</name></proto>
+            <param group="AttribMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+            <glx type="render" opcode="142"/>
+        </command>
+        <command>
+            <proto>void <name>glPushClientAttrib</name></proto>
+            <param group="ClientAttribMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPushClientAttribDefaultEXT</name></proto>
+            <param group="ClientAttribMask"><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPushDebugGroup</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="COMPSIZE(message,length)">const <ptype>GLchar</ptype> *<name>message</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPushDebugGroupKHR</name></proto>
+            <param group="DebugSource"><ptype>GLenum</ptype> <name>source</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>message</name></param>
+            <alias name="glPushDebugGroup"/>
+        </command>
+        <command>
+            <proto>void <name>glPushGroupMarkerEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>marker</name></param>
+        </command>
+        <command>
+            <proto>void <name>glPushMatrix</name></proto>
+            <glx type="render" opcode="184"/>
+        </command>
+        <command>
+            <proto>void <name>glPushName</name></proto>
+            <param group="SelectName"><ptype>GLuint</ptype> <name>name</name></param>
+            <glx type="render" opcode="125"/>
+        </command>
+        <command>
+            <proto>void <name>glQueryCounter</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryCounterTarget"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto>void <name>glQueryCounterEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="QueryCounterTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glQueryCounter"/>
+        </command>
+        <command>
+            <proto><ptype>GLbitfield</ptype> <name>glQueryMatrixxOES</name></proto>
+            <param len="16"><ptype>GLfixed</ptype> *<name>mantissa</name></param>
+            <param len="16"><ptype>GLint</ptype> *<name>exponent</name></param>
+        </command>
+        <command>
+            <proto>void <name>glQueryObjectParameteruiAMD</name></proto>
+            <param group="QueryTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="OcclusionQueryEventMaskAMD"><ptype>GLuint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glQueryResourceNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>queryType</name></param>
+            <param><ptype>GLint</ptype> <name>tagId</name></param>
+            <param><ptype>GLuint</ptype> <name>bufSize</name></param>
+            <param><ptype>GLint</ptype> *<name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glQueryResourceTagNV</name></proto>
+            <param><ptype>GLint</ptype> <name>tagId</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>tagString</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <vecequiv name="glRasterPos2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2dv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="33"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <vecequiv name="glRasterPos2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2fv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="34"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <vecequiv name="glRasterPos2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2iv</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="35"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <vecequiv name="glRasterPos2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2sv</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="36"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos2xvOES</name></proto>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <vecequiv name="glRasterPos3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3dv</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="37"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <vecequiv name="glRasterPos3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3fv</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="38"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <vecequiv name="glRasterPos3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3iv</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="39"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <vecequiv name="glRasterPos3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3sv</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="40"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos3xvOES</name></proto>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glRasterPos4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4dv</name></proto>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="41"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glRasterPos4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4fv</name></proto>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="42"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glRasterPos4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4iv</name></proto>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="43"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>w</name></param>
+            <vecequiv name="glRasterPos4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4sv</name></proto>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="44"/>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+            <param><ptype>GLfixed</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterPos4xvOES</name></proto>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRasterSamplesEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>samples</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReadBuffer</name></proto>
+            <param group="ReadBufferMode"><ptype>GLenum</ptype> <name>src</name></param>
+            <glx type="render" opcode="171"/>
+        </command>
+        <command>
+            <proto>void <name>glReadBufferIndexedEXT</name></proto>
+            <param group="ReadBufferMode"><ptype>GLenum</ptype> <name>src</name></param>
+            <param><ptype>GLint</ptype> <name>index</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReadBufferNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReadInstrumentsSGIX</name></proto>
+            <param><ptype>GLint</ptype> <name>marker</name></param>
+            <glx type="render" opcode="2077"/>
+        </command>
+        <command>
+            <proto>void <name>glReadPixels</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">void *<name>pixels</name></param>
+            <glx type="single" opcode="111"/>
+            <glx type="render" opcode="345" name="glReadPixelsPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glReadnPixels</name></proto>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>data</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReadnPixelsARB</name></proto>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>data</name></param>
+            <alias name="glReadnPixels"/>
+        </command>
+        <command>
+            <proto>void <name>glReadnPixelsEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>data</name></param>
+            <alias name="glReadnPixels"/>
+        </command>
+        <command>
+            <proto>void <name>glReadnPixelsKHR</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param len="bufSize">void *<name>data</name></param>
+            <alias name="glReadnPixels"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glReleaseKeyedMutexWin32EXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>key</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRectd</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y1</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x2</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y2</name></param>
+            <vecequiv name="glRectdv"/>
+        </command>
+        <command>
+            <proto>void <name>glRectdv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v1</name></param>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v2</name></param>
+            <glx type="render" opcode="45"/>
+        </command>
+        <command>
+            <proto>void <name>glRectf</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y1</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x2</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y2</name></param>
+            <vecequiv name="glRectfv"/>
+        </command>
+        <command>
+            <proto>void <name>glRectfv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v1</name></param>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v2</name></param>
+            <glx type="render" opcode="46"/>
+        </command>
+        <command>
+            <proto>void <name>glRecti</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x1</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y1</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x2</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y2</name></param>
+            <vecequiv name="glRectiv"/>
+        </command>
+        <command>
+            <proto>void <name>glRectiv</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v1</name></param>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v2</name></param>
+            <glx type="render" opcode="47"/>
+        </command>
+        <command>
+            <proto>void <name>glRects</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x1</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y1</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x2</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y2</name></param>
+            <vecequiv name="glRectsv"/>
+        </command>
+        <command>
+            <proto>void <name>glRectsv</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v1</name></param>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v2</name></param>
+            <glx type="render" opcode="48"/>
+        </command>
+        <command>
+            <proto>void <name>glRectxOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x1</name></param>
+            <param><ptype>GLfixed</ptype> <name>y1</name></param>
+            <param><ptype>GLfixed</ptype> <name>x2</name></param>
+            <param><ptype>GLfixed</ptype> <name>y2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRectxvOES</name></proto>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>v1</name></param>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReferencePlaneSGIX</name></proto>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>equation</name></param>
+            <glx type="render" opcode="2071"/>
+        </command>
+        <command>
+            <proto>void <name>glReleaseShaderCompiler</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glRenderGpuMaskNV</name></proto>
+            <param><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLint</ptype> <name>glRenderMode</name></proto>
+            <param group="RenderingMode"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="single" opcode="107"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorage</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="4318"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageEXT</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glRenderbufferStorage"/>
+            <glx type="render" opcode="4318"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisample</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="4331"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleANGLE</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleAPPLE</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleAdvancedAMD</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLsizei</ptype> <name>storageSamples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleCoverageNV</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleEXT</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glRenderbufferStorageMultisample"/>
+            <glx type="render" opcode="4331"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleIMG</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageMultisampleNV</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glRenderbufferStorageMultisample"/>
+        </command>
+        <command>
+            <proto>void <name>glRenderbufferStorageOES</name></proto>
+            <param group="RenderbufferTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodePointerSUN</name></proto>
+            <param group="ReplacementCodeTypeSUN"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void **<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeubSUN</name></proto>
+            <param><ptype>GLubyte</ptype> <name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeubvSUN</name></proto>
+            <param len="COMPSIZE()">const <ptype>GLubyte</ptype> *<name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor3fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor3fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor4fNormal3fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor4fNormal3fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor4ubVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLubyte</ptype> <name>r</name></param>
+            <param><ptype>GLubyte</ptype> <name>g</name></param>
+            <param><ptype>GLubyte</ptype> <name>b</name></param>
+            <param><ptype>GLubyte</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiColor4ubVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiNormal3fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiNormal3fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiSUN</name></proto>
+            <param><ptype>GLuint</ptype> <name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiTexCoord2fVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiVertex3fSUN</name></proto>
+            <param group="ReplacementCodeSUN"><ptype>GLuint</ptype> <name>rc</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuiVertex3fvSUN</name></proto>
+            <param group="ReplacementCodeSUN" len="1">const <ptype>GLuint</ptype> *<name>rc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeuivSUN</name></proto>
+            <param len="COMPSIZE()">const <ptype>GLuint</ptype> *<name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeusSUN</name></proto>
+            <param><ptype>GLushort</ptype> <name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glReplacementCodeusvSUN</name></proto>
+            <param len="COMPSIZE()">const <ptype>GLushort</ptype> *<name>code</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRequestResidentProgramsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLuint</ptype> *<name>programs</name></param>
+            <glx type="render" opcode="4182"/>
+        </command>
+        <command>
+            <proto>void <name>glResetHistogram</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <glx type="render" opcode="4112"/>
+        </command>
+        <command>
+            <proto>void <name>glResetHistogramEXT</name></proto>
+            <param group="HistogramTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glResetHistogram"/>
+            <glx type="render" opcode="4112"/>
+        </command>
+        <command>
+            <proto>void <name>glResetMemoryObjectParameterNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glResetMinmax</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <glx type="render" opcode="4113"/>
+        </command>
+        <command>
+            <proto>void <name>glResetMinmaxEXT</name></proto>
+            <param group="MinmaxTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glResetMinmax"/>
+            <glx type="render" opcode="4113"/>
+        </command>
+        <command>
+            <proto>void <name>glResizeBuffersMESA</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glResolveDepthValuesNV</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glResolveMultisampleFramebufferAPPLE</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glResumeTransformFeedback</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glResumeTransformFeedbackNV</name></proto>
+            <alias name="glResumeTransformFeedback"/>
+        </command>
+        <command>
+            <proto>void <name>glRotated</name></proto>
+            <param><ptype>GLdouble</ptype> <name>angle</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <glx type="render" opcode="185"/>
+        </command>
+        <command>
+            <proto>void <name>glRotatef</name></proto>
+            <param><ptype>GLfloat</ptype> <name>angle</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <glx type="render" opcode="186"/>
+        </command>
+        <command>
+            <proto>void <name>glRotatex</name></proto>
+            <param><ptype>GLfixed</ptype> <name>angle</name></param>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glRotatexOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>angle</name></param>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleCoverage</name></proto>
+            <param><ptype>GLfloat</ptype> <name>value</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>invert</name></param>
+            <glx type="render" opcode="229"/>
+        </command>
+        <command>
+            <proto>void <name>glSampleCoverageARB</name></proto>
+            <param><ptype>GLfloat</ptype> <name>value</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>invert</name></param>
+            <alias name="glSampleCoverage"/>
+        </command>
+        <command>
+            <proto>void <name>glSampleCoveragex</name></proto>
+            <param><ptype>GLclampx</ptype> <name>value</name></param>
+            <param><ptype>GLboolean</ptype> <name>invert</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleCoveragexOES</name></proto>
+            <param><ptype>GLclampx</ptype> <name>value</name></param>
+            <param><ptype>GLboolean</ptype> <name>invert</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleMapATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param><ptype>GLuint</ptype> <name>interp</name></param>
+            <param group="SwizzleOpATI"><ptype>GLenum</ptype> <name>swizzle</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleMaskEXT</name></proto>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>value</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>invert</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleMaskIndexedNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="SampleMaskNV"><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSampleMaskSGIS</name></proto>
+            <param group="ClampedFloat32"><ptype>GLclampf</ptype> <name>value</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>invert</name></param>
+            <alias name="glSampleMaskEXT"/>
+            <glx type="render" opcode="2048"/>
+        </command>
+        <command>
+            <proto>void <name>glSampleMaski</name></proto>
+            <param><ptype>GLuint</ptype> <name>maskNumber</name></param>
+            <param><ptype>GLbitfield</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplePatternEXT</name></proto>
+            <param group="SamplePatternEXT"><ptype>GLenum</ptype> <name>pattern</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplePatternSGIS</name></proto>
+            <param group="SamplePatternSGIS"><ptype>GLenum</ptype> <name>pattern</name></param>
+            <alias name="glSamplePatternEXT"/>
+            <glx type="render" opcode="2049"/>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>param</name></param>
+            <alias name="glSamplerParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIivOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>param</name></param>
+            <alias name="glSamplerParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>param</name></param>
+            <alias name="glSamplerParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterIuivOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>param</name></param>
+            <alias name="glSamplerParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterf</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameterfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameteri</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSamplerParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param group="SamplerParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScaled</name></proto>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <glx type="render" opcode="187"/>
+        </command>
+        <command>
+            <proto>void <name>glScalef</name></proto>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <glx type="render" opcode="188"/>
+        </command>
+        <command>
+            <proto>void <name>glScalex</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScalexOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissor</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="103"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorArrayv</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissorArrayvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glScissorArrayv"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorArrayvOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glScissorArrayv"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorExclusiveArrayvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissorExclusiveNV</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexed</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>left</name></param>
+            <param><ptype>GLint</ptype> <name>bottom</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexedNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>left</name></param>
+            <param><ptype>GLint</ptype> <name>bottom</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glScissorIndexed"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexedOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>left</name></param>
+            <param><ptype>GLint</ptype> <name>bottom</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glScissorIndexed"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexedv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexedvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glScissorIndexedv"/>
+        </command>
+        <command>
+            <proto>void <name>glScissorIndexedvOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glScissorIndexedv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3b</name></proto>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>red</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>green</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3bv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3bEXT</name></proto>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>red</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>green</name></param>
+            <param group="ColorB"><ptype>GLbyte</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3b"/>
+            <vecequiv name="glSecondaryColor3bvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3bv</name></proto>
+            <param group="ColorB" len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4126"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3bvEXT</name></proto>
+            <param group="ColorB" len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3bv"/>
+            <glx type="render" opcode="4126"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3d</name></proto>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>red</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>green</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3dEXT</name></proto>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>red</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>green</name></param>
+            <param group="ColorD"><ptype>GLdouble</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3d"/>
+            <vecequiv name="glSecondaryColor3dvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3dv</name></proto>
+            <param group="ColorD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4130"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3dvEXT</name></proto>
+            <param group="ColorD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3dv"/>
+            <glx type="render" opcode="4130"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3f</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3fEXT</name></proto>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>red</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>green</name></param>
+            <param group="ColorF"><ptype>GLfloat</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3f"/>
+            <vecequiv name="glSecondaryColor3fvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3fv</name></proto>
+            <param group="ColorF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4129"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3fvEXT</name></proto>
+            <param group="ColorF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3fv"/>
+            <glx type="render" opcode="4129"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>red</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>green</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3hvNV</name></proto>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4255"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3i</name></proto>
+            <param group="ColorI"><ptype>GLint</ptype> <name>red</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>green</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3iEXT</name></proto>
+            <param group="ColorI"><ptype>GLint</ptype> <name>red</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>green</name></param>
+            <param group="ColorI"><ptype>GLint</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3i"/>
+            <vecequiv name="glSecondaryColor3ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3iv</name></proto>
+            <param group="ColorI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4128"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ivEXT</name></proto>
+            <param group="ColorI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3iv"/>
+            <glx type="render" opcode="4128"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3s</name></proto>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>red</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>green</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3sEXT</name></proto>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>red</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>green</name></param>
+            <param group="ColorS"><ptype>GLshort</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3s"/>
+            <vecequiv name="glSecondaryColor3svEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3sv</name></proto>
+            <param group="ColorS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4127"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3svEXT</name></proto>
+            <param group="ColorS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3sv"/>
+            <glx type="render" opcode="4127"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ub</name></proto>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>red</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>green</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3ubv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ubEXT</name></proto>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>red</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>green</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3ub"/>
+            <vecequiv name="glSecondaryColor3ubvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ubv</name></proto>
+            <param group="ColorUB" len="3">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4131"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ubvEXT</name></proto>
+            <param group="ColorUB" len="3">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3ubv"/>
+            <glx type="render" opcode="4131"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3ui</name></proto>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>red</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>green</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3uiEXT</name></proto>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>red</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>green</name></param>
+            <param group="ColorUI"><ptype>GLuint</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3ui"/>
+            <vecequiv name="glSecondaryColor3uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3uiv</name></proto>
+            <param group="ColorUI" len="3">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4133"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3uivEXT</name></proto>
+            <param group="ColorUI" len="3">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3uiv"/>
+            <glx type="render" opcode="4133"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3us</name></proto>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>red</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>green</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>blue</name></param>
+            <vecequiv name="glSecondaryColor3usv"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3usEXT</name></proto>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>red</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>green</name></param>
+            <param group="ColorUS"><ptype>GLushort</ptype> <name>blue</name></param>
+            <alias name="glSecondaryColor3us"/>
+            <vecequiv name="glSecondaryColor3usvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3usv</name></proto>
+            <param group="ColorUS" len="3">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4132"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColor3usvEXT</name></proto>
+            <param group="ColorUS" len="3">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <alias name="glSecondaryColor3usv"/>
+            <glx type="render" opcode="4132"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorFormatNV</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorP3ui</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorP3uiv</name></proto>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>color</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorPointer</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorPointerEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+            <alias name="glSecondaryColorPointer"/>
+        </command>
+        <command>
+            <proto>void <name>glSecondaryColorPointerListIBM</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="SecondaryColorPointerTypeIBM"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSelectBuffer</name></proto>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param group="SelectName" len="size"><ptype>GLuint</ptype> *<name>buffer</name></param>
+            <glx type="single" opcode="106"/>
+        </command>
+        <command>
+            <proto>void <name>glSelectPerfMonitorCountersAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>monitor</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>enable</name></param>
+            <param><ptype>GLuint</ptype> <name>group</name></param>
+            <param><ptype>GLint</ptype> <name>numCounters</name></param>
+            <param len="numCounters"><ptype>GLuint</ptype> *<name>counterList</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSemaphoreParameterui64vEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param group="SemaphoreParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLuint64</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSeparableFilter2D</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type,width)">const void *<name>row</name></param>
+            <param len="COMPSIZE(target,format,type,height)">const void *<name>column</name></param>
+            <glx type="render" opcode="4109"/>
+            <glx type="render" opcode="327" name="glSeparableFilter2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glSeparableFilter2DEXT</name></proto>
+            <param group="SeparableTargetEXT"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(target,format,type,width)">const void *<name>row</name></param>
+            <param len="COMPSIZE(target,format,type,height)">const void *<name>column</name></param>
+            <alias name="glSeparableFilter2D"/>
+            <glx type="render" opcode="4109"/>
+        </command>
+        <command>
+            <proto>void <name>glSetFenceAPPLE</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSetFenceNV</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+            <param group="FenceConditionNV"><ptype>GLenum</ptype> <name>condition</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSetFragmentShaderConstantATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>dst</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSetInvariantEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ScalarType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(id,type)">const void *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSetLocalConstantEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ScalarType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(id,type)">const void *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSetMultisamplefvAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>val</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShadeModel</name></proto>
+            <param group="ShadingModel"><ptype>GLenum</ptype> <name>mode</name></param>
+            <glx type="render" opcode="104"/>
+        </command>
+        <command>
+            <proto>void <name>glShaderBinary</name></proto>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>shaders</name></param>
+            <param><ptype>GLenum</ptype> <name>binaryformat</name></param>
+            <param len="length">const void *<name>binary</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShaderOp1EXT</name></proto>
+            <param group="VertexShaderOpEXT"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShaderOp2EXT</name></proto>
+            <param group="VertexShaderOpEXT"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShaderOp3EXT</name></proto>
+            <param group="VertexShaderOpEXT"><ptype>GLenum</ptype> <name>op</name></param>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>arg1</name></param>
+            <param><ptype>GLuint</ptype> <name>arg2</name></param>
+            <param><ptype>GLuint</ptype> <name>arg3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShaderSource</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> *const*<name>string</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>length</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShaderSourceARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>shaderObj</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLcharARB</ptype> **<name>string</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>length</name></param>
+            <alias name="glShaderSource"/>
+        </command>
+        <command>
+            <proto>void <name>glShaderStorageBlockBinding</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>storageBlockIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>storageBlockBinding</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShadingRateImageBarrierNV</name></proto>
+            <param><ptype>GLboolean</ptype> <name>synchronize</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShadingRateImagePaletteNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>viewport</name></param>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLenum</ptype> *<name>rates</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShadingRateSampleOrderNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>order</name></param>
+        </command>
+        <command>
+            <proto>void <name>glShadingRateSampleOrderCustomNV</name></proto>
+            <param><ptype>GLenum</ptype> <name>rate</name></param>
+            <param><ptype>GLuint</ptype> <name>samples</name></param>
+            <param len="COMPSIZE(rate,samples)">const <ptype>GLint</ptype> *<name>locations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSharpenTexFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n*2">const <ptype>GLfloat</ptype> *<name>points</name></param>
+            <glx type="render" opcode="2052"/>
+        </command>
+        <command>
+            <proto>void <name>glSignalSemaphoreEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param><ptype>GLuint</ptype> <name>numBufferBarriers</name></param>
+            <param len="COMPSIZE(numBufferBarriers)">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param><ptype>GLuint</ptype> <name>numTextureBarriers</name></param>
+            <param len="COMPSIZE(numTextureBarriers)">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="TextureLayout" len="COMPSIZE(numTextureBarriers)">const <ptype>GLenum</ptype> *<name>dstLayouts</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSpecializeShader</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>pEntryPoint</name></param>
+            <param><ptype>GLuint</ptype> <name>numSpecializationConstants</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>pConstantIndex</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>pConstantValue</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSpecializeShaderARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>shader</name></param>
+            <param>const <ptype>GLchar</ptype> *<name>pEntryPoint</name></param>
+            <param><ptype>GLuint</ptype> <name>numSpecializationConstants</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>pConstantIndex</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>pConstantValue</name></param>
+            <alias name="glSpecializeShader"/>
+        </command>
+        <command>
+            <proto>void <name>glSpriteParameterfSGIX</name></proto>
+            <param group="SpriteParameterNameSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="2060"/>
+        </command>
+        <command>
+            <proto>void <name>glSpriteParameterfvSGIX</name></proto>
+            <param group="SpriteParameterNameSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2061"/>
+        </command>
+        <command>
+            <proto>void <name>glSpriteParameteriSGIX</name></proto>
+            <param group="SpriteParameterNameSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="2062"/>
+        </command>
+        <command>
+            <proto>void <name>glSpriteParameterivSGIX</name></proto>
+            <param group="SpriteParameterNameSGIX"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="2063"/>
+        </command>
+        <command>
+            <proto>void <name>glStartInstrumentsSGIX</name></proto>
+            <glx type="render" opcode="2069"/>
+        </command>
+        <command>
+            <proto>void <name>glStartTilingQCOM</name></proto>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>width</name></param>
+            <param><ptype>GLuint</ptype> <name>height</name></param>
+            <param group="BufferBitQCOM"><ptype>GLbitfield</ptype> <name>preserveMask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStateCaptureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>state</name></param>
+            <param><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilClearTagEXT</name></proto>
+            <param><ptype>GLsizei</ptype> <name>stencilTagBits</name></param>
+            <param><ptype>GLuint</ptype> <name>stencilClearTag</name></param>
+            <glx type="render" opcode="4223"/>
+        </command>
+        <command>
+            <proto>void <name>glStencilFillPathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param group="PathFillMode"><ptype>GLenum</ptype> <name>fillMode</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(numPaths,transformType)">const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilFillPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="PathFillMode"><ptype>GLenum</ptype> <name>fillMode</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilFunc</name></proto>
+            <param group="StencilFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param group="StencilValue"><ptype>GLint</ptype> <name>ref</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+            <glx type="render" opcode="162"/>
+        </command>
+        <command>
+            <proto>void <name>glStencilFuncSeparate</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="StencilFunction"><ptype>GLenum</ptype> <name>func</name></param>
+            <param group="StencilValue"><ptype>GLint</ptype> <name>ref</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilFuncSeparateATI</name></proto>
+            <param group="StencilFunction"><ptype>GLenum</ptype> <name>frontfunc</name></param>
+            <param group="StencilFunction"><ptype>GLenum</ptype> <name>backfunc</name></param>
+            <param group="ClampedStencilValue"><ptype>GLint</ptype> <name>ref</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilMask</name></proto>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+            <glx type="render" opcode="133"/>
+        </command>
+        <command>
+            <proto>void <name>glStencilMaskSeparate</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilOp</name></proto>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>fail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>zfail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>zpass</name></param>
+            <glx type="render" opcode="163"/>
+        </command>
+        <command>
+            <proto>void <name>glStencilOpSeparate</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>sfail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>dpfail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>dppass</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilOpSeparateATI</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>sfail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>dpfail</name></param>
+            <param group="StencilOp"><ptype>GLenum</ptype> <name>dppass</name></param>
+            <alias name="glStencilOpSeparate"/>
+        </command>
+        <command>
+            <proto>void <name>glStencilOpValueAMD</name></proto>
+            <param group="StencilFaceDirection"><ptype>GLenum</ptype> <name>face</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilStrokePathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="PathElementType"><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param group="PathElement" len="COMPSIZE(numPaths,pathNameType,paths)">const void *<name>paths</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param group="StencilValue"><ptype>GLint</ptype> <name>reference</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(numPaths,transformType)">const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilStrokePathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>path</name></param>
+            <param group="StencilValue"><ptype>GLint</ptype> <name>reference</name></param>
+            <param group="MaskedStencilValue"><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilThenCoverFillPathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param>const void *<name>paths</name></param>
+            <param><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param><ptype>GLenum</ptype> <name>fillMode</name></param>
+            <param><ptype>GLuint</ptype> <name>mask</name></param>
+            <param><ptype>GLenum</ptype> <name>coverMode</name></param>
+            <param><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilThenCoverFillPathNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLenum</ptype> <name>fillMode</name></param>
+            <param><ptype>GLuint</ptype> <name>mask</name></param>
+            <param><ptype>GLenum</ptype> <name>coverMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilThenCoverStrokePathInstancedNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param><ptype>GLenum</ptype> <name>pathNameType</name></param>
+            <param>const void *<name>paths</name></param>
+            <param><ptype>GLuint</ptype> <name>pathBase</name></param>
+            <param><ptype>GLint</ptype> <name>reference</name></param>
+            <param><ptype>GLuint</ptype> <name>mask</name></param>
+            <param><ptype>GLenum</ptype> <name>coverMode</name></param>
+            <param><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStencilThenCoverStrokePathNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>path</name></param>
+            <param><ptype>GLint</ptype> <name>reference</name></param>
+            <param><ptype>GLuint</ptype> <name>mask</name></param>
+            <param><ptype>GLenum</ptype> <name>coverMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glStopInstrumentsSGIX</name></proto>
+            <param><ptype>GLint</ptype> <name>marker</name></param>
+            <glx type="render" opcode="2070"/>
+        </command>
+        <command>
+            <proto>void <name>glStringMarkerGREMEDY</name></proto>
+            <param><ptype>GLsizei</ptype> <name>len</name></param>
+            <param len="len">const void *<name>string</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSubpixelPrecisionBiasNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>xbits</name></param>
+            <param><ptype>GLuint</ptype> <name>ybits</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSwizzleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>in</name></param>
+            <param group="VertexShaderCoordOutEXT"><ptype>GLenum</ptype> <name>outX</name></param>
+            <param group="VertexShaderCoordOutEXT"><ptype>GLenum</ptype> <name>outY</name></param>
+            <param group="VertexShaderCoordOutEXT"><ptype>GLenum</ptype> <name>outZ</name></param>
+            <param group="VertexShaderCoordOutEXT"><ptype>GLenum</ptype> <name>outW</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSyncTextureINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTagSampleBufferSGIX</name></proto>
+            <glx type="render" opcode="2050"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3bEXT</name></proto>
+            <param><ptype>GLbyte</ptype> <name>tx</name></param>
+            <param><ptype>GLbyte</ptype> <name>ty</name></param>
+            <param><ptype>GLbyte</ptype> <name>tz</name></param>
+            <vecequiv name="glTangent3bvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3bvEXT</name></proto>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTangent3dEXT</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>tx</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>ty</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>tz</name></param>
+            <vecequiv name="glTangent3dvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3dvEXT</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTangent3fEXT</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>tx</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>ty</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>tz</name></param>
+            <vecequiv name="glTangent3fvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3fvEXT</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTangent3iEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>tx</name></param>
+            <param><ptype>GLint</ptype> <name>ty</name></param>
+            <param><ptype>GLint</ptype> <name>tz</name></param>
+            <vecequiv name="glTangent3ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3ivEXT</name></proto>
+            <param len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTangent3sEXT</name></proto>
+            <param><ptype>GLshort</ptype> <name>tx</name></param>
+            <param><ptype>GLshort</ptype> <name>ty</name></param>
+            <param><ptype>GLshort</ptype> <name>tz</name></param>
+            <vecequiv name="glTangent3svEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTangent3svEXT</name></proto>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTangentPointerEXT</name></proto>
+            <param group="TangentPointerTypeEXT"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTbufferMask3DFX</name></proto>
+            <param><ptype>GLuint</ptype> <name>mask</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTessellationFactorAMD</name></proto>
+            <param><ptype>GLfloat</ptype> <name>factor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTessellationModeAMD</name></proto>
+            <param><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glTestFenceAPPLE</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glTestFenceNV</name></proto>
+            <param group="FenceNV"><ptype>GLuint</ptype> <name>fence</name></param>
+            <glx type="vendor" opcode="1279"/>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glTestObjectAPPLE</name></proto>
+            <param group="ObjectTypeAPPLE"><ptype>GLenum</ptype> <name>object</name></param>
+            <param><ptype>GLuint</ptype> <name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexAttachMemoryNV</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexBuffer</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferARB</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glTexBuffer"/>
+            <glx type="render" opcode="367"/>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glTexBuffer"/>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <alias name="glTexBuffer"/>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferRange</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferRangeEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glTexBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glTexBufferRangeOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+            <alias name="glTexBufferRange"/>
+        </command>
+        <command>
+            <proto>void <name>glTexBumpParameterfvATI</name></proto>
+            <param group="TexBumpParameterATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexBumpParameterivATI</name></proto>
+            <param group="TexBumpParameterATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1bvOES</name></proto>
+            <param len="1">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <vecequiv name="glTexCoord1dv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1dv</name></proto>
+            <param group="CoordD" len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="49"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <vecequiv name="glTexCoord1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1fv</name></proto>
+            <param group="CoordF" len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="50"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <vecequiv name="glTexCoord1hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1hvNV</name></proto>
+            <param group="Half16NV" len="1">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4246"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <vecequiv name="glTexCoord1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1iv</name></proto>
+            <param group="CoordI" len="1">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="51"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <vecequiv name="glTexCoord1sv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1sv</name></proto>
+            <param group="CoordS" len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="52"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord1xvOES</name></proto>
+            <param len="1">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2bvOES</name></proto>
+            <param len="2">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <vecequiv name="glTexCoord2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2dv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="53"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <vecequiv name="glTexCoord2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor3fVertex3fvSUN</name></proto>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor4fNormal3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor4fNormal3fVertex3fvSUN</name></proto>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor4ubVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLubyte</ptype> <name>r</name></param>
+            <param><ptype>GLubyte</ptype> <name>g</name></param>
+            <param><ptype>GLubyte</ptype> <name>b</name></param>
+            <param><ptype>GLubyte</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fColor4ubVertex3fvSUN</name></proto>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fNormal3fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fNormal3fVertex3fvSUN</name></proto>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fVertex3fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fVertex3fvSUN</name></proto>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2fv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="54"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <vecequiv name="glTexCoord2hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2hvNV</name></proto>
+            <param group="Half16NV" len="2">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4247"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <vecequiv name="glTexCoord2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2iv</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="55"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <vecequiv name="glTexCoord2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2sv</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="56"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord2xvOES</name></proto>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+            <param><ptype>GLbyte</ptype> <name>r</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3bvOES</name></proto>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <vecequiv name="glTexCoord3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3dv</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="57"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <vecequiv name="glTexCoord3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3fv</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="58"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>r</name></param>
+            <vecequiv name="glTexCoord3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3hvNV</name></proto>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4248"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <vecequiv name="glTexCoord3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3iv</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="59"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <vecequiv name="glTexCoord3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3sv</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="60"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord3xvOES</name></proto>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>s</name></param>
+            <param><ptype>GLbyte</ptype> <name>t</name></param>
+            <param><ptype>GLbyte</ptype> <name>r</name></param>
+            <param><ptype>GLbyte</ptype> <name>q</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4bvOES</name></proto>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>s</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>t</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>r</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>q</name></param>
+            <vecequiv name="glTexCoord4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4dv</name></proto>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="61"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>s</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>t</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>r</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>q</name></param>
+            <vecequiv name="glTexCoord4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4fColor4fNormal3fVertex4fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>p</name></param>
+            <param><ptype>GLfloat</ptype> <name>q</name></param>
+            <param><ptype>GLfloat</ptype> <name>r</name></param>
+            <param><ptype>GLfloat</ptype> <name>g</name></param>
+            <param><ptype>GLfloat</ptype> <name>b</name></param>
+            <param><ptype>GLfloat</ptype> <name>a</name></param>
+            <param><ptype>GLfloat</ptype> <name>nx</name></param>
+            <param><ptype>GLfloat</ptype> <name>ny</name></param>
+            <param><ptype>GLfloat</ptype> <name>nz</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4fColor4fNormal3fVertex4fvSUN</name></proto>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>c</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>n</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4fVertex4fSUN</name></proto>
+            <param><ptype>GLfloat</ptype> <name>s</name></param>
+            <param><ptype>GLfloat</ptype> <name>t</name></param>
+            <param><ptype>GLfloat</ptype> <name>p</name></param>
+            <param><ptype>GLfloat</ptype> <name>q</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4fVertex4fvSUN</name></proto>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>tc</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4fv</name></proto>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="62"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>s</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>t</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>r</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>q</name></param>
+            <vecequiv name="glTexCoord4hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4hvNV</name></proto>
+            <param group="Half16NV" len="4">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4249"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>s</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>t</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>r</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>q</name></param>
+            <vecequiv name="glTexCoord4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4iv</name></proto>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="63"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>s</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>t</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>r</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>q</name></param>
+            <vecequiv name="glTexCoord4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4sv</name></proto>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="64"/>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>s</name></param>
+            <param><ptype>GLfixed</ptype> <name>t</name></param>
+            <param><ptype>GLfixed</ptype> <name>r</name></param>
+            <param><ptype>GLfixed</ptype> <name>q</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoord4xvOES</name></proto>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordFormatNV</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP1ui</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP1uiv</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP2ui</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP2uiv</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP3ui</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP3uiv</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP4ui</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordP4uiv</name></proto>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordPointer</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordPointerEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(size,type,stride,count)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordPointerListIBM</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexCoordPointervINTEL</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="4">const void **<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvf</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="111"/>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvfv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="112"/>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvi</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="113"/>
+        </command>
+        <command>
+            <proto>void <name>glTexEnviv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="114"/>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvx</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvxOES</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvxv</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexEnvxvOES</name></proto>
+            <param group="TextureEnvTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureEnvParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexFilterFuncSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureFilterSGIS"><ptype>GLenum</ptype> <name>filter</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param len="n">const <ptype>GLfloat</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="2064"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGend</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLdouble</ptype> <name>param</name></param>
+            <glx type="render" opcode="115"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGendv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLdouble</ptype> *<name>params</name></param>
+            <glx type="render" opcode="116"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGenf</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="117"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGenfOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexGenfv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="118"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGenfvOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexGeni</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="119"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGeniOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexGeniv</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="120"/>
+        </command>
+        <command>
+            <proto>void <name>glTexGenivOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexGenxOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexGenxvOES</name></proto>
+            <param group="TextureCoordName"><ptype>GLenum</ptype> <name>coord</name></param>
+            <param group="TextureGenParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="109"/>
+            <glx type="render" opcode="328" name="glTexImage1DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="110"/>
+            <glx type="render" opcode="329" name="glTexImage2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexImage2DMultisample</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage2DMultisampleCoverageNV</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="4114"/>
+            <glx type="render" opcode="330" name="glTexImage3DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexImage3DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+            <alias name="glTexImage3D"/>
+            <glx type="render" opcode="4114"/>
+        </command>
+        <command>
+            <proto>void <name>glTexImage3DMultisample</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage3DMultisampleCoverageNV</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage3DOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexImage4DSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLsizei</ptype> <name>size4d</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth,size4d)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="2057"/>
+        </command>
+        <command>
+            <proto>void <name>glTexPageCommitmentARB</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexPageCommitmentEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+            <alias name="glTexPageCommitmentARB"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIiv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="346"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIivEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glTexParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIivOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <alias name="glTexParameterIiv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIuiv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="347"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIuivEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glTexParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterIuivOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>params</name></param>
+            <alias name="glTexParameterIuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterf</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <glx type="render" opcode="105"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterfv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+            <glx type="render" opcode="106"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameteri</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <glx type="render" opcode="107"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameteriv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+            <glx type="render" opcode="108"/>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterx</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterxOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfixed</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterxv</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexParameterxvOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="GetTextureParameter"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfixed</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexRenderbufferNV</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage1DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <alias name="glTexStorage1D"/>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage2DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <alias name="glTexStorage2D"/>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage2DMultisample</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage3DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <alias name="glTexStorage3D"/>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage3DMultisample</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorage3DMultisampleOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+            <alias name="glTexStorage3DMultisample"/>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageMem1DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageMem2DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageMem2DMultisampleEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageMem3DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageMem3DMultisampleEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexStorageSparseAMD</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLsizei</ptype> <name>layers</name></param>
+            <param group="TextureStorageMaskAMD"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage1D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="4099"/>
+            <glx type="render" opcode="331" name="glTexSubImage1DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage1DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+            <alias name="glTexSubImage1D"/>
+            <glx type="render" opcode="4099"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage2D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="4100"/>
+            <glx type="render" opcode="332" name="glTexSubImage2DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage2DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+            <alias name="glTexSubImage2D"/>
+            <glx type="render" opcode="4100"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage3D</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="4115"/>
+            <glx type="render" opcode="333" name="glTexSubImage3DPBO" comment="PBO protocol"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage3DEXT</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+            <alias name="glTexSubImage3D"/>
+            <glx type="render" opcode="4115"/>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage3DOES</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexSubImage4DSGIS</name></proto>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>woffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLsizei</ptype> <name>size4d</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth,size4d)">const void *<name>pixels</name></param>
+            <glx type="render" opcode="2058"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureAttachMemoryNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureBarrier</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glTextureBarrierNV</name></proto>
+            <glx type="render" opcode="4348"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureBufferEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureBufferRange</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureBufferRangeEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureColorMaskSGIS</name></proto>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>red</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>green</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>blue</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>alpha</name></param>
+            <glx type="render" opcode="2082"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureFoveationParametersQCOM</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLuint</ptype> <name>layer</name></param>
+            <param><ptype>GLuint</ptype> <name>focalPoint</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>focalX</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>focalY</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>gainX</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>gainY</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>foveaArea</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage2DMultisampleCoverageNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage2DMultisampleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage3DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="InternalFormat"><ptype>GLint</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>border</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage3DMultisampleCoverageNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>coverageSamples</name></param>
+            <param><ptype>GLsizei</ptype> <name>colorSamples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureImage3DMultisampleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLint</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureLightEXT</name></proto>
+            <param group="LightTexturePNameEXT"><ptype>GLenum</ptype> <name>pname</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureMaterialEXT</name></proto>
+            <param group="MaterialFace"><ptype>GLenum</ptype> <name>face</name></param>
+            <param group="MaterialParameter"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureNormalEXT</name></proto>
+            <param group="TextureNormalModeEXT"><ptype>GLenum</ptype> <name>mode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTexturePageCommitmentEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>commit</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterIiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterIivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterIuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterIuivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLuint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterf</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterfEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32"><ptype>GLfloat</ptype> <name>param</name></param>
+            <vecequiv name="glTextureParameterfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLfloat</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterfvEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedFloat32" len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameteri</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameteriEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>param</name></param>
+            <vecequiv name="glTextureParameterivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameteriv</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param>const <ptype>GLint</ptype> *<name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureParameterivEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="TextureParameterName"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param group="CheckedInt32" len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureRangeAPPLE</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureRenderbufferEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>renderbuffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage1D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage1DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage2D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage2DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage2DMultisample</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage2DMultisampleEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage3D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage3DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage3DMultisample</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorage3DMultisampleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>fixedsamplelocations</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageMem1DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageMem2DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageMem2DMultisampleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageMem3DEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>levels</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageMem3DMultisampleEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLsizei</ptype> <name>samples</name></param>
+            <param><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLboolean</ptype> <name>fixedSampleLocations</name></param>
+            <param><ptype>GLuint</ptype> <name>memory</name></param>
+            <param><ptype>GLuint64</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureStorageSparseAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalFormat</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param><ptype>GLsizei</ptype> <name>layers</name></param>
+            <param group="TextureStorageMaskAMD"><ptype>GLbitfield</ptype> <name>flags</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage1D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage1DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage2D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage2DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage3D</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+            <param><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param>const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureSubImage3DEXT</name></proto>
+            <param group="Texture"><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>level</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>xoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>yoffset</name></param>
+            <param group="CheckedInt32"><ptype>GLint</ptype> <name>zoffset</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <param><ptype>GLsizei</ptype> <name>depth</name></param>
+            <param group="PixelFormat"><ptype>GLenum</ptype> <name>format</name></param>
+            <param group="PixelType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="COMPSIZE(format,type,width,height,depth)">const void *<name>pixels</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureView</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>origtexture</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>minlevel</name></param>
+            <param><ptype>GLuint</ptype> <name>numlevels</name></param>
+            <param><ptype>GLuint</ptype> <name>minlayer</name></param>
+            <param><ptype>GLuint</ptype> <name>numlayers</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTextureViewEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>origtexture</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>minlevel</name></param>
+            <param><ptype>GLuint</ptype> <name>numlevels</name></param>
+            <param><ptype>GLuint</ptype> <name>minlayer</name></param>
+            <param><ptype>GLuint</ptype> <name>numlayers</name></param>
+            <alias name="glTextureView"/>
+        </command>
+        <command>
+            <proto>void <name>glTextureViewOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param group="TextureTarget"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>origtexture</name></param>
+            <param group="InternalFormat"><ptype>GLenum</ptype> <name>internalformat</name></param>
+            <param><ptype>GLuint</ptype> <name>minlevel</name></param>
+            <param><ptype>GLuint</ptype> <name>numlevels</name></param>
+            <param><ptype>GLuint</ptype> <name>minlayer</name></param>
+            <param><ptype>GLuint</ptype> <name>numlayers</name></param>
+            <alias name="glTextureView"/>
+        </command>
+        <command>
+            <proto>void <name>glTrackMatrixNV</name></proto>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLuint</ptype> <name>address</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>matrix</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>transform</name></param>
+            <glx type="render" opcode="4188"/>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackAttribsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>attribs</name></param>
+            <param><ptype>GLenum</ptype> <name>bufferMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackBufferBase</name></proto>
+            <param><ptype>GLuint</ptype> <name>xfb</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackBufferRange</name></proto>
+            <param><ptype>GLuint</ptype> <name>xfb</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param group="BufferSize"><ptype>GLsizeiptr</ptype> <name>size</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackStreamAttribsNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>attribs</name></param>
+            <param><ptype>GLsizei</ptype> <name>nbuffers</name></param>
+            <param len="nbuffers">const <ptype>GLint</ptype> *<name>bufstreams</name></param>
+            <param><ptype>GLenum</ptype> <name>bufferMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackVaryings</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> *const*<name>varyings</name></param>
+            <param><ptype>GLenum</ptype> <name>bufferMode</name></param>
+            <glx type="render" opcode="359"/>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackVaryingsEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLchar</ptype> *const*<name>varyings</name></param>
+            <param><ptype>GLenum</ptype> <name>bufferMode</name></param>
+            <alias name="glTransformFeedbackVaryings"/>
+        </command>
+        <command>
+            <proto>void <name>glTransformFeedbackVaryingsNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLint</ptype> *<name>locations</name></param>
+            <param><ptype>GLenum</ptype> <name>bufferMode</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTransformPathNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>resultPath</name></param>
+            <param group="Path"><ptype>GLuint</ptype> <name>srcPath</name></param>
+            <param group="PathTransformType"><ptype>GLenum</ptype> <name>transformType</name></param>
+            <param len="COMPSIZE(transformType)">const <ptype>GLfloat</ptype> *<name>transformValues</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTranslated</name></proto>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <glx type="render" opcode="189"/>
+        </command>
+        <command>
+            <proto>void <name>glTranslatef</name></proto>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <glx type="render" opcode="190"/>
+        </command>
+        <command>
+            <proto>void <name>glTranslatex</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glTranslatexOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1d</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1f</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1fARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <alias name="glUniform1f"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform1fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniform1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform1i</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1i64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1i64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1i64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1i64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1iARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <alias name="glUniform1i"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform1iv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glUniform1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ui</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ui64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ui64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1ui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1uiEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <alias name="glUniform1ui"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform1uiv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform1uivEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glUniform1uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2d</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2f</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2fARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <alias name="glUniform2f"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniform2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2i</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2i64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2i64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2i64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2i64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2iARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <alias name="glUniform2i"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2iv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glUniform2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ui</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ui64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ui64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2ui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2uiEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <alias name="glUniform2ui"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform2uiv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform2uivEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glUniform2uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3d</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3f</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3fARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <alias name="glUniform3f"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniform3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3i</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3i64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+            <param><ptype>GLint64</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3i64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3i64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3i64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3iARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <alias name="glUniform3i"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3iv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glUniform3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ui</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ui64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+            <param><ptype>GLuint64</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ui64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3ui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3uiEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <alias name="glUniform3ui"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform3uiv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform3uivEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glUniform3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4d</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4f</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLfloat</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4fARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLfloat</ptype> <name>v0</name></param>
+            <param><ptype>GLfloat</ptype> <name>v1</name></param>
+            <param><ptype>GLfloat</ptype> <name>v2</name></param>
+            <param><ptype>GLfloat</ptype> <name>v3</name></param>
+            <alias name="glUniform4f"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniform4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4i</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4i64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64</ptype> <name>x</name></param>
+            <param><ptype>GLint64</ptype> <name>y</name></param>
+            <param><ptype>GLint64</ptype> <name>z</name></param>
+            <param><ptype>GLint64</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4i64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4i64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4i64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4iARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLint</ptype> <name>v0</name></param>
+            <param><ptype>GLint</ptype> <name>v1</name></param>
+            <param><ptype>GLint</ptype> <name>v2</name></param>
+            <param><ptype>GLint</ptype> <name>v3</name></param>
+            <alias name="glUniform4i"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4iv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLint</ptype> *<name>value</name></param>
+            <alias name="glUniform4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ui</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <param><ptype>GLuint</ptype> <name>v3</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ui64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>x</name></param>
+            <param><ptype>GLuint64</ptype> <name>y</name></param>
+            <param><ptype>GLuint64</ptype> <name>z</name></param>
+            <param><ptype>GLuint64</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ui64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4ui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4uiEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>v0</name></param>
+            <param><ptype>GLuint</ptype> <name>v1</name></param>
+            <param><ptype>GLuint</ptype> <name>v2</name></param>
+            <param><ptype>GLuint</ptype> <name>v3</name></param>
+            <alias name="glUniform4ui"/>
+        </command>
+        <command>
+            <proto>void <name>glUniform4uiv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniform4uivEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLuint</ptype> *<name>value</name></param>
+            <alias name="glUniform4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformBlockBinding</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLuint</ptype> <name>uniformBlockIndex</name></param>
+            <param><ptype>GLuint</ptype> <name>uniformBlockBinding</name></param>
+            <glx type="render" opcode="366"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformBufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64ARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64IMG</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+            <alias name="glUniformHandleui64ARB"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64vARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64vIMG</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>value</name></param>
+            <alias name="glUniformHandleui64vARB"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformHandleui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint64</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x3dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x3fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="305"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x3fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix2x3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x4dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x4fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="307"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix2x4fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix2x4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*9">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x2dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x2fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="306"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x2fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*6">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix3x2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x4dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x4fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="309"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix3x4fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix3x4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLfloat</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4fvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*16">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x2dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x2fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="308"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x2fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*8">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix4x2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x3dv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLdouble</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x3fv</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <glx type="render" opcode="310"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformMatrix4x3fvNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>transpose</name></param>
+            <param len="count*12">const <ptype>GLfloat</ptype> *<name>value</name></param>
+            <alias name="glUniformMatrix4x3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glUniformSubroutinesuiv</name></proto>
+            <param group="ShaderType"><ptype>GLenum</ptype> <name>shadertype</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLuint</ptype> *<name>indices</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformui64NV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUniformui64vNV</name></proto>
+            <param><ptype>GLint</ptype> <name>location</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*1">const <ptype>GLuint64EXT</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUnlockArraysEXT</name></proto>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glUnmapBuffer</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glUnmapBufferARB</name></proto>
+            <param group="BufferTargetARB"><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glUnmapBuffer"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glUnmapBufferOES</name></proto>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <alias name="glUnmapBuffer"/>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glUnmapNamedBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto group="Boolean"><ptype>GLboolean</ptype> <name>glUnmapNamedBufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUnmapObjectBufferATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUnmapTexture2DINTEL</name></proto>
+            <param><ptype>GLuint</ptype> <name>texture</name></param>
+            <param><ptype>GLint</ptype> <name>level</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUpdateObjectBufferATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>offset</name></param>
+            <param><ptype>GLsizei</ptype> <name>size</name></param>
+            <param len="size">const void *<name>pointer</name></param>
+            <param group="PreserveModeATI"><ptype>GLenum</ptype> <name>preserve</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUseProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUseProgramObjectARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <alias name="glUseProgram"/>
+        </command>
+        <command>
+            <proto>void <name>glUseProgramStages</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param group="UseProgramStageMask"><ptype>GLbitfield</ptype> <name>stages</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUseProgramStagesEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+            <param group="UseProgramStageMask"><ptype>GLbitfield</ptype> <name>stages</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glUseShaderProgramEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUFiniNV</name></proto>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUGetSurfaceivNV</name></proto>
+            <param group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>surface</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLsizei</ptype> <name>bufSize</name></param>
+            <param><ptype>GLsizei</ptype> *<name>length</name></param>
+            <param len="bufSize"><ptype>GLint</ptype> *<name>values</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUInitNV</name></proto>
+            <param>const void *<name>vdpDevice</name></param>
+            <param>const void *<name>getProcAddress</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLboolean</ptype> <name>glVDPAUIsSurfaceNV</name></proto>
+            <param group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>surface</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUMapSurfacesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numSurfaces</name></param>
+            <param group="vdpauSurfaceNV" len="numSurfaces">const <ptype>GLvdpauSurfaceNV</ptype> *<name>surfaces</name></param>
+        </command>
+        <command>
+            <proto group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>glVDPAURegisterOutputSurfaceNV</name></proto>
+            <param>const void *<name>vdpSurface</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numTextureNames</name></param>
+            <param len="numTextureNames">const <ptype>GLuint</ptype> *<name>textureNames</name></param>
+        </command>
+        <command>
+            <proto group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>glVDPAURegisterVideoSurfaceNV</name></proto>
+            <param>const void *<name>vdpSurface</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numTextureNames</name></param>
+            <param len="numTextureNames">const <ptype>GLuint</ptype> *<name>textureNames</name></param>
+        </command>
+        <command>
+            <proto group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>glVDPAURegisterVideoSurfaceWithPictureStructureNV</name></proto>
+            <param>const void *<name>vdpSurface</name></param>
+            <param><ptype>GLenum</ptype> <name>target</name></param>
+            <param><ptype>GLsizei</ptype> <name>numTextureNames</name></param>
+            <param len="numTextureNames">const <ptype>GLuint</ptype> *<name>textureNames</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>isFrameStructure</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUSurfaceAccessNV</name></proto>
+            <param group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>surface</name></param>
+            <param><ptype>GLenum</ptype> <name>access</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUUnmapSurfacesNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>numSurface</name></param>
+            <param group="vdpauSurfaceNV" len="numSurface">const <ptype>GLvdpauSurfaceNV</ptype> *<name>surfaces</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVDPAUUnregisterSurfaceNV</name></proto>
+            <param group="vdpauSurfaceNV"><ptype>GLvdpauSurfaceNV</ptype> <name>surface</name></param>
+        </command>
+        <command>
+            <proto>void <name>glValidateProgram</name></proto>
+            <param><ptype>GLuint</ptype> <name>program</name></param>
+        </command>
+        <command>
+            <proto>void <name>glValidateProgramARB</name></proto>
+            <param group="handleARB"><ptype>GLhandleARB</ptype> <name>programObj</name></param>
+            <alias name="glValidateProgram"/>
+        </command>
+        <command>
+            <proto>void <name>glValidateProgramPipeline</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto>void <name>glValidateProgramPipelineEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>pipeline</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantArrayObjectATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ScalarType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantPointerEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param group="ScalarType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(id,type,stride)">const void *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantbvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLbyte</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantdvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLdouble</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantfvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLfloat</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLint</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantsvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLshort</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantubvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLubyte</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantuivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLuint</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVariantusvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>id</name></param>
+            <param len="COMPSIZE(id)">const <ptype>GLushort</ptype> *<name>addr</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex2bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>x</name></param>
+            <param><ptype>GLbyte</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex2bvOES</name></proto>
+            <param len="2">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex2d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <vecequiv name="glVertex2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2dv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="65"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <vecequiv name="glVertex2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2fv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="66"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <vecequiv name="glVertex2hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2hvNV</name></proto>
+            <param group="Half16NV" len="2">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4240"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <vecequiv name="glVertex2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2iv</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="67"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <vecequiv name="glVertex2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2sv</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="68"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex2xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex2xvOES</name></proto>
+            <param len="2">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex3bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>x</name></param>
+            <param><ptype>GLbyte</ptype> <name>y</name></param>
+            <param><ptype>GLbyte</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex3bvOES</name></proto>
+            <param len="3">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex3d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <vecequiv name="glVertex3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3dv</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="69"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <vecequiv name="glVertex3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3fv</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="70"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>z</name></param>
+            <vecequiv name="glVertex3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3hvNV</name></proto>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4241"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <vecequiv name="glVertex3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3iv</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="71"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <vecequiv name="glVertex3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3sv</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="72"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex3xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex3xvOES</name></proto>
+            <param len="3">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex4bOES</name></proto>
+            <param><ptype>GLbyte</ptype> <name>x</name></param>
+            <param><ptype>GLbyte</ptype> <name>y</name></param>
+            <param><ptype>GLbyte</ptype> <name>z</name></param>
+            <param><ptype>GLbyte</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex4bvOES</name></proto>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex4d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glVertex4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4dv</name></proto>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="73"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glVertex4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4fv</name></proto>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="74"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4hNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>z</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>w</name></param>
+            <vecequiv name="glVertex4hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4hvNV</name></proto>
+            <param group="Half16NV" len="4">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4242"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glVertex4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4iv</name></proto>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="75"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>w</name></param>
+            <vecequiv name="glVertex4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4sv</name></proto>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="76"/>
+        </command>
+        <command>
+            <proto>void <name>glVertex4xOES</name></proto>
+            <param><ptype>GLfixed</ptype> <name>x</name></param>
+            <param><ptype>GLfixed</ptype> <name>y</name></param>
+            <param><ptype>GLfixed</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertex4xvOES</name></proto>
+            <param len="4">const <ptype>GLfixed</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayAttribBinding</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayAttribFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayAttribIFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayAttribLFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayBindVertexBufferEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayBindingDivisor</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayColorOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayEdgeFlagOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayElementBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayFogCoordOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="FogCoordinatePointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayIndexOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="IndexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayMultiTexCoordOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLenum</ptype> <name>texunit</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayNormalOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param group="NormalPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayParameteriAPPLE</name></proto>
+            <param group="VertexArrayPNameAPPLE"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayRangeAPPLE</name></proto>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="length">void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayRangeNV</name></proto>
+            <param><ptype>GLsizei</ptype> <name>length</name></param>
+            <param len="COMPSIZE(length)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArraySecondaryColorOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="ColorPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayTexCoordOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="TexCoordPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribBindingEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribDivisorEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribFormatEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribIFormatEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribIOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribEnum"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribLFormatEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribLOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param group="BufferOffset"><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexAttribOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexBindingDivisorEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexBuffer</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexBuffers</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param>const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param>const <ptype>GLintptr</ptype> *<name>offsets</name></param>
+            <param>const <ptype>GLsizei</ptype> *<name>strides</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexArrayVertexOffsetEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>vaobj</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLintptr</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttrib1dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1dARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1d"/>
+            <vecequiv name="glVertexAttrib1dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1dNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1d"/>
+            <vecequiv name="glVertexAttrib1dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4197"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1dvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1dv"/>
+            <glx type="render" opcode="4197"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1dv"/>
+            <glx type="render" opcode="4197"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1f</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttrib1fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1fARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1f"/>
+            <vecequiv name="glVertexAttrib1fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1fNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1f"/>
+            <vecequiv name="glVertexAttrib1fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4193"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1fvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1fv"/>
+            <glx type="render" opcode="4193"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1fv"/>
+            <glx type="render" opcode="4193"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1hNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttrib1hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV" len="1">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4257"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1s</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttrib1sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1sARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1s"/>
+            <vecequiv name="glVertexAttrib1svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1sNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <alias name="glVertexAttrib1s"/>
+            <vecequiv name="glVertexAttrib1svNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1sv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4189"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1svARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1sv"/>
+            <glx type="render" opcode="4189"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib1svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib1sv"/>
+            <glx type="render" opcode="4189"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttrib2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2dARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2d"/>
+            <vecequiv name="glVertexAttrib2dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2dNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2d"/>
+            <vecequiv name="glVertexAttrib2dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4198"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2dvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2dv"/>
+            <glx type="render" opcode="4198"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2dv"/>
+            <glx type="render" opcode="4198"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2f</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttrib2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2fARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2f"/>
+            <vecequiv name="glVertexAttrib2fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2fNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2f"/>
+            <vecequiv name="glVertexAttrib2fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4194"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2fvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2fv"/>
+            <glx type="render" opcode="4194"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2fv"/>
+            <glx type="render" opcode="4194"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2hNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttrib2hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV" len="2">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4258"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2s</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttrib2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2sARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2s"/>
+            <vecequiv name="glVertexAttrib2svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2sNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <alias name="glVertexAttrib2s"/>
+            <vecequiv name="glVertexAttrib2svNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2sv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4190"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2svARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2sv"/>
+            <glx type="render" opcode="4190"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib2svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib2sv"/>
+            <glx type="render" opcode="4190"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttrib3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3dARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3d"/>
+            <vecequiv name="glVertexAttrib3dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3dNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3d"/>
+            <vecequiv name="glVertexAttrib3dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4199"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3dvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3dv"/>
+            <glx type="render" opcode="4199"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3dv"/>
+            <glx type="render" opcode="4199"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3f</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttrib3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3fARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3f"/>
+            <vecequiv name="glVertexAttrib3fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3fNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3f"/>
+            <vecequiv name="glVertexAttrib3fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4195"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3fvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3fv"/>
+            <glx type="render" opcode="4195"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3fv"/>
+            <glx type="render" opcode="4195"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3hNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttrib3hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV" len="3">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4259"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3s</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttrib3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3sARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3s"/>
+            <vecequiv name="glVertexAttrib3svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3sNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <alias name="glVertexAttrib3s"/>
+            <vecequiv name="glVertexAttrib3svNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3sv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4191"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3svARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3sv"/>
+            <glx type="render" opcode="4191"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib3svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib3sv"/>
+            <glx type="render" opcode="4191"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nbv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NbvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nbv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Niv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Niv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nsv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NsvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nsv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nub</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLubyte</ptype> <name>x</name></param>
+            <param><ptype>GLubyte</ptype> <name>y</name></param>
+            <param><ptype>GLubyte</ptype> <name>z</name></param>
+            <param><ptype>GLubyte</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NubARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLubyte</ptype> <name>x</name></param>
+            <param><ptype>GLubyte</ptype> <name>y</name></param>
+            <param><ptype>GLubyte</ptype> <name>z</name></param>
+            <param><ptype>GLubyte</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4Nub"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nubv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4201"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NubvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nubv"/>
+            <glx type="render" opcode="4201"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nuiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NuivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nuiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4Nusv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4NusvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nusv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4bv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4bvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4bv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttrib4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4dARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4d"/>
+            <vecequiv name="glVertexAttrib4dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4dNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4d"/>
+            <vecequiv name="glVertexAttrib4dvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4200"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4dvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4dv"/>
+            <glx type="render" opcode="4200"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4dv"/>
+            <glx type="render" opcode="4200"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4f</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttrib4fv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4fARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4f"/>
+            <vecequiv name="glVertexAttrib4fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4fNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4f"/>
+            <vecequiv name="glVertexAttrib4fvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4fv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4196"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4fvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4fv"/>
+            <glx type="render" opcode="4196"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4fv"/>
+            <glx type="render" opcode="4196"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4hNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>x</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>y</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>z</name></param>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttrib4hvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="Half16NV" len="4">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4260"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4ivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4s</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <param><ptype>GLshort</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttrib4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4sARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <param><ptype>GLshort</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4s"/>
+            <vecequiv name="glVertexAttrib4svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4sNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <param><ptype>GLshort</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4s"/>
+            <vecequiv name="glVertexAttrib4svNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4sv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4192"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4svARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4sv"/>
+            <glx type="render" opcode="4192"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4sv"/>
+            <glx type="render" opcode="4192"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4ubNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>x</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>y</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>z</name></param>
+            <param group="ColorUB"><ptype>GLubyte</ptype> <name>w</name></param>
+            <alias name="glVertexAttrib4Nub"/>
+            <vecequiv name="glVertexAttrib4ubvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4ubv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4ubvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4ubv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4ubvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="ColorUB" len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4Nubv"/>
+            <glx type="render" opcode="4201"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4uivARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4usv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttrib4usvARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttrib4usv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribArrayObjectATI</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLuint</ptype> <name>buffer</name></param>
+            <param><ptype>GLuint</ptype> <name>offset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribBinding</name></proto>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribDivisor</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribDivisorANGLE</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+            <alias name="glVertexAttribDivisor"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribDivisorARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+            <alias name="glVertexAttribDivisor"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribDivisorEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+            <alias name="glVertexAttribDivisor"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribDivisorNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+            <alias name="glVertexAttribDivisor"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribFormatNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1i</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttribI1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <alias name="glVertexAttribI1i"/>
+            <vecequiv name="glVertexAttribI1ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI1iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <vecequiv name="glVertexAttribI1uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <alias name="glVertexAttribI1ui"/>
+            <vecequiv name="glVertexAttribI1uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI1uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI1uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2i</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttribI2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <alias name="glVertexAttribI2i"/>
+            <vecequiv name="glVertexAttribI2ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <vecequiv name="glVertexAttribI2uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <alias name="glVertexAttribI2ui"/>
+            <vecequiv name="glVertexAttribI2uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI2uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI2uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3i</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttribI3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <alias name="glVertexAttribI3i"/>
+            <vecequiv name="glVertexAttribI3ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <vecequiv name="glVertexAttribI3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <alias name="glVertexAttribI3ui"/>
+            <vecequiv name="glVertexAttribI3uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI3uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI3uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4bv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4bvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLbyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4bv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4i</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttribI4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4iEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+            <alias name="glVertexAttribI4i"/>
+            <vecequiv name="glVertexAttribI4ivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4iv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4ivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4iv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4sv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4svEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4sv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4ubv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4ubvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4ubv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <param><ptype>GLuint</ptype> <name>w</name></param>
+            <vecequiv name="glVertexAttribI4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4uiEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint</ptype> <name>x</name></param>
+            <param><ptype>GLuint</ptype> <name>y</name></param>
+            <param><ptype>GLuint</ptype> <name>z</name></param>
+            <param><ptype>GLuint</ptype> <name>w</name></param>
+            <alias name="glVertexAttribI4ui"/>
+            <vecequiv name="glVertexAttribI4uivEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4uivEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4uiv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4usv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribI4usvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLushort</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribI4usv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribIFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribIFormatNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribIPointer</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribIPointerEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+            <alias name="glVertexAttribIPointer"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <alias name="glVertexAttribL1d"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribL1dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1ui64ARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1ui64vARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param>const <ptype>GLuint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL1ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="1">const <ptype>GLuint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <alias name="glVertexAttribL2d"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribL2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL2ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="2">const <ptype>GLuint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <alias name="glVertexAttribL3d"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribL3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL3ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="3">const <ptype>GLuint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4d</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4dEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+            <alias name="glVertexAttribL4d"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4dv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4dvEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glVertexAttribL4dv"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4i64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4i64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4ui64NV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>x</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>y</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>z</name></param>
+            <param><ptype>GLuint64EXT</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribL4ui64vNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLuint64EXT</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribLFormat</name></proto>
+            <param><ptype>GLuint</ptype> <name>attribindex</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>relativeoffset</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribLFormatNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribLPointer</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="size">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribLPointerEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="size">const void *<name>pointer</name></param>
+            <alias name="glVertexAttribLPointer"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP1ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP1uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP2ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP2uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP3ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP3uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP4ui</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribP4uiv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribParameteriAMD</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribPointer</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribPointerARB</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexAttribPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param group="Boolean"><ptype>GLboolean</ptype> <name>normalized</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+            <alias name="glVertexAttribPointer"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribPointerNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLint</ptype> <name>fsize</name></param>
+            <param group="VertexAttribEnumNV"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(fsize,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs1dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4210"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs1fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4206"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs1hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Half16NV" len="n">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4261"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs1svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4202"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs2dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4211"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs2fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4207"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs2hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Half16NV" len="n">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4262"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs2svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4203"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs3dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4212"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs3fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4208"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs3hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Half16NV" len="n">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4263"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs3svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4204"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs4dvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4213"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs4fvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4209"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs4hvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>n</name></param>
+            <param group="Half16NV" len="n">const <ptype>GLhalfNV</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4264"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs4svNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="count*4">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4205"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexAttribs4ubvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param group="ColorUB" len="count*4">const <ptype>GLubyte</ptype> *<name>v</name></param>
+            <glx type="render" opcode="4214"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexBindingDivisor</name></proto>
+            <param><ptype>GLuint</ptype> <name>bindingindex</name></param>
+            <param><ptype>GLuint</ptype> <name>divisor</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexBlendARB</name></proto>
+            <param><ptype>GLint</ptype> <name>count</name></param>
+            <glx type="render" opcode="226"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexBlendEnvfATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLfloat</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexBlendEnviATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>pname</name></param>
+            <param><ptype>GLint</ptype> <name>param</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexFormatNV</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP2ui</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP2uiv</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP3ui</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP3uiv</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP4ui</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLuint</ptype> <name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexP4uiv</name></proto>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="1">const <ptype>GLuint</ptype> *<name>value</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexPointer</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexPointerEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(size,type,stride,count)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexPointerListIBM</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLint</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(size,type,stride)">const void **<name>pointer</name></param>
+            <param><ptype>GLint</ptype> <name>ptrstride</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexPointervINTEL</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexPointerType"><ptype>GLenum</ptype> <name>type</name></param>
+            <param len="4">const void **<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1dATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1dvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="1">const <ptype>GLdouble</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1fATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1fvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1iATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1ivATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="1">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1sATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream1svATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="1">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2dATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2dvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="2">const <ptype>GLdouble</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2fATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2fvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="2">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2iATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2ivATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="2">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2sATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream2svATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="2">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3dATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3dvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLdouble</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3fATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3fvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3iATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3ivATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3sATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream3svATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="3">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4dATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLdouble</ptype> <name>x</name></param>
+            <param><ptype>GLdouble</ptype> <name>y</name></param>
+            <param><ptype>GLdouble</ptype> <name>z</name></param>
+            <param><ptype>GLdouble</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4dvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="4">const <ptype>GLdouble</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4fATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4fvATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4iATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLint</ptype> <name>x</name></param>
+            <param><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLint</ptype> <name>z</name></param>
+            <param><ptype>GLint</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4ivATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="4">const <ptype>GLint</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4sATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param><ptype>GLshort</ptype> <name>x</name></param>
+            <param><ptype>GLshort</ptype> <name>y</name></param>
+            <param><ptype>GLshort</ptype> <name>z</name></param>
+            <param><ptype>GLshort</ptype> <name>w</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexStream4svATI</name></proto>
+            <param group="VertexStreamATI"><ptype>GLenum</ptype> <name>stream</name></param>
+            <param len="4">const <ptype>GLshort</ptype> *<name>coords</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexWeightPointerEXT</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="VertexWeightPointerTypeEXT"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVertexWeightfEXT</name></proto>
+            <param><ptype>GLfloat</ptype> <name>weight</name></param>
+            <vecequiv name="glVertexWeightfvEXT"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexWeightfvEXT</name></proto>
+            <param len="1">const <ptype>GLfloat</ptype> *<name>weight</name></param>
+            <glx type="render" opcode="4135"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexWeighthNV</name></proto>
+            <param group="Half16NV"><ptype>GLhalfNV</ptype> <name>weight</name></param>
+            <vecequiv name="glVertexWeighthvNV"/>
+        </command>
+        <command>
+            <proto>void <name>glVertexWeighthvNV</name></proto>
+            <param group="Half16NV" len="1">const <ptype>GLhalfNV</ptype> *<name>weight</name></param>
+            <glx type="render" opcode="4256"/>
+        </command>
+        <command>
+            <proto><ptype>GLenum</ptype> <name>glVideoCaptureNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> *<name>sequence_num</name></param>
+            <param><ptype>GLuint64EXT</ptype> *<name>capture_time</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVideoCaptureStreamParameterdvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLdouble</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVideoCaptureStreamParameterfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLfloat</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glVideoCaptureStreamParameterivNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>video_capture_slot</name></param>
+            <param><ptype>GLuint</ptype> <name>stream</name></param>
+            <param><ptype>GLenum</ptype> <name>pname</name></param>
+            <param len="COMPSIZE(pname)">const <ptype>GLint</ptype> *<name>params</name></param>
+        </command>
+        <command>
+            <proto>void <name>glViewport</name></proto>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="WinCoord"><ptype>GLint</ptype> <name>y</name></param>
+            <param><ptype>GLsizei</ptype> <name>width</name></param>
+            <param><ptype>GLsizei</ptype> <name>height</name></param>
+            <glx type="render" opcode="191"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportArrayv</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glViewportArrayvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glViewportArrayv"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportArrayvOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>first</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glViewportArrayv"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedf</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <param><ptype>GLfloat</ptype> <name>h</name></param>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedfOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <param><ptype>GLfloat</ptype> <name>h</name></param>
+            <alias name="glViewportIndexedf"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedfNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>x</name></param>
+            <param><ptype>GLfloat</ptype> <name>y</name></param>
+            <param><ptype>GLfloat</ptype> <name>w</name></param>
+            <param><ptype>GLfloat</ptype> <name>h</name></param>
+            <alias name="glViewportIndexedf"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedfv</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedfvOES</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glViewportIndexedfv"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportIndexedfvNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glViewportIndexedfv"/>
+        </command>
+        <command>
+            <proto>void <name>glViewportPositionWScaleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLfloat</ptype> <name>xcoeff</name></param>
+            <param><ptype>GLfloat</ptype> <name>ycoeff</name></param>
+        </command>
+        <command>
+            <proto>void <name>glViewportSwizzleNV</name></proto>
+            <param><ptype>GLuint</ptype> <name>index</name></param>
+            <param><ptype>GLenum</ptype> <name>swizzlex</name></param>
+            <param><ptype>GLenum</ptype> <name>swizzley</name></param>
+            <param><ptype>GLenum</ptype> <name>swizzlez</name></param>
+            <param><ptype>GLenum</ptype> <name>swizzlew</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWaitSemaphoreEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>semaphore</name></param>
+            <param><ptype>GLuint</ptype> <name>numBufferBarriers</name></param>
+            <param len="COMPSIZE(numBufferBarriers)">const <ptype>GLuint</ptype> *<name>buffers</name></param>
+            <param><ptype>GLuint</ptype> <name>numTextureBarriers</name></param>
+            <param len="COMPSIZE(numTextureBarriers)">const <ptype>GLuint</ptype> *<name>textures</name></param>
+            <param group="TextureLayout" len="COMPSIZE(numTextureBarriers)">const <ptype>GLenum</ptype> *<name>srcLayouts</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWaitSync</name></proto>
+            <param group="sync"><ptype>GLsync</ptype> <name>sync</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <param><ptype>GLuint64</ptype> <name>timeout</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWaitSyncAPPLE</name></proto>
+            <param><ptype>GLsync</ptype> <name>sync</name></param>
+            <param><ptype>GLbitfield</ptype> <name>flags</name></param>
+            <param><ptype>GLuint64</ptype> <name>timeout</name></param>
+            <alias name="glWaitSync"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightPathsNV</name></proto>
+            <param group="Path"><ptype>GLuint</ptype> <name>resultPath</name></param>
+            <param><ptype>GLsizei</ptype> <name>numPaths</name></param>
+            <param group="Path" len="numPaths">const <ptype>GLuint</ptype> *<name>paths</name></param>
+            <param len="numPaths">const <ptype>GLfloat</ptype> *<name>weights</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWeightPointerARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param group="WeightPointerTypeARB"><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWeightPointerOES</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param><ptype>GLenum</ptype> <name>type</name></param>
+            <param><ptype>GLsizei</ptype> <name>stride</name></param>
+            <param len="COMPSIZE(type,stride)">const void *<name>pointer</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWeightbvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLbyte</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="220"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightdvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLdouble</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="228"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightfvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLfloat</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="227"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLint</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="224"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightsvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLshort</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="222"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightubvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLubyte</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="221"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightuivARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLuint</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="225"/>
+        </command>
+        <command>
+            <proto>void <name>glWeightusvARB</name></proto>
+            <param><ptype>GLint</ptype> <name>size</name></param>
+            <param len="size">const <ptype>GLushort</ptype> *<name>weights</name></param>
+            <glx type="render" opcode="223"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <vecequiv name="glWindowPos2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2dARB</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <alias name="glWindowPos2d"/>
+            <vecequiv name="glWindowPos2dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2dMESA</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <alias name="glWindowPos2d"/>
+            <vecequiv name="glWindowPos2dvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2dv</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2dvARB</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2dv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2dvMESA</name></proto>
+            <param group="CoordD" len="2">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2dv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <vecequiv name="glWindowPos2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2fARB</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <alias name="glWindowPos2f"/>
+            <vecequiv name="glWindowPos2fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2fMESA</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <alias name="glWindowPos2f"/>
+            <vecequiv name="glWindowPos2fvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2fv</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2fvARB</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2fv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2fvMESA</name></proto>
+            <param group="CoordF" len="2">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2fv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <vecequiv name="glWindowPos2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2iARB</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <alias name="glWindowPos2i"/>
+            <vecequiv name="glWindowPos2ivARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2iMESA</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <alias name="glWindowPos2i"/>
+            <vecequiv name="glWindowPos2ivMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2iv</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2ivARB</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2iv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2ivMESA</name></proto>
+            <param group="CoordI" len="2">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2iv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <vecequiv name="glWindowPos2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2sARB</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <alias name="glWindowPos2s"/>
+            <vecequiv name="glWindowPos2svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2sMESA</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <alias name="glWindowPos2s"/>
+            <vecequiv name="glWindowPos2svMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2sv</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2svARB</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2sv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos2svMESA</name></proto>
+            <param group="CoordS" len="2">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glWindowPos2sv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3d</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <vecequiv name="glWindowPos3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3dARB</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <alias name="glWindowPos3d"/>
+            <vecequiv name="glWindowPos3dvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3dMESA</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <alias name="glWindowPos3d"/>
+            <vecequiv name="glWindowPos3dvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3dv</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3dvARB</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3dv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3dvMESA</name></proto>
+            <param group="CoordD" len="3">const <ptype>GLdouble</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3dv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3f</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <vecequiv name="glWindowPos3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3fARB</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <alias name="glWindowPos3f"/>
+            <vecequiv name="glWindowPos3fvARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3fMESA</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <alias name="glWindowPos3f"/>
+            <vecequiv name="glWindowPos3fvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3fv</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3fvARB</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3fv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3fvMESA</name></proto>
+            <param group="CoordF" len="3">const <ptype>GLfloat</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3fv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3i</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <vecequiv name="glWindowPos3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3iARB</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <alias name="glWindowPos3i"/>
+            <vecequiv name="glWindowPos3ivARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3iMESA</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <alias name="glWindowPos3i"/>
+            <vecequiv name="glWindowPos3ivMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3iv</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3ivARB</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3iv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3ivMESA</name></proto>
+            <param group="CoordI" len="3">const <ptype>GLint</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3iv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3s</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <vecequiv name="glWindowPos3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3sARB</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <alias name="glWindowPos3s"/>
+            <vecequiv name="glWindowPos3svARB"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3sMESA</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <alias name="glWindowPos3s"/>
+            <vecequiv name="glWindowPos3svMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3sv</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3svARB</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3sv"/>
+            <glx type="render" opcode="230"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos3svMESA</name></proto>
+            <param group="CoordS" len="3">const <ptype>GLshort</ptype> *<name>v</name></param>
+            <alias name="glWindowPos3sv"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4dMESA</name></proto>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>x</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>y</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>z</name></param>
+            <param group="CoordD"><ptype>GLdouble</ptype> <name>w</name></param>
+            <vecequiv name="glWindowPos4dvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4dvMESA</name></proto>
+            <param group="CoordD" len="4">const <ptype>GLdouble</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4fMESA</name></proto>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>x</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>y</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>z</name></param>
+            <param group="CoordF"><ptype>GLfloat</ptype> <name>w</name></param>
+            <vecequiv name="glWindowPos4fvMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4fvMESA</name></proto>
+            <param group="CoordF" len="4">const <ptype>GLfloat</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4iMESA</name></proto>
+            <param group="CoordI"><ptype>GLint</ptype> <name>x</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>y</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>z</name></param>
+            <param group="CoordI"><ptype>GLint</ptype> <name>w</name></param>
+            <vecequiv name="glWindowPos4ivMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4ivMESA</name></proto>
+            <param group="CoordI" len="4">const <ptype>GLint</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4sMESA</name></proto>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>x</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>y</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>z</name></param>
+            <param group="CoordS"><ptype>GLshort</ptype> <name>w</name></param>
+            <vecequiv name="glWindowPos4svMESA"/>
+        </command>
+        <command>
+            <proto>void <name>glWindowPos4svMESA</name></proto>
+            <param group="CoordS" len="4">const <ptype>GLshort</ptype> *<name>v</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWindowRectanglesEXT</name></proto>
+            <param><ptype>GLenum</ptype> <name>mode</name></param>
+            <param><ptype>GLsizei</ptype> <name>count</name></param>
+            <param len="COMPSIZE(count)">const <ptype>GLint</ptype> *<name>box</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWriteMaskEXT</name></proto>
+            <param><ptype>GLuint</ptype> <name>res</name></param>
+            <param><ptype>GLuint</ptype> <name>in</name></param>
+            <param group="VertexShaderWriteMaskEXT"><ptype>GLenum</ptype> <name>outX</name></param>
+            <param group="VertexShaderWriteMaskEXT"><ptype>GLenum</ptype> <name>outY</name></param>
+            <param group="VertexShaderWriteMaskEXT"><ptype>GLenum</ptype> <name>outZ</name></param>
+            <param group="VertexShaderWriteMaskEXT"><ptype>GLenum</ptype> <name>outW</name></param>
+        </command>
+        <command>
+            <proto>void <name>glDrawVkImageNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>vkImage</name></param>
+            <param><ptype>GLuint</ptype> <name>sampler</name></param>
+            <param><ptype>GLfloat</ptype> <name>x0</name></param>
+            <param><ptype>GLfloat</ptype> <name>y0</name></param>
+            <param><ptype>GLfloat</ptype> <name>x1</name></param>
+            <param><ptype>GLfloat</ptype> <name>y1</name></param>
+            <param><ptype>GLfloat</ptype> <name>z</name></param>
+            <param><ptype>GLfloat</ptype> <name>s0</name></param>
+            <param><ptype>GLfloat</ptype> <name>t0</name></param>
+            <param><ptype>GLfloat</ptype> <name>s1</name></param>
+            <param><ptype>GLfloat</ptype> <name>t1</name></param>
+        </command>
+        <command>
+            <proto><ptype>GLVULKANPROCNV</ptype> <name>glGetVkProcAddrNV</name></proto>
+            <param len="COMPSIZE(name)">const <ptype>GLchar</ptype> *<name>name</name></param>
+        </command>
+        <command>
+            <proto>void <name>glWaitVkSemaphoreNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>vkSemaphore</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSignalVkSemaphoreNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>vkSemaphore</name></param>
+        </command>
+        <command>
+            <proto>void <name>glSignalVkFenceNV</name></proto>
+            <param><ptype>GLuint64</ptype> <name>vkFence</name></param>
+        </command>
+
+    </commands>
+
+    <!-- SECTION: GL API interface definitions. -->
+    <feature api="gl" name="GL_VERSION_1_0" number="1.0">
+        <require>
+            <type name="GLvoid" comment="No longer used in headers"/>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_FALSE"/>
+            <enum name="GL_TRUE"/>
+            <enum name="GL_POINTS"/>
+            <enum name="GL_LINES"/>
+            <enum name="GL_LINE_LOOP"/>
+            <enum name="GL_LINE_STRIP"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_TRIANGLE_STRIP"/>
+            <enum name="GL_TRIANGLE_FAN"/>
+            <enum name="GL_QUADS"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_NOTEQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_ZERO"/>
+            <enum name="GL_ONE"/>
+            <enum name="GL_SRC_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+            <enum name="GL_SRC_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+            <enum name="GL_DST_ALPHA"/>
+            <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+            <enum name="GL_DST_COLOR"/>
+            <enum name="GL_ONE_MINUS_DST_COLOR"/>
+            <enum name="GL_SRC_ALPHA_SATURATE"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_FRONT_LEFT"/>
+            <enum name="GL_FRONT_RIGHT"/>
+            <enum name="GL_BACK_LEFT"/>
+            <enum name="GL_BACK_RIGHT"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_LEFT"/>
+            <enum name="GL_RIGHT"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_INVALID_ENUM"/>
+            <enum name="GL_INVALID_VALUE"/>
+            <enum name="GL_INVALID_OPERATION"/>
+            <enum name="GL_OUT_OF_MEMORY"/>
+            <enum name="GL_CW"/>
+            <enum name="GL_CCW"/>
+            <enum name="GL_POINT_SIZE"/>
+            <enum name="GL_POINT_SIZE_RANGE"/>
+            <enum name="GL_POINT_SIZE_GRANULARITY"/>
+            <enum name="GL_LINE_SMOOTH"/>
+            <enum name="GL_LINE_WIDTH"/>
+            <enum name="GL_LINE_WIDTH_RANGE"/>
+            <enum name="GL_LINE_WIDTH_GRANULARITY"/>
+            <enum name="GL_POLYGON_MODE"/>
+            <enum name="GL_POLYGON_SMOOTH"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_CULL_FACE_MODE"/>
+            <enum name="GL_FRONT_FACE"/>
+            <enum name="GL_DEPTH_RANGE"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_DEPTH_WRITEMASK"/>
+            <enum name="GL_DEPTH_CLEAR_VALUE"/>
+            <enum name="GL_DEPTH_FUNC"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_STENCIL_CLEAR_VALUE"/>
+            <enum name="GL_STENCIL_FUNC"/>
+            <enum name="GL_STENCIL_VALUE_MASK"/>
+            <enum name="GL_STENCIL_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_REF"/>
+            <enum name="GL_STENCIL_WRITEMASK"/>
+            <enum name="GL_VIEWPORT"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_BLEND_DST"/>
+            <enum name="GL_BLEND_SRC"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_LOGIC_OP_MODE"/>
+            <enum name="GL_DRAW_BUFFER"/>
+            <enum name="GL_READ_BUFFER"/>
+            <enum name="GL_SCISSOR_BOX"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_COLOR_CLEAR_VALUE"/>
+            <enum name="GL_COLOR_WRITEMASK"/>
+            <enum name="GL_DOUBLEBUFFER"/>
+            <enum name="GL_STEREO"/>
+            <enum name="GL_LINE_SMOOTH_HINT"/>
+            <enum name="GL_POLYGON_SMOOTH_HINT"/>
+            <enum name="GL_UNPACK_SWAP_BYTES"/>
+            <enum name="GL_UNPACK_LSB_FIRST"/>
+            <enum name="GL_UNPACK_ROW_LENGTH"/>
+            <enum name="GL_UNPACK_SKIP_ROWS"/>
+            <enum name="GL_UNPACK_SKIP_PIXELS"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_PACK_SWAP_BYTES"/>
+            <enum name="GL_PACK_LSB_FIRST"/>
+            <enum name="GL_PACK_ROW_LENGTH"/>
+            <enum name="GL_PACK_SKIP_ROWS"/>
+            <enum name="GL_PACK_SKIP_PIXELS"/>
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_MAX_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_VIEWPORT_DIMS"/>
+            <enum name="GL_SUBPIXEL_BITS"/>
+            <enum name="GL_TEXTURE_1D"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_TEXTURE_WIDTH"/>
+            <enum name="GL_TEXTURE_HEIGHT"/>
+            <enum name="GL_TEXTURE_BORDER_COLOR"/>
+            <enum name="GL_DONT_CARE"/>
+            <enum name="GL_FASTEST"/>
+            <enum name="GL_NICEST"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <enum name="GL_CLEAR"/>
+            <enum name="GL_AND"/>
+            <enum name="GL_AND_REVERSE"/>
+            <enum name="GL_COPY"/>
+            <enum name="GL_AND_INVERTED"/>
+            <enum name="GL_NOOP"/>
+            <enum name="GL_XOR"/>
+            <enum name="GL_OR"/>
+            <enum name="GL_NOR"/>
+            <enum name="GL_EQUIV"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_OR_REVERSE"/>
+            <enum name="GL_COPY_INVERTED"/>
+            <enum name="GL_OR_INVERTED"/>
+            <enum name="GL_NAND"/>
+            <enum name="GL_SET"/>
+            <enum name="GL_TEXTURE"/>
+            <enum name="GL_COLOR"/>
+            <enum name="GL_DEPTH"/>
+            <enum name="GL_STENCIL"/>
+            <enum name="GL_STENCIL_INDEX"/>
+            <enum name="GL_DEPTH_COMPONENT"/>
+            <enum name="GL_RED"/>
+            <enum name="GL_GREEN"/>
+            <enum name="GL_BLUE"/>
+            <enum name="GL_ALPHA"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGBA"/>
+            <enum name="GL_POINT"/>
+            <enum name="GL_LINE"/>
+            <enum name="GL_FILL"/>
+            <enum name="GL_KEEP"/>
+            <enum name="GL_REPLACE"/>
+            <enum name="GL_INCR"/>
+            <enum name="GL_DECR"/>
+            <enum name="GL_VENDOR"/>
+            <enum name="GL_RENDERER"/>
+            <enum name="GL_VERSION"/>
+            <enum name="GL_EXTENSIONS"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_NEAREST_MIPMAP_NEAREST"/>
+            <enum name="GL_LINEAR_MIPMAP_NEAREST"/>
+            <enum name="GL_NEAREST_MIPMAP_LINEAR"/>
+            <enum name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_REPEAT"/>
+            <enum name="GL_CURRENT_BIT"/>
+            <enum name="GL_POINT_BIT"/>
+            <enum name="GL_LINE_BIT"/>
+            <enum name="GL_POLYGON_BIT"/>
+            <enum name="GL_POLYGON_STIPPLE_BIT"/>
+            <enum name="GL_PIXEL_MODE_BIT"/>
+            <enum name="GL_LIGHTING_BIT"/>
+            <enum name="GL_FOG_BIT"/>
+            <enum name="GL_ACCUM_BUFFER_BIT"/>
+            <enum name="GL_VIEWPORT_BIT"/>
+            <enum name="GL_TRANSFORM_BIT"/>
+            <enum name="GL_ENABLE_BIT"/>
+            <enum name="GL_HINT_BIT"/>
+            <enum name="GL_EVAL_BIT"/>
+            <enum name="GL_LIST_BIT"/>
+            <enum name="GL_TEXTURE_BIT"/>
+            <enum name="GL_SCISSOR_BIT"/>
+            <enum name="GL_ALL_ATTRIB_BITS"/>
+            <enum name="GL_QUAD_STRIP"/>
+            <enum name="GL_POLYGON"/>
+            <enum name="GL_ACCUM"/>
+            <enum name="GL_LOAD"/>
+            <enum name="GL_RETURN"/>
+            <enum name="GL_MULT"/>
+            <enum name="GL_ADD"/>
+            <enum name="GL_AUX0"/>
+            <enum name="GL_AUX1"/>
+            <enum name="GL_AUX2"/>
+            <enum name="GL_AUX3"/>
+            <enum name="GL_2D"/>
+            <enum name="GL_3D"/>
+            <enum name="GL_3D_COLOR"/>
+            <enum name="GL_3D_COLOR_TEXTURE"/>
+            <enum name="GL_4D_COLOR_TEXTURE"/>
+            <enum name="GL_PASS_THROUGH_TOKEN"/>
+            <enum name="GL_POINT_TOKEN"/>
+            <enum name="GL_LINE_TOKEN"/>
+            <enum name="GL_POLYGON_TOKEN"/>
+            <enum name="GL_BITMAP_TOKEN"/>
+            <enum name="GL_DRAW_PIXEL_TOKEN"/>
+            <enum name="GL_COPY_PIXEL_TOKEN"/>
+            <enum name="GL_LINE_RESET_TOKEN"/>
+            <enum name="GL_EXP"/>
+            <enum name="GL_EXP2"/>
+            <enum name="GL_COEFF"/>
+            <enum name="GL_ORDER"/>
+            <enum name="GL_DOMAIN"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B"/>
+            <enum name="GL_PIXEL_MAP_A_TO_A"/>
+            <enum name="GL_CURRENT_COLOR"/>
+            <enum name="GL_CURRENT_INDEX"/>
+            <enum name="GL_CURRENT_NORMAL"/>
+            <enum name="GL_CURRENT_TEXTURE_COORDS"/>
+            <enum name="GL_CURRENT_RASTER_COLOR"/>
+            <enum name="GL_CURRENT_RASTER_INDEX"/>
+            <enum name="GL_CURRENT_RASTER_TEXTURE_COORDS"/>
+            <enum name="GL_CURRENT_RASTER_POSITION"/>
+            <enum name="GL_CURRENT_RASTER_POSITION_VALID"/>
+            <enum name="GL_CURRENT_RASTER_DISTANCE"/>
+            <enum name="GL_POINT_SMOOTH"/>
+            <enum name="GL_LINE_STIPPLE"/>
+            <enum name="GL_LINE_STIPPLE_PATTERN"/>
+            <enum name="GL_LINE_STIPPLE_REPEAT"/>
+            <enum name="GL_LIST_MODE"/>
+            <enum name="GL_MAX_LIST_NESTING"/>
+            <enum name="GL_LIST_BASE"/>
+            <enum name="GL_LIST_INDEX"/>
+            <enum name="GL_POLYGON_STIPPLE"/>
+            <enum name="GL_EDGE_FLAG"/>
+            <enum name="GL_LIGHTING"/>
+            <enum name="GL_LIGHT_MODEL_LOCAL_VIEWER"/>
+            <enum name="GL_LIGHT_MODEL_TWO_SIDE"/>
+            <enum name="GL_LIGHT_MODEL_AMBIENT"/>
+            <enum name="GL_SHADE_MODEL"/>
+            <enum name="GL_COLOR_MATERIAL_FACE"/>
+            <enum name="GL_COLOR_MATERIAL_PARAMETER"/>
+            <enum name="GL_COLOR_MATERIAL"/>
+            <enum name="GL_FOG"/>
+            <enum name="GL_FOG_INDEX"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_START"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_COLOR"/>
+            <enum name="GL_ACCUM_CLEAR_VALUE"/>
+            <enum name="GL_MATRIX_MODE"/>
+            <enum name="GL_NORMALIZE"/>
+            <enum name="GL_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MODELVIEW_MATRIX"/>
+            <enum name="GL_PROJECTION_MATRIX"/>
+            <enum name="GL_TEXTURE_MATRIX"/>
+            <enum name="GL_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_ALPHA_TEST"/>
+            <enum name="GL_ALPHA_TEST_FUNC"/>
+            <enum name="GL_ALPHA_TEST_REF"/>
+            <enum name="GL_LOGIC_OP"/>
+            <enum name="GL_AUX_BUFFERS"/>
+            <enum name="GL_INDEX_CLEAR_VALUE"/>
+            <enum name="GL_INDEX_WRITEMASK"/>
+            <enum name="GL_INDEX_MODE"/>
+            <enum name="GL_RGBA_MODE"/>
+            <enum name="GL_RENDER_MODE"/>
+            <enum name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+            <enum name="GL_POINT_SMOOTH_HINT"/>
+            <enum name="GL_FOG_HINT"/>
+            <enum name="GL_TEXTURE_GEN_S"/>
+            <enum name="GL_TEXTURE_GEN_T"/>
+            <enum name="GL_TEXTURE_GEN_R"/>
+            <enum name="GL_TEXTURE_GEN_Q"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I_SIZE"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A_SIZE"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_A_TO_A_SIZE"/>
+            <enum name="GL_MAP_COLOR"/>
+            <enum name="GL_MAP_STENCIL"/>
+            <enum name="GL_INDEX_SHIFT"/>
+            <enum name="GL_INDEX_OFFSET"/>
+            <enum name="GL_RED_SCALE"/>
+            <enum name="GL_RED_BIAS"/>
+            <enum name="GL_ZOOM_X"/>
+            <enum name="GL_ZOOM_Y"/>
+            <enum name="GL_GREEN_SCALE"/>
+            <enum name="GL_GREEN_BIAS"/>
+            <enum name="GL_BLUE_SCALE"/>
+            <enum name="GL_BLUE_BIAS"/>
+            <enum name="GL_ALPHA_SCALE"/>
+            <enum name="GL_ALPHA_BIAS"/>
+            <enum name="GL_DEPTH_SCALE"/>
+            <enum name="GL_DEPTH_BIAS"/>
+            <enum name="GL_MAX_EVAL_ORDER"/>
+            <enum name="GL_MAX_LIGHTS"/>
+            <enum name="GL_MAX_CLIP_PLANES"/>
+            <enum name="GL_MAX_PIXEL_MAP_TABLE"/>
+            <enum name="GL_MAX_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_MAX_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_MAX_NAME_STACK_DEPTH"/>
+            <enum name="GL_MAX_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_MAX_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_INDEX_BITS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_ACCUM_RED_BITS"/>
+            <enum name="GL_ACCUM_GREEN_BITS"/>
+            <enum name="GL_ACCUM_BLUE_BITS"/>
+            <enum name="GL_ACCUM_ALPHA_BITS"/>
+            <enum name="GL_NAME_STACK_DEPTH"/>
+            <enum name="GL_AUTO_NORMAL"/>
+            <enum name="GL_MAP1_COLOR_4"/>
+            <enum name="GL_MAP1_INDEX"/>
+            <enum name="GL_MAP1_NORMAL"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP1_VERTEX_3"/>
+            <enum name="GL_MAP1_VERTEX_4"/>
+            <enum name="GL_MAP2_COLOR_4"/>
+            <enum name="GL_MAP2_INDEX"/>
+            <enum name="GL_MAP2_NORMAL"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP2_VERTEX_3"/>
+            <enum name="GL_MAP2_VERTEX_4"/>
+            <enum name="GL_MAP1_GRID_DOMAIN"/>
+            <enum name="GL_MAP1_GRID_SEGMENTS"/>
+            <enum name="GL_MAP2_GRID_DOMAIN"/>
+            <enum name="GL_MAP2_GRID_SEGMENTS"/>
+            <enum name="GL_TEXTURE_COMPONENTS"/>
+            <enum name="GL_TEXTURE_BORDER"/>
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_SPECULAR"/>
+            <enum name="GL_POSITION"/>
+            <enum name="GL_SPOT_DIRECTION"/>
+            <enum name="GL_SPOT_EXPONENT"/>
+            <enum name="GL_SPOT_CUTOFF"/>
+            <enum name="GL_CONSTANT_ATTENUATION"/>
+            <enum name="GL_LINEAR_ATTENUATION"/>
+            <enum name="GL_QUADRATIC_ATTENUATION"/>
+            <enum name="GL_COMPILE"/>
+            <enum name="GL_COMPILE_AND_EXECUTE"/>
+            <enum name="GL_2_BYTES"/>
+            <enum name="GL_3_BYTES"/>
+            <enum name="GL_4_BYTES"/>
+            <enum name="GL_EMISSION"/>
+            <enum name="GL_SHININESS"/>
+            <enum name="GL_AMBIENT_AND_DIFFUSE"/>
+            <enum name="GL_COLOR_INDEXES"/>
+            <enum name="GL_MODELVIEW"/>
+            <enum name="GL_PROJECTION"/>
+            <enum name="GL_COLOR_INDEX"/>
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_BITMAP"/>
+            <enum name="GL_RENDER"/>
+            <enum name="GL_FEEDBACK"/>
+            <enum name="GL_SELECT"/>
+            <enum name="GL_FLAT"/>
+            <enum name="GL_SMOOTH"/>
+            <enum name="GL_S"/>
+            <enum name="GL_T"/>
+            <enum name="GL_R"/>
+            <enum name="GL_Q"/>
+            <enum name="GL_MODULATE"/>
+            <enum name="GL_DECAL"/>
+            <enum name="GL_TEXTURE_ENV_MODE"/>
+            <enum name="GL_TEXTURE_ENV_COLOR"/>
+            <enum name="GL_TEXTURE_ENV"/>
+            <enum name="GL_EYE_LINEAR"/>
+            <enum name="GL_OBJECT_LINEAR"/>
+            <enum name="GL_SPHERE_MAP"/>
+            <enum name="GL_TEXTURE_GEN_MODE"/>
+            <enum name="GL_OBJECT_PLANE"/>
+            <enum name="GL_EYE_PLANE"/>
+            <enum name="GL_CLAMP"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+            <command name="glCullFace"/>
+            <command name="glFrontFace"/>
+            <command name="glHint"/>
+            <command name="glLineWidth"/>
+            <command name="glPointSize"/>
+            <command name="glPolygonMode"/>
+            <command name="glScissor"/>
+            <command name="glTexParameterf"/>
+            <command name="glTexParameterfv"/>
+            <command name="glTexParameteri"/>
+            <command name="glTexParameteriv"/>
+            <command name="glTexImage1D"/>
+            <command name="glTexImage2D"/>
+            <command name="glDrawBuffer"/>
+            <command name="glClear"/>
+            <command name="glClearColor"/>
+            <command name="glClearStencil"/>
+            <command name="glClearDepth"/>
+            <command name="glStencilMask"/>
+            <command name="glColorMask"/>
+            <command name="glDepthMask"/>
+            <command name="glDisable"/>
+            <command name="glEnable"/>
+            <command name="glFinish"/>
+            <command name="glFlush"/>
+            <command name="glBlendFunc"/>
+            <command name="glLogicOp"/>
+            <command name="glStencilFunc"/>
+            <command name="glStencilOp"/>
+            <command name="glDepthFunc"/>
+            <command name="glPixelStoref"/>
+            <command name="glPixelStorei"/>
+            <command name="glReadBuffer"/>
+            <command name="glReadPixels"/>
+            <command name="glGetBooleanv"/>
+            <command name="glGetDoublev"/>
+            <command name="glGetError"/>
+            <command name="glGetFloatv"/>
+            <command name="glGetIntegerv"/>
+            <command name="glGetString"/>
+            <command name="glGetTexImage"/>
+            <command name="glGetTexParameterfv"/>
+            <command name="glGetTexParameteriv"/>
+            <command name="glGetTexLevelParameterfv"/>
+            <command name="glGetTexLevelParameteriv"/>
+            <command name="glIsEnabled"/>
+            <command name="glDepthRange"/>
+            <command name="glViewport"/>
+            <command name="glNewList"/>
+            <command name="glEndList"/>
+            <command name="glCallList"/>
+            <command name="glCallLists"/>
+            <command name="glDeleteLists"/>
+            <command name="glGenLists"/>
+            <command name="glListBase"/>
+            <command name="glBegin"/>
+            <command name="glBitmap"/>
+            <command name="glColor3b"/>
+            <command name="glColor3bv"/>
+            <command name="glColor3d"/>
+            <command name="glColor3dv"/>
+            <command name="glColor3f"/>
+            <command name="glColor3fv"/>
+            <command name="glColor3i"/>
+            <command name="glColor3iv"/>
+            <command name="glColor3s"/>
+            <command name="glColor3sv"/>
+            <command name="glColor3ub"/>
+            <command name="glColor3ubv"/>
+            <command name="glColor3ui"/>
+            <command name="glColor3uiv"/>
+            <command name="glColor3us"/>
+            <command name="glColor3usv"/>
+            <command name="glColor4b"/>
+            <command name="glColor4bv"/>
+            <command name="glColor4d"/>
+            <command name="glColor4dv"/>
+            <command name="glColor4f"/>
+            <command name="glColor4fv"/>
+            <command name="glColor4i"/>
+            <command name="glColor4iv"/>
+            <command name="glColor4s"/>
+            <command name="glColor4sv"/>
+            <command name="glColor4ub"/>
+            <command name="glColor4ubv"/>
+            <command name="glColor4ui"/>
+            <command name="glColor4uiv"/>
+            <command name="glColor4us"/>
+            <command name="glColor4usv"/>
+            <command name="glEdgeFlag"/>
+            <command name="glEdgeFlagv"/>
+            <command name="glEnd"/>
+            <command name="glIndexd"/>
+            <command name="glIndexdv"/>
+            <command name="glIndexf"/>
+            <command name="glIndexfv"/>
+            <command name="glIndexi"/>
+            <command name="glIndexiv"/>
+            <command name="glIndexs"/>
+            <command name="glIndexsv"/>
+            <command name="glNormal3b"/>
+            <command name="glNormal3bv"/>
+            <command name="glNormal3d"/>
+            <command name="glNormal3dv"/>
+            <command name="glNormal3f"/>
+            <command name="glNormal3fv"/>
+            <command name="glNormal3i"/>
+            <command name="glNormal3iv"/>
+            <command name="glNormal3s"/>
+            <command name="glNormal3sv"/>
+            <command name="glRasterPos2d"/>
+            <command name="glRasterPos2dv"/>
+            <command name="glRasterPos2f"/>
+            <command name="glRasterPos2fv"/>
+            <command name="glRasterPos2i"/>
+            <command name="glRasterPos2iv"/>
+            <command name="glRasterPos2s"/>
+            <command name="glRasterPos2sv"/>
+            <command name="glRasterPos3d"/>
+            <command name="glRasterPos3dv"/>
+            <command name="glRasterPos3f"/>
+            <command name="glRasterPos3fv"/>
+            <command name="glRasterPos3i"/>
+            <command name="glRasterPos3iv"/>
+            <command name="glRasterPos3s"/>
+            <command name="glRasterPos3sv"/>
+            <command name="glRasterPos4d"/>
+            <command name="glRasterPos4dv"/>
+            <command name="glRasterPos4f"/>
+            <command name="glRasterPos4fv"/>
+            <command name="glRasterPos4i"/>
+            <command name="glRasterPos4iv"/>
+            <command name="glRasterPos4s"/>
+            <command name="glRasterPos4sv"/>
+            <command name="glRectd"/>
+            <command name="glRectdv"/>
+            <command name="glRectf"/>
+            <command name="glRectfv"/>
+            <command name="glRecti"/>
+            <command name="glRectiv"/>
+            <command name="glRects"/>
+            <command name="glRectsv"/>
+            <command name="glTexCoord1d"/>
+            <command name="glTexCoord1dv"/>
+            <command name="glTexCoord1f"/>
+            <command name="glTexCoord1fv"/>
+            <command name="glTexCoord1i"/>
+            <command name="glTexCoord1iv"/>
+            <command name="glTexCoord1s"/>
+            <command name="glTexCoord1sv"/>
+            <command name="glTexCoord2d"/>
+            <command name="glTexCoord2dv"/>
+            <command name="glTexCoord2f"/>
+            <command name="glTexCoord2fv"/>
+            <command name="glTexCoord2i"/>
+            <command name="glTexCoord2iv"/>
+            <command name="glTexCoord2s"/>
+            <command name="glTexCoord2sv"/>
+            <command name="glTexCoord3d"/>
+            <command name="glTexCoord3dv"/>
+            <command name="glTexCoord3f"/>
+            <command name="glTexCoord3fv"/>
+            <command name="glTexCoord3i"/>
+            <command name="glTexCoord3iv"/>
+            <command name="glTexCoord3s"/>
+            <command name="glTexCoord3sv"/>
+            <command name="glTexCoord4d"/>
+            <command name="glTexCoord4dv"/>
+            <command name="glTexCoord4f"/>
+            <command name="glTexCoord4fv"/>
+            <command name="glTexCoord4i"/>
+            <command name="glTexCoord4iv"/>
+            <command name="glTexCoord4s"/>
+            <command name="glTexCoord4sv"/>
+            <command name="glVertex2d"/>
+            <command name="glVertex2dv"/>
+            <command name="glVertex2f"/>
+            <command name="glVertex2fv"/>
+            <command name="glVertex2i"/>
+            <command name="glVertex2iv"/>
+            <command name="glVertex2s"/>
+            <command name="glVertex2sv"/>
+            <command name="glVertex3d"/>
+            <command name="glVertex3dv"/>
+            <command name="glVertex3f"/>
+            <command name="glVertex3fv"/>
+            <command name="glVertex3i"/>
+            <command name="glVertex3iv"/>
+            <command name="glVertex3s"/>
+            <command name="glVertex3sv"/>
+            <command name="glVertex4d"/>
+            <command name="glVertex4dv"/>
+            <command name="glVertex4f"/>
+            <command name="glVertex4fv"/>
+            <command name="glVertex4i"/>
+            <command name="glVertex4iv"/>
+            <command name="glVertex4s"/>
+            <command name="glVertex4sv"/>
+            <command name="glClipPlane"/>
+            <command name="glColorMaterial"/>
+            <command name="glFogf"/>
+            <command name="glFogfv"/>
+            <command name="glFogi"/>
+            <command name="glFogiv"/>
+            <command name="glLightf"/>
+            <command name="glLightfv"/>
+            <command name="glLighti"/>
+            <command name="glLightiv"/>
+            <command name="glLightModelf"/>
+            <command name="glLightModelfv"/>
+            <command name="glLightModeli"/>
+            <command name="glLightModeliv"/>
+            <command name="glLineStipple"/>
+            <command name="glMaterialf"/>
+            <command name="glMaterialfv"/>
+            <command name="glMateriali"/>
+            <command name="glMaterialiv"/>
+            <command name="glPolygonStipple"/>
+            <command name="glShadeModel"/>
+            <command name="glTexEnvf"/>
+            <command name="glTexEnvfv"/>
+            <command name="glTexEnvi"/>
+            <command name="glTexEnviv"/>
+            <command name="glTexGend"/>
+            <command name="glTexGendv"/>
+            <command name="glTexGenf"/>
+            <command name="glTexGenfv"/>
+            <command name="glTexGeni"/>
+            <command name="glTexGeniv"/>
+            <command name="glFeedbackBuffer"/>
+            <command name="glSelectBuffer"/>
+            <command name="glRenderMode"/>
+            <command name="glInitNames"/>
+            <command name="glLoadName"/>
+            <command name="glPassThrough"/>
+            <command name="glPopName"/>
+            <command name="glPushName"/>
+            <command name="glClearAccum"/>
+            <command name="glClearIndex"/>
+            <command name="glIndexMask"/>
+            <command name="glAccum"/>
+            <command name="glPopAttrib"/>
+            <command name="glPushAttrib"/>
+            <command name="glMap1d"/>
+            <command name="glMap1f"/>
+            <command name="glMap2d"/>
+            <command name="glMap2f"/>
+            <command name="glMapGrid1d"/>
+            <command name="glMapGrid1f"/>
+            <command name="glMapGrid2d"/>
+            <command name="glMapGrid2f"/>
+            <command name="glEvalCoord1d"/>
+            <command name="glEvalCoord1dv"/>
+            <command name="glEvalCoord1f"/>
+            <command name="glEvalCoord1fv"/>
+            <command name="glEvalCoord2d"/>
+            <command name="glEvalCoord2dv"/>
+            <command name="glEvalCoord2f"/>
+            <command name="glEvalCoord2fv"/>
+            <command name="glEvalMesh1"/>
+            <command name="glEvalPoint1"/>
+            <command name="glEvalMesh2"/>
+            <command name="glEvalPoint2"/>
+            <command name="glAlphaFunc"/>
+            <command name="glPixelZoom"/>
+            <command name="glPixelTransferf"/>
+            <command name="glPixelTransferi"/>
+            <command name="glPixelMapfv"/>
+            <command name="glPixelMapuiv"/>
+            <command name="glPixelMapusv"/>
+            <command name="glCopyPixels"/>
+            <command name="glDrawPixels"/>
+            <command name="glGetClipPlane"/>
+            <command name="glGetLightfv"/>
+            <command name="glGetLightiv"/>
+            <command name="glGetMapdv"/>
+            <command name="glGetMapfv"/>
+            <command name="glGetMapiv"/>
+            <command name="glGetMaterialfv"/>
+            <command name="glGetMaterialiv"/>
+            <command name="glGetPixelMapfv"/>
+            <command name="glGetPixelMapuiv"/>
+            <command name="glGetPixelMapusv"/>
+            <command name="glGetPolygonStipple"/>
+            <command name="glGetTexEnvfv"/>
+            <command name="glGetTexEnviv"/>
+            <command name="glGetTexGendv"/>
+            <command name="glGetTexGenfv"/>
+            <command name="glGetTexGeniv"/>
+            <command name="glIsList"/>
+            <command name="glFrustum"/>
+            <command name="glLoadIdentity"/>
+            <command name="glLoadMatrixf"/>
+            <command name="glLoadMatrixd"/>
+            <command name="glMatrixMode"/>
+            <command name="glMultMatrixf"/>
+            <command name="glMultMatrixd"/>
+            <command name="glOrtho"/>
+            <command name="glPopMatrix"/>
+            <command name="glPushMatrix"/>
+            <command name="glRotated"/>
+            <command name="glRotatef"/>
+            <command name="glScaled"/>
+            <command name="glScalef"/>
+            <command name="glTranslated"/>
+            <command name="glTranslatef"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_1_1" number="1.1">
+        <require>
+            <type name="GLclampf" comment="No longer used in GL 1.1, but still defined in Mesa gl.h"/>
+            <type name="GLclampd" comment="No longer used in GL 1.1, but still defined in Mesa gl.h"/>
+            <enum name="GL_COLOR_LOGIC_OP"/>
+            <enum name="GL_POLYGON_OFFSET_UNITS"/>
+            <enum name="GL_POLYGON_OFFSET_POINT"/>
+            <enum name="GL_POLYGON_OFFSET_LINE"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_POLYGON_OFFSET_FACTOR"/>
+            <enum name="GL_TEXTURE_BINDING_1D"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_TEXTURE_INTERNAL_FORMAT"/>
+            <enum name="GL_TEXTURE_RED_SIZE"/>
+            <enum name="GL_TEXTURE_GREEN_SIZE"/>
+            <enum name="GL_TEXTURE_BLUE_SIZE"/>
+            <enum name="GL_TEXTURE_ALPHA_SIZE"/>
+            <enum name="GL_DOUBLE"/>
+            <enum name="GL_PROXY_TEXTURE_1D"/>
+            <enum name="GL_PROXY_TEXTURE_2D"/>
+            <enum name="GL_R3_G3_B2"/>
+            <enum name="GL_RGB4"/>
+            <enum name="GL_RGB5"/>
+            <enum name="GL_RGB8"/>
+            <enum name="GL_RGB10"/>
+            <enum name="GL_RGB12"/>
+            <enum name="GL_RGB16"/>
+            <enum name="GL_RGBA2"/>
+            <enum name="GL_RGBA4"/>
+            <enum name="GL_RGB5_A1"/>
+            <enum name="GL_RGBA8"/>
+            <enum name="GL_RGB10_A2"/>
+            <enum name="GL_RGBA12"/>
+            <enum name="GL_RGBA16"/>
+            <enum name="GL_CLIENT_PIXEL_STORE_BIT"/>
+            <enum name="GL_CLIENT_VERTEX_ARRAY_BIT"/>
+            <enum name="GL_CLIENT_ALL_ATTRIB_BITS"/>
+            <enum name="GL_VERTEX_ARRAY_POINTER"/>
+            <enum name="GL_NORMAL_ARRAY_POINTER"/>
+            <enum name="GL_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_INDEX_ARRAY_POINTER"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_POINTER"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_POINTER"/>
+            <enum name="GL_FEEDBACK_BUFFER_POINTER"/>
+            <enum name="GL_SELECTION_BUFFER_POINTER"/>
+            <enum name="GL_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_INDEX_LOGIC_OP"/>
+            <enum name="GL_MAX_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_FEEDBACK_BUFFER_TYPE"/>
+            <enum name="GL_SELECTION_BUFFER_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_NORMAL_ARRAY"/>
+            <enum name="GL_COLOR_ARRAY"/>
+            <enum name="GL_INDEX_ARRAY"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY"/>
+            <enum name="GL_EDGE_FLAG_ARRAY"/>
+            <enum name="GL_VERTEX_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ARRAY_STRIDE"/>
+            <enum name="GL_NORMAL_ARRAY_TYPE"/>
+            <enum name="GL_NORMAL_ARRAY_STRIDE"/>
+            <enum name="GL_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_INDEX_ARRAY_TYPE"/>
+            <enum name="GL_INDEX_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_SIZE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_TYPE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_LUMINANCE_SIZE"/>
+            <enum name="GL_TEXTURE_INTENSITY_SIZE"/>
+            <enum name="GL_TEXTURE_PRIORITY"/>
+            <enum name="GL_TEXTURE_RESIDENT"/>
+            <enum name="GL_ALPHA4"/>
+            <enum name="GL_ALPHA8"/>
+            <enum name="GL_ALPHA12"/>
+            <enum name="GL_ALPHA16"/>
+            <enum name="GL_LUMINANCE4"/>
+            <enum name="GL_LUMINANCE8"/>
+            <enum name="GL_LUMINANCE12"/>
+            <enum name="GL_LUMINANCE16"/>
+            <enum name="GL_LUMINANCE4_ALPHA4"/>
+            <enum name="GL_LUMINANCE6_ALPHA2"/>
+            <enum name="GL_LUMINANCE8_ALPHA8"/>
+            <enum name="GL_LUMINANCE12_ALPHA4"/>
+            <enum name="GL_LUMINANCE12_ALPHA12"/>
+            <enum name="GL_LUMINANCE16_ALPHA16"/>
+            <enum name="GL_INTENSITY"/>
+            <enum name="GL_INTENSITY4"/>
+            <enum name="GL_INTENSITY8"/>
+            <enum name="GL_INTENSITY12"/>
+            <enum name="GL_INTENSITY16"/>
+            <enum name="GL_V2F"/>
+            <enum name="GL_V3F"/>
+            <enum name="GL_C4UB_V2F"/>
+            <enum name="GL_C4UB_V3F"/>
+            <enum name="GL_C3F_V3F"/>
+            <enum name="GL_N3F_V3F"/>
+            <enum name="GL_C4F_N3F_V3F"/>
+            <enum name="GL_T2F_V3F"/>
+            <enum name="GL_T4F_V4F"/>
+            <enum name="GL_T2F_C4UB_V3F"/>
+            <enum name="GL_T2F_C3F_V3F"/>
+            <enum name="GL_T2F_N3F_V3F"/>
+            <enum name="GL_T2F_C4F_N3F_V3F"/>
+            <enum name="GL_T4F_C4F_N3F_V4F"/>
+            <command name="glDrawArrays"/>
+            <command name="glDrawElements"/>
+            <command name="glGetPointerv"/>
+            <command name="glPolygonOffset"/>
+            <command name="glCopyTexImage1D"/>
+            <command name="glCopyTexImage2D"/>
+            <command name="glCopyTexSubImage1D"/>
+            <command name="glCopyTexSubImage2D"/>
+            <command name="glTexSubImage1D"/>
+            <command name="glTexSubImage2D"/>
+            <command name="glBindTexture"/>
+            <command name="glDeleteTextures"/>
+            <command name="glGenTextures"/>
+            <command name="glIsTexture"/>
+            <command name="glArrayElement"/>
+            <command name="glColorPointer"/>
+            <command name="glDisableClientState"/>
+            <command name="glEdgeFlagPointer"/>
+            <command name="glEnableClientState"/>
+            <command name="glIndexPointer"/>
+            <command name="glInterleavedArrays"/>
+            <command name="glNormalPointer"/>
+            <command name="glTexCoordPointer"/>
+            <command name="glVertexPointer"/>
+            <command name="glAreTexturesResident"/>
+            <command name="glPrioritizeTextures"/>
+            <command name="glIndexub"/>
+            <command name="glIndexubv"/>
+            <command name="glPopClientAttrib"/>
+            <command name="glPushClientAttrib"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_1_2" number="1.2">
+        <require>
+            <enum name="GL_UNSIGNED_BYTE_3_3_2"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+            <enum name="GL_UNSIGNED_INT_8_8_8_8"/>
+            <enum name="GL_UNSIGNED_INT_10_10_10_2"/>
+            <enum name="GL_TEXTURE_BINDING_3D"/>
+            <enum name="GL_PACK_SKIP_IMAGES"/>
+            <enum name="GL_PACK_IMAGE_HEIGHT"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT"/>
+            <enum name="GL_TEXTURE_3D"/>
+            <enum name="GL_PROXY_TEXTURE_3D"/>
+            <enum name="GL_TEXTURE_DEPTH"/>
+            <enum name="GL_TEXTURE_WRAP_R"/>
+            <enum name="GL_MAX_3D_TEXTURE_SIZE"/>
+            <enum name="GL_UNSIGNED_BYTE_2_3_3_REV"/>
+            <enum name="GL_UNSIGNED_SHORT_5_6_5"/>
+            <enum name="GL_UNSIGNED_SHORT_5_6_5_REV"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4_REV"/>
+            <enum name="GL_UNSIGNED_SHORT_1_5_5_5_REV"/>
+            <enum name="GL_UNSIGNED_INT_8_8_8_8_REV"/>
+            <enum name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+            <enum name="GL_BGR"/>
+            <enum name="GL_BGRA"/>
+            <enum name="GL_MAX_ELEMENTS_VERTICES"/>
+            <enum name="GL_MAX_ELEMENTS_INDICES"/>
+            <enum name="GL_CLAMP_TO_EDGE"/>
+            <enum name="GL_TEXTURE_MIN_LOD"/>
+            <enum name="GL_TEXTURE_MAX_LOD"/>
+            <enum name="GL_TEXTURE_BASE_LEVEL"/>
+            <enum name="GL_TEXTURE_MAX_LEVEL"/>
+            <enum name="GL_SMOOTH_POINT_SIZE_RANGE"/>
+            <enum name="GL_SMOOTH_POINT_SIZE_GRANULARITY"/>
+            <enum name="GL_SMOOTH_LINE_WIDTH_RANGE"/>
+            <enum name="GL_SMOOTH_LINE_WIDTH_GRANULARITY"/>
+            <enum name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <enum name="GL_RESCALE_NORMAL"/>
+            <enum name="GL_LIGHT_MODEL_COLOR_CONTROL"/>
+            <enum name="GL_SINGLE_COLOR"/>
+            <enum name="GL_SEPARATE_SPECULAR_COLOR"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+            <command name="glDrawRangeElements"/>
+            <command name="glTexImage3D"/>
+            <command name="glTexSubImage3D"/>
+            <command name="glCopyTexSubImage3D"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_1_3" number="1.3">
+        <require>
+            <enum name="GL_TEXTURE0"/>
+            <enum name="GL_TEXTURE1"/>
+            <enum name="GL_TEXTURE2"/>
+            <enum name="GL_TEXTURE3"/>
+            <enum name="GL_TEXTURE4"/>
+            <enum name="GL_TEXTURE5"/>
+            <enum name="GL_TEXTURE6"/>
+            <enum name="GL_TEXTURE7"/>
+            <enum name="GL_TEXTURE8"/>
+            <enum name="GL_TEXTURE9"/>
+            <enum name="GL_TEXTURE10"/>
+            <enum name="GL_TEXTURE11"/>
+            <enum name="GL_TEXTURE12"/>
+            <enum name="GL_TEXTURE13"/>
+            <enum name="GL_TEXTURE14"/>
+            <enum name="GL_TEXTURE15"/>
+            <enum name="GL_TEXTURE16"/>
+            <enum name="GL_TEXTURE17"/>
+            <enum name="GL_TEXTURE18"/>
+            <enum name="GL_TEXTURE19"/>
+            <enum name="GL_TEXTURE20"/>
+            <enum name="GL_TEXTURE21"/>
+            <enum name="GL_TEXTURE22"/>
+            <enum name="GL_TEXTURE23"/>
+            <enum name="GL_TEXTURE24"/>
+            <enum name="GL_TEXTURE25"/>
+            <enum name="GL_TEXTURE26"/>
+            <enum name="GL_TEXTURE27"/>
+            <enum name="GL_TEXTURE28"/>
+            <enum name="GL_TEXTURE29"/>
+            <enum name="GL_TEXTURE30"/>
+            <enum name="GL_TEXTURE31"/>
+            <enum name="GL_ACTIVE_TEXTURE"/>
+            <enum name="GL_MULTISAMPLE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_ONE"/>
+            <enum name="GL_SAMPLE_COVERAGE"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLE_COVERAGE_VALUE"/>
+            <enum name="GL_SAMPLE_COVERAGE_INVERT"/>
+            <enum name="GL_TEXTURE_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP"/>
+            <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE"/>
+            <enum name="GL_COMPRESSED_RGB"/>
+            <enum name="GL_COMPRESSED_RGBA"/>
+            <enum name="GL_TEXTURE_COMPRESSION_HINT"/>
+            <enum name="GL_TEXTURE_COMPRESSED_IMAGE_SIZE"/>
+            <enum name="GL_TEXTURE_COMPRESSED"/>
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_CLAMP_TO_BORDER"/>
+            <enum name="GL_CLIENT_ACTIVE_TEXTURE"/>
+            <enum name="GL_MAX_TEXTURE_UNITS"/>
+            <enum name="GL_TRANSPOSE_MODELVIEW_MATRIX"/>
+            <enum name="GL_TRANSPOSE_PROJECTION_MATRIX"/>
+            <enum name="GL_TRANSPOSE_TEXTURE_MATRIX"/>
+            <enum name="GL_TRANSPOSE_COLOR_MATRIX"/>
+            <enum name="GL_MULTISAMPLE_BIT"/>
+            <enum name="GL_NORMAL_MAP"/>
+            <enum name="GL_REFLECTION_MAP"/>
+            <enum name="GL_COMPRESSED_ALPHA"/>
+            <enum name="GL_COMPRESSED_LUMINANCE"/>
+            <enum name="GL_COMPRESSED_LUMINANCE_ALPHA"/>
+            <enum name="GL_COMPRESSED_INTENSITY"/>
+            <enum name="GL_COMBINE"/>
+            <enum name="GL_COMBINE_RGB"/>
+            <enum name="GL_COMBINE_ALPHA"/>
+            <enum name="GL_SOURCE0_RGB"/>
+            <enum name="GL_SOURCE1_RGB"/>
+            <enum name="GL_SOURCE2_RGB"/>
+            <enum name="GL_SOURCE0_ALPHA"/>
+            <enum name="GL_SOURCE1_ALPHA"/>
+            <enum name="GL_SOURCE2_ALPHA"/>
+            <enum name="GL_OPERAND0_RGB"/>
+            <enum name="GL_OPERAND1_RGB"/>
+            <enum name="GL_OPERAND2_RGB"/>
+            <enum name="GL_OPERAND0_ALPHA"/>
+            <enum name="GL_OPERAND1_ALPHA"/>
+            <enum name="GL_OPERAND2_ALPHA"/>
+            <enum name="GL_RGB_SCALE"/>
+            <enum name="GL_ADD_SIGNED"/>
+            <enum name="GL_INTERPOLATE"/>
+            <enum name="GL_SUBTRACT"/>
+            <enum name="GL_CONSTANT"/>
+            <enum name="GL_PRIMARY_COLOR"/>
+            <enum name="GL_PREVIOUS"/>
+            <enum name="GL_DOT3_RGB"/>
+            <enum name="GL_DOT3_RGBA"/>
+            <command name="glActiveTexture"/>
+            <command name="glSampleCoverage"/>
+            <command name="glCompressedTexImage3D"/>
+            <command name="glCompressedTexImage2D"/>
+            <command name="glCompressedTexImage1D"/>
+            <command name="glCompressedTexSubImage3D"/>
+            <command name="glCompressedTexSubImage2D"/>
+            <command name="glCompressedTexSubImage1D"/>
+            <command name="glGetCompressedTexImage"/>
+            <command name="glClientActiveTexture"/>
+            <command name="glMultiTexCoord1d"/>
+            <command name="glMultiTexCoord1dv"/>
+            <command name="glMultiTexCoord1f"/>
+            <command name="glMultiTexCoord1fv"/>
+            <command name="glMultiTexCoord1i"/>
+            <command name="glMultiTexCoord1iv"/>
+            <command name="glMultiTexCoord1s"/>
+            <command name="glMultiTexCoord1sv"/>
+            <command name="glMultiTexCoord2d"/>
+            <command name="glMultiTexCoord2dv"/>
+            <command name="glMultiTexCoord2f"/>
+            <command name="glMultiTexCoord2fv"/>
+            <command name="glMultiTexCoord2i"/>
+            <command name="glMultiTexCoord2iv"/>
+            <command name="glMultiTexCoord2s"/>
+            <command name="glMultiTexCoord2sv"/>
+            <command name="glMultiTexCoord3d"/>
+            <command name="glMultiTexCoord3dv"/>
+            <command name="glMultiTexCoord3f"/>
+            <command name="glMultiTexCoord3fv"/>
+            <command name="glMultiTexCoord3i"/>
+            <command name="glMultiTexCoord3iv"/>
+            <command name="glMultiTexCoord3s"/>
+            <command name="glMultiTexCoord3sv"/>
+            <command name="glMultiTexCoord4d"/>
+            <command name="glMultiTexCoord4dv"/>
+            <command name="glMultiTexCoord4f"/>
+            <command name="glMultiTexCoord4fv"/>
+            <command name="glMultiTexCoord4i"/>
+            <command name="glMultiTexCoord4iv"/>
+            <command name="glMultiTexCoord4s"/>
+            <command name="glMultiTexCoord4sv"/>
+            <command name="glLoadTransposeMatrixf"/>
+            <command name="glLoadTransposeMatrixd"/>
+            <command name="glMultTransposeMatrixf"/>
+            <command name="glMultTransposeMatrixd"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_1_4" number="1.4">
+        <require>
+            <enum name="GL_BLEND_DST_RGB"/>
+            <enum name="GL_BLEND_SRC_RGB"/>
+            <enum name="GL_BLEND_DST_ALPHA"/>
+            <enum name="GL_BLEND_SRC_ALPHA"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE"/>
+            <enum name="GL_DEPTH_COMPONENT16"/>
+            <enum name="GL_DEPTH_COMPONENT24"/>
+            <enum name="GL_DEPTH_COMPONENT32"/>
+            <enum name="GL_MIRRORED_REPEAT"/>
+            <enum name="GL_MAX_TEXTURE_LOD_BIAS"/>
+            <enum name="GL_TEXTURE_LOD_BIAS"/>
+            <enum name="GL_INCR_WRAP"/>
+            <enum name="GL_DECR_WRAP"/>
+            <enum name="GL_TEXTURE_DEPTH_SIZE"/>
+            <enum name="GL_TEXTURE_COMPARE_MODE"/>
+            <enum name="GL_TEXTURE_COMPARE_FUNC"/>
+            <enum name="GL_POINT_SIZE_MIN"/>
+            <enum name="GL_POINT_SIZE_MAX"/>
+            <enum name="GL_POINT_DISTANCE_ATTENUATION"/>
+            <enum name="GL_GENERATE_MIPMAP"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_FOG_COORDINATE_SOURCE"/>
+            <enum name="GL_FOG_COORDINATE"/>
+            <enum name="GL_FRAGMENT_DEPTH"/>
+            <enum name="GL_CURRENT_FOG_COORDINATE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_TYPE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_STRIDE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_POINTER"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY"/>
+            <enum name="GL_COLOR_SUM"/>
+            <enum name="GL_CURRENT_SECONDARY_COLOR"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY"/>
+            <enum name="GL_TEXTURE_FILTER_CONTROL"/>
+            <enum name="GL_DEPTH_TEXTURE_MODE"/>
+            <enum name="GL_COMPARE_R_TO_TEXTURE"/>
+            <command name="glBlendFuncSeparate"/>
+            <command name="glMultiDrawArrays"/>
+            <command name="glMultiDrawElements"/>
+            <command name="glPointParameterf"/>
+            <command name="glPointParameterfv"/>
+            <command name="glPointParameteri"/>
+            <command name="glPointParameteriv"/>
+            <command name="glFogCoordf"/>
+            <command name="glFogCoordfv"/>
+            <command name="glFogCoordd"/>
+            <command name="glFogCoorddv"/>
+            <command name="glFogCoordPointer"/>
+            <command name="glSecondaryColor3b"/>
+            <command name="glSecondaryColor3bv"/>
+            <command name="glSecondaryColor3d"/>
+            <command name="glSecondaryColor3dv"/>
+            <command name="glSecondaryColor3f"/>
+            <command name="glSecondaryColor3fv"/>
+            <command name="glSecondaryColor3i"/>
+            <command name="glSecondaryColor3iv"/>
+            <command name="glSecondaryColor3s"/>
+            <command name="glSecondaryColor3sv"/>
+            <command name="glSecondaryColor3ub"/>
+            <command name="glSecondaryColor3ubv"/>
+            <command name="glSecondaryColor3ui"/>
+            <command name="glSecondaryColor3uiv"/>
+            <command name="glSecondaryColor3us"/>
+            <command name="glSecondaryColor3usv"/>
+            <command name="glSecondaryColorPointer"/>
+            <command name="glWindowPos2d"/>
+            <command name="glWindowPos2dv"/>
+            <command name="glWindowPos2f"/>
+            <command name="glWindowPos2fv"/>
+            <command name="glWindowPos2i"/>
+            <command name="glWindowPos2iv"/>
+            <command name="glWindowPos2s"/>
+            <command name="glWindowPos2sv"/>
+            <command name="glWindowPos3d"/>
+            <command name="glWindowPos3dv"/>
+            <command name="glWindowPos3f"/>
+            <command name="glWindowPos3fv"/>
+            <command name="glWindowPos3i"/>
+            <command name="glWindowPos3iv"/>
+            <command name="glWindowPos3s"/>
+            <command name="glWindowPos3sv"/>
+        </require>
+        <require comment="Promoted from ARB_imaging subset to core">
+            <enum name="GL_BLEND_COLOR"/>
+            <enum name="GL_BLEND_EQUATION"/>
+            <enum name="GL_CONSTANT_COLOR"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+            <enum name="GL_CONSTANT_ALPHA"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+            <enum name="GL_FUNC_ADD"/>
+            <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+            <enum name="GL_FUNC_SUBTRACT"/>
+            <enum name="GL_MIN"/>
+            <enum name="GL_MAX"/>
+            <command name="glBlendColor"/>
+            <command name="glBlendEquation"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_1_5" number="1.5">
+        <require>
+            <enum name="GL_BUFFER_SIZE"/>
+            <enum name="GL_BUFFER_USAGE"/>
+            <enum name="GL_QUERY_COUNTER_BITS"/>
+            <enum name="GL_CURRENT_QUERY"/>
+            <enum name="GL_QUERY_RESULT"/>
+            <enum name="GL_QUERY_RESULT_AVAILABLE"/>
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_READ_ONLY"/>
+            <enum name="GL_WRITE_ONLY"/>
+            <enum name="GL_READ_WRITE"/>
+            <enum name="GL_BUFFER_ACCESS"/>
+            <enum name="GL_BUFFER_MAPPED"/>
+            <enum name="GL_BUFFER_MAP_POINTER"/>
+            <enum name="GL_STREAM_DRAW"/>
+            <enum name="GL_STREAM_READ"/>
+            <enum name="GL_STREAM_COPY"/>
+            <enum name="GL_STATIC_DRAW"/>
+            <enum name="GL_STATIC_READ"/>
+            <enum name="GL_STATIC_COPY"/>
+            <enum name="GL_DYNAMIC_DRAW"/>
+            <enum name="GL_DYNAMIC_READ"/>
+            <enum name="GL_DYNAMIC_COPY"/>
+            <enum name="GL_SAMPLES_PASSED"/>
+            <enum name="GL_SRC1_ALPHA"/>
+            <enum name="GL_VERTEX_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_NORMAL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_COLOR_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_INDEX_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_WEIGHT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_FOG_COORD_SRC"/>
+            <enum name="GL_FOG_COORD"/>
+            <enum name="GL_CURRENT_FOG_COORD"/>
+            <enum name="GL_FOG_COORD_ARRAY_TYPE"/>
+            <enum name="GL_FOG_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_FOG_COORD_ARRAY_POINTER"/>
+            <enum name="GL_FOG_COORD_ARRAY"/>
+            <enum name="GL_FOG_COORD_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_SRC0_RGB"/>
+            <enum name="GL_SRC1_RGB"/>
+            <enum name="GL_SRC2_RGB"/>
+            <enum name="GL_SRC0_ALPHA"/>
+            <enum name="GL_SRC2_ALPHA"/>
+            <command name="glGenQueries"/>
+            <command name="glDeleteQueries"/>
+            <command name="glIsQuery"/>
+            <command name="glBeginQuery"/>
+            <command name="glEndQuery"/>
+            <command name="glGetQueryiv"/>
+            <command name="glGetQueryObjectiv"/>
+            <command name="glGetQueryObjectuiv"/>
+            <command name="glBindBuffer"/>
+            <command name="glDeleteBuffers"/>
+            <command name="glGenBuffers"/>
+            <command name="glIsBuffer"/>
+            <command name="glBufferData"/>
+            <command name="glBufferSubData"/>
+            <command name="glGetBufferSubData"/>
+            <command name="glMapBuffer"/>
+            <command name="glUnmapBuffer"/>
+            <command name="glGetBufferParameteriv"/>
+            <command name="glGetBufferPointerv"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_2_0" number="2.0">
+        <require>
+            <enum name="GL_BLEND_EQUATION_RGB"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+            <enum name="GL_CURRENT_VERTEX_ATTRIB"/>
+            <enum name="GL_VERTEX_PROGRAM_POINT_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_POINTER"/>
+            <enum name="GL_STENCIL_BACK_FUNC"/>
+            <enum name="GL_STENCIL_BACK_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_PASS"/>
+            <enum name="GL_MAX_DRAW_BUFFERS"/>
+            <enum name="GL_DRAW_BUFFER0"/>
+            <enum name="GL_DRAW_BUFFER1"/>
+            <enum name="GL_DRAW_BUFFER2"/>
+            <enum name="GL_DRAW_BUFFER3"/>
+            <enum name="GL_DRAW_BUFFER4"/>
+            <enum name="GL_DRAW_BUFFER5"/>
+            <enum name="GL_DRAW_BUFFER6"/>
+            <enum name="GL_DRAW_BUFFER7"/>
+            <enum name="GL_DRAW_BUFFER8"/>
+            <enum name="GL_DRAW_BUFFER9"/>
+            <enum name="GL_DRAW_BUFFER10"/>
+            <enum name="GL_DRAW_BUFFER11"/>
+            <enum name="GL_DRAW_BUFFER12"/>
+            <enum name="GL_DRAW_BUFFER13"/>
+            <enum name="GL_DRAW_BUFFER14"/>
+            <enum name="GL_DRAW_BUFFER15"/>
+            <enum name="GL_BLEND_EQUATION_ALPHA"/>
+            <enum name="GL_MAX_VERTEX_ATTRIBS"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+            <enum name="GL_MAX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_FRAGMENT_SHADER"/>
+            <enum name="GL_VERTEX_SHADER"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_VARYING_FLOATS"/>
+            <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_SHADER_TYPE"/>
+            <enum name="GL_FLOAT_VEC2"/>
+            <enum name="GL_FLOAT_VEC3"/>
+            <enum name="GL_FLOAT_VEC4"/>
+            <enum name="GL_INT_VEC2"/>
+            <enum name="GL_INT_VEC3"/>
+            <enum name="GL_INT_VEC4"/>
+            <enum name="GL_BOOL"/>
+            <enum name="GL_BOOL_VEC2"/>
+            <enum name="GL_BOOL_VEC3"/>
+            <enum name="GL_BOOL_VEC4"/>
+            <enum name="GL_FLOAT_MAT2"/>
+            <enum name="GL_FLOAT_MAT3"/>
+            <enum name="GL_FLOAT_MAT4"/>
+            <enum name="GL_SAMPLER_1D"/>
+            <enum name="GL_SAMPLER_2D"/>
+            <enum name="GL_SAMPLER_3D"/>
+            <enum name="GL_SAMPLER_CUBE"/>
+            <enum name="GL_SAMPLER_1D_SHADOW"/>
+            <enum name="GL_SAMPLER_2D_SHADOW"/>
+            <enum name="GL_DELETE_STATUS"/>
+            <enum name="GL_COMPILE_STATUS"/>
+            <enum name="GL_LINK_STATUS"/>
+            <enum name="GL_VALIDATE_STATUS"/>
+            <enum name="GL_INFO_LOG_LENGTH"/>
+            <enum name="GL_ATTACHED_SHADERS"/>
+            <enum name="GL_ACTIVE_UNIFORMS"/>
+            <enum name="GL_ACTIVE_UNIFORM_MAX_LENGTH"/>
+            <enum name="GL_SHADER_SOURCE_LENGTH"/>
+            <enum name="GL_ACTIVE_ATTRIBUTES"/>
+            <enum name="GL_ACTIVE_ATTRIBUTE_MAX_LENGTH"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT"/>
+            <enum name="GL_SHADING_LANGUAGE_VERSION"/>
+            <enum name="GL_CURRENT_PROGRAM"/>
+            <enum name="GL_POINT_SPRITE_COORD_ORIGIN"/>
+            <enum name="GL_LOWER_LEFT"/>
+            <enum name="GL_UPPER_LEFT"/>
+            <enum name="GL_STENCIL_BACK_REF"/>
+            <enum name="GL_STENCIL_BACK_VALUE_MASK"/>
+            <enum name="GL_STENCIL_BACK_WRITEMASK"/>
+            <enum name="GL_VERTEX_PROGRAM_TWO_SIDE"/>
+            <enum name="GL_POINT_SPRITE"/>
+            <enum name="GL_COORD_REPLACE"/>
+            <enum name="GL_MAX_TEXTURE_COORDS"/>
+            <command name="glBlendEquationSeparate"/>
+            <command name="glDrawBuffers"/>
+            <command name="glStencilOpSeparate"/>
+            <command name="glStencilFuncSeparate"/>
+            <command name="glStencilMaskSeparate"/>
+            <command name="glAttachShader"/>
+            <command name="glBindAttribLocation"/>
+            <command name="glCompileShader"/>
+            <command name="glCreateProgram"/>
+            <command name="glCreateShader"/>
+            <command name="glDeleteProgram"/>
+            <command name="glDeleteShader"/>
+            <command name="glDetachShader"/>
+            <command name="glDisableVertexAttribArray"/>
+            <command name="glEnableVertexAttribArray"/>
+            <command name="glGetActiveAttrib"/>
+            <command name="glGetActiveUniform"/>
+            <command name="glGetAttachedShaders"/>
+            <command name="glGetAttribLocation"/>
+            <command name="glGetProgramiv"/>
+            <command name="glGetProgramInfoLog"/>
+            <command name="glGetShaderiv"/>
+            <command name="glGetShaderInfoLog"/>
+            <command name="glGetShaderSource"/>
+            <command name="glGetUniformLocation"/>
+            <command name="glGetUniformfv"/>
+            <command name="glGetUniformiv"/>
+            <command name="glGetVertexAttribdv"/>
+            <command name="glGetVertexAttribfv"/>
+            <command name="glGetVertexAttribiv"/>
+            <command name="glGetVertexAttribPointerv"/>
+            <command name="glIsProgram"/>
+            <command name="glIsShader"/>
+            <command name="glLinkProgram"/>
+            <command name="glShaderSource"/>
+            <command name="glUseProgram"/>
+            <command name="glUniform1f"/>
+            <command name="glUniform2f"/>
+            <command name="glUniform3f"/>
+            <command name="glUniform4f"/>
+            <command name="glUniform1i"/>
+            <command name="glUniform2i"/>
+            <command name="glUniform3i"/>
+            <command name="glUniform4i"/>
+            <command name="glUniform1fv"/>
+            <command name="glUniform2fv"/>
+            <command name="glUniform3fv"/>
+            <command name="glUniform4fv"/>
+            <command name="glUniform1iv"/>
+            <command name="glUniform2iv"/>
+            <command name="glUniform3iv"/>
+            <command name="glUniform4iv"/>
+            <command name="glUniformMatrix2fv"/>
+            <command name="glUniformMatrix3fv"/>
+            <command name="glUniformMatrix4fv"/>
+            <command name="glValidateProgram"/>
+            <command name="glVertexAttrib1d"/>
+            <command name="glVertexAttrib1dv"/>
+            <command name="glVertexAttrib1f"/>
+            <command name="glVertexAttrib1fv"/>
+            <command name="glVertexAttrib1s"/>
+            <command name="glVertexAttrib1sv"/>
+            <command name="glVertexAttrib2d"/>
+            <command name="glVertexAttrib2dv"/>
+            <command name="glVertexAttrib2f"/>
+            <command name="glVertexAttrib2fv"/>
+            <command name="glVertexAttrib2s"/>
+            <command name="glVertexAttrib2sv"/>
+            <command name="glVertexAttrib3d"/>
+            <command name="glVertexAttrib3dv"/>
+            <command name="glVertexAttrib3f"/>
+            <command name="glVertexAttrib3fv"/>
+            <command name="glVertexAttrib3s"/>
+            <command name="glVertexAttrib3sv"/>
+            <command name="glVertexAttrib4Nbv"/>
+            <command name="glVertexAttrib4Niv"/>
+            <command name="glVertexAttrib4Nsv"/>
+            <command name="glVertexAttrib4Nub"/>
+            <command name="glVertexAttrib4Nubv"/>
+            <command name="glVertexAttrib4Nuiv"/>
+            <command name="glVertexAttrib4Nusv"/>
+            <command name="glVertexAttrib4bv"/>
+            <command name="glVertexAttrib4d"/>
+            <command name="glVertexAttrib4dv"/>
+            <command name="glVertexAttrib4f"/>
+            <command name="glVertexAttrib4fv"/>
+            <command name="glVertexAttrib4iv"/>
+            <command name="glVertexAttrib4s"/>
+            <command name="glVertexAttrib4sv"/>
+            <command name="glVertexAttrib4ubv"/>
+            <command name="glVertexAttrib4uiv"/>
+            <command name="glVertexAttrib4usv"/>
+            <command name="glVertexAttribPointer"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_2_1" number="2.1">
+        <require>
+            <enum name="GL_PIXEL_PACK_BUFFER"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER"/>
+            <enum name="GL_PIXEL_PACK_BUFFER_BINDING"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING"/>
+            <enum name="GL_FLOAT_MAT2x3"/>
+            <enum name="GL_FLOAT_MAT2x4"/>
+            <enum name="GL_FLOAT_MAT3x2"/>
+            <enum name="GL_FLOAT_MAT3x4"/>
+            <enum name="GL_FLOAT_MAT4x2"/>
+            <enum name="GL_FLOAT_MAT4x3"/>
+            <enum name="GL_SRGB"/>
+            <enum name="GL_SRGB8"/>
+            <enum name="GL_SRGB_ALPHA"/>
+            <enum name="GL_SRGB8_ALPHA8"/>
+            <enum name="GL_COMPRESSED_SRGB"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA"/>
+            <enum name="GL_CURRENT_RASTER_SECONDARY_COLOR"/>
+            <enum name="GL_SLUMINANCE_ALPHA"/>
+            <enum name="GL_SLUMINANCE8_ALPHA8"/>
+            <enum name="GL_SLUMINANCE"/>
+            <enum name="GL_SLUMINANCE8"/>
+            <enum name="GL_COMPRESSED_SLUMINANCE"/>
+            <enum name="GL_COMPRESSED_SLUMINANCE_ALPHA"/>
+            <command name="glUniformMatrix2x3fv"/>
+            <command name="glUniformMatrix3x2fv"/>
+            <command name="glUniformMatrix2x4fv"/>
+            <command name="glUniformMatrix4x2fv"/>
+            <command name="glUniformMatrix3x4fv"/>
+            <command name="glUniformMatrix4x3fv"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_3_0" number="3.0">
+        <require>
+            <enum name="GL_COMPARE_REF_TO_TEXTURE"/>
+            <enum name="GL_CLIP_DISTANCE0"/>
+            <enum name="GL_CLIP_DISTANCE1"/>
+            <enum name="GL_CLIP_DISTANCE2"/>
+            <enum name="GL_CLIP_DISTANCE3"/>
+            <enum name="GL_CLIP_DISTANCE4"/>
+            <enum name="GL_CLIP_DISTANCE5"/>
+            <enum name="GL_CLIP_DISTANCE6"/>
+            <enum name="GL_CLIP_DISTANCE7"/>
+            <enum name="GL_MAX_CLIP_DISTANCES"/>
+            <enum name="GL_MAJOR_VERSION"/>
+            <enum name="GL_MINOR_VERSION"/>
+            <enum name="GL_NUM_EXTENSIONS"/>
+            <enum name="GL_CONTEXT_FLAGS"/>
+            <enum name="GL_COMPRESSED_RED"/>
+            <enum name="GL_COMPRESSED_RG"/>
+            <enum name="GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT"/>
+            <enum name="GL_RGBA32F"/>
+            <enum name="GL_RGB32F"/>
+            <enum name="GL_RGBA16F"/>
+            <enum name="GL_RGB16F"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER"/>
+            <enum name="GL_MAX_ARRAY_TEXTURE_LAYERS"/>
+            <enum name="GL_MIN_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_MAX_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_CLAMP_READ_COLOR"/>
+            <enum name="GL_FIXED_ONLY"/>
+            <enum name="GL_MAX_VARYING_COMPONENTS"/>
+            <enum name="GL_TEXTURE_1D_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_1D_ARRAY"/>
+            <enum name="GL_TEXTURE_2D_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_2D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_1D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+            <enum name="GL_R11F_G11F_B10F"/>
+            <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+            <enum name="GL_RGB9_E5"/>
+            <enum name="GL_UNSIGNED_INT_5_9_9_9_REV"/>
+            <enum name="GL_TEXTURE_SHARED_SIZE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYINGS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_PRIMITIVES_GENERATED"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"/>
+            <enum name="GL_RASTERIZER_DISCARD"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS"/>
+            <enum name="GL_INTERLEAVED_ATTRIBS"/>
+            <enum name="GL_SEPARATE_ATTRIBS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"/>
+            <enum name="GL_RGBA32UI"/>
+            <enum name="GL_RGB32UI"/>
+            <enum name="GL_RGBA16UI"/>
+            <enum name="GL_RGB16UI"/>
+            <enum name="GL_RGBA8UI"/>
+            <enum name="GL_RGB8UI"/>
+            <enum name="GL_RGBA32I"/>
+            <enum name="GL_RGB32I"/>
+            <enum name="GL_RGBA16I"/>
+            <enum name="GL_RGB16I"/>
+            <enum name="GL_RGBA8I"/>
+            <enum name="GL_RGB8I"/>
+            <enum name="GL_RED_INTEGER"/>
+            <enum name="GL_GREEN_INTEGER"/>
+            <enum name="GL_BLUE_INTEGER"/>
+            <enum name="GL_RGB_INTEGER"/>
+            <enum name="GL_RGBA_INTEGER"/>
+            <enum name="GL_BGR_INTEGER"/>
+            <enum name="GL_BGRA_INTEGER"/>
+            <enum name="GL_SAMPLER_1D_ARRAY"/>
+            <enum name="GL_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_SAMPLER_1D_ARRAY_SHADOW"/>
+            <enum name="GL_SAMPLER_2D_ARRAY_SHADOW"/>
+            <enum name="GL_SAMPLER_CUBE_SHADOW"/>
+            <enum name="GL_UNSIGNED_INT_VEC2"/>
+            <enum name="GL_UNSIGNED_INT_VEC3"/>
+            <enum name="GL_UNSIGNED_INT_VEC4"/>
+            <enum name="GL_INT_SAMPLER_1D"/>
+            <enum name="GL_INT_SAMPLER_2D"/>
+            <enum name="GL_INT_SAMPLER_3D"/>
+            <enum name="GL_INT_SAMPLER_CUBE"/>
+            <enum name="GL_INT_SAMPLER_1D_ARRAY"/>
+            <enum name="GL_INT_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_1D"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_3D"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_1D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_QUERY_WAIT"/>
+            <enum name="GL_QUERY_NO_WAIT"/>
+            <enum name="GL_QUERY_BY_REGION_WAIT"/>
+            <enum name="GL_QUERY_BY_REGION_NO_WAIT"/>
+            <enum name="GL_BUFFER_ACCESS_FLAGS"/>
+            <enum name="GL_BUFFER_MAP_LENGTH"/>
+            <enum name="GL_BUFFER_MAP_OFFSET"/>
+            <command name="glColorMaski"/>
+            <command name="glGetBooleani_v"/>
+            <command name="glGetIntegeri_v"/>
+            <command name="glEnablei"/>
+            <command name="glDisablei"/>
+            <command name="glIsEnabledi"/>
+            <command name="glBeginTransformFeedback"/>
+            <command name="glEndTransformFeedback"/>
+            <command name="glBindBufferRange"/>
+            <command name="glBindBufferBase"/>
+            <command name="glTransformFeedbackVaryings"/>
+            <command name="glGetTransformFeedbackVarying"/>
+            <command name="glClampColor"/>
+            <command name="glBeginConditionalRender"/>
+            <command name="glEndConditionalRender"/>
+            <command name="glVertexAttribIPointer"/>
+            <command name="glGetVertexAttribIiv"/>
+            <command name="glGetVertexAttribIuiv"/>
+            <command name="glVertexAttribI1i"/>
+            <command name="glVertexAttribI2i"/>
+            <command name="glVertexAttribI3i"/>
+            <command name="glVertexAttribI4i"/>
+            <command name="glVertexAttribI1ui"/>
+            <command name="glVertexAttribI2ui"/>
+            <command name="glVertexAttribI3ui"/>
+            <command name="glVertexAttribI4ui"/>
+            <command name="glVertexAttribI1iv"/>
+            <command name="glVertexAttribI2iv"/>
+            <command name="glVertexAttribI3iv"/>
+            <command name="glVertexAttribI4iv"/>
+            <command name="glVertexAttribI1uiv"/>
+            <command name="glVertexAttribI2uiv"/>
+            <command name="glVertexAttribI3uiv"/>
+            <command name="glVertexAttribI4uiv"/>
+            <command name="glVertexAttribI4bv"/>
+            <command name="glVertexAttribI4sv"/>
+            <command name="glVertexAttribI4ubv"/>
+            <command name="glVertexAttribI4usv"/>
+            <command name="glGetUniformuiv"/>
+            <command name="glBindFragDataLocation"/>
+            <command name="glGetFragDataLocation"/>
+            <command name="glUniform1ui"/>
+            <command name="glUniform2ui"/>
+            <command name="glUniform3ui"/>
+            <command name="glUniform4ui"/>
+            <command name="glUniform1uiv"/>
+            <command name="glUniform2uiv"/>
+            <command name="glUniform3uiv"/>
+            <command name="glUniform4uiv"/>
+            <command name="glTexParameterIiv"/>
+            <command name="glTexParameterIuiv"/>
+            <command name="glGetTexParameterIiv"/>
+            <command name="glGetTexParameterIuiv"/>
+            <command name="glClearBufferiv"/>
+            <command name="glClearBufferuiv"/>
+            <command name="glClearBufferfv"/>
+            <command name="glClearBufferfi"/>
+            <command name="glGetStringi"/>
+        </require>
+        <require comment="Reuse ARB_depth_buffer_float">
+            <enum name="GL_DEPTH_COMPONENT32F"/>
+            <enum name="GL_DEPTH32F_STENCIL8"/>
+            <enum name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV"/>
+        </require>
+        <require comment="Reuse ARB_framebuffer_object">
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT"/>
+            <enum name="GL_FRAMEBUFFER_UNDEFINED"/>
+            <enum name="GL_DEPTH_STENCIL_ATTACHMENT"/>
+            <enum name="GL_MAX_RENDERBUFFER_SIZE"/>
+            <enum name="GL_DEPTH_STENCIL"/>
+            <enum name="GL_UNSIGNED_INT_24_8"/>
+            <enum name="GL_DEPTH24_STENCIL8"/>
+            <enum name="GL_TEXTURE_STENCIL_SIZE"/>
+            <enum name="GL_TEXTURE_RED_TYPE"/>
+            <enum name="GL_TEXTURE_GREEN_TYPE"/>
+            <enum name="GL_TEXTURE_BLUE_TYPE"/>
+            <enum name="GL_TEXTURE_ALPHA_TYPE"/>
+            <enum name="GL_TEXTURE_DEPTH_TYPE"/>
+            <enum name="GL_UNSIGNED_NORMALIZED"/>
+            <enum name="GL_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_DRAW_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RENDERBUFFER_BINDING"/>
+            <enum name="GL_READ_FRAMEBUFFER"/>
+            <enum name="GL_DRAW_FRAMEBUFFER"/>
+            <enum name="GL_READ_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RENDERBUFFER_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+            <enum name="GL_FRAMEBUFFER_COMPLETE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER"/>
+            <enum name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+            <enum name="GL_MAX_COLOR_ATTACHMENTS"/>
+            <enum name="GL_COLOR_ATTACHMENT0"/>
+            <enum name="GL_COLOR_ATTACHMENT1"/>
+            <enum name="GL_COLOR_ATTACHMENT2"/>
+            <enum name="GL_COLOR_ATTACHMENT3"/>
+            <enum name="GL_COLOR_ATTACHMENT4"/>
+            <enum name="GL_COLOR_ATTACHMENT5"/>
+            <enum name="GL_COLOR_ATTACHMENT6"/>
+            <enum name="GL_COLOR_ATTACHMENT7"/>
+            <enum name="GL_COLOR_ATTACHMENT8"/>
+            <enum name="GL_COLOR_ATTACHMENT9"/>
+            <enum name="GL_COLOR_ATTACHMENT10"/>
+            <enum name="GL_COLOR_ATTACHMENT11"/>
+            <enum name="GL_COLOR_ATTACHMENT12"/>
+            <enum name="GL_COLOR_ATTACHMENT13"/>
+            <enum name="GL_COLOR_ATTACHMENT14"/>
+            <enum name="GL_COLOR_ATTACHMENT15"/>
+            <enum name="GL_COLOR_ATTACHMENT16"/>
+            <enum name="GL_COLOR_ATTACHMENT17"/>
+            <enum name="GL_COLOR_ATTACHMENT18"/>
+            <enum name="GL_COLOR_ATTACHMENT19"/>
+            <enum name="GL_COLOR_ATTACHMENT20"/>
+            <enum name="GL_COLOR_ATTACHMENT21"/>
+            <enum name="GL_COLOR_ATTACHMENT22"/>
+            <enum name="GL_COLOR_ATTACHMENT23"/>
+            <enum name="GL_COLOR_ATTACHMENT24"/>
+            <enum name="GL_COLOR_ATTACHMENT25"/>
+            <enum name="GL_COLOR_ATTACHMENT26"/>
+            <enum name="GL_COLOR_ATTACHMENT27"/>
+            <enum name="GL_COLOR_ATTACHMENT28"/>
+            <enum name="GL_COLOR_ATTACHMENT29"/>
+            <enum name="GL_COLOR_ATTACHMENT30"/>
+            <enum name="GL_COLOR_ATTACHMENT31"/>
+            <enum name="GL_DEPTH_ATTACHMENT"/>
+            <enum name="GL_STENCIL_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER"/>
+            <enum name="GL_RENDERBUFFER"/>
+            <enum name="GL_RENDERBUFFER_WIDTH"/>
+            <enum name="GL_RENDERBUFFER_HEIGHT"/>
+            <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+            <enum name="GL_STENCIL_INDEX1"/>
+            <enum name="GL_STENCIL_INDEX4"/>
+            <enum name="GL_STENCIL_INDEX8"/>
+            <enum name="GL_STENCIL_INDEX16"/>
+            <enum name="GL_RENDERBUFFER_RED_SIZE"/>
+            <enum name="GL_RENDERBUFFER_GREEN_SIZE"/>
+            <enum name="GL_RENDERBUFFER_BLUE_SIZE"/>
+            <enum name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+            <enum name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+            <enum name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+            <enum name="GL_MAX_SAMPLES"/>
+            <enum name="GL_INDEX"/>
+            <command name="glIsRenderbuffer"/>
+            <command name="glBindRenderbuffer"/>
+            <command name="glDeleteRenderbuffers"/>
+            <command name="glGenRenderbuffers"/>
+            <command name="glRenderbufferStorage"/>
+            <command name="glGetRenderbufferParameteriv"/>
+            <command name="glIsFramebuffer"/>
+            <command name="glBindFramebuffer"/>
+            <command name="glDeleteFramebuffers"/>
+            <command name="glGenFramebuffers"/>
+            <command name="glCheckFramebufferStatus"/>
+            <command name="glFramebufferTexture1D"/>
+            <command name="glFramebufferTexture2D"/>
+            <command name="glFramebufferTexture3D"/>
+            <command name="glFramebufferRenderbuffer"/>
+            <command name="glGetFramebufferAttachmentParameteriv"/>
+            <command name="glGenerateMipmap"/>
+            <command name="glBlitFramebuffer"/>
+            <command name="glRenderbufferStorageMultisample"/>
+            <command name="glFramebufferTextureLayer"/>
+        </require>
+        <require comment="Reuse ARB_texture_float">
+            <enum name="GL_TEXTURE_LUMINANCE_TYPE"/>
+            <enum name="GL_TEXTURE_INTENSITY_TYPE"/>
+        </require>
+        <require comment="Reuse ARB_framebuffer_sRGB">
+            <enum name="GL_FRAMEBUFFER_SRGB"/>
+        </require>
+        <require comment="Reuse ARB_half_float_vertex">
+            <type name="GLhalf"/>
+            <enum name="GL_HALF_FLOAT"/>
+        </require>
+        <require comment="Reuse ARB_map_buffer_range">
+            <enum name="GL_MAP_READ_BIT"/>
+            <enum name="GL_MAP_WRITE_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_RANGE_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_BUFFER_BIT"/>
+            <enum name="GL_MAP_FLUSH_EXPLICIT_BIT"/>
+            <enum name="GL_MAP_UNSYNCHRONIZED_BIT"/>
+            <command name="glMapBufferRange"/>
+            <command name="glFlushMappedBufferRange"/>
+        </require>
+        <require comment="Reuse ARB_texture_compression_rgtc">
+            <enum name="GL_COMPRESSED_RED_RGTC1"/>
+            <enum name="GL_COMPRESSED_SIGNED_RED_RGTC1"/>
+            <enum name="GL_COMPRESSED_RG_RGTC2"/>
+            <enum name="GL_COMPRESSED_SIGNED_RG_RGTC2"/>
+        </require>
+        <require comment="Reuse ARB_texture_rg">
+            <enum name="GL_RG"/>
+            <enum name="GL_RG_INTEGER"/>
+            <enum name="GL_R8"/>
+            <enum name="GL_R16"/>
+            <enum name="GL_RG8"/>
+            <enum name="GL_RG16"/>
+            <enum name="GL_R16F"/>
+            <enum name="GL_R32F"/>
+            <enum name="GL_RG16F"/>
+            <enum name="GL_RG32F"/>
+            <enum name="GL_R8I"/>
+            <enum name="GL_R8UI"/>
+            <enum name="GL_R16I"/>
+            <enum name="GL_R16UI"/>
+            <enum name="GL_R32I"/>
+            <enum name="GL_R32UI"/>
+            <enum name="GL_RG8I"/>
+            <enum name="GL_RG8UI"/>
+            <enum name="GL_RG16I"/>
+            <enum name="GL_RG16UI"/>
+            <enum name="GL_RG32I"/>
+            <enum name="GL_RG32UI"/>
+        </require>
+        <require comment="Reuse ARB_vertex_array_object">
+            <enum name="GL_VERTEX_ARRAY_BINDING"/>
+            <enum name="GL_CLAMP_VERTEX_COLOR"/>
+            <enum name="GL_CLAMP_FRAGMENT_COLOR"/>
+            <enum name="GL_ALPHA_INTEGER"/>
+            <command name="glBindVertexArray"/>
+            <command name="glDeleteVertexArrays"/>
+            <command name="glGenVertexArrays"/>
+            <command name="glIsVertexArray"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_3_1" number="3.1">
+        <require>
+            <enum name="GL_SAMPLER_2D_RECT"/>
+            <enum name="GL_SAMPLER_2D_RECT_SHADOW"/>
+            <enum name="GL_SAMPLER_BUFFER"/>
+            <enum name="GL_INT_SAMPLER_2D_RECT"/>
+            <enum name="GL_INT_SAMPLER_BUFFER"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_RECT"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER"/>
+            <enum name="GL_MAX_TEXTURE_BUFFER_SIZE"/>
+            <enum name="GL_TEXTURE_BINDING_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING"/>
+            <enum name="GL_TEXTURE_RECTANGLE"/>
+            <enum name="GL_TEXTURE_BINDING_RECTANGLE"/>
+            <enum name="GL_PROXY_TEXTURE_RECTANGLE"/>
+            <enum name="GL_MAX_RECTANGLE_TEXTURE_SIZE"/>
+            <enum name="GL_R8_SNORM"/>
+            <enum name="GL_RG8_SNORM"/>
+            <enum name="GL_RGB8_SNORM"/>
+            <enum name="GL_RGBA8_SNORM"/>
+            <enum name="GL_R16_SNORM"/>
+            <enum name="GL_RG16_SNORM"/>
+            <enum name="GL_RGB16_SNORM"/>
+            <enum name="GL_RGBA16_SNORM"/>
+            <enum name="GL_SIGNED_NORMALIZED"/>
+            <enum name="GL_PRIMITIVE_RESTART"/>
+            <enum name="GL_PRIMITIVE_RESTART_INDEX"/>
+            <command name="glDrawArraysInstanced"/>
+            <command name="glDrawElementsInstanced"/>
+            <command name="glTexBuffer"/>
+            <command name="glPrimitiveRestartIndex"/>
+        </require>
+        <require comment="Reuse ARB_copy_buffer">
+            <enum name="GL_COPY_READ_BUFFER"/>
+            <enum name="GL_COPY_WRITE_BUFFER"/>
+            <command name="glCopyBufferSubData"/>
+        </require>
+        <require comment="Reuse ARB_uniform_buffer_object">
+            <enum name="GL_UNIFORM_BUFFER"/>
+            <enum name="GL_UNIFORM_BUFFER_BINDING"/>
+            <enum name="GL_UNIFORM_BUFFER_START"/>
+            <enum name="GL_UNIFORM_BUFFER_SIZE"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_UNIFORM_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_UNIFORM_BLOCK_SIZE"/>
+            <enum name="GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH"/>
+            <enum name="GL_ACTIVE_UNIFORM_BLOCKS"/>
+            <enum name="GL_UNIFORM_TYPE"/>
+            <enum name="GL_UNIFORM_SIZE"/>
+            <enum name="GL_UNIFORM_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_INDEX"/>
+            <enum name="GL_UNIFORM_OFFSET"/>
+            <enum name="GL_UNIFORM_ARRAY_STRIDE"/>
+            <enum name="GL_UNIFORM_MATRIX_STRIDE"/>
+            <enum name="GL_UNIFORM_IS_ROW_MAJOR"/>
+            <enum name="GL_UNIFORM_BLOCK_BINDING"/>
+            <enum name="GL_UNIFORM_BLOCK_DATA_SIZE"/>
+            <enum name="GL_UNIFORM_BLOCK_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_INVALID_INDEX"/>
+            <command name="glGetUniformIndices"/>
+            <command name="glGetActiveUniformsiv"/>
+            <command name="glGetActiveUniformName"/>
+            <command name="glGetUniformBlockIndex"/>
+            <command name="glGetActiveUniformBlockiv"/>
+            <command name="glGetActiveUniformBlockName"/>
+            <command name="glUniformBlockBinding"/>
+            <command name="glBindBufferRange"/>
+            <command name="glBindBufferBase"/>
+            <command name="glGetIntegeri_v"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_3_2" number="3.2">
+        <require>
+            <enum name="GL_CONTEXT_CORE_PROFILE_BIT"/>
+            <enum name="GL_CONTEXT_COMPATIBILITY_PROFILE_BIT"/>
+            <enum name="GL_LINES_ADJACENCY"/>
+            <enum name="GL_LINE_STRIP_ADJACENCY"/>
+            <enum name="GL_TRIANGLES_ADJACENCY"/>
+            <enum name="GL_TRIANGLE_STRIP_ADJACENCY"/>
+            <enum name="GL_PROGRAM_POINT_SIZE"/>
+            <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS"/>
+            <enum name="GL_GEOMETRY_SHADER"/>
+            <enum name="GL_GEOMETRY_VERTICES_OUT"/>
+            <enum name="GL_GEOMETRY_INPUT_TYPE"/>
+            <enum name="GL_GEOMETRY_OUTPUT_TYPE"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES"/>
+            <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_VERTEX_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_FRAGMENT_INPUT_COMPONENTS"/>
+            <enum name="GL_CONTEXT_PROFILE_MASK"/>
+        </require>
+        <require comment="Reuse ARB_depth_clamp">
+            <enum name="GL_DEPTH_CLAMP"/>
+        </require>
+        <require comment="Reuse ARB_draw_elements_base_vertex">
+            <command name="glDrawElementsBaseVertex"/>
+            <command name="glDrawRangeElementsBaseVertex"/>
+            <command name="glDrawElementsInstancedBaseVertex"/>
+            <command name="glMultiDrawElementsBaseVertex"/>
+        </require>
+        <require comment="Reuse ARB_fragment_coord_conventions (none)">
+        </require>
+        <require comment="Reuse ARB_provoking_vertex">
+            <enum name="GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION"/>
+            <enum name="GL_FIRST_VERTEX_CONVENTION"/>
+            <enum name="GL_LAST_VERTEX_CONVENTION"/>
+            <enum name="GL_PROVOKING_VERTEX"/>
+            <command name="glProvokingVertex"/>
+        </require>
+        <require comment="Reuse ARB_seamless_cube_map">
+            <enum name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+        </require>
+        <require comment="Reuse ARB_sync">
+            <enum name="GL_MAX_SERVER_WAIT_TIMEOUT"/>
+            <enum name="GL_OBJECT_TYPE"/>
+            <enum name="GL_SYNC_CONDITION"/>
+            <enum name="GL_SYNC_STATUS"/>
+            <enum name="GL_SYNC_FLAGS"/>
+            <enum name="GL_SYNC_FENCE"/>
+            <enum name="GL_SYNC_GPU_COMMANDS_COMPLETE"/>
+            <enum name="GL_UNSIGNALED"/>
+            <enum name="GL_SIGNALED"/>
+            <enum name="GL_ALREADY_SIGNALED"/>
+            <enum name="GL_TIMEOUT_EXPIRED"/>
+            <enum name="GL_CONDITION_SATISFIED"/>
+            <enum name="GL_WAIT_FAILED"/>
+            <enum name="GL_TIMEOUT_IGNORED"/>
+            <enum name="GL_SYNC_FLUSH_COMMANDS_BIT"/>
+            <command name="glFenceSync"/>
+            <command name="glIsSync"/>
+            <command name="glDeleteSync"/>
+            <command name="glClientWaitSync"/>
+            <command name="glWaitSync"/>
+            <command name="glGetInteger64v"/>
+            <command name="glGetSynciv"/>
+        </require>
+        <require comment="Reuse ARB_texture_multisample">
+            <enum name="GL_SAMPLE_POSITION"/>
+            <enum name="GL_SAMPLE_MASK"/>
+            <enum name="GL_SAMPLE_MASK_VALUE"/>
+            <enum name="GL_MAX_SAMPLE_MASK_WORDS"/>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+            <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_TEXTURE_SAMPLES"/>
+            <enum name="GL_TEXTURE_FIXED_SAMPLE_LOCATIONS"/>
+            <enum name="GL_SAMPLER_2D_MULTISAMPLE"/>
+            <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE"/>
+            <enum name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_MAX_COLOR_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_DEPTH_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_INTEGER_SAMPLES"/>
+            <!-- /* Don't need to reuse tokens from ARB_vertex_array_bgra since they're already in 1.2 core */ -->
+            <command name="glGetInteger64i_v"/>
+            <command name="glGetBufferParameteri64v"/>
+            <command name="glFramebufferTexture"/>
+            <command name="glTexImage2DMultisample"/>
+            <command name="glTexImage3DMultisample"/>
+            <command name="glGetMultisamplefv"/>
+            <command name="glSampleMaski"/>
+        </require>
+        <!-- OpenGL 3.2 is where core and compatibility profiles were first
+             introduced, so many, many things were removed from the core
+             profile in this version. A few were reintroduced later (e.g.
+             GetPointerv / STACK_{UNDER,OVER}FLOW by OpenGL 4.3 for debug
+             functionality). -->
+        <remove profile="core" comment="Compatibility-only GL 1.0 features removed from GL 3.2">
+            <command name="glNewList"/>
+            <command name="glEndList"/>
+            <command name="glCallList"/>
+            <command name="glCallLists"/>
+            <command name="glDeleteLists"/>
+            <command name="glGenLists"/>
+            <command name="glListBase"/>
+            <command name="glBegin"/>
+            <command name="glBitmap"/>
+            <command name="glColor3b"/>
+            <command name="glColor3bv"/>
+            <command name="glColor3d"/>
+            <command name="glColor3dv"/>
+            <command name="glColor3f"/>
+            <command name="glColor3fv"/>
+            <command name="glColor3i"/>
+            <command name="glColor3iv"/>
+            <command name="glColor3s"/>
+            <command name="glColor3sv"/>
+            <command name="glColor3ub"/>
+            <command name="glColor3ubv"/>
+            <command name="glColor3ui"/>
+            <command name="glColor3uiv"/>
+            <command name="glColor3us"/>
+            <command name="glColor3usv"/>
+            <command name="glColor4b"/>
+            <command name="glColor4bv"/>
+            <command name="glColor4d"/>
+            <command name="glColor4dv"/>
+            <command name="glColor4f"/>
+            <command name="glColor4fv"/>
+            <command name="glColor4i"/>
+            <command name="glColor4iv"/>
+            <command name="glColor4s"/>
+            <command name="glColor4sv"/>
+            <command name="glColor4ub"/>
+            <command name="glColor4ubv"/>
+            <command name="glColor4ui"/>
+            <command name="glColor4uiv"/>
+            <command name="glColor4us"/>
+            <command name="glColor4usv"/>
+            <command name="glEdgeFlag"/>
+            <command name="glEdgeFlagv"/>
+            <command name="glEnd"/>
+            <command name="glIndexd"/>
+            <command name="glIndexdv"/>
+            <command name="glIndexf"/>
+            <command name="glIndexfv"/>
+            <command name="glIndexi"/>
+            <command name="glIndexiv"/>
+            <command name="glIndexs"/>
+            <command name="glIndexsv"/>
+            <command name="glNormal3b"/>
+            <command name="glNormal3bv"/>
+            <command name="glNormal3d"/>
+            <command name="glNormal3dv"/>
+            <command name="glNormal3f"/>
+            <command name="glNormal3fv"/>
+            <command name="glNormal3i"/>
+            <command name="glNormal3iv"/>
+            <command name="glNormal3s"/>
+            <command name="glNormal3sv"/>
+            <command name="glRasterPos2d"/>
+            <command name="glRasterPos2dv"/>
+            <command name="glRasterPos2f"/>
+            <command name="glRasterPos2fv"/>
+            <command name="glRasterPos2i"/>
+            <command name="glRasterPos2iv"/>
+            <command name="glRasterPos2s"/>
+            <command name="glRasterPos2sv"/>
+            <command name="glRasterPos3d"/>
+            <command name="glRasterPos3dv"/>
+            <command name="glRasterPos3f"/>
+            <command name="glRasterPos3fv"/>
+            <command name="glRasterPos3i"/>
+            <command name="glRasterPos3iv"/>
+            <command name="glRasterPos3s"/>
+            <command name="glRasterPos3sv"/>
+            <command name="glRasterPos4d"/>
+            <command name="glRasterPos4dv"/>
+            <command name="glRasterPos4f"/>
+            <command name="glRasterPos4fv"/>
+            <command name="glRasterPos4i"/>
+            <command name="glRasterPos4iv"/>
+            <command name="glRasterPos4s"/>
+            <command name="glRasterPos4sv"/>
+            <command name="glRectd"/>
+            <command name="glRectdv"/>
+            <command name="glRectf"/>
+            <command name="glRectfv"/>
+            <command name="glRecti"/>
+            <command name="glRectiv"/>
+            <command name="glRects"/>
+            <command name="glRectsv"/>
+            <command name="glTexCoord1d"/>
+            <command name="glTexCoord1dv"/>
+            <command name="glTexCoord1f"/>
+            <command name="glTexCoord1fv"/>
+            <command name="glTexCoord1i"/>
+            <command name="glTexCoord1iv"/>
+            <command name="glTexCoord1s"/>
+            <command name="glTexCoord1sv"/>
+            <command name="glTexCoord2d"/>
+            <command name="glTexCoord2dv"/>
+            <command name="glTexCoord2f"/>
+            <command name="glTexCoord2fv"/>
+            <command name="glTexCoord2i"/>
+            <command name="glTexCoord2iv"/>
+            <command name="glTexCoord2s"/>
+            <command name="glTexCoord2sv"/>
+            <command name="glTexCoord3d"/>
+            <command name="glTexCoord3dv"/>
+            <command name="glTexCoord3f"/>
+            <command name="glTexCoord3fv"/>
+            <command name="glTexCoord3i"/>
+            <command name="glTexCoord3iv"/>
+            <command name="glTexCoord3s"/>
+            <command name="glTexCoord3sv"/>
+            <command name="glTexCoord4d"/>
+            <command name="glTexCoord4dv"/>
+            <command name="glTexCoord4f"/>
+            <command name="glTexCoord4fv"/>
+            <command name="glTexCoord4i"/>
+            <command name="glTexCoord4iv"/>
+            <command name="glTexCoord4s"/>
+            <command name="glTexCoord4sv"/>
+            <command name="glVertex2d"/>
+            <command name="glVertex2dv"/>
+            <command name="glVertex2f"/>
+            <command name="glVertex2fv"/>
+            <command name="glVertex2i"/>
+            <command name="glVertex2iv"/>
+            <command name="glVertex2s"/>
+            <command name="glVertex2sv"/>
+            <command name="glVertex3d"/>
+            <command name="glVertex3dv"/>
+            <command name="glVertex3f"/>
+            <command name="glVertex3fv"/>
+            <command name="glVertex3i"/>
+            <command name="glVertex3iv"/>
+            <command name="glVertex3s"/>
+            <command name="glVertex3sv"/>
+            <command name="glVertex4d"/>
+            <command name="glVertex4dv"/>
+            <command name="glVertex4f"/>
+            <command name="glVertex4fv"/>
+            <command name="glVertex4i"/>
+            <command name="glVertex4iv"/>
+            <command name="glVertex4s"/>
+            <command name="glVertex4sv"/>
+            <command name="glClipPlane"/>
+            <command name="glColorMaterial"/>
+            <command name="glFogf"/>
+            <command name="glFogfv"/>
+            <command name="glFogi"/>
+            <command name="glFogiv"/>
+            <command name="glLightf"/>
+            <command name="glLightfv"/>
+            <command name="glLighti"/>
+            <command name="glLightiv"/>
+            <command name="glLightModelf"/>
+            <command name="glLightModelfv"/>
+            <command name="glLightModeli"/>
+            <command name="glLightModeliv"/>
+            <command name="glLineStipple"/>
+            <command name="glMaterialf"/>
+            <command name="glMaterialfv"/>
+            <command name="glMateriali"/>
+            <command name="glMaterialiv"/>
+            <command name="glPolygonStipple"/>
+            <command name="glShadeModel"/>
+            <command name="glTexEnvf"/>
+            <command name="glTexEnvfv"/>
+            <command name="glTexEnvi"/>
+            <command name="glTexEnviv"/>
+            <command name="glTexGend"/>
+            <command name="glTexGendv"/>
+            <command name="glTexGenf"/>
+            <command name="glTexGenfv"/>
+            <command name="glTexGeni"/>
+            <command name="glTexGeniv"/>
+            <command name="glFeedbackBuffer"/>
+            <command name="glSelectBuffer"/>
+            <command name="glRenderMode"/>
+            <command name="glInitNames"/>
+            <command name="glLoadName"/>
+            <command name="glPassThrough"/>
+            <command name="glPopName"/>
+            <command name="glPushName"/>
+            <command name="glClearAccum"/>
+            <command name="glClearIndex"/>
+            <command name="glIndexMask"/>
+            <command name="glAccum"/>
+            <command name="glPopAttrib"/>
+            <command name="glPushAttrib"/>
+            <command name="glMap1d"/>
+            <command name="glMap1f"/>
+            <command name="glMap2d"/>
+            <command name="glMap2f"/>
+            <command name="glMapGrid1d"/>
+            <command name="glMapGrid1f"/>
+            <command name="glMapGrid2d"/>
+            <command name="glMapGrid2f"/>
+            <command name="glEvalCoord1d"/>
+            <command name="glEvalCoord1dv"/>
+            <command name="glEvalCoord1f"/>
+            <command name="glEvalCoord1fv"/>
+            <command name="glEvalCoord2d"/>
+            <command name="glEvalCoord2dv"/>
+            <command name="glEvalCoord2f"/>
+            <command name="glEvalCoord2fv"/>
+            <command name="glEvalMesh1"/>
+            <command name="glEvalPoint1"/>
+            <command name="glEvalMesh2"/>
+            <command name="glEvalPoint2"/>
+            <command name="glAlphaFunc"/>
+            <command name="glPixelZoom"/>
+            <command name="glPixelTransferf"/>
+            <command name="glPixelTransferi"/>
+            <command name="glPixelMapfv"/>
+            <command name="glPixelMapuiv"/>
+            <command name="glPixelMapusv"/>
+            <command name="glCopyPixels"/>
+            <command name="glDrawPixels"/>
+            <command name="glGetClipPlane"/>
+            <command name="glGetLightfv"/>
+            <command name="glGetLightiv"/>
+            <command name="glGetMapdv"/>
+            <command name="glGetMapfv"/>
+            <command name="glGetMapiv"/>
+            <command name="glGetMaterialfv"/>
+            <command name="glGetMaterialiv"/>
+            <command name="glGetPixelMapfv"/>
+            <command name="glGetPixelMapuiv"/>
+            <command name="glGetPixelMapusv"/>
+            <command name="glGetPolygonStipple"/>
+            <command name="glGetTexEnvfv"/>
+            <command name="glGetTexEnviv"/>
+            <command name="glGetTexGendv"/>
+            <command name="glGetTexGenfv"/>
+            <command name="glGetTexGeniv"/>
+            <command name="glIsList"/>
+            <command name="glFrustum"/>
+            <command name="glLoadIdentity"/>
+            <command name="glLoadMatrixf"/>
+            <command name="glLoadMatrixd"/>
+            <command name="glMatrixMode"/>
+            <command name="glMultMatrixf"/>
+            <command name="glMultMatrixd"/>
+            <command name="glOrtho"/>
+            <command name="glPopMatrix"/>
+            <command name="glPushMatrix"/>
+            <command name="glRotated"/>
+            <command name="glRotatef"/>
+            <command name="glScaled"/>
+            <command name="glScalef"/>
+            <command name="glTranslated"/>
+            <command name="glTranslatef"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 1.1 features removed from GL 3.2">
+            <enum name="GL_CURRENT_BIT"/>
+            <enum name="GL_POINT_BIT"/>
+            <enum name="GL_LINE_BIT"/>
+            <enum name="GL_POLYGON_BIT"/>
+            <enum name="GL_POLYGON_STIPPLE_BIT"/>
+            <enum name="GL_PIXEL_MODE_BIT"/>
+            <enum name="GL_LIGHTING_BIT"/>
+            <enum name="GL_FOG_BIT"/>
+            <enum name="GL_ACCUM_BUFFER_BIT"/>
+            <enum name="GL_VIEWPORT_BIT"/>
+            <enum name="GL_TRANSFORM_BIT"/>
+            <enum name="GL_ENABLE_BIT"/>
+            <enum name="GL_HINT_BIT"/>
+            <enum name="GL_EVAL_BIT"/>
+            <enum name="GL_LIST_BIT"/>
+            <enum name="GL_TEXTURE_BIT"/>
+            <enum name="GL_SCISSOR_BIT"/>
+            <enum name="GL_ALL_ATTRIB_BITS"/>
+            <enum name="GL_CLIENT_PIXEL_STORE_BIT"/>
+            <enum name="GL_CLIENT_VERTEX_ARRAY_BIT"/>
+            <enum name="GL_CLIENT_ALL_ATTRIB_BITS"/>
+            <enum name="GL_QUAD_STRIP"/>
+            <enum name="GL_QUADS"/>
+            <enum name="GL_POLYGON"/>
+            <enum name="GL_ACCUM"/>
+            <enum name="GL_LOAD"/>
+            <enum name="GL_RETURN"/>
+            <enum name="GL_MULT"/>
+            <enum name="GL_ADD"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <enum name="GL_AUX0"/>
+            <enum name="GL_AUX1"/>
+            <enum name="GL_AUX2"/>
+            <enum name="GL_AUX3"/>
+            <enum name="GL_2D"/>
+            <enum name="GL_3D"/>
+            <enum name="GL_3D_COLOR"/>
+            <enum name="GL_3D_COLOR_TEXTURE"/>
+            <enum name="GL_4D_COLOR_TEXTURE"/>
+            <enum name="GL_PASS_THROUGH_TOKEN"/>
+            <enum name="GL_POINT_TOKEN"/>
+            <enum name="GL_LINE_TOKEN"/>
+            <enum name="GL_POLYGON_TOKEN"/>
+            <enum name="GL_BITMAP_TOKEN"/>
+            <enum name="GL_DRAW_PIXEL_TOKEN"/>
+            <enum name="GL_COPY_PIXEL_TOKEN"/>
+            <enum name="GL_LINE_RESET_TOKEN"/>
+            <enum name="GL_EXP"/>
+            <enum name="GL_EXP2"/>
+            <enum name="GL_COEFF"/>
+            <enum name="GL_ORDER"/>
+            <enum name="GL_DOMAIN"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B"/>
+            <enum name="GL_PIXEL_MAP_A_TO_A"/>
+            <enum name="GL_VERTEX_ARRAY_POINTER"/>
+            <enum name="GL_NORMAL_ARRAY_POINTER"/>
+            <enum name="GL_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_INDEX_ARRAY_POINTER"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_POINTER"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_POINTER"/>
+            <enum name="GL_FEEDBACK_BUFFER_POINTER"/>
+            <enum name="GL_SELECTION_BUFFER_POINTER"/>
+            <enum name="GL_CURRENT_COLOR"/>
+            <enum name="GL_CURRENT_INDEX"/>
+            <enum name="GL_CURRENT_NORMAL"/>
+            <enum name="GL_CURRENT_TEXTURE_COORDS"/>
+            <enum name="GL_CURRENT_RASTER_COLOR"/>
+            <enum name="GL_CURRENT_RASTER_INDEX"/>
+            <enum name="GL_CURRENT_RASTER_TEXTURE_COORDS"/>
+            <enum name="GL_CURRENT_RASTER_POSITION"/>
+            <enum name="GL_CURRENT_RASTER_POSITION_VALID"/>
+            <enum name="GL_CURRENT_RASTER_DISTANCE"/>
+            <enum name="GL_POINT_SMOOTH"/>
+            <enum name="GL_LINE_STIPPLE"/>
+            <enum name="GL_LINE_STIPPLE_PATTERN"/>
+            <enum name="GL_LINE_STIPPLE_REPEAT"/>
+            <enum name="GL_LIST_MODE"/>
+            <enum name="GL_MAX_LIST_NESTING"/>
+            <enum name="GL_LIST_BASE"/>
+            <enum name="GL_LIST_INDEX"/>
+            <enum name="GL_POLYGON_STIPPLE"/>
+            <enum name="GL_EDGE_FLAG"/>
+            <enum name="GL_LIGHTING"/>
+            <enum name="GL_LIGHT_MODEL_LOCAL_VIEWER"/>
+            <enum name="GL_LIGHT_MODEL_TWO_SIDE"/>
+            <enum name="GL_LIGHT_MODEL_AMBIENT"/>
+            <enum name="GL_SHADE_MODEL"/>
+            <enum name="GL_COLOR_MATERIAL_FACE"/>
+            <enum name="GL_COLOR_MATERIAL_PARAMETER"/>
+            <enum name="GL_COLOR_MATERIAL"/>
+            <enum name="GL_FOG"/>
+            <enum name="GL_FOG_INDEX"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_START"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_COLOR"/>
+            <enum name="GL_ACCUM_CLEAR_VALUE"/>
+            <enum name="GL_MATRIX_MODE"/>
+            <enum name="GL_NORMALIZE"/>
+            <enum name="GL_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MODELVIEW_MATRIX"/>
+            <enum name="GL_PROJECTION_MATRIX"/>
+            <enum name="GL_TEXTURE_MATRIX"/>
+            <enum name="GL_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_ALPHA_TEST"/>
+            <enum name="GL_ALPHA_TEST_FUNC"/>
+            <enum name="GL_ALPHA_TEST_REF"/>
+            <enum name="GL_INDEX_LOGIC_OP"/>
+            <enum name="GL_LOGIC_OP"/>
+            <enum name="GL_AUX_BUFFERS"/>
+            <enum name="GL_INDEX_CLEAR_VALUE"/>
+            <enum name="GL_INDEX_WRITEMASK"/>
+            <enum name="GL_INDEX_MODE"/>
+            <enum name="GL_RGBA_MODE"/>
+            <enum name="GL_RENDER_MODE"/>
+            <enum name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+            <enum name="GL_POINT_SMOOTH_HINT"/>
+            <enum name="GL_FOG_HINT"/>
+            <enum name="GL_TEXTURE_GEN_S"/>
+            <enum name="GL_TEXTURE_GEN_T"/>
+            <enum name="GL_TEXTURE_GEN_R"/>
+            <enum name="GL_TEXTURE_GEN_Q"/>
+            <enum name="GL_PIXEL_MAP_I_TO_I_SIZE"/>
+            <enum name="GL_PIXEL_MAP_S_TO_S_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_I_TO_A_SIZE"/>
+            <enum name="GL_PIXEL_MAP_R_TO_R_SIZE"/>
+            <enum name="GL_PIXEL_MAP_G_TO_G_SIZE"/>
+            <enum name="GL_PIXEL_MAP_B_TO_B_SIZE"/>
+            <enum name="GL_PIXEL_MAP_A_TO_A_SIZE"/>
+            <enum name="GL_MAP_COLOR"/>
+            <enum name="GL_MAP_STENCIL"/>
+            <enum name="GL_INDEX_SHIFT"/>
+            <enum name="GL_INDEX_OFFSET"/>
+            <enum name="GL_RED_SCALE"/>
+            <enum name="GL_RED_BIAS"/>
+            <enum name="GL_ZOOM_X"/>
+            <enum name="GL_ZOOM_Y"/>
+            <enum name="GL_GREEN_SCALE"/>
+            <enum name="GL_GREEN_BIAS"/>
+            <enum name="GL_BLUE_SCALE"/>
+            <enum name="GL_BLUE_BIAS"/>
+            <enum name="GL_ALPHA_SCALE"/>
+            <enum name="GL_ALPHA_BIAS"/>
+            <enum name="GL_DEPTH_SCALE"/>
+            <enum name="GL_DEPTH_BIAS"/>
+            <enum name="GL_MAX_EVAL_ORDER"/>
+            <enum name="GL_MAX_LIGHTS"/>
+            <enum name="GL_MAX_CLIP_PLANES"/>
+            <enum name="GL_MAX_PIXEL_MAP_TABLE"/>
+            <enum name="GL_MAX_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_MAX_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_MAX_NAME_STACK_DEPTH"/>
+            <enum name="GL_MAX_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_MAX_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MAX_CLIENT_ATTRIB_STACK_DEPTH"/>
+            <enum name="GL_INDEX_BITS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_ACCUM_RED_BITS"/>
+            <enum name="GL_ACCUM_GREEN_BITS"/>
+            <enum name="GL_ACCUM_BLUE_BITS"/>
+            <enum name="GL_ACCUM_ALPHA_BITS"/>
+            <enum name="GL_NAME_STACK_DEPTH"/>
+            <enum name="GL_AUTO_NORMAL"/>
+            <enum name="GL_MAP1_COLOR_4"/>
+            <enum name="GL_MAP1_INDEX"/>
+            <enum name="GL_MAP1_NORMAL"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP1_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP1_VERTEX_3"/>
+            <enum name="GL_MAP1_VERTEX_4"/>
+            <enum name="GL_MAP2_COLOR_4"/>
+            <enum name="GL_MAP2_INDEX"/>
+            <enum name="GL_MAP2_NORMAL"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_1"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_2"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_3"/>
+            <enum name="GL_MAP2_TEXTURE_COORD_4"/>
+            <enum name="GL_MAP2_VERTEX_3"/>
+            <enum name="GL_MAP2_VERTEX_4"/>
+            <enum name="GL_MAP1_GRID_DOMAIN"/>
+            <enum name="GL_MAP1_GRID_SEGMENTS"/>
+            <enum name="GL_MAP2_GRID_DOMAIN"/>
+            <enum name="GL_MAP2_GRID_SEGMENTS"/>
+            <enum name="GL_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_FEEDBACK_BUFFER_TYPE"/>
+            <enum name="GL_SELECTION_BUFFER_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_NORMAL_ARRAY"/>
+            <enum name="GL_COLOR_ARRAY"/>
+            <enum name="GL_INDEX_ARRAY"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY"/>
+            <enum name="GL_EDGE_FLAG_ARRAY"/>
+            <enum name="GL_VERTEX_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ARRAY_STRIDE"/>
+            <enum name="GL_NORMAL_ARRAY_TYPE"/>
+            <enum name="GL_NORMAL_ARRAY_STRIDE"/>
+            <enum name="GL_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_INDEX_ARRAY_TYPE"/>
+            <enum name="GL_INDEX_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_SIZE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_TYPE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_COMPONENTS"/>
+            <enum name="GL_TEXTURE_BORDER"/>
+            <enum name="GL_TEXTURE_LUMINANCE_SIZE"/>
+            <enum name="GL_TEXTURE_INTENSITY_SIZE"/>
+            <enum name="GL_TEXTURE_PRIORITY"/>
+            <enum name="GL_TEXTURE_RESIDENT"/>
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_SPECULAR"/>
+            <enum name="GL_POSITION"/>
+            <enum name="GL_SPOT_DIRECTION"/>
+            <enum name="GL_SPOT_EXPONENT"/>
+            <enum name="GL_SPOT_CUTOFF"/>
+            <enum name="GL_CONSTANT_ATTENUATION"/>
+            <enum name="GL_LINEAR_ATTENUATION"/>
+            <enum name="GL_QUADRATIC_ATTENUATION"/>
+            <enum name="GL_COMPILE"/>
+            <enum name="GL_COMPILE_AND_EXECUTE"/>
+            <enum name="GL_2_BYTES"/>
+            <enum name="GL_3_BYTES"/>
+            <enum name="GL_4_BYTES"/>
+            <enum name="GL_EMISSION"/>
+            <enum name="GL_SHININESS"/>
+            <enum name="GL_AMBIENT_AND_DIFFUSE"/>
+            <enum name="GL_COLOR_INDEXES"/>
+            <enum name="GL_MODELVIEW"/>
+            <enum name="GL_PROJECTION"/>
+            <enum name="GL_COLOR_INDEX"/>
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_BITMAP"/>
+            <enum name="GL_RENDER"/>
+            <enum name="GL_FEEDBACK"/>
+            <enum name="GL_SELECT"/>
+            <enum name="GL_FLAT"/>
+            <enum name="GL_SMOOTH"/>
+            <enum name="GL_S"/>
+            <enum name="GL_T"/>
+            <enum name="GL_R"/>
+            <enum name="GL_Q"/>
+            <enum name="GL_MODULATE"/>
+            <enum name="GL_DECAL"/>
+            <enum name="GL_TEXTURE_ENV_MODE"/>
+            <enum name="GL_TEXTURE_ENV_COLOR"/>
+            <enum name="GL_TEXTURE_ENV"/>
+            <enum name="GL_EYE_LINEAR"/>
+            <enum name="GL_OBJECT_LINEAR"/>
+            <enum name="GL_SPHERE_MAP"/>
+            <enum name="GL_TEXTURE_GEN_MODE"/>
+            <enum name="GL_OBJECT_PLANE"/>
+            <enum name="GL_EYE_PLANE"/>
+            <enum name="GL_CLAMP"/>
+            <enum name="GL_ALPHA4"/>
+            <enum name="GL_ALPHA8"/>
+            <enum name="GL_ALPHA12"/>
+            <enum name="GL_ALPHA16"/>
+            <enum name="GL_LUMINANCE4"/>
+            <enum name="GL_LUMINANCE8"/>
+            <enum name="GL_LUMINANCE12"/>
+            <enum name="GL_LUMINANCE16"/>
+            <enum name="GL_LUMINANCE4_ALPHA4"/>
+            <enum name="GL_LUMINANCE6_ALPHA2"/>
+            <enum name="GL_LUMINANCE8_ALPHA8"/>
+            <enum name="GL_LUMINANCE12_ALPHA4"/>
+            <enum name="GL_LUMINANCE12_ALPHA12"/>
+            <enum name="GL_LUMINANCE16_ALPHA16"/>
+            <enum name="GL_INTENSITY"/>
+            <enum name="GL_INTENSITY4"/>
+            <enum name="GL_INTENSITY8"/>
+            <enum name="GL_INTENSITY12"/>
+            <enum name="GL_INTENSITY16"/>
+            <enum name="GL_V2F"/>
+            <enum name="GL_V3F"/>
+            <enum name="GL_C4UB_V2F"/>
+            <enum name="GL_C4UB_V3F"/>
+            <enum name="GL_C3F_V3F"/>
+            <enum name="GL_N3F_V3F"/>
+            <enum name="GL_C4F_N3F_V3F"/>
+            <enum name="GL_T2F_V3F"/>
+            <enum name="GL_T4F_V4F"/>
+            <enum name="GL_T2F_C4UB_V3F"/>
+            <enum name="GL_T2F_C3F_V3F"/>
+            <enum name="GL_T2F_N3F_V3F"/>
+            <enum name="GL_T2F_C4F_N3F_V3F"/>
+            <enum name="GL_T4F_C4F_N3F_V4F"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+            <command name="glArrayElement"/>
+            <command name="glColorPointer"/>
+            <command name="glDisableClientState"/>
+            <command name="glEdgeFlagPointer"/>
+            <command name="glEnableClientState"/>
+            <command name="glIndexPointer"/>
+            <command name="glGetPointerv"/>
+            <command name="glInterleavedArrays"/>
+            <command name="glNormalPointer"/>
+            <command name="glTexCoordPointer"/>
+            <command name="glVertexPointer"/>
+            <command name="glAreTexturesResident"/>
+            <command name="glPrioritizeTextures"/>
+            <command name="glIndexub"/>
+            <command name="glIndexubv"/>
+            <command name="glPopClientAttrib"/>
+            <command name="glPushClientAttrib"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 1.2 features removed from GL 3.2">
+            <enum name="GL_RESCALE_NORMAL"/>
+            <enum name="GL_LIGHT_MODEL_COLOR_CONTROL"/>
+            <enum name="GL_SINGLE_COLOR"/>
+            <enum name="GL_SEPARATE_SPECULAR_COLOR"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 1.3 features removed from GL 3.2">
+            <enum name="GL_CLIENT_ACTIVE_TEXTURE"/>
+            <enum name="GL_MAX_TEXTURE_UNITS"/>
+            <enum name="GL_TRANSPOSE_MODELVIEW_MATRIX"/>
+            <enum name="GL_TRANSPOSE_PROJECTION_MATRIX"/>
+            <enum name="GL_TRANSPOSE_TEXTURE_MATRIX"/>
+            <enum name="GL_TRANSPOSE_COLOR_MATRIX"/>
+            <enum name="GL_MULTISAMPLE_BIT"/>
+            <enum name="GL_NORMAL_MAP"/>
+            <enum name="GL_REFLECTION_MAP"/>
+            <enum name="GL_COMPRESSED_ALPHA"/>
+            <enum name="GL_COMPRESSED_LUMINANCE"/>
+            <enum name="GL_COMPRESSED_LUMINANCE_ALPHA"/>
+            <enum name="GL_COMPRESSED_INTENSITY"/>
+            <enum name="GL_COMBINE"/>
+            <enum name="GL_COMBINE_RGB"/>
+            <enum name="GL_COMBINE_ALPHA"/>
+            <enum name="GL_SOURCE0_RGB"/>
+            <enum name="GL_SOURCE1_RGB"/>
+            <enum name="GL_SOURCE2_RGB"/>
+            <enum name="GL_SOURCE0_ALPHA"/>
+            <enum name="GL_SOURCE1_ALPHA"/>
+            <enum name="GL_SOURCE2_ALPHA"/>
+            <enum name="GL_OPERAND0_RGB"/>
+            <enum name="GL_OPERAND1_RGB"/>
+            <enum name="GL_OPERAND2_RGB"/>
+            <enum name="GL_OPERAND0_ALPHA"/>
+            <enum name="GL_OPERAND1_ALPHA"/>
+            <enum name="GL_OPERAND2_ALPHA"/>
+            <enum name="GL_RGB_SCALE"/>
+            <enum name="GL_ADD_SIGNED"/>
+            <enum name="GL_INTERPOLATE"/>
+            <enum name="GL_SUBTRACT"/>
+            <enum name="GL_CONSTANT"/>
+            <enum name="GL_PRIMARY_COLOR"/>
+            <enum name="GL_PREVIOUS"/>
+            <enum name="GL_DOT3_RGB"/>
+            <enum name="GL_DOT3_RGBA"/>
+            <command name="glClientActiveTexture"/>
+            <command name="glMultiTexCoord1d"/>
+            <command name="glMultiTexCoord1dv"/>
+            <command name="glMultiTexCoord1f"/>
+            <command name="glMultiTexCoord1fv"/>
+            <command name="glMultiTexCoord1i"/>
+            <command name="glMultiTexCoord1iv"/>
+            <command name="glMultiTexCoord1s"/>
+            <command name="glMultiTexCoord1sv"/>
+            <command name="glMultiTexCoord2d"/>
+            <command name="glMultiTexCoord2dv"/>
+            <command name="glMultiTexCoord2f"/>
+            <command name="glMultiTexCoord2fv"/>
+            <command name="glMultiTexCoord2i"/>
+            <command name="glMultiTexCoord2iv"/>
+            <command name="glMultiTexCoord2s"/>
+            <command name="glMultiTexCoord2sv"/>
+            <command name="glMultiTexCoord3d"/>
+            <command name="glMultiTexCoord3dv"/>
+            <command name="glMultiTexCoord3f"/>
+            <command name="glMultiTexCoord3fv"/>
+            <command name="glMultiTexCoord3i"/>
+            <command name="glMultiTexCoord3iv"/>
+            <command name="glMultiTexCoord3s"/>
+            <command name="glMultiTexCoord3sv"/>
+            <command name="glMultiTexCoord4d"/>
+            <command name="glMultiTexCoord4dv"/>
+            <command name="glMultiTexCoord4f"/>
+            <command name="glMultiTexCoord4fv"/>
+            <command name="glMultiTexCoord4i"/>
+            <command name="glMultiTexCoord4iv"/>
+            <command name="glMultiTexCoord4s"/>
+            <command name="glMultiTexCoord4sv"/>
+            <command name="glLoadTransposeMatrixf"/>
+            <command name="glLoadTransposeMatrixd"/>
+            <command name="glMultTransposeMatrixf"/>
+            <command name="glMultTransposeMatrixd"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 1.4 features removed from GL 3.2">
+            <enum name="GL_POINT_SIZE_MIN"/>
+            <enum name="GL_POINT_SIZE_MAX"/>
+            <enum name="GL_POINT_DISTANCE_ATTENUATION"/>
+            <enum name="GL_GENERATE_MIPMAP"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_FOG_COORDINATE_SOURCE"/>
+            <enum name="GL_FOG_COORDINATE"/>
+            <enum name="GL_FRAGMENT_DEPTH"/>
+            <enum name="GL_CURRENT_FOG_COORDINATE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_TYPE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_STRIDE"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_POINTER"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY"/>
+            <enum name="GL_COLOR_SUM"/>
+            <enum name="GL_CURRENT_SECONDARY_COLOR"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY"/>
+            <enum name="GL_TEXTURE_FILTER_CONTROL"/>
+            <enum name="GL_DEPTH_TEXTURE_MODE"/>
+            <enum name="GL_COMPARE_R_TO_TEXTURE"/>
+            <command name="glFogCoordf"/>
+            <command name="glFogCoordfv"/>
+            <command name="glFogCoordd"/>
+            <command name="glFogCoorddv"/>
+            <command name="glFogCoordPointer"/>
+            <command name="glSecondaryColor3b"/>
+            <command name="glSecondaryColor3bv"/>
+            <command name="glSecondaryColor3d"/>
+            <command name="glSecondaryColor3dv"/>
+            <command name="glSecondaryColor3f"/>
+            <command name="glSecondaryColor3fv"/>
+            <command name="glSecondaryColor3i"/>
+            <command name="glSecondaryColor3iv"/>
+            <command name="glSecondaryColor3s"/>
+            <command name="glSecondaryColor3sv"/>
+            <command name="glSecondaryColor3ub"/>
+            <command name="glSecondaryColor3ubv"/>
+            <command name="glSecondaryColor3ui"/>
+            <command name="glSecondaryColor3uiv"/>
+            <command name="glSecondaryColor3us"/>
+            <command name="glSecondaryColor3usv"/>
+            <command name="glSecondaryColorPointer"/>
+            <command name="glWindowPos2d"/>
+            <command name="glWindowPos2dv"/>
+            <command name="glWindowPos2f"/>
+            <command name="glWindowPos2fv"/>
+            <command name="glWindowPos2i"/>
+            <command name="glWindowPos2iv"/>
+            <command name="glWindowPos2s"/>
+            <command name="glWindowPos2sv"/>
+            <command name="glWindowPos3d"/>
+            <command name="glWindowPos3dv"/>
+            <command name="glWindowPos3f"/>
+            <command name="glWindowPos3fv"/>
+            <command name="glWindowPos3i"/>
+            <command name="glWindowPos3iv"/>
+            <command name="glWindowPos3s"/>
+            <command name="glWindowPos3sv"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 1.5 features removed from GL 3.2">
+            <enum name="GL_VERTEX_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_NORMAL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_COLOR_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_INDEX_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_EDGE_FLAG_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_WEIGHT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_FOG_COORD_SRC"/>
+            <enum name="GL_FOG_COORD"/>
+            <enum name="GL_CURRENT_FOG_COORD"/>
+            <enum name="GL_FOG_COORD_ARRAY_TYPE"/>
+            <enum name="GL_FOG_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_FOG_COORD_ARRAY_POINTER"/>
+            <enum name="GL_FOG_COORD_ARRAY"/>
+            <enum name="GL_FOG_COORD_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_SRC0_RGB"/>
+            <enum name="GL_SRC1_RGB"/>
+            <enum name="GL_SRC2_RGB"/>
+            <enum name="GL_SRC0_ALPHA"/>
+            <enum name="GL_SRC2_ALPHA"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 2.0 features removed from GL 3.2">
+            <enum name="GL_VERTEX_PROGRAM_TWO_SIDE"/>
+            <enum name="GL_POINT_SPRITE"/>
+            <enum name="GL_COORD_REPLACE"/>
+            <enum name="GL_MAX_TEXTURE_COORDS"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 2.1 features removed from GL 3.2">
+            <enum name="GL_CURRENT_RASTER_SECONDARY_COLOR"/>
+            <enum name="GL_SLUMINANCE_ALPHA"/>
+            <enum name="GL_SLUMINANCE8_ALPHA8"/>
+            <enum name="GL_SLUMINANCE"/>
+            <enum name="GL_SLUMINANCE8"/>
+            <enum name="GL_COMPRESSED_SLUMINANCE"/>
+            <enum name="GL_COMPRESSED_SLUMINANCE_ALPHA"/>
+        </remove>
+        <remove profile="core" comment="Compatibility-only GL 3.0 features removed from GL 3.2">
+            <enum name="GL_CLAMP_VERTEX_COLOR"/>
+            <enum name="GL_CLAMP_FRAGMENT_COLOR"/>
+            <enum name="GL_ALPHA_INTEGER"/>
+            <enum name="GL_INDEX"/>
+            <enum name="GL_TEXTURE_LUMINANCE_TYPE"/>
+            <enum name="GL_TEXTURE_INTENSITY_TYPE"/>
+        </remove>
+        <!-- Deprecated (not removed) in OpenGL 3.2 core;
+             deprecate tag not defined/supported yet
+          <deprecate profile="core">
+            <enum name="GL_MAX_VARYING_FLOATS"/>
+            <enum name="GL_MAX_VARYING_COMPONENTS"/>
+          </deprecate>
+        -->
+    </feature>
+    <feature api="gl" name="GL_VERSION_3_3" number="3.3">
+        <require>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR"/>
+        </require>
+        <require comment="Reuse ARB_blend_func_extended">
+            <enum name="GL_SRC1_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC1_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC1_ALPHA"/>
+            <enum name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS"/>
+            <command name="glBindFragDataLocationIndexed"/>
+            <command name="glGetFragDataIndex"/>
+        </require>
+        <require comment="Reuse ARB_explicit_attrib_location (none)">
+        </require>
+        <require comment="Reuse ARB_occlusion_query2">
+            <enum name="GL_ANY_SAMPLES_PASSED"/>
+        </require>
+        <require comment="Reuse ARB_sampler_objects">
+            <enum name="GL_SAMPLER_BINDING"/>
+            <command name="glGenSamplers"/>
+            <command name="glDeleteSamplers"/>
+            <command name="glIsSampler"/>
+            <command name="glBindSampler"/>
+            <command name="glSamplerParameteri"/>
+            <command name="glSamplerParameteriv"/>
+            <command name="glSamplerParameterf"/>
+            <command name="glSamplerParameterfv"/>
+            <command name="glSamplerParameterIiv"/>
+            <command name="glSamplerParameterIuiv"/>
+            <command name="glGetSamplerParameteriv"/>
+            <command name="glGetSamplerParameterIiv"/>
+            <command name="glGetSamplerParameterfv"/>
+            <command name="glGetSamplerParameterIuiv"/>
+        </require>
+        <require comment="Reuse ARB_shader_bit_encoding (none)">
+        </require>
+        <require comment="Reuse ARB_texture_rgb10_a2ui">
+            <enum name="GL_RGB10_A2UI"/>
+        </require>
+        <require comment="Reuse ARB_texture_swizzle">
+            <enum name="GL_TEXTURE_SWIZZLE_R"/>
+            <enum name="GL_TEXTURE_SWIZZLE_G"/>
+            <enum name="GL_TEXTURE_SWIZZLE_B"/>
+            <enum name="GL_TEXTURE_SWIZZLE_A"/>
+            <enum name="GL_TEXTURE_SWIZZLE_RGBA"/>
+        </require>
+        <require comment="Reuse ARB_timer_query">
+            <enum name="GL_TIME_ELAPSED"/>
+            <enum name="GL_TIMESTAMP"/>
+            <command name="glQueryCounter"/>
+            <command name="glGetQueryObjecti64v"/>
+            <command name="glGetQueryObjectui64v"/>
+        </require>
+        <require comment="Reuse ARB_vertex_type_2_10_10_10_rev">
+            <enum name="GL_INT_2_10_10_10_REV"/>
+            <command name="glVertexAttribDivisor"/>
+            <command name="glVertexAttribP1ui"/>
+            <command name="glVertexAttribP1uiv"/>
+            <command name="glVertexAttribP2ui"/>
+            <command name="glVertexAttribP2uiv"/>
+            <command name="glVertexAttribP3ui"/>
+            <command name="glVertexAttribP3uiv"/>
+            <command name="glVertexAttribP4ui"/>
+            <command name="glVertexAttribP4uiv"/>
+        </require>
+        <require profile="compatibility" comment="Reuse ARB_vertex_type_2_10_10_10_rev compatibility profile">
+            <command name="glVertexP2ui"/>
+            <command name="glVertexP2uiv"/>
+            <command name="glVertexP3ui"/>
+            <command name="glVertexP3uiv"/>
+            <command name="glVertexP4ui"/>
+            <command name="glVertexP4uiv"/>
+            <command name="glTexCoordP1ui"/>
+            <command name="glTexCoordP1uiv"/>
+            <command name="glTexCoordP2ui"/>
+            <command name="glTexCoordP2uiv"/>
+            <command name="glTexCoordP3ui"/>
+            <command name="glTexCoordP3uiv"/>
+            <command name="glTexCoordP4ui"/>
+            <command name="glTexCoordP4uiv"/>
+            <command name="glMultiTexCoordP1ui"/>
+            <command name="glMultiTexCoordP1uiv"/>
+            <command name="glMultiTexCoordP2ui"/>
+            <command name="glMultiTexCoordP2uiv"/>
+            <command name="glMultiTexCoordP3ui"/>
+            <command name="glMultiTexCoordP3uiv"/>
+            <command name="glMultiTexCoordP4ui"/>
+            <command name="glMultiTexCoordP4uiv"/>
+            <command name="glNormalP3ui"/>
+            <command name="glNormalP3uiv"/>
+            <command name="glColorP3ui"/>
+            <command name="glColorP3uiv"/>
+            <command name="glColorP4ui"/>
+            <command name="glColorP4uiv"/>
+            <command name="glSecondaryColorP3ui"/>
+            <command name="glSecondaryColorP3uiv"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_0" number="4.0">
+        <require>
+            <enum name="GL_SAMPLE_SHADING"/>
+            <enum name="GL_MIN_SAMPLE_SHADING_VALUE"/>
+            <enum name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+            <enum name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"/>
+            <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_SAMPLER_CUBE_MAP_ARRAY"/>
+            <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW"/>
+            <enum name="GL_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+            <command name="glMinSampleShading"/>
+            <command name="glBlendEquationi"/>
+            <command name="glBlendEquationSeparatei"/>
+            <command name="glBlendFunci"/>
+            <command name="glBlendFuncSeparatei"/>
+        </require>
+        <require comment="Reuse ARB_draw_buffers_blend (none)">
+        </require>
+        <require comment="Reuse ARB_draw_indirect">
+            <enum name="GL_DRAW_INDIRECT_BUFFER"/>
+            <enum name="GL_DRAW_INDIRECT_BUFFER_BINDING"/>
+            <command name="glDrawArraysIndirect"/>
+            <command name="glDrawElementsIndirect"/>
+        </require>
+        <require comment="Reuse ARB_gpu_shader5">
+            <enum name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+            <enum name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS"/>
+            <enum name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET"/>
+            <enum name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET"/>
+            <enum name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS"/>
+            <enum name="GL_MAX_VERTEX_STREAMS"/>
+        </require>
+        <require comment="Reuse ARB_gpu_shader_fp64">
+            <enum name="GL_DOUBLE_VEC2"/>
+            <enum name="GL_DOUBLE_VEC3"/>
+            <enum name="GL_DOUBLE_VEC4"/>
+            <enum name="GL_DOUBLE_MAT2"/>
+            <enum name="GL_DOUBLE_MAT3"/>
+            <enum name="GL_DOUBLE_MAT4"/>
+            <enum name="GL_DOUBLE_MAT2x3"/>
+            <enum name="GL_DOUBLE_MAT2x4"/>
+            <enum name="GL_DOUBLE_MAT3x2"/>
+            <enum name="GL_DOUBLE_MAT3x4"/>
+            <enum name="GL_DOUBLE_MAT4x2"/>
+            <enum name="GL_DOUBLE_MAT4x3"/>
+            <command name="glUniform1d"/>
+            <command name="glUniform2d"/>
+            <command name="glUniform3d"/>
+            <command name="glUniform4d"/>
+            <command name="glUniform1dv"/>
+            <command name="glUniform2dv"/>
+            <command name="glUniform3dv"/>
+            <command name="glUniform4dv"/>
+            <command name="glUniformMatrix2dv"/>
+            <command name="glUniformMatrix3dv"/>
+            <command name="glUniformMatrix4dv"/>
+            <command name="glUniformMatrix2x3dv"/>
+            <command name="glUniformMatrix2x4dv"/>
+            <command name="glUniformMatrix3x2dv"/>
+            <command name="glUniformMatrix3x4dv"/>
+            <command name="glUniformMatrix4x2dv"/>
+            <command name="glUniformMatrix4x3dv"/>
+            <command name="glGetUniformdv"/>
+        </require>
+        <require comment="Reuse ARB_shader_subroutine">
+            <enum name="GL_ACTIVE_SUBROUTINES"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORMS"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_MAX_LENGTH"/>
+            <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH"/>
+            <enum name="GL_MAX_SUBROUTINES"/>
+            <enum name="GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS"/>
+            <enum name="GL_NUM_COMPATIBLE_SUBROUTINES"/>
+            <enum name="GL_COMPATIBLE_SUBROUTINES"/>
+            <command name="glGetSubroutineUniformLocation"/>
+            <command name="glGetSubroutineIndex"/>
+            <command name="glGetActiveSubroutineUniformiv"/>
+            <command name="glGetActiveSubroutineUniformName"/>
+            <command name="glGetActiveSubroutineName"/>
+            <command name="glUniformSubroutinesuiv"/>
+            <command name="glGetUniformSubroutineuiv"/>
+            <command name="glGetProgramStageiv"/>
+        </require>
+        <require comment="Reuse ARB_tessellation_shader">
+            <enum name="GL_PATCHES"/>
+            <enum name="GL_PATCH_VERTICES"/>
+            <enum name="GL_PATCH_DEFAULT_INNER_LEVEL"/>
+            <enum name="GL_PATCH_DEFAULT_OUTER_LEVEL"/>
+            <enum name="GL_TESS_CONTROL_OUTPUT_VERTICES"/>
+            <enum name="GL_TESS_GEN_MODE"/>
+            <enum name="GL_TESS_GEN_SPACING"/>
+            <enum name="GL_TESS_GEN_VERTEX_ORDER"/>
+            <enum name="GL_TESS_GEN_POINT_MODE"/>
+            <enum name="GL_ISOLINES"/>
+            <enum name="GL_QUADS"/>
+            <enum name="GL_FRACTIONAL_ODD"/>
+            <enum name="GL_FRACTIONAL_EVEN"/>
+            <enum name="GL_MAX_PATCH_VERTICES"/>
+            <enum name="GL_MAX_TESS_GEN_LEVEL"/>
+            <enum name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_PATCH_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_TESS_CONTROL_SHADER"/>
+            <command name="glPatchParameteri"/>
+            <command name="glPatchParameterfv"/>
+        </require>
+        <require comment="Reuse ARB_texture_buffer_object_rgb32 (none)">
+        </require>
+        <require comment="Reuse ARB_texture_cube_map_array (none)">
+        </require>
+        <require comment="Reuse ARB_texture_gather (none)">
+        </require>
+        <require comment="Reuse ARB_texture_query_lod (none)">
+        </require>
+        <require comment="Reuse ARB_transform_feedback2">
+            <enum name="GL_TRANSFORM_FEEDBACK"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BINDING"/>
+            <command name="glBindTransformFeedback"/>
+            <command name="glDeleteTransformFeedbacks"/>
+            <command name="glGenTransformFeedbacks"/>
+            <command name="glIsTransformFeedback"/>
+            <command name="glPauseTransformFeedback"/>
+            <command name="glResumeTransformFeedback"/>
+            <command name="glDrawTransformFeedback"/>
+        </require>
+        <require comment="Reuse ARB_transform_feedback3">
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_BUFFERS"/>
+            <enum name="GL_MAX_VERTEX_STREAMS"/>
+            <command name="glDrawTransformFeedbackStream"/>
+            <command name="glBeginQueryIndexed"/>
+            <command name="glEndQueryIndexed"/>
+            <command name="glGetQueryIndexediv"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_1" number="4.1">
+        <require comment="Reuse tokens from ARB_ES2_compatibility">
+            <enum name="GL_FIXED"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+            <enum name="GL_LOW_FLOAT"/>
+            <enum name="GL_MEDIUM_FLOAT"/>
+            <enum name="GL_HIGH_FLOAT"/>
+            <enum name="GL_LOW_INT"/>
+            <enum name="GL_MEDIUM_INT"/>
+            <enum name="GL_HIGH_INT"/>
+            <enum name="GL_SHADER_COMPILER"/>
+            <enum name="GL_SHADER_BINARY_FORMATS"/>
+            <enum name="GL_NUM_SHADER_BINARY_FORMATS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+            <enum name="GL_MAX_VARYING_VECTORS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+            <enum name="GL_RGB565"/>
+        </require>
+        <require comment="Reuse tokens from ARB_get_program_binary">
+            <enum name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+            <enum name="GL_PROGRAM_BINARY_LENGTH"/>
+            <enum name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_PROGRAM_BINARY_FORMATS"/>
+        </require>
+        <require comment="Reuse tokens from ARB_separate_shader_objects">
+            <enum name="GL_VERTEX_SHADER_BIT"/>
+            <enum name="GL_FRAGMENT_SHADER_BIT"/>
+            <enum name="GL_GEOMETRY_SHADER_BIT"/>
+            <enum name="GL_TESS_CONTROL_SHADER_BIT"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_BIT"/>
+            <enum name="GL_ALL_SHADER_BITS"/>
+            <enum name="GL_PROGRAM_SEPARABLE"/>
+            <enum name="GL_ACTIVE_PROGRAM"/>
+            <enum name="GL_PROGRAM_PIPELINE_BINDING"/>
+        </require>
+        <require comment="Reuse tokens from ARB_shader_precision (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_vertex_attrib_64bit - all are in GL 3.0 and 4.0 already">
+        </require>
+        <require comment="Reuse tokens from ARB_viewport_array - some are in GL 1.1 and ARB_provoking_vertex already">
+            <enum name="GL_MAX_VIEWPORTS"/>
+            <enum name="GL_VIEWPORT_SUBPIXEL_BITS"/>
+            <enum name="GL_VIEWPORT_BOUNDS_RANGE"/>
+            <enum name="GL_LAYER_PROVOKING_VERTEX"/>
+            <enum name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX"/>
+            <enum name="GL_UNDEFINED_VERTEX"/>
+        </require>
+        <require comment="Reuse commands from ARB_ES2_compatibility">
+            <command name="glReleaseShaderCompiler"/>
+            <command name="glShaderBinary"/>
+            <command name="glGetShaderPrecisionFormat"/>
+            <command name="glDepthRangef"/>
+            <command name="glClearDepthf"/>
+        </require>
+        <require comment="Reuse commands from ARB_get_program_binary">
+            <command name="glGetProgramBinary"/>
+            <command name="glProgramBinary"/>
+            <command name="glProgramParameteri"/>
+        </require>
+        <require comment="Reuse commands from ARB_separate_shader_objects">
+            <command name="glUseProgramStages"/>
+            <command name="glActiveShaderProgram"/>
+            <command name="glCreateShaderProgramv"/>
+            <command name="glBindProgramPipeline"/>
+            <command name="glDeleteProgramPipelines"/>
+            <command name="glGenProgramPipelines"/>
+            <command name="glIsProgramPipeline"/>
+            <command name="glGetProgramPipelineiv"/>
+            <command name="glProgramParameteri"/>
+            <command name="glProgramUniform1i"/>
+            <command name="glProgramUniform1iv"/>
+            <command name="glProgramUniform1f"/>
+            <command name="glProgramUniform1fv"/>
+            <command name="glProgramUniform1d"/>
+            <command name="glProgramUniform1dv"/>
+            <command name="glProgramUniform1ui"/>
+            <command name="glProgramUniform1uiv"/>
+            <command name="glProgramUniform2i"/>
+            <command name="glProgramUniform2iv"/>
+            <command name="glProgramUniform2f"/>
+            <command name="glProgramUniform2fv"/>
+            <command name="glProgramUniform2d"/>
+            <command name="glProgramUniform2dv"/>
+            <command name="glProgramUniform2ui"/>
+            <command name="glProgramUniform2uiv"/>
+            <command name="glProgramUniform3i"/>
+            <command name="glProgramUniform3iv"/>
+            <command name="glProgramUniform3f"/>
+            <command name="glProgramUniform3fv"/>
+            <command name="glProgramUniform3d"/>
+            <command name="glProgramUniform3dv"/>
+            <command name="glProgramUniform3ui"/>
+            <command name="glProgramUniform3uiv"/>
+            <command name="glProgramUniform4i"/>
+            <command name="glProgramUniform4iv"/>
+            <command name="glProgramUniform4f"/>
+            <command name="glProgramUniform4fv"/>
+            <command name="glProgramUniform4d"/>
+            <command name="glProgramUniform4dv"/>
+            <command name="glProgramUniform4ui"/>
+            <command name="glProgramUniform4uiv"/>
+            <command name="glProgramUniformMatrix2fv"/>
+            <command name="glProgramUniformMatrix3fv"/>
+            <command name="glProgramUniformMatrix4fv"/>
+            <command name="glProgramUniformMatrix2dv"/>
+            <command name="glProgramUniformMatrix3dv"/>
+            <command name="glProgramUniformMatrix4dv"/>
+            <command name="glProgramUniformMatrix2x3fv"/>
+            <command name="glProgramUniformMatrix3x2fv"/>
+            <command name="glProgramUniformMatrix2x4fv"/>
+            <command name="glProgramUniformMatrix4x2fv"/>
+            <command name="glProgramUniformMatrix3x4fv"/>
+            <command name="glProgramUniformMatrix4x3fv"/>
+            <command name="glProgramUniformMatrix2x3dv"/>
+            <command name="glProgramUniformMatrix3x2dv"/>
+            <command name="glProgramUniformMatrix2x4dv"/>
+            <command name="glProgramUniformMatrix4x2dv"/>
+            <command name="glProgramUniformMatrix3x4dv"/>
+            <command name="glProgramUniformMatrix4x3dv"/>
+            <command name="glValidateProgramPipeline"/>
+            <command name="glGetProgramPipelineInfoLog"/>
+        </require>
+        <require comment="Reuse commands from ARB_shader_precision (none)">
+        </require>
+        <require comment="Reuse commands from ARB_vertex_attrib_64bit">
+            <command name="glVertexAttribL1d"/>
+            <command name="glVertexAttribL2d"/>
+            <command name="glVertexAttribL3d"/>
+            <command name="glVertexAttribL4d"/>
+            <command name="glVertexAttribL1dv"/>
+            <command name="glVertexAttribL2dv"/>
+            <command name="glVertexAttribL3dv"/>
+            <command name="glVertexAttribL4dv"/>
+            <command name="glVertexAttribLPointer"/>
+            <command name="glGetVertexAttribLdv"/>
+        </require>
+        <require comment="Reuse commands from ARB_viewport_array">
+            <command name="glViewportArrayv"/>
+            <command name="glViewportIndexedf"/>
+            <command name="glViewportIndexedfv"/>
+            <command name="glScissorArrayv"/>
+            <command name="glScissorIndexed"/>
+            <command name="glScissorIndexedv"/>
+            <command name="glDepthRangeArrayv"/>
+            <command name="glDepthRangeIndexed"/>
+            <command name="glGetFloati_v"/>
+            <command name="glGetDoublei_v"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_2" number="4.2">
+        <require comment="New aliases for old tokens">
+            <enum name="GL_COPY_READ_BUFFER_BINDING"/>
+            <enum name="GL_COPY_WRITE_BUFFER_BINDING"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_ACTIVE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PAUSED"/>
+        </require>
+        <require comment="Reuse tokens from ARB_base_instance (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_shading_language_420pack (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_transform_feedback_instanced (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_compressed_texture_pixel_storage">
+            <enum name="GL_UNPACK_COMPRESSED_BLOCK_WIDTH"/>
+            <enum name="GL_UNPACK_COMPRESSED_BLOCK_HEIGHT"/>
+            <enum name="GL_UNPACK_COMPRESSED_BLOCK_DEPTH"/>
+            <enum name="GL_UNPACK_COMPRESSED_BLOCK_SIZE"/>
+            <enum name="GL_PACK_COMPRESSED_BLOCK_WIDTH"/>
+            <enum name="GL_PACK_COMPRESSED_BLOCK_HEIGHT"/>
+            <enum name="GL_PACK_COMPRESSED_BLOCK_DEPTH"/>
+            <enum name="GL_PACK_COMPRESSED_BLOCK_SIZE"/>
+        </require>
+        <require comment="Reuse tokens from ARB_conservative_depth (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_internalformat_query">
+            <enum name="GL_NUM_SAMPLE_COUNTS"/>
+        </require>
+        <require comment="Reuse tokens from ARB_map_buffer_alignment">
+            <enum name="GL_MIN_MAP_BUFFER_ALIGNMENT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_shader_atomic_counters">
+            <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_BINDING"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_START"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_SIZE"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_VERTEX_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMBINED_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE"/>
+            <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS"/>
+            <enum name="GL_ACTIVE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX"/>
+            <enum name="GL_UNSIGNED_INT_ATOMIC_COUNTER"/>
+        </require>
+        <require comment="Reuse tokens from ARB_shader_image_load_store">
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_UNIFORM_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_FETCH_BARRIER_BIT"/>
+            <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT"/>
+            <enum name="GL_COMMAND_BARRIER_BIT"/>
+            <enum name="GL_PIXEL_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_BUFFER_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_FRAMEBUFFER_BARRIER_BIT"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT"/>
+            <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT"/>
+            <enum name="GL_ALL_BARRIER_BITS"/>
+            <enum name="GL_MAX_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"/>
+            <enum name="GL_IMAGE_BINDING_NAME"/>
+            <enum name="GL_IMAGE_BINDING_LEVEL"/>
+            <enum name="GL_IMAGE_BINDING_LAYERED"/>
+            <enum name="GL_IMAGE_BINDING_LAYER"/>
+            <enum name="GL_IMAGE_BINDING_ACCESS"/>
+            <enum name="GL_IMAGE_1D"/>
+            <enum name="GL_IMAGE_2D"/>
+            <enum name="GL_IMAGE_3D"/>
+            <enum name="GL_IMAGE_2D_RECT"/>
+            <enum name="GL_IMAGE_CUBE"/>
+            <enum name="GL_IMAGE_BUFFER"/>
+            <enum name="GL_IMAGE_1D_ARRAY"/>
+            <enum name="GL_IMAGE_2D_ARRAY"/>
+            <enum name="GL_IMAGE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_IMAGE_2D_MULTISAMPLE"/>
+            <enum name="GL_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_INT_IMAGE_1D"/>
+            <enum name="GL_INT_IMAGE_2D"/>
+            <enum name="GL_INT_IMAGE_3D"/>
+            <enum name="GL_INT_IMAGE_2D_RECT"/>
+            <enum name="GL_INT_IMAGE_CUBE"/>
+            <enum name="GL_INT_IMAGE_BUFFER"/>
+            <enum name="GL_INT_IMAGE_1D_ARRAY"/>
+            <enum name="GL_INT_IMAGE_2D_ARRAY"/>
+            <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_INT_IMAGE_2D_MULTISAMPLE"/>
+            <enum name="GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_1D"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_3D"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D_RECT"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_CUBE"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_1D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_MAX_IMAGE_SAMPLES"/>
+            <enum name="GL_IMAGE_BINDING_FORMAT"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS"/>
+            <enum name="GL_MAX_VERTEX_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_FRAGMENT_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_COMBINED_IMAGE_UNIFORMS"/>
+        </require>
+        <require comment="Reuse tokens from ARB_shading_language_packing (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_texture_compression_bptc">
+            <enum name="GL_COMPRESSED_RGBA_BPTC_UNORM"/>
+            <enum name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM"/>
+            <enum name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT"/>
+            <enum name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_texture_storage">
+            <enum name="GL_TEXTURE_IMMUTABLE_FORMAT"/>
+        </require>
+        <require comment="Reuse commands from ARB_base_instance">
+            <command name="glDrawArraysInstancedBaseInstance"/>
+            <command name="glDrawElementsInstancedBaseInstance"/>
+            <command name="glDrawElementsInstancedBaseVertexBaseInstance"/>
+        </require>
+        <require comment="Reuse commands from ARB_compressed_texture_pixel_storage (none)">
+        </require>
+        <require comment="Reuse commands from ARB_conservative_depth (none)">
+        </require>
+        <require comment="Reuse commands from ARB_internalformat_query">
+            <command name="glGetInternalformativ"/>
+        </require>
+        <require comment="Reuse commands from ARB_map_buffer_alignment (none)">
+        </require>
+        <require comment="Reuse commands from ARB_shader_atomic_counters">
+            <command name="glGetActiveAtomicCounterBufferiv"/>
+        </require>
+        <require comment="Reuse commands from ARB_shader_image_load_store">
+            <command name="glBindImageTexture"/>
+            <command name="glMemoryBarrier"/>
+        </require>
+        <require comment="Reuse commands from ARB_shading_language_420pack (none)">
+        </require>
+        <require comment="Reuse commands from ARB_shading_language_packing (none)">
+        </require>
+        <require comment="Reuse commands from ARB_texture_storage">
+            <command name="glTexStorage1D"/>
+            <command name="glTexStorage2D"/>
+            <command name="glTexStorage3D"/>
+        </require>
+        <require comment="Reuse commands from ARB_transform_feedback_instanced">
+            <command name="glDrawTransformFeedbackInstanced"/>
+            <command name="glDrawTransformFeedbackStreamInstanced"/>
+        </require>
+        <!-- Deprecated in OpenGL 4.2 core;
+             deprecate tag not defined/supported yet
+          <deprecate profile="core">
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+          </deprecate>
+        -->
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_3" number="4.3">
+        <require>
+            <enum name="GL_NUM_SHADING_LANGUAGE_VERSIONS"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_LONG"/>
+        </require>
+        <require comment="Reuse tokens from ARB_arrays_of_arrays (none, GLSL only)">
+        </require>
+        <require comment="Reuse tokens from ARB_fragment_layer_viewport (none, GLSL only)">
+        </require>
+        <require comment="Reuse tokens from ARB_shader_image_size (none, GLSL only)">
+        </require>
+        <require comment="Reuse tokens from ARB_ES3_compatibility">
+            <enum name="GL_COMPRESSED_RGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_RGBA8_ETC2_EAC"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"/>
+            <enum name="GL_COMPRESSED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_RG11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_RG11_EAC"/>
+            <enum name="GL_PRIMITIVE_RESTART_FIXED_INDEX"/>
+            <enum name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE"/>
+            <enum name="GL_MAX_ELEMENT_INDEX"/>
+        </require>
+        <require comment="Reuse tokens from ARB_clear_buffer_object (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_compute_shader">
+            <enum name="GL_COMPUTE_SHADER"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMPUTE_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_COMPUTE_SHARED_MEMORY_SIZE"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_COUNT"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+            <enum name="GL_COMPUTE_WORK_GROUP_SIZE"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER_BINDING"/>
+            <enum name="GL_COMPUTE_SHADER_BIT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_copy_image (none)">
+        </require>
+        <require comment="Reuse tokens from KHR_debug">
+            <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS"/>
+            <enum name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH"/>
+            <enum name="GL_DEBUG_CALLBACK_FUNCTION"/>
+            <enum name="GL_DEBUG_CALLBACK_USER_PARAM"/>
+            <enum name="GL_DEBUG_SOURCE_API"/>
+            <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM"/>
+            <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER"/>
+            <enum name="GL_DEBUG_SOURCE_THIRD_PARTY"/>
+            <enum name="GL_DEBUG_SOURCE_APPLICATION"/>
+            <enum name="GL_DEBUG_SOURCE_OTHER"/>
+            <enum name="GL_DEBUG_TYPE_ERROR"/>
+            <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_PORTABILITY"/>
+            <enum name="GL_DEBUG_TYPE_PERFORMANCE"/>
+            <enum name="GL_DEBUG_TYPE_OTHER"/>
+            <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH"/>
+            <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES"/>
+            <enum name="GL_DEBUG_LOGGED_MESSAGES"/>
+            <enum name="GL_DEBUG_SEVERITY_HIGH"/>
+            <enum name="GL_DEBUG_SEVERITY_MEDIUM"/>
+            <enum name="GL_DEBUG_SEVERITY_LOW"/>
+            <enum name="GL_DEBUG_TYPE_MARKER"/>
+            <enum name="GL_DEBUG_TYPE_PUSH_GROUP"/>
+            <enum name="GL_DEBUG_TYPE_POP_GROUP"/>
+            <enum name="GL_DEBUG_SEVERITY_NOTIFICATION"/>
+            <enum name="GL_MAX_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_BUFFER"/>
+            <enum name="GL_SHADER"/>
+            <enum name="GL_PROGRAM"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_QUERY"/>
+            <enum name="GL_PROGRAM_PIPELINE"/>
+            <enum name="GL_SAMPLER"/>
+            <enum name="GL_MAX_LABEL_LENGTH"/>
+            <enum name="GL_DEBUG_OUTPUT"/>
+            <enum name="GL_CONTEXT_FLAG_DEBUG_BIT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_explicit_uniform_location">
+            <enum name="GL_MAX_UNIFORM_LOCATIONS"/>
+        </require>
+        <require comment="Reuse tokens from ARB_framebuffer_no_attachments">
+            <enum name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_WIDTH"/>
+            <enum name="GL_MAX_FRAMEBUFFER_HEIGHT"/>
+            <enum name="GL_MAX_FRAMEBUFFER_LAYERS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_SAMPLES"/>
+        </require>
+        <require comment="Reuse tokens from ARB_internalformat_query2">
+            <enum name="GL_INTERNALFORMAT_SUPPORTED"/>
+            <enum name="GL_INTERNALFORMAT_PREFERRED"/>
+            <enum name="GL_INTERNALFORMAT_RED_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_GREEN_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_BLUE_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_ALPHA_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_DEPTH_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_STENCIL_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_SHARED_SIZE"/>
+            <enum name="GL_INTERNALFORMAT_RED_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_GREEN_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_BLUE_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_ALPHA_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_DEPTH_TYPE"/>
+            <enum name="GL_INTERNALFORMAT_STENCIL_TYPE"/>
+            <enum name="GL_MAX_WIDTH"/>
+            <enum name="GL_MAX_HEIGHT"/>
+            <enum name="GL_MAX_DEPTH"/>
+            <enum name="GL_MAX_LAYERS"/>
+            <enum name="GL_MAX_COMBINED_DIMENSIONS"/>
+            <enum name="GL_COLOR_COMPONENTS"/>
+            <enum name="GL_DEPTH_COMPONENTS"/>
+            <enum name="GL_STENCIL_COMPONENTS"/>
+            <enum name="GL_COLOR_RENDERABLE"/>
+            <enum name="GL_DEPTH_RENDERABLE"/>
+            <enum name="GL_STENCIL_RENDERABLE"/>
+            <enum name="GL_FRAMEBUFFER_RENDERABLE"/>
+            <enum name="GL_FRAMEBUFFER_RENDERABLE_LAYERED"/>
+            <enum name="GL_FRAMEBUFFER_BLEND"/>
+            <enum name="GL_READ_PIXELS"/>
+            <enum name="GL_READ_PIXELS_FORMAT"/>
+            <enum name="GL_READ_PIXELS_TYPE"/>
+            <enum name="GL_TEXTURE_IMAGE_FORMAT"/>
+            <enum name="GL_TEXTURE_IMAGE_TYPE"/>
+            <enum name="GL_GET_TEXTURE_IMAGE_FORMAT"/>
+            <enum name="GL_GET_TEXTURE_IMAGE_TYPE"/>
+            <enum name="GL_MIPMAP"/>
+            <enum name="GL_MANUAL_GENERATE_MIPMAP"/>
+            <enum name="GL_AUTO_GENERATE_MIPMAP"/>
+            <enum name="GL_COLOR_ENCODING"/>
+            <enum name="GL_SRGB_READ"/>
+            <enum name="GL_SRGB_WRITE"/>
+            <enum name="GL_FILTER"/>
+            <enum name="GL_VERTEX_TEXTURE"/>
+            <enum name="GL_TESS_CONTROL_TEXTURE"/>
+            <enum name="GL_TESS_EVALUATION_TEXTURE"/>
+            <enum name="GL_GEOMETRY_TEXTURE"/>
+            <enum name="GL_FRAGMENT_TEXTURE"/>
+            <enum name="GL_COMPUTE_TEXTURE"/>
+            <enum name="GL_TEXTURE_SHADOW"/>
+            <enum name="GL_TEXTURE_GATHER"/>
+            <enum name="GL_TEXTURE_GATHER_SHADOW"/>
+            <enum name="GL_SHADER_IMAGE_LOAD"/>
+            <enum name="GL_SHADER_IMAGE_STORE"/>
+            <enum name="GL_SHADER_IMAGE_ATOMIC"/>
+            <enum name="GL_IMAGE_TEXEL_SIZE"/>
+            <enum name="GL_IMAGE_COMPATIBILITY_CLASS"/>
+            <enum name="GL_IMAGE_PIXEL_FORMAT"/>
+            <enum name="GL_IMAGE_PIXEL_TYPE"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE"/>
+            <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_WIDTH"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT"/>
+            <enum name="GL_TEXTURE_COMPRESSED_BLOCK_SIZE"/>
+            <enum name="GL_CLEAR_BUFFER"/>
+            <enum name="GL_TEXTURE_VIEW"/>
+            <enum name="GL_VIEW_COMPATIBILITY_CLASS"/>
+            <enum name="GL_FULL_SUPPORT"/>
+            <enum name="GL_CAVEAT_SUPPORT"/>
+            <enum name="GL_IMAGE_CLASS_4_X_32"/>
+            <enum name="GL_IMAGE_CLASS_2_X_32"/>
+            <enum name="GL_IMAGE_CLASS_1_X_32"/>
+            <enum name="GL_IMAGE_CLASS_4_X_16"/>
+            <enum name="GL_IMAGE_CLASS_2_X_16"/>
+            <enum name="GL_IMAGE_CLASS_1_X_16"/>
+            <enum name="GL_IMAGE_CLASS_4_X_8"/>
+            <enum name="GL_IMAGE_CLASS_2_X_8"/>
+            <enum name="GL_IMAGE_CLASS_1_X_8"/>
+            <enum name="GL_IMAGE_CLASS_11_11_10"/>
+            <enum name="GL_IMAGE_CLASS_10_10_10_2"/>
+            <enum name="GL_VIEW_CLASS_128_BITS"/>
+            <enum name="GL_VIEW_CLASS_96_BITS"/>
+            <enum name="GL_VIEW_CLASS_64_BITS"/>
+            <enum name="GL_VIEW_CLASS_48_BITS"/>
+            <enum name="GL_VIEW_CLASS_32_BITS"/>
+            <enum name="GL_VIEW_CLASS_24_BITS"/>
+            <enum name="GL_VIEW_CLASS_16_BITS"/>
+            <enum name="GL_VIEW_CLASS_8_BITS"/>
+            <enum name="GL_VIEW_CLASS_S3TC_DXT1_RGB"/>
+            <enum name="GL_VIEW_CLASS_S3TC_DXT1_RGBA"/>
+            <enum name="GL_VIEW_CLASS_S3TC_DXT3_RGBA"/>
+            <enum name="GL_VIEW_CLASS_S3TC_DXT5_RGBA"/>
+            <enum name="GL_VIEW_CLASS_RGTC1_RED"/>
+            <enum name="GL_VIEW_CLASS_RGTC2_RG"/>
+            <enum name="GL_VIEW_CLASS_BPTC_UNORM"/>
+            <enum name="GL_VIEW_CLASS_BPTC_FLOAT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_invalidate_subdata (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_multi_draw_indirect (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_program_interface_query">
+            <enum name="GL_UNIFORM"/>
+            <enum name="GL_UNIFORM_BLOCK"/>
+            <enum name="GL_PROGRAM_INPUT"/>
+            <enum name="GL_PROGRAM_OUTPUT"/>
+            <enum name="GL_BUFFER_VARIABLE"/>
+            <enum name="GL_SHADER_STORAGE_BLOCK"/>
+            <enum name="GL_VERTEX_SUBROUTINE"/>
+            <enum name="GL_TESS_CONTROL_SUBROUTINE"/>
+            <enum name="GL_TESS_EVALUATION_SUBROUTINE"/>
+            <enum name="GL_GEOMETRY_SUBROUTINE"/>
+            <enum name="GL_FRAGMENT_SUBROUTINE"/>
+            <enum name="GL_COMPUTE_SUBROUTINE"/>
+            <enum name="GL_VERTEX_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TESS_CONTROL_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TESS_EVALUATION_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_GEOMETRY_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_FRAGMENT_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_COMPUTE_SUBROUTINE_UNIFORM"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING"/>
+            <enum name="GL_ACTIVE_RESOURCES"/>
+            <enum name="GL_MAX_NAME_LENGTH"/>
+            <enum name="GL_MAX_NUM_ACTIVE_VARIABLES"/>
+            <enum name="GL_MAX_NUM_COMPATIBLE_SUBROUTINES"/>
+            <enum name="GL_NAME_LENGTH"/>
+            <enum name="GL_TYPE"/>
+            <enum name="GL_ARRAY_SIZE"/>
+            <enum name="GL_OFFSET"/>
+            <enum name="GL_BLOCK_INDEX"/>
+            <enum name="GL_ARRAY_STRIDE"/>
+            <enum name="GL_MATRIX_STRIDE"/>
+            <enum name="GL_IS_ROW_MAJOR"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_INDEX"/>
+            <enum name="GL_BUFFER_BINDING"/>
+            <enum name="GL_BUFFER_DATA_SIZE"/>
+            <enum name="GL_NUM_ACTIVE_VARIABLES"/>
+            <enum name="GL_ACTIVE_VARIABLES"/>
+            <enum name="GL_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <enum name="GL_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_REFERENCED_BY_COMPUTE_SHADER"/>
+            <enum name="GL_TOP_LEVEL_ARRAY_SIZE"/>
+            <enum name="GL_TOP_LEVEL_ARRAY_STRIDE"/>
+            <enum name="GL_LOCATION"/>
+            <enum name="GL_LOCATION_INDEX"/>
+            <enum name="GL_IS_PER_PATCH"/>
+        </require>
+        <require comment="Reuse tokens from ARB_robust_buffer_access_behavior (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_shader_storage_buffer_object">
+            <enum name="GL_SHADER_STORAGE_BUFFER"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_BINDING"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_START"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_SIZE"/>
+            <enum name="GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_SHADER_STORAGE_BLOCK_SIZE"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_SHADER_STORAGE_BARRIER_BIT"/>
+            <enum name="GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES"/>
+        </require>
+        <require comment="Reuse tokens from ARB_stencil_texturing">
+            <enum name="GL_DEPTH_STENCIL_TEXTURE_MODE"/>
+        </require>
+        <require comment="Reuse tokens from ARB_texture_buffer_range">
+            <enum name="GL_TEXTURE_BUFFER_OFFSET"/>
+            <enum name="GL_TEXTURE_BUFFER_SIZE"/>
+            <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT"/>
+        </require>
+        <require comment="Reuse tokens from ARB_texture_query_levels (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_texture_storage_multisample (none)">
+        </require>
+        <require comment="Reuse tokens from ARB_texture_view">
+            <enum name="GL_TEXTURE_VIEW_MIN_LEVEL"/>
+            <enum name="GL_TEXTURE_VIEW_NUM_LEVELS"/>
+            <enum name="GL_TEXTURE_VIEW_MIN_LAYER"/>
+            <enum name="GL_TEXTURE_VIEW_NUM_LAYERS"/>
+            <enum name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+        </require>
+        <require comment="Reuse tokens from ARB_vertex_attrib_binding">
+            <enum name="GL_VERTEX_ATTRIB_BINDING"/>
+            <enum name="GL_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+            <enum name="GL_VERTEX_BINDING_DIVISOR"/>
+            <enum name="GL_VERTEX_BINDING_OFFSET"/>
+            <enum name="GL_VERTEX_BINDING_STRIDE"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_BINDINGS"/>
+            <enum name="GL_VERTEX_BINDING_BUFFER" comment="Added in 2013/10/22 update to the spec"/>
+        </require>
+        <require comment="Reuse commands from ARB_arrays_of_arrays (none, GLSL only)">
+        </require>
+        <require comment="Reuse commands from ARB_clear_buffer_object">
+            <command name="glClearBufferData"/>
+            <command name="glClearBufferSubData"/>
+        </require>
+        <require comment="Reuse commands from ARB_compute_shader">
+            <command name="glDispatchCompute"/>
+            <command name="glDispatchComputeIndirect"/>
+        </require>
+        <require comment="Reuse commands from ARB_copy_image">
+            <command name="glCopyImageSubData"/>
+        </require>
+        <require comment="Reuse commands from ARB_ES3_compatibility (none)">
+        </require>
+        <require comment="Reuse commands from ARB_explicit_uniform_location (none)">
+        </require>
+        <require comment="Reuse commands from ARB_fragment_layer_viewport (none, GLSL only)">
+        </require>
+        <require comment="Reuse commands from ARB_framebuffer_no_attachments">
+            <command name="glFramebufferParameteri"/>
+            <command name="glGetFramebufferParameteriv"/>
+        </require>
+        <require comment="Reuse commands from ARB_internalformat_query2">
+            <command name="glGetInternalformati64v"/>
+        </require>
+        <require comment="Reuse commands from ARB_invalidate_subdata">
+            <command name="glInvalidateTexSubImage"/>
+            <command name="glInvalidateTexImage"/>
+            <command name="glInvalidateBufferSubData"/>
+            <command name="glInvalidateBufferData"/>
+            <command name="glInvalidateFramebuffer"/>
+            <command name="glInvalidateSubFramebuffer"/>
+        </require>
+        <require comment="Reuse commands from ARB_multi_draw_indirect">
+            <command name="glMultiDrawArraysIndirect"/>
+            <command name="glMultiDrawElementsIndirect"/>
+        </require>
+        <require comment="Reuse commands from ARB_program_interface_query">
+            <command name="glGetProgramInterfaceiv"/>
+            <command name="glGetProgramResourceIndex"/>
+            <command name="glGetProgramResourceName"/>
+            <command name="glGetProgramResourceiv"/>
+            <command name="glGetProgramResourceLocation"/>
+            <command name="glGetProgramResourceLocationIndex"/>
+        </require>
+        <require comment="Reuse commands from ARB_robust_buffer_access_behavior (none)">
+        </require>
+        <require comment="Reuse commands from ARB_shader_image_size (none, GLSL only)">
+        </require>
+        <require comment="Reuse commands from ARB_shader_storage_buffer_object">
+            <command name="glShaderStorageBlockBinding"/>
+        </require>
+        <require comment="Reuse commands from ARB_stencil_texturing (none)">
+        </require>
+        <require comment="Reuse commands from ARB_texture_buffer_range">
+            <command name="glTexBufferRange"/>
+        </require>
+        <require comment="Reuse commands from ARB_texture_query_levels (none)">
+        </require>
+        <require comment="Reuse commands from ARB_texture_storage_multisample">
+            <command name="glTexStorage2DMultisample"/>
+            <command name="glTexStorage3DMultisample"/>
+        </require>
+        <require comment="Reuse commands from ARB_texture_view">
+            <command name="glTextureView"/>
+        </require>
+        <require comment="Reuse commands from ARB_vertex_attrib_binding">
+            <command name="glBindVertexBuffer"/>
+            <command name="glVertexAttribFormat"/>
+            <command name="glVertexAttribIFormat"/>
+            <command name="glVertexAttribLFormat"/>
+            <command name="glVertexAttribBinding"/>
+            <command name="glVertexBindingDivisor"/>
+        </require>
+        <require comment="Reuse commands from KHR_debug (includes ARB_debug_output commands promoted to KHR without suffixes)">
+            <command name="glDebugMessageControl"/>
+            <command name="glDebugMessageInsert"/>
+            <command name="glDebugMessageCallback"/>
+            <command name="glGetDebugMessageLog"/>
+            <command name="glPushDebugGroup"/>
+            <command name="glPopDebugGroup"/>
+            <command name="glObjectLabel"/>
+            <command name="glGetObjectLabel"/>
+            <command name="glObjectPtrLabel"/>
+            <command name="glGetObjectPtrLabel"/>
+            <command name="glGetPointerv"/>
+        </require>
+        <require profile="compatibility" comment="KHR_debug functionality not supported in core profile">
+            <enum name="GL_DISPLAY_LIST"/>
+        </require>
+        <require profile="core" comment="Restore functionality removed in GL 3.2 core to GL 4.3. Needed for debug interface.">
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+        </require>
+        <!-- Deprecated in OpenGL 4.3 core;
+             deprecate tag not defined/supported yet
+          <deprecate profile="core">
+            <enum name="GL_UNPACK_LSB_FIRST"/>
+            <enum name="GL_PACK_LSB_FIRST"/>
+          </deprecate>
+        -->
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_4" number="4.4">
+        <require>
+            <enum name="GL_MAX_VERTEX_ATTRIB_STRIDE"/>
+            <enum name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED"/>
+            <enum name="GL_TEXTURE_BUFFER_BINDING"/>
+        </require>
+        <require comment="Reuse GL_ARB_buffer_storage">
+            <enum name="GL_MAP_READ_BIT"/>
+            <enum name="GL_MAP_WRITE_BIT"/>
+            <enum name="GL_MAP_PERSISTENT_BIT"/>
+            <enum name="GL_MAP_COHERENT_BIT"/>
+            <enum name="GL_DYNAMIC_STORAGE_BIT"/>
+            <enum name="GL_CLIENT_STORAGE_BIT"/>
+            <enum name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_BUFFER_IMMUTABLE_STORAGE"/>
+            <enum name="GL_BUFFER_STORAGE_FLAGS"/>
+            <command name="glBufferStorage"/>
+        </require>
+        <require comment="Reuse GL_ARB_clear_texture">
+            <enum name="GL_CLEAR_TEXTURE"/>
+            <command name="glClearTexImage"/>
+            <command name="glClearTexSubImage"/>
+        </require>
+        <require comment="Reuse GL_ARB_enhanced_layouts">
+            <enum name="GL_LOCATION_COMPONENT"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_INDEX"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE"/>
+        </require>
+        <require comment="Reuse GL_ARB_multi_bind (none)">
+            <command name="glBindBuffersBase"/>
+            <command name="glBindBuffersRange"/>
+            <command name="glBindTextures"/>
+            <command name="glBindSamplers"/>
+            <command name="glBindImageTextures"/>
+            <command name="glBindVertexBuffers"/>
+        </require>
+        <require comment="Reuse GL_ARB_query_buffer_object">
+            <enum name="GL_QUERY_BUFFER"/>
+            <enum name="GL_QUERY_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_QUERY_BUFFER_BINDING"/>
+            <enum name="GL_QUERY_RESULT_NO_WAIT"/>
+        </require>
+        <require comment="Reuse GL_ARB_texture_mirror_clamp_to_edge">
+            <enum name="GL_MIRROR_CLAMP_TO_EDGE"/>
+        </require>
+        <require comment="Reuse GL_ARB_texture_stencil8">
+            <enum name="GL_STENCIL_INDEX"/>
+            <enum name="GL_STENCIL_INDEX8"/>
+        </require>
+        <require comment="Reuse GL_ARB_vertex_type_10f_11f_11f_rev">
+            <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_5" number="4.5">
+        <require comment="Added robustness functionality">
+            <enum name="GL_CONTEXT_LOST"/>
+        </require>
+        <require comment="Reuse GL_ARB_clip_control">
+            <command name="glClipControl"/>
+            <enum name="GL_LOWER_LEFT"/>
+            <enum name="GL_UPPER_LEFT"/>
+            <enum name="GL_NEGATIVE_ONE_TO_ONE"/>
+            <enum name="GL_ZERO_TO_ONE"/>
+            <enum name="GL_CLIP_ORIGIN"/>
+            <enum name="GL_CLIP_DEPTH_MODE"/>
+        </require>
+        <require comment="Reuse GL_ARB_conditional_render_inverted">
+            <enum name="GL_QUERY_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_NO_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_BY_REGION_WAIT_INVERTED"/>
+            <enum name="GL_QUERY_BY_REGION_NO_WAIT_INVERTED"/>
+        </require>
+        <require comment="Reuse GL_ARB_cull_distance">
+            <enum name="GL_MAX_CULL_DISTANCES"/>
+            <enum name="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES"/>
+        </require>
+        <require comment="Reuse GL_ARB_direct_state_access">
+            <enum name="GL_TEXTURE_TARGET"/>
+            <enum name="GL_QUERY_TARGET"/>
+            <enum name="GL_TEXTURE_BINDING_1D"/>
+            <enum name="GL_TEXTURE_BINDING_1D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_3D"/>
+            <enum name="GL_TEXTURE_BINDING_BUFFER"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_RECTANGLE"/>
+            <command name="glCreateTransformFeedbacks"/>
+            <command name="glTransformFeedbackBufferBase"/>
+            <command name="glTransformFeedbackBufferRange"/>
+            <command name="glGetTransformFeedbackiv"/>
+            <command name="glGetTransformFeedbacki_v"/>
+            <command name="glGetTransformFeedbacki64_v"/>
+            <command name="glCreateBuffers"/>
+            <command name="glNamedBufferStorage"/>
+            <command name="glNamedBufferData"/>
+            <command name="glNamedBufferSubData"/>
+            <command name="glCopyNamedBufferSubData"/>
+            <command name="glClearNamedBufferData"/>
+            <command name="glClearNamedBufferSubData"/>
+            <command name="glMapNamedBuffer"/>
+            <command name="glMapNamedBufferRange"/>
+            <command name="glUnmapNamedBuffer"/>
+            <command name="glFlushMappedNamedBufferRange"/>
+            <command name="glGetNamedBufferParameteriv"/>
+            <command name="glGetNamedBufferParameteri64v"/>
+            <command name="glGetNamedBufferPointerv"/>
+            <command name="glGetNamedBufferSubData"/>
+            <command name="glCreateFramebuffers"/>
+            <command name="glNamedFramebufferRenderbuffer"/>
+            <command name="glNamedFramebufferParameteri"/>
+            <command name="glNamedFramebufferTexture"/>
+            <command name="glNamedFramebufferTextureLayer"/>
+            <command name="glNamedFramebufferDrawBuffer"/>
+            <command name="glNamedFramebufferDrawBuffers"/>
+            <command name="glNamedFramebufferReadBuffer"/>
+            <command name="glInvalidateNamedFramebufferData"/>
+            <command name="glInvalidateNamedFramebufferSubData"/>
+            <command name="glClearNamedFramebufferiv"/>
+            <command name="glClearNamedFramebufferuiv"/>
+            <command name="glClearNamedFramebufferfv"/>
+            <command name="glClearNamedFramebufferfi"/>
+            <command name="glBlitNamedFramebuffer"/>
+            <command name="glCheckNamedFramebufferStatus"/>
+            <command name="glGetNamedFramebufferParameteriv"/>
+            <command name="glGetNamedFramebufferAttachmentParameteriv"/>
+            <command name="glCreateRenderbuffers"/>
+            <command name="glNamedRenderbufferStorage"/>
+            <command name="glNamedRenderbufferStorageMultisample"/>
+            <command name="glGetNamedRenderbufferParameteriv"/>
+            <command name="glCreateTextures"/>
+            <command name="glTextureBuffer"/>
+            <command name="glTextureBufferRange"/>
+            <command name="glTextureStorage1D"/>
+            <command name="glTextureStorage2D"/>
+            <command name="glTextureStorage3D"/>
+            <command name="glTextureStorage2DMultisample"/>
+            <command name="glTextureStorage3DMultisample"/>
+            <command name="glTextureSubImage1D"/>
+            <command name="glTextureSubImage2D"/>
+            <command name="glTextureSubImage3D"/>
+            <command name="glCompressedTextureSubImage1D"/>
+            <command name="glCompressedTextureSubImage2D"/>
+            <command name="glCompressedTextureSubImage3D"/>
+            <command name="glCopyTextureSubImage1D"/>
+            <command name="glCopyTextureSubImage2D"/>
+            <command name="glCopyTextureSubImage3D"/>
+            <command name="glTextureParameterf"/>
+            <command name="glTextureParameterfv"/>
+            <command name="glTextureParameteri"/>
+            <command name="glTextureParameterIiv"/>
+            <command name="glTextureParameterIuiv"/>
+            <command name="glTextureParameteriv"/>
+            <command name="glGenerateTextureMipmap"/>
+            <command name="glBindTextureUnit"/>
+            <command name="glGetTextureImage"/>
+            <command name="glGetCompressedTextureImage"/>
+            <command name="glGetTextureLevelParameterfv"/>
+            <command name="glGetTextureLevelParameteriv"/>
+            <command name="glGetTextureParameterfv"/>
+            <command name="glGetTextureParameterIiv"/>
+            <command name="glGetTextureParameterIuiv"/>
+            <command name="glGetTextureParameteriv"/>
+            <command name="glCreateVertexArrays"/>
+            <command name="glDisableVertexArrayAttrib"/>
+            <command name="glEnableVertexArrayAttrib"/>
+            <command name="glVertexArrayElementBuffer"/>
+            <command name="glVertexArrayVertexBuffer"/>
+            <command name="glVertexArrayVertexBuffers"/>
+            <command name="glVertexArrayAttribBinding"/>
+            <command name="glVertexArrayAttribFormat"/>
+            <command name="glVertexArrayAttribIFormat"/>
+            <command name="glVertexArrayAttribLFormat"/>
+            <command name="glVertexArrayBindingDivisor"/>
+            <command name="glGetVertexArrayiv"/>
+            <command name="glGetVertexArrayIndexediv"/>
+            <command name="glGetVertexArrayIndexed64iv"/>
+            <command name="glCreateSamplers"/>
+            <command name="glCreateProgramPipelines"/>
+            <command name="glCreateQueries"/>
+            <command name="glGetQueryBufferObjecti64v"/>
+            <command name="glGetQueryBufferObjectiv"/>
+            <command name="glGetQueryBufferObjectui64v"/>
+            <command name="glGetQueryBufferObjectuiv"/>
+        </require>
+        <require comment="Reuse GL_ARB_ES3_1_compatibility">
+            <enum name="GL_BACK"/>
+            <command name="glMemoryBarrierByRegion"/>
+        </require>
+        <require comment="Reuse GL_ARB_get_texture_sub_image">
+            <command name="glGetTextureSubImage"/>
+            <command name="glGetCompressedTextureSubImage"/>
+        </require>
+        <require comment="Reuse GL_ARB_robustness">
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_GUILTY_CONTEXT_RESET"/>
+            <enum name="GL_INNOCENT_CONTEXT_RESET"/>
+            <enum name="GL_UNKNOWN_CONTEXT_RESET"/>
+            <enum name="GL_RESET_NOTIFICATION_STRATEGY"/>
+            <enum name="GL_LOSE_CONTEXT_ON_RESET"/>
+            <enum name="GL_NO_RESET_NOTIFICATION"/>
+            <enum name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT"/>
+            <enum name="GL_CONTEXT_LOST"/>
+            <command name="glGetGraphicsResetStatus"/>
+            <command name="glGetnCompressedTexImage"/>
+            <command name="glGetnTexImage"/>
+            <command name="glGetnUniformdv"/>
+            <command name="glGetnUniformfv"/>
+            <command name="glGetnUniformiv"/>
+            <command name="glGetnUniformuiv"/>
+            <command name="glReadnPixels"/>
+        </require>
+        <require profile="compatibility" comment="Reuse GL_ARB_robustness">
+            <command name="glGetnMapdv"/>
+            <command name="glGetnMapfv"/>
+            <command name="glGetnMapiv"/>
+            <command name="glGetnPixelMapfv"/>
+            <command name="glGetnPixelMapuiv"/>
+            <command name="glGetnPixelMapusv"/>
+            <command name="glGetnPolygonStipple"/>
+            <command name="glGetnColorTable"/>
+            <command name="glGetnConvolutionFilter"/>
+            <command name="glGetnSeparableFilter"/>
+            <command name="glGetnHistogram"/>
+            <command name="glGetnMinmax"/>
+        </require>
+        <require comment="Reuse GL_ARB_texture_barrier">
+            <command name="glTextureBarrier"/>
+        </require>
+        <require comment="Reuse GL_KHR_context_flush_control">
+            <enum name="GL_CONTEXT_RELEASE_BEHAVIOR"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH"/>
+        </require>
+    </feature>
+    <feature api="gl" name="GL_VERSION_4_6" number="4.6">
+        <require comment="Reuse GL_KHR_context_flush_control">
+            <enum name="GL_CONTEXT_RELEASE_BEHAVIOR"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH"/>
+        </require>
+        <require comment="Reuse GL_ARB_gl_spirv">
+            <enum name="GL_SHADER_BINARY_FORMAT_SPIR_V"/>
+            <enum name="GL_SPIR_V_BINARY"/>
+            <command name="glSpecializeShader"/>
+        </require>
+        <require comment="Reuse GL_ARB_indirect_parameters">
+            <enum name="GL_PARAMETER_BUFFER"/>
+            <enum name="GL_PARAMETER_BUFFER_BINDING"/>
+            <command name="glMultiDrawArraysIndirectCount"/>
+            <command name="glMultiDrawElementsIndirectCount"/>
+        </require>
+        <require comment="Reuse GL_KHR_no_error">
+            <enum name="GL_CONTEXT_FLAG_NO_ERROR_BIT"/>
+        </require>
+        <require comment="Reuse GL_ARB_pipeline_statistics_query">
+            <enum name="GL_VERTICES_SUBMITTED"/>
+            <enum name="GL_PRIMITIVES_SUBMITTED"/>
+            <enum name="GL_VERTEX_SHADER_INVOCATIONS"/>
+            <enum name="GL_TESS_CONTROL_SHADER_PATCHES"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_INVOCATIONS"/>
+            <enum name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+            <enum name="GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED"/>
+            <enum name="GL_FRAGMENT_SHADER_INVOCATIONS"/>
+            <enum name="GL_COMPUTE_SHADER_INVOCATIONS"/>
+            <enum name="GL_CLIPPING_INPUT_PRIMITIVES"/>
+            <enum name="GL_CLIPPING_OUTPUT_PRIMITIVES"/>
+        </require>
+        <require comment="Reuse GL_ARB_polygon_offset_clamp">
+            <enum name="GL_POLYGON_OFFSET_CLAMP"/>
+            <command name="glPolygonOffsetClamp"/>
+        </require>
+        <require comment="Reuse GL_ARB_shader_atomic_counter_ops (none)"/>
+        <require comment="Reuse GL_ARB_shader_draw_parameters (none)"/>
+        <require comment="Reuse GL_ARB_shader_group_vote (none)"/>
+        <require comment="Reuse GL_ARB_spirv_extensions">
+            <enum name="GL_SPIR_V_EXTENSIONS"/>
+            <enum name="GL_NUM_SPIR_V_EXTENSIONS"/>
+        </require>
+        <require comment="Reuse GL_ARB_texture_filter_anisotropic">
+            <enum name="GL_TEXTURE_MAX_ANISOTROPY"/>
+            <enum name="GL_MAX_TEXTURE_MAX_ANISOTROPY"/>
+        </require>
+        <require comment="Reuse GL_ARB_transform_feedback_overflow_query">
+            <enum name="GL_TRANSFORM_FEEDBACK_OVERFLOW"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW"/>
+        </require>
+    </feature>
+
+
+    <!-- SECTION: OpenGL ES 1.0/1.1 API interface definitions. -->
+    <feature api="gles1" name="GL_VERSION_ES_CM_1_0" number="1.0">
+        <require comment="Not used by the API, for compatibility with old gl.h">
+            <type name="GLbyte"/>
+            <type name="GLclampf"/>
+            <type name="GLshort"/>
+            <type name="GLushort"/>
+        </require>
+        <require>
+            <!-- Additional API definition macros - ES 1.0/1.1, common/common-lite all in one header -->
+            <enum name="GL_VERSION_ES_CL_1_0"/>
+            <enum name="GL_VERSION_ES_CM_1_1"/>
+            <enum name="GL_VERSION_ES_CL_1_1"/>
+            <type name="GLvoid" comment="No longer used in headers"/>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_FALSE"/>
+            <enum name="GL_TRUE"/>
+            <enum name="GL_POINTS"/>
+            <enum name="GL_LINES"/>
+            <enum name="GL_LINE_LOOP"/>
+            <enum name="GL_LINE_STRIP"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_TRIANGLE_STRIP"/>
+            <enum name="GL_TRIANGLE_FAN"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_NOTEQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_ZERO"/>
+            <enum name="GL_ONE"/>
+            <enum name="GL_SRC_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+            <enum name="GL_SRC_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+            <enum name="GL_DST_ALPHA"/>
+            <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+            <enum name="GL_DST_COLOR"/>
+            <enum name="GL_ONE_MINUS_DST_COLOR"/>
+            <enum name="GL_SRC_ALPHA_SATURATE"/>
+            <enum name="GL_CLIP_PLANE0"/>
+            <enum name="GL_CLIP_PLANE1"/>
+            <enum name="GL_CLIP_PLANE2"/>
+            <enum name="GL_CLIP_PLANE3"/>
+            <enum name="GL_CLIP_PLANE4"/>
+            <enum name="GL_CLIP_PLANE5"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_FOG"/>
+            <enum name="GL_LIGHTING"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_ALPHA_TEST"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_COLOR_LOGIC_OP"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_POINT_SMOOTH"/>
+            <enum name="GL_LINE_SMOOTH"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_COLOR_MATERIAL"/>
+            <enum name="GL_NORMALIZE"/>
+            <enum name="GL_RESCALE_NORMAL"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_NORMAL_ARRAY"/>
+            <enum name="GL_COLOR_ARRAY"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY"/>
+            <enum name="GL_MULTISAMPLE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_ONE"/>
+            <enum name="GL_SAMPLE_COVERAGE"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_INVALID_ENUM"/>
+            <enum name="GL_INVALID_VALUE"/>
+            <enum name="GL_INVALID_OPERATION"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <enum name="GL_OUT_OF_MEMORY"/>
+            <enum name="GL_EXP"/>
+            <enum name="GL_EXP2"/>
+            <enum name="GL_FOG_DENSITY"/>
+            <enum name="GL_FOG_START"/>
+            <enum name="GL_FOG_END"/>
+            <enum name="GL_FOG_MODE"/>
+            <enum name="GL_FOG_COLOR"/>
+            <enum name="GL_CW"/>
+            <enum name="GL_CCW"/>
+            <enum name="GL_CURRENT_COLOR"/>
+            <enum name="GL_CURRENT_NORMAL"/>
+            <enum name="GL_CURRENT_TEXTURE_COORDS"/>
+            <enum name="GL_POINT_SIZE"/>
+            <enum name="GL_POINT_SIZE_MIN"/>
+            <enum name="GL_POINT_SIZE_MAX"/>
+            <enum name="GL_POINT_FADE_THRESHOLD_SIZE"/>
+            <enum name="GL_POINT_DISTANCE_ATTENUATION"/>
+            <enum name="GL_SMOOTH_POINT_SIZE_RANGE"/>
+            <enum name="GL_LINE_WIDTH"/>
+            <enum name="GL_SMOOTH_LINE_WIDTH_RANGE"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+            <enum name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <enum name="GL_CULL_FACE_MODE"/>
+            <enum name="GL_FRONT_FACE"/>
+            <enum name="GL_SHADE_MODEL"/>
+            <enum name="GL_DEPTH_RANGE"/>
+            <enum name="GL_DEPTH_WRITEMASK"/>
+            <enum name="GL_DEPTH_CLEAR_VALUE"/>
+            <enum name="GL_DEPTH_FUNC"/>
+            <enum name="GL_STENCIL_CLEAR_VALUE"/>
+            <enum name="GL_STENCIL_FUNC"/>
+            <enum name="GL_STENCIL_VALUE_MASK"/>
+            <enum name="GL_STENCIL_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_REF"/>
+            <enum name="GL_STENCIL_WRITEMASK"/>
+            <enum name="GL_MATRIX_MODE"/>
+            <enum name="GL_VIEWPORT"/>
+            <enum name="GL_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MODELVIEW_MATRIX"/>
+            <enum name="GL_PROJECTION_MATRIX"/>
+            <enum name="GL_TEXTURE_MATRIX"/>
+            <enum name="GL_ALPHA_TEST_FUNC"/>
+            <enum name="GL_ALPHA_TEST_REF"/>
+            <enum name="GL_BLEND_DST"/>
+            <enum name="GL_BLEND_SRC"/>
+            <enum name="GL_LOGIC_OP_MODE"/>
+            <enum name="GL_SCISSOR_BOX"/>
+            <enum name="GL_COLOR_CLEAR_VALUE"/>
+            <enum name="GL_COLOR_WRITEMASK"/>
+            <enum name="GL_MAX_LIGHTS"/>
+            <enum name="GL_MAX_CLIP_PLANES"/>
+            <enum name="GL_MAX_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_MODELVIEW_STACK_DEPTH"/>
+            <enum name="GL_MAX_PROJECTION_STACK_DEPTH"/>
+            <enum name="GL_MAX_TEXTURE_STACK_DEPTH"/>
+            <enum name="GL_MAX_VIEWPORT_DIMS"/>
+            <enum name="GL_MAX_TEXTURE_UNITS"/>
+            <enum name="GL_SUBPIXEL_BITS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_POLYGON_OFFSET_UNITS"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_POLYGON_OFFSET_FACTOR"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_VERTEX_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ARRAY_STRIDE"/>
+            <enum name="GL_NORMAL_ARRAY_TYPE"/>
+            <enum name="GL_NORMAL_ARRAY_STRIDE"/>
+            <enum name="GL_COLOR_ARRAY_SIZE"/>
+            <enum name="GL_COLOR_ARRAY_TYPE"/>
+            <enum name="GL_COLOR_ARRAY_STRIDE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_SIZE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_TYPE"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ARRAY_POINTER"/>
+            <enum name="GL_NORMAL_ARRAY_POINTER"/>
+            <enum name="GL_COLOR_ARRAY_POINTER"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_POINTER"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLE_COVERAGE_VALUE"/>
+            <enum name="GL_SAMPLE_COVERAGE_INVERT"/>
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_DONT_CARE"/>
+            <enum name="GL_FASTEST"/>
+            <enum name="GL_NICEST"/>
+            <enum name="GL_PERSPECTIVE_CORRECTION_HINT"/>
+            <enum name="GL_POINT_SMOOTH_HINT"/>
+            <enum name="GL_LINE_SMOOTH_HINT"/>
+            <enum name="GL_FOG_HINT"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_LIGHT_MODEL_AMBIENT"/>
+            <enum name="GL_LIGHT_MODEL_TWO_SIDE"/>
+            <enum name="GL_AMBIENT"/>
+            <enum name="GL_DIFFUSE"/>
+            <enum name="GL_SPECULAR"/>
+            <enum name="GL_POSITION"/>
+            <enum name="GL_SPOT_DIRECTION"/>
+            <enum name="GL_SPOT_EXPONENT"/>
+            <enum name="GL_SPOT_CUTOFF"/>
+            <enum name="GL_CONSTANT_ATTENUATION"/>
+            <enum name="GL_LINEAR_ATTENUATION"/>
+            <enum name="GL_QUADRATIC_ATTENUATION"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_FIXED"/>
+            <enum name="GL_CLEAR"/>
+            <enum name="GL_AND"/>
+            <enum name="GL_AND_REVERSE"/>
+            <enum name="GL_COPY"/>
+            <enum name="GL_AND_INVERTED"/>
+            <enum name="GL_NOOP"/>
+            <enum name="GL_XOR"/>
+            <enum name="GL_OR"/>
+            <enum name="GL_NOR"/>
+            <enum name="GL_EQUIV"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_OR_REVERSE"/>
+            <enum name="GL_COPY_INVERTED"/>
+            <enum name="GL_OR_INVERTED"/>
+            <enum name="GL_NAND"/>
+            <enum name="GL_SET"/>
+            <enum name="GL_EMISSION"/>
+            <enum name="GL_SHININESS"/>
+            <enum name="GL_AMBIENT_AND_DIFFUSE"/>
+            <enum name="GL_MODELVIEW"/>
+            <enum name="GL_PROJECTION"/>
+            <enum name="GL_TEXTURE"/>
+            <enum name="GL_ALPHA"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGBA"/>
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+            <enum name="GL_UNSIGNED_SHORT_5_6_5"/>
+            <enum name="GL_FLAT"/>
+            <enum name="GL_SMOOTH"/>
+            <enum name="GL_KEEP"/>
+            <enum name="GL_REPLACE"/>
+            <enum name="GL_INCR"/>
+            <enum name="GL_DECR"/>
+            <enum name="GL_VENDOR"/>
+            <enum name="GL_RENDERER"/>
+            <enum name="GL_VERSION"/>
+            <enum name="GL_EXTENSIONS"/>
+            <enum name="GL_MODULATE"/>
+            <enum name="GL_DECAL"/>
+            <enum name="GL_ADD"/>
+            <enum name="GL_TEXTURE_ENV_MODE"/>
+            <enum name="GL_TEXTURE_ENV_COLOR"/>
+            <enum name="GL_TEXTURE_ENV"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_NEAREST_MIPMAP_NEAREST"/>
+            <enum name="GL_LINEAR_MIPMAP_NEAREST"/>
+            <enum name="GL_NEAREST_MIPMAP_LINEAR"/>
+            <enum name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_GENERATE_MIPMAP"/>
+            <enum name="GL_TEXTURE0"/>
+            <enum name="GL_TEXTURE1"/>
+            <enum name="GL_TEXTURE2"/>
+            <enum name="GL_TEXTURE3"/>
+            <enum name="GL_TEXTURE4"/>
+            <enum name="GL_TEXTURE5"/>
+            <enum name="GL_TEXTURE6"/>
+            <enum name="GL_TEXTURE7"/>
+            <enum name="GL_TEXTURE8"/>
+            <enum name="GL_TEXTURE9"/>
+            <enum name="GL_TEXTURE10"/>
+            <enum name="GL_TEXTURE11"/>
+            <enum name="GL_TEXTURE12"/>
+            <enum name="GL_TEXTURE13"/>
+            <enum name="GL_TEXTURE14"/>
+            <enum name="GL_TEXTURE15"/>
+            <enum name="GL_TEXTURE16"/>
+            <enum name="GL_TEXTURE17"/>
+            <enum name="GL_TEXTURE18"/>
+            <enum name="GL_TEXTURE19"/>
+            <enum name="GL_TEXTURE20"/>
+            <enum name="GL_TEXTURE21"/>
+            <enum name="GL_TEXTURE22"/>
+            <enum name="GL_TEXTURE23"/>
+            <enum name="GL_TEXTURE24"/>
+            <enum name="GL_TEXTURE25"/>
+            <enum name="GL_TEXTURE26"/>
+            <enum name="GL_TEXTURE27"/>
+            <enum name="GL_TEXTURE28"/>
+            <enum name="GL_TEXTURE29"/>
+            <enum name="GL_TEXTURE30"/>
+            <enum name="GL_TEXTURE31"/>
+            <enum name="GL_ACTIVE_TEXTURE"/>
+            <enum name="GL_CLIENT_ACTIVE_TEXTURE"/>
+            <enum name="GL_REPEAT"/>
+            <enum name="GL_CLAMP_TO_EDGE"/>
+            <enum name="GL_LIGHT0"/>
+            <enum name="GL_LIGHT1"/>
+            <enum name="GL_LIGHT2"/>
+            <enum name="GL_LIGHT3"/>
+            <enum name="GL_LIGHT4"/>
+            <enum name="GL_LIGHT5"/>
+            <enum name="GL_LIGHT6"/>
+            <enum name="GL_LIGHT7"/>
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_VERTEX_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_NORMAL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_COLOR_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_STATIC_DRAW"/>
+            <enum name="GL_DYNAMIC_DRAW"/>
+            <enum name="GL_BUFFER_SIZE"/>
+            <enum name="GL_BUFFER_USAGE"/>
+            <enum name="GL_SUBTRACT"/>
+            <enum name="GL_COMBINE"/>
+            <enum name="GL_COMBINE_RGB"/>
+            <enum name="GL_COMBINE_ALPHA"/>
+            <enum name="GL_RGB_SCALE"/>
+            <enum name="GL_ADD_SIGNED"/>
+            <enum name="GL_INTERPOLATE"/>
+            <enum name="GL_CONSTANT"/>
+            <enum name="GL_PRIMARY_COLOR"/>
+            <enum name="GL_PREVIOUS"/>
+            <enum name="GL_OPERAND0_RGB"/>
+            <enum name="GL_OPERAND1_RGB"/>
+            <enum name="GL_OPERAND2_RGB"/>
+            <enum name="GL_OPERAND0_ALPHA"/>
+            <enum name="GL_OPERAND1_ALPHA"/>
+            <enum name="GL_OPERAND2_ALPHA"/>
+            <enum name="GL_ALPHA_SCALE"/>
+            <enum name="GL_SRC0_RGB"/>
+            <enum name="GL_SRC1_RGB"/>
+            <enum name="GL_SRC2_RGB"/>
+            <enum name="GL_SRC0_ALPHA"/>
+            <enum name="GL_SRC1_ALPHA"/>
+            <enum name="GL_SRC2_ALPHA"/>
+            <enum name="GL_DOT3_RGB"/>
+            <enum name="GL_DOT3_RGBA"/>
+        </require>
+        <require profile="common">
+            <command name="glAlphaFunc"/>
+            <command name="glClearColor"/>
+            <command name="glClearDepthf"/>
+            <command name="glClipPlanef"/>
+            <command name="glColor4f"/>
+            <command name="glDepthRangef"/>
+            <command name="glFogf"/>
+            <command name="glFogfv"/>
+            <command name="glFrustumf"/>
+            <command name="glGetClipPlanef"/>
+            <command name="glGetFloatv"/>
+            <command name="glGetLightfv"/>
+            <command name="glGetMaterialfv"/>
+            <command name="glGetTexEnvfv"/>
+            <command name="glGetTexParameterfv"/>
+            <command name="glLightModelf"/>
+            <command name="glLightModelfv"/>
+            <command name="glLightf"/>
+            <command name="glLightfv"/>
+            <command name="glLineWidth"/>
+            <command name="glLoadMatrixf"/>
+            <command name="glMaterialf"/>
+            <command name="glMaterialfv"/>
+            <command name="glMultMatrixf"/>
+            <command name="glMultiTexCoord4f"/>
+            <command name="glNormal3f"/>
+            <command name="glOrthof"/>
+            <command name="glPointParameterf"/>
+            <command name="glPointParameterfv"/>
+            <command name="glPointSize"/>
+            <command name="glPolygonOffset"/>
+            <command name="glRotatef"/>
+            <command name="glScalef"/>
+            <command name="glTexEnvf"/>
+            <command name="glTexEnvfv"/>
+            <command name="glTexParameterf"/>
+            <command name="glTexParameterfv"/>
+            <command name="glTranslatef"/>
+        </require>
+        <require>
+            <command name="glActiveTexture"/>
+            <command name="glAlphaFuncx"/>
+            <command name="glBindBuffer"/>
+            <command name="glBindTexture"/>
+            <command name="glBlendFunc"/>
+            <command name="glBufferData"/>
+            <command name="glBufferSubData"/>
+            <command name="glClear"/>
+            <command name="glClearColorx"/>
+            <command name="glClearDepthx"/>
+            <command name="glClearStencil"/>
+            <command name="glClientActiveTexture"/>
+            <command name="glClipPlanex"/>
+            <command name="glColor4ub"/>
+            <command name="glColor4x"/>
+            <command name="glColorMask"/>
+            <command name="glColorPointer"/>
+            <command name="glCompressedTexImage2D"/>
+            <command name="glCompressedTexSubImage2D"/>
+            <command name="glCopyTexImage2D"/>
+            <command name="glCopyTexSubImage2D"/>
+            <command name="glCullFace"/>
+            <command name="glDeleteBuffers"/>
+            <command name="glDeleteTextures"/>
+            <command name="glDepthFunc"/>
+            <command name="glDepthMask"/>
+            <command name="glDepthRangex"/>
+            <command name="glDisable"/>
+            <command name="glDisableClientState"/>
+            <command name="glDrawArrays"/>
+            <command name="glDrawElements"/>
+            <command name="glEnable"/>
+            <command name="glEnableClientState"/>
+            <command name="glFinish"/>
+            <command name="glFlush"/>
+            <command name="glFogx"/>
+            <command name="glFogxv"/>
+            <command name="glFrontFace"/>
+            <command name="glFrustumx"/>
+            <command name="glGetBooleanv"/>
+            <command name="glGetBufferParameteriv"/>
+            <command name="glGetClipPlanex"/>
+            <command name="glGenBuffers"/>
+            <command name="glGenTextures"/>
+            <command name="glGetError"/>
+            <command name="glGetFixedv"/>
+            <command name="glGetIntegerv"/>
+            <command name="glGetLightxv"/>
+            <command name="glGetMaterialxv"/>
+            <command name="glGetPointerv"/>
+            <command name="glGetString"/>
+            <command name="glGetTexEnviv"/>
+            <command name="glGetTexEnvxv"/>
+            <command name="glGetTexParameteriv"/>
+            <command name="glGetTexParameterxv"/>
+            <command name="glHint"/>
+            <command name="glIsBuffer"/>
+            <command name="glIsEnabled"/>
+            <command name="glIsTexture"/>
+            <command name="glLightModelx"/>
+            <command name="glLightModelxv"/>
+            <command name="glLightx"/>
+            <command name="glLightxv"/>
+            <command name="glLineWidthx"/>
+            <command name="glLoadIdentity"/>
+            <command name="glLoadMatrixx"/>
+            <command name="glLogicOp"/>
+            <command name="glMaterialx"/>
+            <command name="glMaterialxv"/>
+            <command name="glMatrixMode"/>
+            <command name="glMultMatrixx"/>
+            <command name="glMultiTexCoord4x"/>
+            <command name="glNormal3x"/>
+            <command name="glNormalPointer"/>
+            <command name="glOrthox"/>
+            <command name="glPixelStorei"/>
+            <command name="glPointParameterx"/>
+            <command name="glPointParameterxv"/>
+            <command name="glPointSizex"/>
+            <command name="glPolygonOffsetx"/>
+            <command name="glPopMatrix"/>
+            <command name="glPushMatrix"/>
+            <command name="glReadPixels"/>
+            <command name="glRotatex"/>
+            <command name="glSampleCoverage"/>
+            <command name="glSampleCoveragex"/>
+            <command name="glScalex"/>
+            <command name="glScissor"/>
+            <command name="glShadeModel"/>
+            <command name="glStencilFunc"/>
+            <command name="glStencilMask"/>
+            <command name="glStencilOp"/>
+            <command name="glTexCoordPointer"/>
+            <command name="glTexEnvi"/>
+            <command name="glTexEnvx"/>
+            <command name="glTexEnviv"/>
+            <command name="glTexEnvxv"/>
+            <command name="glTexImage2D"/>
+            <command name="glTexParameteri"/>
+            <command name="glTexParameterx"/>
+            <command name="glTexParameteriv"/>
+            <command name="glTexParameterxv"/>
+            <command name="glTexSubImage2D"/>
+            <command name="glTranslatex"/>
+            <command name="glVertexPointer"/>
+            <command name="glViewport"/>
+        </require>
+    </feature>
+    <feature api="gles2" name="GL_ES_VERSION_2_0" number="2.0">
+        <require comment="Not used by the API, for compatibility with old gl2.h">
+            <type name="GLbyte"/>
+            <type name="GLclampf"/>
+            <type name="GLfixed"/>
+            <type name="GLshort"/>
+            <type name="GLushort"/>
+            <type name="GLvoid" comment="No longer used in headers"/>
+        </require>
+        <require comment="Not used by the API; put here so this type doesn't need to be declared in gl2ext.h">
+            <type name="GLsync"/>
+            <type name="GLint64"/>
+            <type name="GLuint64"/>
+        </require>
+        <require>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_FALSE"/>
+            <enum name="GL_TRUE"/>
+            <enum name="GL_POINTS"/>
+            <enum name="GL_LINES"/>
+            <enum name="GL_LINE_LOOP"/>
+            <enum name="GL_LINE_STRIP"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_TRIANGLE_STRIP"/>
+            <enum name="GL_TRIANGLE_FAN"/>
+            <enum name="GL_ZERO"/>
+            <enum name="GL_ONE"/>
+            <enum name="GL_SRC_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+            <enum name="GL_SRC_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+            <enum name="GL_DST_ALPHA"/>
+            <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+            <enum name="GL_DST_COLOR"/>
+            <enum name="GL_ONE_MINUS_DST_COLOR"/>
+            <enum name="GL_SRC_ALPHA_SATURATE"/>
+            <enum name="GL_FUNC_ADD"/>
+            <enum name="GL_BLEND_EQUATION"/>
+            <enum name="GL_BLEND_EQUATION_RGB"/>
+            <enum name="GL_BLEND_EQUATION_ALPHA"/>
+            <enum name="GL_FUNC_SUBTRACT"/>
+            <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+            <enum name="GL_BLEND_DST_RGB"/>
+            <enum name="GL_BLEND_SRC_RGB"/>
+            <enum name="GL_BLEND_DST_ALPHA"/>
+            <enum name="GL_BLEND_SRC_ALPHA"/>
+            <enum name="GL_CONSTANT_COLOR"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+            <enum name="GL_CONSTANT_ALPHA"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+            <enum name="GL_BLEND_COLOR"/>
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_STREAM_DRAW"/>
+            <enum name="GL_STATIC_DRAW"/>
+            <enum name="GL_DYNAMIC_DRAW"/>
+            <enum name="GL_BUFFER_SIZE"/>
+            <enum name="GL_BUFFER_USAGE"/>
+            <enum name="GL_CURRENT_VERTEX_ATTRIB"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+            <enum name="GL_SAMPLE_COVERAGE"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_INVALID_ENUM"/>
+            <enum name="GL_INVALID_VALUE"/>
+            <enum name="GL_INVALID_OPERATION"/>
+            <enum name="GL_OUT_OF_MEMORY"/>
+            <enum name="GL_CW"/>
+            <enum name="GL_CCW"/>
+            <enum name="GL_LINE_WIDTH"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+            <enum name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <enum name="GL_CULL_FACE_MODE"/>
+            <enum name="GL_FRONT_FACE"/>
+            <enum name="GL_DEPTH_RANGE"/>
+            <enum name="GL_DEPTH_WRITEMASK"/>
+            <enum name="GL_DEPTH_CLEAR_VALUE"/>
+            <enum name="GL_DEPTH_FUNC"/>
+            <enum name="GL_STENCIL_CLEAR_VALUE"/>
+            <enum name="GL_STENCIL_FUNC"/>
+            <enum name="GL_STENCIL_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_REF"/>
+            <enum name="GL_STENCIL_VALUE_MASK"/>
+            <enum name="GL_STENCIL_WRITEMASK"/>
+            <enum name="GL_STENCIL_BACK_FUNC"/>
+            <enum name="GL_STENCIL_BACK_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_BACK_REF"/>
+            <enum name="GL_STENCIL_BACK_VALUE_MASK"/>
+            <enum name="GL_STENCIL_BACK_WRITEMASK"/>
+            <enum name="GL_VIEWPORT"/>
+            <enum name="GL_SCISSOR_BOX"/>
+            <enum name="GL_COLOR_CLEAR_VALUE"/>
+            <enum name="GL_COLOR_WRITEMASK"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_MAX_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_VIEWPORT_DIMS"/>
+            <enum name="GL_SUBPIXEL_BITS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_POLYGON_OFFSET_UNITS"/>
+            <enum name="GL_POLYGON_OFFSET_FACTOR"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLE_COVERAGE_VALUE"/>
+            <enum name="GL_SAMPLE_COVERAGE_INVERT"/>
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_DONT_CARE"/>
+            <enum name="GL_FASTEST"/>
+            <enum name="GL_NICEST"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_FIXED"/>
+            <enum name="GL_DEPTH_COMPONENT"/>
+            <enum name="GL_ALPHA"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGBA"/>
+            <enum name="GL_LUMINANCE"/>
+            <enum name="GL_LUMINANCE_ALPHA"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+            <enum name="GL_UNSIGNED_SHORT_5_6_5"/>
+            <enum name="GL_FRAGMENT_SHADER"/>
+            <enum name="GL_VERTEX_SHADER"/>
+            <enum name="GL_MAX_VERTEX_ATTRIBS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+            <enum name="GL_MAX_VARYING_VECTORS"/>
+            <enum name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+            <enum name="GL_SHADER_TYPE"/>
+            <enum name="GL_DELETE_STATUS"/>
+            <enum name="GL_LINK_STATUS"/>
+            <enum name="GL_VALIDATE_STATUS"/>
+            <enum name="GL_ATTACHED_SHADERS"/>
+            <enum name="GL_ACTIVE_UNIFORMS"/>
+            <enum name="GL_ACTIVE_UNIFORM_MAX_LENGTH"/>
+            <enum name="GL_ACTIVE_ATTRIBUTES"/>
+            <enum name="GL_ACTIVE_ATTRIBUTE_MAX_LENGTH"/>
+            <enum name="GL_SHADING_LANGUAGE_VERSION"/>
+            <enum name="GL_CURRENT_PROGRAM"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_NOTEQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_KEEP"/>
+            <enum name="GL_REPLACE"/>
+            <enum name="GL_INCR"/>
+            <enum name="GL_DECR"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_INCR_WRAP"/>
+            <enum name="GL_DECR_WRAP"/>
+            <enum name="GL_VENDOR"/>
+            <enum name="GL_RENDERER"/>
+            <enum name="GL_VERSION"/>
+            <enum name="GL_EXTENSIONS"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_NEAREST_MIPMAP_NEAREST"/>
+            <enum name="GL_LINEAR_MIPMAP_NEAREST"/>
+            <enum name="GL_NEAREST_MIPMAP_LINEAR"/>
+            <enum name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_TEXTURE"/>
+            <enum name="GL_TEXTURE_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z"/>
+            <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"/>
+            <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE"/>
+            <enum name="GL_TEXTURE0"/>
+            <enum name="GL_TEXTURE1"/>
+            <enum name="GL_TEXTURE2"/>
+            <enum name="GL_TEXTURE3"/>
+            <enum name="GL_TEXTURE4"/>
+            <enum name="GL_TEXTURE5"/>
+            <enum name="GL_TEXTURE6"/>
+            <enum name="GL_TEXTURE7"/>
+            <enum name="GL_TEXTURE8"/>
+            <enum name="GL_TEXTURE9"/>
+            <enum name="GL_TEXTURE10"/>
+            <enum name="GL_TEXTURE11"/>
+            <enum name="GL_TEXTURE12"/>
+            <enum name="GL_TEXTURE13"/>
+            <enum name="GL_TEXTURE14"/>
+            <enum name="GL_TEXTURE15"/>
+            <enum name="GL_TEXTURE16"/>
+            <enum name="GL_TEXTURE17"/>
+            <enum name="GL_TEXTURE18"/>
+            <enum name="GL_TEXTURE19"/>
+            <enum name="GL_TEXTURE20"/>
+            <enum name="GL_TEXTURE21"/>
+            <enum name="GL_TEXTURE22"/>
+            <enum name="GL_TEXTURE23"/>
+            <enum name="GL_TEXTURE24"/>
+            <enum name="GL_TEXTURE25"/>
+            <enum name="GL_TEXTURE26"/>
+            <enum name="GL_TEXTURE27"/>
+            <enum name="GL_TEXTURE28"/>
+            <enum name="GL_TEXTURE29"/>
+            <enum name="GL_TEXTURE30"/>
+            <enum name="GL_TEXTURE31"/>
+            <enum name="GL_ACTIVE_TEXTURE"/>
+            <enum name="GL_REPEAT"/>
+            <enum name="GL_CLAMP_TO_EDGE"/>
+            <enum name="GL_MIRRORED_REPEAT"/>
+            <enum name="GL_FLOAT_VEC2"/>
+            <enum name="GL_FLOAT_VEC3"/>
+            <enum name="GL_FLOAT_VEC4"/>
+            <enum name="GL_INT_VEC2"/>
+            <enum name="GL_INT_VEC3"/>
+            <enum name="GL_INT_VEC4"/>
+            <enum name="GL_BOOL"/>
+            <enum name="GL_BOOL_VEC2"/>
+            <enum name="GL_BOOL_VEC3"/>
+            <enum name="GL_BOOL_VEC4"/>
+            <enum name="GL_FLOAT_MAT2"/>
+            <enum name="GL_FLOAT_MAT3"/>
+            <enum name="GL_FLOAT_MAT4"/>
+            <enum name="GL_SAMPLER_2D"/>
+            <enum name="GL_SAMPLER_CUBE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_POINTER"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+            <enum name="GL_COMPILE_STATUS"/>
+            <enum name="GL_INFO_LOG_LENGTH"/>
+            <enum name="GL_SHADER_SOURCE_LENGTH"/>
+            <enum name="GL_SHADER_COMPILER"/>
+            <enum name="GL_SHADER_BINARY_FORMATS"/>
+            <enum name="GL_NUM_SHADER_BINARY_FORMATS"/>
+            <enum name="GL_LOW_FLOAT"/>
+            <enum name="GL_MEDIUM_FLOAT"/>
+            <enum name="GL_HIGH_FLOAT"/>
+            <enum name="GL_LOW_INT"/>
+            <enum name="GL_MEDIUM_INT"/>
+            <enum name="GL_HIGH_INT"/>
+            <enum name="GL_FRAMEBUFFER"/>
+            <enum name="GL_RENDERBUFFER"/>
+            <enum name="GL_RGBA4"/>
+            <enum name="GL_RGB5_A1"/>
+            <enum name="GL_RGB565"/>
+            <enum name="GL_DEPTH_COMPONENT16"/>
+            <enum name="GL_STENCIL_INDEX8"/>
+            <enum name="GL_RENDERBUFFER_WIDTH"/>
+            <enum name="GL_RENDERBUFFER_HEIGHT"/>
+            <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+            <enum name="GL_RENDERBUFFER_RED_SIZE"/>
+            <enum name="GL_RENDERBUFFER_GREEN_SIZE"/>
+            <enum name="GL_RENDERBUFFER_BLUE_SIZE"/>
+            <enum name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+            <enum name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+            <enum name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"/>
+            <enum name="GL_COLOR_ATTACHMENT0"/>
+            <enum name="GL_DEPTH_ATTACHMENT"/>
+            <enum name="GL_STENCIL_ATTACHMENT"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_FRAMEBUFFER_COMPLETE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS"/>
+            <enum name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+            <enum name="GL_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RENDERBUFFER_BINDING"/>
+            <enum name="GL_MAX_RENDERBUFFER_SIZE"/>
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+            <command name="glActiveTexture"/>
+            <command name="glAttachShader"/>
+            <command name="glBindAttribLocation"/>
+            <command name="glBindBuffer"/>
+            <command name="glBindFramebuffer"/>
+            <command name="glBindRenderbuffer"/>
+            <command name="glBindTexture"/>
+            <command name="glBlendColor"/>
+            <command name="glBlendEquation"/>
+            <command name="glBlendEquationSeparate"/>
+            <command name="glBlendFunc"/>
+            <command name="glBlendFuncSeparate"/>
+            <command name="glBufferData"/>
+            <command name="glBufferSubData"/>
+            <command name="glCheckFramebufferStatus"/>
+            <command name="glClear"/>
+            <command name="glClearColor"/>
+            <command name="glClearDepthf"/>
+            <command name="glClearStencil"/>
+            <command name="glColorMask"/>
+            <command name="glCompileShader"/>
+            <command name="glCompressedTexImage2D"/>
+            <command name="glCompressedTexSubImage2D"/>
+            <command name="glCopyTexImage2D"/>
+            <command name="glCopyTexSubImage2D"/>
+            <command name="glCreateProgram"/>
+            <command name="glCreateShader"/>
+            <command name="glCullFace"/>
+            <command name="glDeleteBuffers"/>
+            <command name="glDeleteFramebuffers"/>
+            <command name="glDeleteProgram"/>
+            <command name="glDeleteRenderbuffers"/>
+            <command name="glDeleteShader"/>
+            <command name="glDeleteTextures"/>
+            <command name="glDepthFunc"/>
+            <command name="glDepthMask"/>
+            <command name="glDepthRangef"/>
+            <command name="glDetachShader"/>
+            <command name="glDisable"/>
+            <command name="glDisableVertexAttribArray"/>
+            <command name="glDrawArrays"/>
+            <command name="glDrawElements"/>
+            <command name="glEnable"/>
+            <command name="glEnableVertexAttribArray"/>
+            <command name="glFinish"/>
+            <command name="glFlush"/>
+            <command name="glFramebufferRenderbuffer"/>
+            <command name="glFramebufferTexture2D"/>
+            <command name="glFrontFace"/>
+            <command name="glGenBuffers"/>
+            <command name="glGenerateMipmap"/>
+            <command name="glGenFramebuffers"/>
+            <command name="glGenRenderbuffers"/>
+            <command name="glGenTextures"/>
+            <command name="glGetActiveAttrib"/>
+            <command name="glGetActiveUniform"/>
+            <command name="glGetAttachedShaders"/>
+            <command name="glGetAttribLocation"/>
+            <command name="glGetBooleanv"/>
+            <command name="glGetBufferParameteriv"/>
+            <command name="glGetError"/>
+            <command name="glGetFloatv"/>
+            <command name="glGetFramebufferAttachmentParameteriv"/>
+            <command name="glGetIntegerv"/>
+            <command name="glGetProgramiv"/>
+            <command name="glGetProgramInfoLog"/>
+            <command name="glGetRenderbufferParameteriv"/>
+            <command name="glGetShaderiv"/>
+            <command name="glGetShaderInfoLog"/>
+            <command name="glGetShaderPrecisionFormat"/>
+            <command name="glGetShaderSource"/>
+            <command name="glGetString"/>
+            <command name="glGetTexParameterfv"/>
+            <command name="glGetTexParameteriv"/>
+            <command name="glGetUniformfv"/>
+            <command name="glGetUniformiv"/>
+            <command name="glGetUniformLocation"/>
+            <command name="glGetVertexAttribfv"/>
+            <command name="glGetVertexAttribiv"/>
+            <command name="glGetVertexAttribPointerv"/>
+            <command name="glHint"/>
+            <command name="glIsBuffer"/>
+            <command name="glIsEnabled"/>
+            <command name="glIsFramebuffer"/>
+            <command name="glIsProgram"/>
+            <command name="glIsRenderbuffer"/>
+            <command name="glIsShader"/>
+            <command name="glIsTexture"/>
+            <command name="glLineWidth"/>
+            <command name="glLinkProgram"/>
+            <command name="glPixelStorei"/>
+            <command name="glPolygonOffset"/>
+            <command name="glReadPixels"/>
+            <command name="glReleaseShaderCompiler"/>
+            <command name="glRenderbufferStorage"/>
+            <command name="glSampleCoverage"/>
+            <command name="glScissor"/>
+            <command name="glShaderBinary"/>
+            <command name="glShaderSource"/>
+            <command name="glStencilFunc"/>
+            <command name="glStencilFuncSeparate"/>
+            <command name="glStencilMask"/>
+            <command name="glStencilMaskSeparate"/>
+            <command name="glStencilOp"/>
+            <command name="glStencilOpSeparate"/>
+            <command name="glTexImage2D"/>
+            <command name="glTexParameterf"/>
+            <command name="glTexParameterfv"/>
+            <command name="glTexParameteri"/>
+            <command name="glTexParameteriv"/>
+            <command name="glTexSubImage2D"/>
+            <command name="glUniform1f"/>
+            <command name="glUniform1fv"/>
+            <command name="glUniform1i"/>
+            <command name="glUniform1iv"/>
+            <command name="glUniform2f"/>
+            <command name="glUniform2fv"/>
+            <command name="glUniform2i"/>
+            <command name="glUniform2iv"/>
+            <command name="glUniform3f"/>
+            <command name="glUniform3fv"/>
+            <command name="glUniform3i"/>
+            <command name="glUniform3iv"/>
+            <command name="glUniform4f"/>
+            <command name="glUniform4fv"/>
+            <command name="glUniform4i"/>
+            <command name="glUniform4iv"/>
+            <command name="glUniformMatrix2fv"/>
+            <command name="glUniformMatrix3fv"/>
+            <command name="glUniformMatrix4fv"/>
+            <command name="glUseProgram"/>
+            <command name="glValidateProgram"/>
+            <command name="glVertexAttrib1f"/>
+            <command name="glVertexAttrib1fv"/>
+            <command name="glVertexAttrib2f"/>
+            <command name="glVertexAttrib2fv"/>
+            <command name="glVertexAttrib3f"/>
+            <command name="glVertexAttrib3fv"/>
+            <command name="glVertexAttrib4f"/>
+            <command name="glVertexAttrib4fv"/>
+            <command name="glVertexAttribPointer"/>
+            <command name="glViewport"/>
+        </require>
+    </feature>
+    <feature api="gles2" name="GL_ES_VERSION_3_0" number="3.0">
+        <require comment="Not used by the API, for compatibility with old gl2.h">
+            <type name="GLhalf"/>
+        </require>
+        <require>
+            <enum name="GL_READ_BUFFER"/>
+            <enum name="GL_UNPACK_ROW_LENGTH"/>
+            <enum name="GL_UNPACK_SKIP_ROWS"/>
+            <enum name="GL_UNPACK_SKIP_PIXELS"/>
+            <enum name="GL_PACK_ROW_LENGTH"/>
+            <enum name="GL_PACK_SKIP_ROWS"/>
+            <enum name="GL_PACK_SKIP_PIXELS"/>
+            <enum name="GL_COLOR"/>
+            <enum name="GL_DEPTH"/>
+            <enum name="GL_STENCIL"/>
+            <enum name="GL_RED"/>
+            <enum name="GL_RGB8"/>
+            <enum name="GL_RGBA8"/>
+            <enum name="GL_RGB10_A2"/>
+            <enum name="GL_TEXTURE_BINDING_3D"/>
+            <enum name="GL_UNPACK_SKIP_IMAGES"/>
+            <enum name="GL_UNPACK_IMAGE_HEIGHT"/>
+            <enum name="GL_TEXTURE_3D"/>
+            <enum name="GL_TEXTURE_WRAP_R"/>
+            <enum name="GL_MAX_3D_TEXTURE_SIZE"/>
+            <enum name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+            <enum name="GL_MAX_ELEMENTS_VERTICES"/>
+            <enum name="GL_MAX_ELEMENTS_INDICES"/>
+            <enum name="GL_TEXTURE_MIN_LOD"/>
+            <enum name="GL_TEXTURE_MAX_LOD"/>
+            <enum name="GL_TEXTURE_BASE_LEVEL"/>
+            <enum name="GL_TEXTURE_MAX_LEVEL"/>
+            <enum name="GL_MIN"/>
+            <enum name="GL_MAX"/>
+            <enum name="GL_DEPTH_COMPONENT24"/>
+            <enum name="GL_MAX_TEXTURE_LOD_BIAS"/>
+            <enum name="GL_TEXTURE_COMPARE_MODE"/>
+            <enum name="GL_TEXTURE_COMPARE_FUNC"/>
+            <enum name="GL_CURRENT_QUERY"/>
+            <enum name="GL_QUERY_RESULT"/>
+            <enum name="GL_QUERY_RESULT_AVAILABLE"/>
+            <enum name="GL_BUFFER_MAPPED"/>
+            <enum name="GL_BUFFER_MAP_POINTER"/>
+            <enum name="GL_STREAM_READ"/>
+            <enum name="GL_STREAM_COPY"/>
+            <enum name="GL_STATIC_READ"/>
+            <enum name="GL_STATIC_COPY"/>
+            <enum name="GL_DYNAMIC_READ"/>
+            <enum name="GL_DYNAMIC_COPY"/>
+            <enum name="GL_MAX_DRAW_BUFFERS"/>
+            <enum name="GL_DRAW_BUFFER0"/>
+            <enum name="GL_DRAW_BUFFER1"/>
+            <enum name="GL_DRAW_BUFFER2"/>
+            <enum name="GL_DRAW_BUFFER3"/>
+            <enum name="GL_DRAW_BUFFER4"/>
+            <enum name="GL_DRAW_BUFFER5"/>
+            <enum name="GL_DRAW_BUFFER6"/>
+            <enum name="GL_DRAW_BUFFER7"/>
+            <enum name="GL_DRAW_BUFFER8"/>
+            <enum name="GL_DRAW_BUFFER9"/>
+            <enum name="GL_DRAW_BUFFER10"/>
+            <enum name="GL_DRAW_BUFFER11"/>
+            <enum name="GL_DRAW_BUFFER12"/>
+            <enum name="GL_DRAW_BUFFER13"/>
+            <enum name="GL_DRAW_BUFFER14"/>
+            <enum name="GL_DRAW_BUFFER15"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_SAMPLER_3D"/>
+            <enum name="GL_SAMPLER_2D_SHADOW"/>
+            <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT"/>
+            <enum name="GL_PIXEL_PACK_BUFFER"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER"/>
+            <enum name="GL_PIXEL_PACK_BUFFER_BINDING"/>
+            <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING"/>
+            <enum name="GL_FLOAT_MAT2x3"/>
+            <enum name="GL_FLOAT_MAT2x4"/>
+            <enum name="GL_FLOAT_MAT3x2"/>
+            <enum name="GL_FLOAT_MAT3x4"/>
+            <enum name="GL_FLOAT_MAT4x2"/>
+            <enum name="GL_FLOAT_MAT4x3"/>
+            <enum name="GL_SRGB"/>
+            <enum name="GL_SRGB8"/>
+            <enum name="GL_SRGB8_ALPHA8"/>
+            <enum name="GL_COMPARE_REF_TO_TEXTURE"/>
+            <enum name="GL_MAJOR_VERSION"/>
+            <enum name="GL_MINOR_VERSION"/>
+            <enum name="GL_NUM_EXTENSIONS"/>
+            <enum name="GL_RGBA32F"/>
+            <enum name="GL_RGB32F"/>
+            <enum name="GL_RGBA16F"/>
+            <enum name="GL_RGB16F"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER"/>
+            <enum name="GL_MAX_ARRAY_TEXTURE_LAYERS"/>
+            <enum name="GL_MIN_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_MAX_PROGRAM_TEXEL_OFFSET"/>
+            <enum name="GL_MAX_VARYING_COMPONENTS"/>
+            <enum name="GL_TEXTURE_2D_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+            <enum name="GL_R11F_G11F_B10F"/>
+            <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+            <enum name="GL_RGB9_E5"/>
+            <enum name="GL_UNSIGNED_INT_5_9_9_9_REV"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYINGS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"/>
+            <enum name="GL_RASTERIZER_DISCARD"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS"/>
+            <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS"/>
+            <enum name="GL_INTERLEAVED_ATTRIBS"/>
+            <enum name="GL_SEPARATE_ATTRIBS"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"/>
+            <enum name="GL_RGBA32UI"/>
+            <enum name="GL_RGB32UI"/>
+            <enum name="GL_RGBA16UI"/>
+            <enum name="GL_RGB16UI"/>
+            <enum name="GL_RGBA8UI"/>
+            <enum name="GL_RGB8UI"/>
+            <enum name="GL_RGBA32I"/>
+            <enum name="GL_RGB32I"/>
+            <enum name="GL_RGBA16I"/>
+            <enum name="GL_RGB16I"/>
+            <enum name="GL_RGBA8I"/>
+            <enum name="GL_RGB8I"/>
+            <enum name="GL_RED_INTEGER"/>
+            <enum name="GL_RGB_INTEGER"/>
+            <enum name="GL_RGBA_INTEGER"/>
+            <enum name="GL_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_SAMPLER_2D_ARRAY_SHADOW"/>
+            <enum name="GL_SAMPLER_CUBE_SHADOW"/>
+            <enum name="GL_UNSIGNED_INT_VEC2"/>
+            <enum name="GL_UNSIGNED_INT_VEC3"/>
+            <enum name="GL_UNSIGNED_INT_VEC4"/>
+            <enum name="GL_INT_SAMPLER_2D"/>
+            <enum name="GL_INT_SAMPLER_3D"/>
+            <enum name="GL_INT_SAMPLER_CUBE"/>
+            <enum name="GL_INT_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_3D"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_ARRAY"/>
+            <enum name="GL_BUFFER_ACCESS_FLAGS"/>
+            <enum name="GL_BUFFER_MAP_LENGTH"/>
+            <enum name="GL_BUFFER_MAP_OFFSET"/>
+            <enum name="GL_DEPTH_COMPONENT32F"/>
+            <enum name="GL_DEPTH32F_STENCIL8"/>
+            <enum name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT"/>
+            <enum name="GL_FRAMEBUFFER_UNDEFINED"/>
+            <enum name="GL_DEPTH_STENCIL_ATTACHMENT"/>
+            <enum name="GL_DEPTH_STENCIL"/>
+            <enum name="GL_UNSIGNED_INT_24_8"/>
+            <enum name="GL_DEPTH24_STENCIL8"/>
+            <enum name="GL_UNSIGNED_NORMALIZED"/>
+            <enum name="GL_DRAW_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_READ_FRAMEBUFFER"/>
+            <enum name="GL_DRAW_FRAMEBUFFER"/>
+            <enum name="GL_READ_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RENDERBUFFER_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+            <enum name="GL_MAX_COLOR_ATTACHMENTS"/>
+            <enum name="GL_COLOR_ATTACHMENT1"/>
+            <enum name="GL_COLOR_ATTACHMENT2"/>
+            <enum name="GL_COLOR_ATTACHMENT3"/>
+            <enum name="GL_COLOR_ATTACHMENT4"/>
+            <enum name="GL_COLOR_ATTACHMENT5"/>
+            <enum name="GL_COLOR_ATTACHMENT6"/>
+            <enum name="GL_COLOR_ATTACHMENT7"/>
+            <enum name="GL_COLOR_ATTACHMENT8"/>
+            <enum name="GL_COLOR_ATTACHMENT9"/>
+            <enum name="GL_COLOR_ATTACHMENT10"/>
+            <enum name="GL_COLOR_ATTACHMENT11"/>
+            <enum name="GL_COLOR_ATTACHMENT12"/>
+            <enum name="GL_COLOR_ATTACHMENT13"/>
+            <enum name="GL_COLOR_ATTACHMENT14"/>
+            <enum name="GL_COLOR_ATTACHMENT15"/>
+            <enum name="GL_COLOR_ATTACHMENT16"/>
+            <enum name="GL_COLOR_ATTACHMENT17"/>
+            <enum name="GL_COLOR_ATTACHMENT18"/>
+            <enum name="GL_COLOR_ATTACHMENT19"/>
+            <enum name="GL_COLOR_ATTACHMENT20"/>
+            <enum name="GL_COLOR_ATTACHMENT21"/>
+            <enum name="GL_COLOR_ATTACHMENT22"/>
+            <enum name="GL_COLOR_ATTACHMENT23"/>
+            <enum name="GL_COLOR_ATTACHMENT24"/>
+            <enum name="GL_COLOR_ATTACHMENT25"/>
+            <enum name="GL_COLOR_ATTACHMENT26"/>
+            <enum name="GL_COLOR_ATTACHMENT27"/>
+            <enum name="GL_COLOR_ATTACHMENT28"/>
+            <enum name="GL_COLOR_ATTACHMENT29"/>
+            <enum name="GL_COLOR_ATTACHMENT30"/>
+            <enum name="GL_COLOR_ATTACHMENT31"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+            <enum name="GL_MAX_SAMPLES"/>
+            <enum name="GL_HALF_FLOAT"/>
+            <enum name="GL_MAP_READ_BIT"/>
+            <enum name="GL_MAP_WRITE_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_RANGE_BIT"/>
+            <enum name="GL_MAP_INVALIDATE_BUFFER_BIT"/>
+            <enum name="GL_MAP_FLUSH_EXPLICIT_BIT"/>
+            <enum name="GL_MAP_UNSYNCHRONIZED_BIT"/>
+            <enum name="GL_RG"/>
+            <enum name="GL_RG_INTEGER"/>
+            <enum name="GL_R8"/>
+            <enum name="GL_RG8"/>
+            <enum name="GL_R16F"/>
+            <enum name="GL_R32F"/>
+            <enum name="GL_RG16F"/>
+            <enum name="GL_RG32F"/>
+            <enum name="GL_R8I"/>
+            <enum name="GL_R8UI"/>
+            <enum name="GL_R16I"/>
+            <enum name="GL_R16UI"/>
+            <enum name="GL_R32I"/>
+            <enum name="GL_R32UI"/>
+            <enum name="GL_RG8I"/>
+            <enum name="GL_RG8UI"/>
+            <enum name="GL_RG16I"/>
+            <enum name="GL_RG16UI"/>
+            <enum name="GL_RG32I"/>
+            <enum name="GL_RG32UI"/>
+            <enum name="GL_VERTEX_ARRAY_BINDING"/>
+            <enum name="GL_R8_SNORM"/>
+            <enum name="GL_RG8_SNORM"/>
+            <enum name="GL_RGB8_SNORM"/>
+            <enum name="GL_RGBA8_SNORM"/>
+            <enum name="GL_SIGNED_NORMALIZED"/>
+            <enum name="GL_PRIMITIVE_RESTART_FIXED_INDEX"/>
+            <enum name="GL_COPY_READ_BUFFER"/>
+            <enum name="GL_COPY_WRITE_BUFFER"/>
+            <enum name="GL_COPY_READ_BUFFER_BINDING"/>
+            <enum name="GL_COPY_WRITE_BUFFER_BINDING"/>
+            <enum name="GL_UNIFORM_BUFFER"/>
+            <enum name="GL_UNIFORM_BUFFER_BINDING"/>
+            <enum name="GL_UNIFORM_BUFFER_START"/>
+            <enum name="GL_UNIFORM_BUFFER_SIZE"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_UNIFORM_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_UNIFORM_BLOCK_SIZE"/>
+            <enum name="GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS"/>
+            <enum name="GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH"/>
+            <enum name="GL_ACTIVE_UNIFORM_BLOCKS"/>
+            <enum name="GL_UNIFORM_TYPE"/>
+            <enum name="GL_UNIFORM_SIZE"/>
+            <enum name="GL_UNIFORM_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_INDEX"/>
+            <enum name="GL_UNIFORM_OFFSET"/>
+            <enum name="GL_UNIFORM_ARRAY_STRIDE"/>
+            <enum name="GL_UNIFORM_MATRIX_STRIDE"/>
+            <enum name="GL_UNIFORM_IS_ROW_MAJOR"/>
+            <enum name="GL_UNIFORM_BLOCK_BINDING"/>
+            <enum name="GL_UNIFORM_BLOCK_DATA_SIZE"/>
+            <enum name="GL_UNIFORM_BLOCK_NAME_LENGTH"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS"/>
+            <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_INVALID_INDEX"/>
+            <enum name="GL_MAX_VERTEX_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_FRAGMENT_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_SERVER_WAIT_TIMEOUT"/>
+            <enum name="GL_OBJECT_TYPE"/>
+            <enum name="GL_SYNC_CONDITION"/>
+            <enum name="GL_SYNC_STATUS"/>
+            <enum name="GL_SYNC_FLAGS"/>
+            <enum name="GL_SYNC_FENCE"/>
+            <enum name="GL_SYNC_GPU_COMMANDS_COMPLETE"/>
+            <enum name="GL_UNSIGNALED"/>
+            <enum name="GL_SIGNALED"/>
+            <enum name="GL_ALREADY_SIGNALED"/>
+            <enum name="GL_TIMEOUT_EXPIRED"/>
+            <enum name="GL_CONDITION_SATISFIED"/>
+            <enum name="GL_WAIT_FAILED"/>
+            <enum name="GL_SYNC_FLUSH_COMMANDS_BIT"/>
+            <enum name="GL_TIMEOUT_IGNORED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR"/>
+            <enum name="GL_ANY_SAMPLES_PASSED"/>
+            <enum name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE"/>
+            <enum name="GL_SAMPLER_BINDING"/>
+            <enum name="GL_RGB10_A2UI"/>
+            <enum name="GL_TEXTURE_SWIZZLE_R"/>
+            <enum name="GL_TEXTURE_SWIZZLE_G"/>
+            <enum name="GL_TEXTURE_SWIZZLE_B"/>
+            <enum name="GL_TEXTURE_SWIZZLE_A"/>
+            <enum name="GL_GREEN"/>
+            <enum name="GL_BLUE"/>
+            <enum name="GL_INT_2_10_10_10_REV"/>
+            <enum name="GL_TRANSFORM_FEEDBACK"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_PAUSED"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_ACTIVE"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BINDING"/>
+            <enum name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+            <enum name="GL_PROGRAM_BINARY_LENGTH"/>
+            <enum name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_COMPRESSED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_R11_EAC"/>
+            <enum name="GL_COMPRESSED_RG11_EAC"/>
+            <enum name="GL_COMPRESSED_SIGNED_RG11_EAC"/>
+            <enum name="GL_COMPRESSED_RGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_ETC2"/>
+            <enum name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+            <enum name="GL_COMPRESSED_RGBA8_ETC2_EAC"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"/>
+            <enum name="GL_TEXTURE_IMMUTABLE_FORMAT"/>
+            <enum name="GL_MAX_ELEMENT_INDEX"/>
+            <enum name="GL_NUM_SAMPLE_COUNTS"/>
+            <enum name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+            <command name="glReadBuffer"/>
+            <command name="glDrawRangeElements"/>
+            <command name="glTexImage3D"/>
+            <command name="glTexSubImage3D"/>
+            <command name="glCopyTexSubImage3D"/>
+            <command name="glCompressedTexImage3D"/>
+            <command name="glCompressedTexSubImage3D"/>
+            <command name="glGenQueries"/>
+            <command name="glDeleteQueries"/>
+            <command name="glIsQuery"/>
+            <command name="glBeginQuery"/>
+            <command name="glEndQuery"/>
+            <command name="glGetQueryiv"/>
+            <command name="glGetQueryObjectuiv"/>
+            <command name="glUnmapBuffer"/>
+            <command name="glGetBufferPointerv"/>
+            <command name="glDrawBuffers"/>
+            <command name="glUniformMatrix2x3fv"/>
+            <command name="glUniformMatrix3x2fv"/>
+            <command name="glUniformMatrix2x4fv"/>
+            <command name="glUniformMatrix4x2fv"/>
+            <command name="glUniformMatrix3x4fv"/>
+            <command name="glUniformMatrix4x3fv"/>
+            <command name="glBlitFramebuffer"/>
+            <command name="glRenderbufferStorageMultisample"/>
+            <command name="glFramebufferTextureLayer"/>
+            <command name="glMapBufferRange"/>
+            <command name="glFlushMappedBufferRange"/>
+            <command name="glBindVertexArray"/>
+            <command name="glDeleteVertexArrays"/>
+            <command name="glGenVertexArrays"/>
+            <command name="glIsVertexArray"/>
+            <command name="glGetIntegeri_v"/>
+            <command name="glBeginTransformFeedback"/>
+            <command name="glEndTransformFeedback"/>
+            <command name="glBindBufferRange"/>
+            <command name="glBindBufferBase"/>
+            <command name="glTransformFeedbackVaryings"/>
+            <command name="glGetTransformFeedbackVarying"/>
+            <command name="glVertexAttribIPointer"/>
+            <command name="glGetVertexAttribIiv"/>
+            <command name="glGetVertexAttribIuiv"/>
+            <command name="glVertexAttribI4i"/>
+            <command name="glVertexAttribI4ui"/>
+            <command name="glVertexAttribI4iv"/>
+            <command name="glVertexAttribI4uiv"/>
+            <command name="glGetUniformuiv"/>
+            <command name="glGetFragDataLocation"/>
+            <command name="glUniform1ui"/>
+            <command name="glUniform2ui"/>
+            <command name="glUniform3ui"/>
+            <command name="glUniform4ui"/>
+            <command name="glUniform1uiv"/>
+            <command name="glUniform2uiv"/>
+            <command name="glUniform3uiv"/>
+            <command name="glUniform4uiv"/>
+            <command name="glClearBufferiv"/>
+            <command name="glClearBufferuiv"/>
+            <command name="glClearBufferfv"/>
+            <command name="glClearBufferfi"/>
+            <command name="glGetStringi"/>
+            <command name="glCopyBufferSubData"/>
+            <command name="glGetUniformIndices"/>
+            <command name="glGetActiveUniformsiv"/>
+            <command name="glGetUniformBlockIndex"/>
+            <command name="glGetActiveUniformBlockiv"/>
+            <command name="glGetActiveUniformBlockName"/>
+            <command name="glUniformBlockBinding"/>
+            <command name="glDrawArraysInstanced"/>
+            <command name="glDrawElementsInstanced"/>
+            <command name="glFenceSync"/>
+            <command name="glIsSync"/>
+            <command name="glDeleteSync"/>
+            <command name="glClientWaitSync"/>
+            <command name="glWaitSync"/>
+            <command name="glGetInteger64v"/>
+            <command name="glGetSynciv"/>
+            <command name="glGetInteger64i_v"/>
+            <command name="glGetBufferParameteri64v"/>
+            <command name="glGenSamplers"/>
+            <command name="glDeleteSamplers"/>
+            <command name="glIsSampler"/>
+            <command name="glBindSampler"/>
+            <command name="glSamplerParameteri"/>
+            <command name="glSamplerParameteriv"/>
+            <command name="glSamplerParameterf"/>
+            <command name="glSamplerParameterfv"/>
+            <command name="glGetSamplerParameteriv"/>
+            <command name="glGetSamplerParameterfv"/>
+            <command name="glVertexAttribDivisor"/>
+            <command name="glBindTransformFeedback"/>
+            <command name="glDeleteTransformFeedbacks"/>
+            <command name="glGenTransformFeedbacks"/>
+            <command name="glIsTransformFeedback"/>
+            <command name="glPauseTransformFeedback"/>
+            <command name="glResumeTransformFeedback"/>
+            <command name="glGetProgramBinary"/>
+            <command name="glProgramBinary"/>
+            <command name="glProgramParameteri"/>
+            <command name="glInvalidateFramebuffer"/>
+            <command name="glInvalidateSubFramebuffer"/>
+            <command name="glTexStorage2D"/>
+            <command name="glTexStorage3D"/>
+            <command name="glGetInternalformativ"/>
+        </require>
+    </feature>
+    <feature api="gles2" name="GL_ES_VERSION_3_1" number="3.1">
+        <!-- arrays_of_arrays features -->
+        <require/>
+        <!-- compute_shader features -->
+        <require>
+            <command name="glDispatchCompute"/>
+            <command name="glDispatchComputeIndirect"/>
+            <enum name="GL_COMPUTE_SHADER"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_COMPUTE_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_COMPUTE_SHARED_MEMORY_SIZE"/>
+            <enum name="GL_MAX_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_COUNT"/>
+            <enum name="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+            <enum name="GL_COMPUTE_WORK_GROUP_SIZE"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER"/>
+            <enum name="GL_DISPATCH_INDIRECT_BUFFER_BINDING"/>
+            <enum name="GL_COMPUTE_SHADER_BIT"/>
+        </require>
+        <!-- draw_indirect features -->
+        <require>
+            <command name="glDrawArraysIndirect"/>
+            <command name="glDrawElementsIndirect"/>
+            <enum name="GL_DRAW_INDIRECT_BUFFER"/>
+            <enum name="GL_DRAW_INDIRECT_BUFFER_BINDING"/>
+        </require>
+        <!-- explicit_uniform_location features -->
+        <require>
+            <enum name="GL_MAX_UNIFORM_LOCATIONS"/>
+        </require>
+        <!-- framebuffer_no_attachments features -->
+        <require>
+            <command name="glFramebufferParameteri"/>
+            <command name="glGetFramebufferParameteriv"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_WIDTH"/>
+            <enum name="GL_MAX_FRAMEBUFFER_HEIGHT"/>
+            <enum name="GL_MAX_FRAMEBUFFER_SAMPLES"/>
+        </require>
+        <!-- program_interface_query features -->
+        <require>
+            <command name="glGetProgramInterfaceiv"/>
+            <command name="glGetProgramResourceIndex"/>
+            <command name="glGetProgramResourceName"/>
+            <command name="glGetProgramResourceiv"/>
+            <command name="glGetProgramResourceLocation"/>
+            <enum name="GL_UNIFORM"/>
+            <enum name="GL_UNIFORM_BLOCK"/>
+            <enum name="GL_PROGRAM_INPUT"/>
+            <enum name="GL_PROGRAM_OUTPUT"/>
+            <enum name="GL_BUFFER_VARIABLE"/>
+            <enum name="GL_SHADER_STORAGE_BLOCK"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_VARYING"/>
+            <enum name="GL_ACTIVE_RESOURCES"/>
+            <enum name="GL_MAX_NAME_LENGTH"/>
+            <enum name="GL_MAX_NUM_ACTIVE_VARIABLES"/>
+            <enum name="GL_NAME_LENGTH"/>
+            <enum name="GL_TYPE"/>
+            <enum name="GL_ARRAY_SIZE"/>
+            <enum name="GL_OFFSET"/>
+            <enum name="GL_BLOCK_INDEX"/>
+            <enum name="GL_ARRAY_STRIDE"/>
+            <enum name="GL_MATRIX_STRIDE"/>
+            <enum name="GL_IS_ROW_MAJOR"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_INDEX"/>
+            <enum name="GL_BUFFER_BINDING"/>
+            <enum name="GL_BUFFER_DATA_SIZE"/>
+            <enum name="GL_NUM_ACTIVE_VARIABLES"/>
+            <enum name="GL_ACTIVE_VARIABLES"/>
+            <enum name="GL_REFERENCED_BY_VERTEX_SHADER"/>
+            <enum name="GL_REFERENCED_BY_FRAGMENT_SHADER"/>
+            <enum name="GL_REFERENCED_BY_COMPUTE_SHADER"/>
+            <enum name="GL_TOP_LEVEL_ARRAY_SIZE"/>
+            <enum name="GL_TOP_LEVEL_ARRAY_STRIDE"/>
+            <enum name="GL_LOCATION"/>
+        </require>
+        <!-- separate_shader_objects features -->
+        <require>
+            <command name="glUseProgramStages"/>
+            <command name="glActiveShaderProgram"/>
+            <command name="glCreateShaderProgramv"/>
+            <command name="glBindProgramPipeline"/>
+            <command name="glDeleteProgramPipelines"/>
+            <command name="glGenProgramPipelines"/>
+            <command name="glIsProgramPipeline"/>
+            <command name="glGetProgramPipelineiv"/>
+            <command name="glProgramUniform1i"/>
+            <command name="glProgramUniform2i"/>
+            <command name="glProgramUniform3i"/>
+            <command name="glProgramUniform4i"/>
+            <command name="glProgramUniform1ui"/>
+            <command name="glProgramUniform2ui"/>
+            <command name="glProgramUniform3ui"/>
+            <command name="glProgramUniform4ui"/>
+            <command name="glProgramUniform1f"/>
+            <command name="glProgramUniform2f"/>
+            <command name="glProgramUniform3f"/>
+            <command name="glProgramUniform4f"/>
+            <command name="glProgramUniform1iv"/>
+            <command name="glProgramUniform2iv"/>
+            <command name="glProgramUniform3iv"/>
+            <command name="glProgramUniform4iv"/>
+            <command name="glProgramUniform1uiv"/>
+            <command name="glProgramUniform2uiv"/>
+            <command name="glProgramUniform3uiv"/>
+            <command name="glProgramUniform4uiv"/>
+            <command name="glProgramUniform1fv"/>
+            <command name="glProgramUniform2fv"/>
+            <command name="glProgramUniform3fv"/>
+            <command name="glProgramUniform4fv"/>
+            <command name="glProgramUniformMatrix2fv"/>
+            <command name="glProgramUniformMatrix3fv"/>
+            <command name="glProgramUniformMatrix4fv"/>
+            <command name="glProgramUniformMatrix2x3fv"/>
+            <command name="glProgramUniformMatrix3x2fv"/>
+            <command name="glProgramUniformMatrix2x4fv"/>
+            <command name="glProgramUniformMatrix4x2fv"/>
+            <command name="glProgramUniformMatrix3x4fv"/>
+            <command name="glProgramUniformMatrix4x3fv"/>
+            <command name="glValidateProgramPipeline"/>
+            <command name="glGetProgramPipelineInfoLog"/>
+            <enum name="GL_VERTEX_SHADER_BIT"/>
+            <enum name="GL_FRAGMENT_SHADER_BIT"/>
+            <enum name="GL_ALL_SHADER_BITS"/>
+            <enum name="GL_PROGRAM_SEPARABLE"/>
+            <enum name="GL_ACTIVE_PROGRAM"/>
+            <enum name="GL_PROGRAM_PIPELINE_BINDING"/>
+        </require>
+        <!-- shader_atomic_counters features -->
+        <require>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_BINDING"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_START"/>
+            <enum name="GL_ATOMIC_COUNTER_BUFFER_SIZE"/>
+            <enum name="GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_VERTEX_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_COMBINED_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE"/>
+            <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS"/>
+            <enum name="GL_ACTIVE_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_UNSIGNED_INT_ATOMIC_COUNTER"/>
+        </require>
+        <!-- shader_bitfield_operations features -->
+        <require/>
+        <!-- shader_image_load_store features -->
+        <require>
+            <command name="glBindImageTexture"/>
+            <command name="glGetBooleani_v"/>
+            <command name="glMemoryBarrier"/>
+            <command name="glMemoryBarrierByRegion"/>
+            <enum name="GL_MAX_IMAGE_UNITS"/>
+            <enum name="GL_MAX_VERTEX_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_FRAGMENT_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_COMPUTE_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_COMBINED_IMAGE_UNIFORMS"/>
+            <enum name="GL_IMAGE_BINDING_NAME"/>
+            <enum name="GL_IMAGE_BINDING_LEVEL"/>
+            <enum name="GL_IMAGE_BINDING_LAYERED"/>
+            <enum name="GL_IMAGE_BINDING_LAYER"/>
+            <enum name="GL_IMAGE_BINDING_ACCESS"/>
+            <enum name="GL_IMAGE_BINDING_FORMAT"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT"/>
+            <enum name="GL_UNIFORM_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_FETCH_BARRIER_BIT"/>
+            <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT"/>
+            <enum name="GL_COMMAND_BARRIER_BIT"/>
+            <enum name="GL_PIXEL_BUFFER_BARRIER_BIT"/>
+            <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_BUFFER_UPDATE_BARRIER_BIT"/>
+            <enum name="GL_FRAMEBUFFER_BARRIER_BIT"/>
+            <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT"/>
+            <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT"/>
+            <enum name="GL_ALL_BARRIER_BITS"/>
+            <enum name="GL_IMAGE_2D"/>
+            <enum name="GL_IMAGE_3D"/>
+            <enum name="GL_IMAGE_CUBE"/>
+            <enum name="GL_IMAGE_2D_ARRAY"/>
+            <enum name="GL_INT_IMAGE_2D"/>
+            <enum name="GL_INT_IMAGE_3D"/>
+            <enum name="GL_INT_IMAGE_CUBE"/>
+            <enum name="GL_INT_IMAGE_2D_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_3D"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_CUBE"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE"/>
+            <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS"/>
+            <enum name="GL_READ_ONLY"/>
+            <enum name="GL_WRITE_ONLY"/>
+            <enum name="GL_READ_WRITE"/>
+        </require>
+        <!-- shader_layout_binding features -->
+        <require/>
+        <!-- shader_storage_buffer_object features -->
+        <require>
+            <enum name="GL_SHADER_STORAGE_BUFFER"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_BINDING"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_START"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_SIZE"/>
+            <enum name="GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"/>
+            <enum name="GL_MAX_SHADER_STORAGE_BLOCK_SIZE"/>
+            <enum name="GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_SHADER_STORAGE_BARRIER_BIT"/>
+            <enum name="GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES"/>
+        </require>
+        <!-- stencil_texturing features -->
+        <require>
+            <enum name="GL_DEPTH_STENCIL_TEXTURE_MODE"/>
+            <enum name="GL_STENCIL_INDEX"/>
+        </require>
+        <!-- texture_gather features -->
+        <require>
+            <enum name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+            <enum name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET"/>
+        </require>
+        <!-- texture_storage_multisample features -->
+        <require>
+            <command name="glTexStorage2DMultisample"/>
+            <command name="glGetMultisamplefv"/>
+            <command name="glSampleMaski"/>
+            <command name="glGetTexLevelParameteriv"/>
+            <command name="glGetTexLevelParameterfv"/>
+            <enum name="GL_SAMPLE_POSITION"/>
+            <enum name="GL_SAMPLE_MASK"/>
+            <enum name="GL_SAMPLE_MASK_VALUE"/>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+            <enum name="GL_MAX_SAMPLE_MASK_WORDS"/>
+            <enum name="GL_MAX_COLOR_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_DEPTH_TEXTURE_SAMPLES"/>
+            <enum name="GL_MAX_INTEGER_SAMPLES"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+            <enum name="GL_TEXTURE_SAMPLES"/>
+            <enum name="GL_TEXTURE_FIXED_SAMPLE_LOCATIONS"/>
+            <enum name="GL_TEXTURE_WIDTH"/>
+            <enum name="GL_TEXTURE_HEIGHT"/>
+            <enum name="GL_TEXTURE_DEPTH"/>
+            <enum name="GL_TEXTURE_INTERNAL_FORMAT"/>
+            <enum name="GL_TEXTURE_RED_SIZE"/>
+            <enum name="GL_TEXTURE_GREEN_SIZE"/>
+            <enum name="GL_TEXTURE_BLUE_SIZE"/>
+            <enum name="GL_TEXTURE_ALPHA_SIZE"/>
+            <enum name="GL_TEXTURE_DEPTH_SIZE"/>
+            <enum name="GL_TEXTURE_STENCIL_SIZE"/>
+            <enum name="GL_TEXTURE_SHARED_SIZE"/>
+            <enum name="GL_TEXTURE_RED_TYPE"/>
+            <enum name="GL_TEXTURE_GREEN_TYPE"/>
+            <enum name="GL_TEXTURE_BLUE_TYPE"/>
+            <enum name="GL_TEXTURE_ALPHA_TYPE"/>
+            <enum name="GL_TEXTURE_DEPTH_TYPE"/>
+            <enum name="GL_TEXTURE_COMPRESSED"/>
+            <enum name="GL_SAMPLER_2D_MULTISAMPLE"/>
+            <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE"/>
+        </require>
+        <!-- vertex_attrib_binding features -->
+        <require>
+            <command name="glBindVertexBuffer"/>
+            <command name="glVertexAttribFormat"/>
+            <command name="glVertexAttribIFormat"/>
+            <command name="glVertexAttribBinding"/>
+            <command name="glVertexBindingDivisor"/>
+            <enum name="GL_VERTEX_ATTRIB_BINDING"/>
+            <enum name="GL_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+            <enum name="GL_VERTEX_BINDING_DIVISOR"/>
+            <enum name="GL_VERTEX_BINDING_OFFSET"/>
+            <enum name="GL_VERTEX_BINDING_STRIDE"/>
+            <enum name="GL_VERTEX_BINDING_BUFFER"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_BINDINGS"/>
+            <enum name="GL_MAX_VERTEX_ATTRIB_STRIDE"/>
+        </require>
+    </feature>
+    <feature api="gles2" name="GL_ES_VERSION_3_2" number="3.2">
+        <!-- 3.2-specific point features -->
+        <require>
+            <enum name="GL_MULTISAMPLE_LINE_WIDTH_RANGE"/>
+            <enum name="GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY"/>
+        </require>
+        <!-- Android extension pack features -->
+        <require/>
+        <!-- blend_equation_advanced features -->
+        <require>
+            <enum name="GL_MULTIPLY"/>
+            <enum name="GL_SCREEN"/>
+            <enum name="GL_OVERLAY"/>
+            <enum name="GL_DARKEN"/>
+            <enum name="GL_LIGHTEN"/>
+            <enum name="GL_COLORDODGE"/>
+            <enum name="GL_COLORBURN"/>
+            <enum name="GL_HARDLIGHT"/>
+            <enum name="GL_SOFTLIGHT"/>
+            <enum name="GL_DIFFERENCE"/>
+            <enum name="GL_EXCLUSION"/>
+            <enum name="GL_HSL_HUE"/>
+            <enum name="GL_HSL_SATURATION"/>
+            <enum name="GL_HSL_COLOR"/>
+            <enum name="GL_HSL_LUMINOSITY"/>
+            <command name="glBlendBarrier"/>
+        </require>
+        <!-- color_buffer_float features -->
+        <require/>
+        <!-- copy_image features -->
+        <require>
+            <command name="glCopyImageSubData"/>
+        </require>
+        <!-- debug features -->
+        <require>
+            <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS"/>
+            <enum name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH"/>
+            <enum name="GL_DEBUG_CALLBACK_FUNCTION"/>
+            <enum name="GL_DEBUG_CALLBACK_USER_PARAM"/>
+            <enum name="GL_DEBUG_SOURCE_API"/>
+            <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM"/>
+            <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER"/>
+            <enum name="GL_DEBUG_SOURCE_THIRD_PARTY"/>
+            <enum name="GL_DEBUG_SOURCE_APPLICATION"/>
+            <enum name="GL_DEBUG_SOURCE_OTHER"/>
+            <enum name="GL_DEBUG_TYPE_ERROR"/>
+            <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR"/>
+            <enum name="GL_DEBUG_TYPE_PORTABILITY"/>
+            <enum name="GL_DEBUG_TYPE_PERFORMANCE"/>
+            <enum name="GL_DEBUG_TYPE_OTHER"/>
+            <enum name="GL_DEBUG_TYPE_MARKER"/>
+            <enum name="GL_DEBUG_TYPE_PUSH_GROUP"/>
+            <enum name="GL_DEBUG_TYPE_POP_GROUP"/>
+            <enum name="GL_DEBUG_SEVERITY_NOTIFICATION"/>
+            <enum name="GL_MAX_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_DEBUG_GROUP_STACK_DEPTH"/>
+            <enum name="GL_BUFFER"/>
+            <enum name="GL_SHADER"/>
+            <enum name="GL_PROGRAM"/>
+            <enum name="GL_VERTEX_ARRAY"/>
+            <enum name="GL_QUERY"/>
+            <enum name="GL_PROGRAM_PIPELINE"/>
+            <enum name="GL_SAMPLER"/>
+            <enum name="GL_MAX_LABEL_LENGTH"/>
+            <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH"/>
+            <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES"/>
+            <enum name="GL_DEBUG_LOGGED_MESSAGES"/>
+            <enum name="GL_DEBUG_SEVERITY_HIGH"/>
+            <enum name="GL_DEBUG_SEVERITY_MEDIUM"/>
+            <enum name="GL_DEBUG_SEVERITY_LOW"/>
+            <enum name="GL_DEBUG_OUTPUT"/>
+            <enum name="GL_CONTEXT_FLAG_DEBUG_BIT"/>
+            <enum name="GL_STACK_OVERFLOW"/>
+            <enum name="GL_STACK_UNDERFLOW"/>
+            <command name="glDebugMessageControl"/>
+            <command name="glDebugMessageInsert"/>
+            <command name="glDebugMessageCallback"/>
+            <command name="glGetDebugMessageLog"/>
+            <command name="glPushDebugGroup"/>
+            <command name="glPopDebugGroup"/>
+            <command name="glObjectLabel"/>
+            <command name="glGetObjectLabel"/>
+            <command name="glObjectPtrLabel"/>
+            <command name="glGetObjectPtrLabel"/>
+            <command name="glGetPointerv"/>
+        </require>
+        <!-- draw_buffers_indexed features -->
+        <require>
+            <!-- All tokens are already part of ES 3.0 -->
+            <command name="glEnablei"/>
+            <command name="glDisablei"/>
+            <command name="glBlendEquationi"/>
+            <command name="glBlendEquationSeparatei"/>
+            <command name="glBlendFunci"/>
+            <command name="glBlendFuncSeparatei"/>
+            <command name="glColorMaski"/>
+            <command name="glIsEnabledi"/>
+        </require>
+        <!-- draw_elements_base_vertex features -->
+        <require>
+            <command name="glDrawElementsBaseVertex"/>
+            <command name="glDrawRangeElementsBaseVertex"/>
+            <command name="glDrawElementsInstancedBaseVertex"/>
+        </require>
+        <!-- geometry_shader features -->
+        <require>
+            <enum name="GL_GEOMETRY_SHADER"/>
+            <enum name="GL_GEOMETRY_SHADER_BIT"/>
+            <enum name="GL_GEOMETRY_VERTICES_OUT"/>
+            <enum name="GL_GEOMETRY_INPUT_TYPE"/>
+            <enum name="GL_GEOMETRY_OUTPUT_TYPE"/>
+            <enum name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+            <enum name="GL_LAYER_PROVOKING_VERTEX"/>
+            <enum name="GL_LINES_ADJACENCY"/>
+            <enum name="GL_LINE_STRIP_ADJACENCY"/>
+            <enum name="GL_TRIANGLES_ADJACENCY"/>
+            <enum name="GL_TRIANGLE_STRIP_ADJACENCY"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES"/>
+            <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS"/>
+            <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_FIRST_VERTEX_CONVENTION"/>
+            <enum name="GL_LAST_VERTEX_CONVENTION"/>
+            <enum name="GL_UNDEFINED_VERTEX"/>
+            <enum name="GL_PRIMITIVES_GENERATED"/>
+            <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+            <enum name="GL_MAX_FRAMEBUFFER_LAYERS"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED"/>
+            <enum name="GL_REFERENCED_BY_GEOMETRY_SHADER"/>
+            <command name="glFramebufferTexture"/>
+        </require>
+        <!-- gpu_shader5 features -->
+        <require/>
+        <!-- primitive_bounding_box features -->
+        <require>
+            <enum name="GL_PRIMITIVE_BOUNDING_BOX"/>
+            <command name="glPrimitiveBoundingBox"/>
+        </require>
+        <!-- robustness features -->
+        <require>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT"/>
+            <enum name="GL_CONTEXT_FLAGS"/>
+            <enum name="GL_LOSE_CONTEXT_ON_RESET"/>
+            <enum name="GL_GUILTY_CONTEXT_RESET"/>
+            <enum name="GL_INNOCENT_CONTEXT_RESET"/>
+            <enum name="GL_UNKNOWN_CONTEXT_RESET"/>
+            <enum name="GL_RESET_NOTIFICATION_STRATEGY"/>
+            <enum name="GL_NO_RESET_NOTIFICATION"/>
+            <enum name="GL_CONTEXT_LOST"/>
+            <command name="glGetGraphicsResetStatus"/>
+            <command name="glReadnPixels"/>
+            <command name="glGetnUniformfv"/>
+            <command name="glGetnUniformiv"/>
+            <command name="glGetnUniformuiv"/>
+        </require>
+        <!-- sample_shading features -->
+        <require>
+            <command name="glMinSampleShading"/>
+            <enum name="GL_SAMPLE_SHADING"/>
+            <enum name="GL_MIN_SAMPLE_SHADING_VALUE"/>
+        </require>
+        <!-- sample_variables features -->
+        <require/>
+        <!-- shader_image_atomic features -->
+        <require/>
+        <!-- shader_io_blocks features -->
+        <require/>
+        <!-- shader_multisample_interpolation features -->
+        <require>
+            <enum name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET"/>
+            <enum name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET"/>
+            <enum name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS"/>
+        </require>
+        <!-- tessellation_shader features -->
+        <require>
+            <enum name="GL_PATCHES"/>
+            <enum name="GL_PATCH_VERTICES"/>
+            <enum name="GL_TESS_CONTROL_OUTPUT_VERTICES"/>
+            <enum name="GL_TESS_GEN_MODE"/>
+            <enum name="GL_TESS_GEN_SPACING"/>
+            <enum name="GL_TESS_GEN_VERTEX_ORDER"/>
+            <enum name="GL_TESS_GEN_POINT_MODE"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_ISOLINES"/>
+            <enum name="GL_QUADS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_FRACTIONAL_ODD"/>
+            <enum name="GL_FRACTIONAL_EVEN"/>
+            <enum name="GL_CCW"/>
+            <enum name="GL_CW"/>
+            <enum name="GL_MAX_PATCH_VERTICES"/>
+            <enum name="GL_MAX_TESS_GEN_LEVEL"/>
+            <enum name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_PATCH_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS"/>
+            <enum name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+            <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS"/>
+            <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS"/>
+            <enum name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS"/>
+            <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"/>
+            <enum name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED"/>
+            <enum name="GL_IS_PER_PATCH"/>
+            <enum name="GL_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+            <enum name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_TESS_CONTROL_SHADER"/>
+            <enum name="GL_TESS_EVALUATION_SHADER"/>
+            <enum name="GL_TESS_CONTROL_SHADER_BIT"/>
+            <enum name="GL_TESS_EVALUATION_SHADER_BIT"/>
+            <command name="glPatchParameteri"/>
+        </require>
+        <!-- texture_border_clamp features -->
+        <require>
+            <enum name="GL_TEXTURE_BORDER_COLOR"/>
+            <enum name="GL_CLAMP_TO_BORDER"/>
+            <command name="glTexParameterIiv"/>
+            <command name="glTexParameterIuiv"/>
+            <command name="glGetTexParameterIiv"/>
+            <command name="glGetTexParameterIuiv"/>
+            <command name="glSamplerParameterIiv"/>
+            <command name="glSamplerParameterIuiv"/>
+            <command name="glGetSamplerParameterIiv"/>
+            <command name="glGetSamplerParameterIuiv"/>
+        </require>
+        <!-- texture_buffer features -->
+        <require>
+            <enum name="GL_TEXTURE_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER_BINDING"/>
+            <enum name="GL_MAX_TEXTURE_BUFFER_SIZE"/>
+            <enum name="GL_TEXTURE_BINDING_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING"/>
+            <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT"/>
+            <enum name="GL_SAMPLER_BUFFER"/>
+            <enum name="GL_INT_SAMPLER_BUFFER"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER"/>
+            <enum name="GL_IMAGE_BUFFER"/>
+            <enum name="GL_INT_IMAGE_BUFFER"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER"/>
+            <enum name="GL_TEXTURE_BUFFER_OFFSET"/>
+            <enum name="GL_TEXTURE_BUFFER_SIZE"/>
+            <command name="glTexBuffer"/>
+            <command name="glTexBufferRange"/>
+        </require>
+        <!-- texture_compression_astc_ldr features -->
+        <require>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_4x4"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_5x4"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_5x5"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_6x5"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_6x6"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_8x5"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_8x6"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_8x8"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_10x5"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_10x6"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_10x8"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_10x10"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_12x10"/>
+            <enum name="GL_COMPRESSED_RGBA_ASTC_12x12"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10"/>
+            <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12"/>
+        </require>
+        <!-- texture_cube_map_array features -->
+        <require>
+            <enum name="GL_TEXTURE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"/>
+            <enum name="GL_SAMPLER_CUBE_MAP_ARRAY"/>
+            <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW"/>
+            <enum name="GL_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY"/>
+            <enum name="GL_IMAGE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY"/>
+        </require>
+        <!-- texture_stencil8 features -->
+        <require>
+            <enum name="GL_STENCIL_INDEX"/>
+            <enum name="GL_STENCIL_INDEX8"/>
+        </require>
+        <!-- texture_storage_multisample_2d_array features -->
+        <require>
+            <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+            <command name="glTexStorage3DMultisample"/>
+        </require>
+    </feature>
+    <feature api="glsc2" name="GL_SC_VERSION_2_0" number="2.0">
+        <require comment="Not used by the API, but could be used by applications">
+            <type name="GLbyte" comment="Used to define GL_BYTE data"/>
+            <type name="GLshort" comment="Used to define GL_SHORT data"/>
+            <type name="GLushort" comment="Used to define GL_UNSIGNED_SHORT data"/>
+        </require>
+        <require>
+            <enum name="GL_DEPTH_BUFFER_BIT"/>
+            <enum name="GL_STENCIL_BUFFER_BIT"/>
+            <enum name="GL_COLOR_BUFFER_BIT"/>
+            <enum name="GL_FALSE"/>
+            <enum name="GL_TRUE"/>
+            <enum name="GL_POINTS"/>
+            <enum name="GL_LINES"/>
+            <enum name="GL_LINE_LOOP"/>
+            <enum name="GL_LINE_STRIP"/>
+            <enum name="GL_TRIANGLES"/>
+            <enum name="GL_TRIANGLE_STRIP"/>
+            <enum name="GL_TRIANGLE_FAN"/>
+            <enum name="GL_ZERO"/>
+            <enum name="GL_ONE"/>
+            <enum name="GL_SRC_COLOR"/>
+            <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+            <enum name="GL_SRC_ALPHA"/>
+            <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+            <enum name="GL_DST_ALPHA"/>
+            <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+            <enum name="GL_DST_COLOR"/>
+            <enum name="GL_ONE_MINUS_DST_COLOR"/>
+            <enum name="GL_SRC_ALPHA_SATURATE"/>
+            <enum name="GL_FUNC_ADD"/>
+            <enum name="GL_BLEND_EQUATION"/>
+            <enum name="GL_BLEND_EQUATION_RGB"/>
+            <enum name="GL_BLEND_EQUATION_ALPHA"/>
+            <enum name="GL_FUNC_SUBTRACT"/>
+            <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+            <enum name="GL_BLEND_DST_RGB"/>
+            <enum name="GL_BLEND_SRC_RGB"/>
+            <enum name="GL_BLEND_DST_ALPHA"/>
+            <enum name="GL_BLEND_SRC_ALPHA"/>
+            <enum name="GL_CONSTANT_COLOR"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+            <enum name="GL_CONSTANT_ALPHA"/>
+            <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+            <enum name="GL_BLEND_COLOR"/>
+            <enum name="GL_ARRAY_BUFFER"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER"/>
+            <enum name="GL_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_STREAM_DRAW"/>
+            <enum name="GL_STATIC_DRAW"/>
+            <enum name="GL_DYNAMIC_DRAW"/>
+            <enum name="GL_BUFFER_SIZE"/>
+            <enum name="GL_BUFFER_USAGE"/>
+            <enum name="GL_CURRENT_VERTEX_ATTRIB"/>
+            <enum name="GL_FRONT"/>
+            <enum name="GL_BACK"/>
+            <enum name="GL_FRONT_AND_BACK"/>
+            <enum name="GL_TEXTURE_2D"/>
+            <enum name="GL_CULL_FACE"/>
+            <enum name="GL_BLEND"/>
+            <enum name="GL_DITHER"/>
+            <enum name="GL_STENCIL_TEST"/>
+            <enum name="GL_DEPTH_TEST"/>
+            <enum name="GL_SCISSOR_TEST"/>
+            <enum name="GL_POLYGON_OFFSET_FILL"/>
+            <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE"/>
+            <enum name="GL_SAMPLE_COVERAGE"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_INVALID_ENUM"/>
+            <enum name="GL_INVALID_VALUE"/>
+            <enum name="GL_INVALID_OPERATION"/>
+            <enum name="GL_OUT_OF_MEMORY"/>
+            <enum name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+            <enum name="GL_CONTEXT_LOST"/>
+            <enum name="GL_CW"/>
+            <enum name="GL_CCW"/>
+            <enum name="GL_LINE_WIDTH"/>
+            <enum name="GL_ALIASED_POINT_SIZE_RANGE"/>
+            <enum name="GL_ALIASED_LINE_WIDTH_RANGE"/>
+            <enum name="GL_CULL_FACE_MODE"/>
+            <enum name="GL_FRONT_FACE"/>
+            <enum name="GL_DEPTH_RANGE"/>
+            <enum name="GL_DEPTH_WRITEMASK"/>
+            <enum name="GL_DEPTH_CLEAR_VALUE"/>
+            <enum name="GL_DEPTH_FUNC"/>
+            <enum name="GL_STENCIL_CLEAR_VALUE"/>
+            <enum name="GL_STENCIL_FUNC"/>
+            <enum name="GL_STENCIL_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_REF"/>
+            <enum name="GL_STENCIL_VALUE_MASK"/>
+            <enum name="GL_STENCIL_WRITEMASK"/>
+            <enum name="GL_STENCIL_BACK_FUNC"/>
+            <enum name="GL_STENCIL_BACK_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_FAIL"/>
+            <enum name="GL_STENCIL_BACK_PASS_DEPTH_PASS"/>
+            <enum name="GL_STENCIL_BACK_REF"/>
+            <enum name="GL_STENCIL_BACK_VALUE_MASK"/>
+            <enum name="GL_STENCIL_BACK_WRITEMASK"/>
+            <enum name="GL_VIEWPORT"/>
+            <enum name="GL_SCISSOR_BOX"/>
+            <enum name="GL_COLOR_CLEAR_VALUE"/>
+            <enum name="GL_COLOR_WRITEMASK"/>
+            <enum name="GL_UNPACK_ALIGNMENT"/>
+            <enum name="GL_PACK_ALIGNMENT"/>
+            <enum name="GL_MAX_TEXTURE_SIZE"/>
+            <enum name="GL_MAX_VIEWPORT_DIMS"/>
+            <enum name="GL_SUBPIXEL_BITS"/>
+            <enum name="GL_RED_BITS"/>
+            <enum name="GL_GREEN_BITS"/>
+            <enum name="GL_BLUE_BITS"/>
+            <enum name="GL_ALPHA_BITS"/>
+            <enum name="GL_DEPTH_BITS"/>
+            <enum name="GL_STENCIL_BITS"/>
+            <enum name="GL_POLYGON_OFFSET_UNITS"/>
+            <enum name="GL_POLYGON_OFFSET_FACTOR"/>
+            <enum name="GL_TEXTURE_BINDING_2D"/>
+            <enum name="GL_SAMPLE_BUFFERS"/>
+            <enum name="GL_SAMPLES"/>
+            <enum name="GL_SAMPLE_COVERAGE_VALUE"/>
+            <enum name="GL_SAMPLE_COVERAGE_INVERT"/>
+            <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_COMPRESSED_TEXTURE_FORMATS"/>
+            <enum name="GL_DONT_CARE"/>
+            <enum name="GL_FASTEST"/>
+            <enum name="GL_NICEST"/>
+            <enum name="GL_GENERATE_MIPMAP_HINT"/>
+            <enum name="GL_BYTE"/>
+            <enum name="GL_UNSIGNED_BYTE"/>
+            <enum name="GL_SHORT"/>
+            <enum name="GL_UNSIGNED_SHORT"/>
+            <enum name="GL_INT"/>
+            <enum name="GL_UNSIGNED_INT"/>
+            <enum name="GL_FLOAT"/>
+            <enum name="GL_RED"/>
+            <enum name="GL_RG"/>
+            <enum name="GL_RGB"/>
+            <enum name="GL_RGBA"/>
+            <enum name="GL_UNSIGNED_SHORT_4_4_4_4"/>
+            <enum name="GL_UNSIGNED_SHORT_5_5_5_1"/>
+            <enum name="GL_UNSIGNED_SHORT_5_6_5"/>
+            <enum name="GL_MAX_VERTEX_ATTRIBS"/>
+            <enum name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+            <enum name="GL_MAX_VARYING_VECTORS"/>
+            <enum name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_TEXTURE_IMAGE_UNITS"/>
+            <enum name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+            <enum name="GL_LINK_STATUS"/>
+            <enum name="GL_SHADING_LANGUAGE_VERSION"/>
+            <enum name="GL_CURRENT_PROGRAM"/>
+            <enum name="GL_NEVER"/>
+            <enum name="GL_LESS"/>
+            <enum name="GL_EQUAL"/>
+            <enum name="GL_LEQUAL"/>
+            <enum name="GL_GREATER"/>
+            <enum name="GL_NOTEQUAL"/>
+            <enum name="GL_GEQUAL"/>
+            <enum name="GL_ALWAYS"/>
+            <enum name="GL_KEEP"/>
+            <enum name="GL_REPLACE"/>
+            <enum name="GL_INCR"/>
+            <enum name="GL_DECR"/>
+            <enum name="GL_INVERT"/>
+            <enum name="GL_INCR_WRAP"/>
+            <enum name="GL_DECR_WRAP"/>
+            <enum name="GL_VENDOR"/>
+            <enum name="GL_RENDERER"/>
+            <enum name="GL_VERSION"/>
+            <enum name="GL_EXTENSIONS"/>
+            <enum name="GL_NEAREST"/>
+            <enum name="GL_LINEAR"/>
+            <enum name="GL_NEAREST_MIPMAP_NEAREST"/>
+            <enum name="GL_LINEAR_MIPMAP_NEAREST"/>
+            <enum name="GL_NEAREST_MIPMAP_LINEAR"/>
+            <enum name="GL_LINEAR_MIPMAP_LINEAR"/>
+            <enum name="GL_TEXTURE_MAG_FILTER"/>
+            <enum name="GL_TEXTURE_MIN_FILTER"/>
+            <enum name="GL_TEXTURE_WRAP_S"/>
+            <enum name="GL_TEXTURE_WRAP_T"/>
+            <enum name="GL_TEXTURE_IMMUTABLE_FORMAT"/>
+            <enum name="GL_TEXTURE"/>
+            <enum name="GL_TEXTURE0"/>
+            <enum name="GL_TEXTURE1"/>
+            <enum name="GL_TEXTURE2"/>
+            <enum name="GL_TEXTURE3"/>
+            <enum name="GL_TEXTURE4"/>
+            <enum name="GL_TEXTURE5"/>
+            <enum name="GL_TEXTURE6"/>
+            <enum name="GL_TEXTURE7"/>
+            <enum name="GL_TEXTURE8"/>
+            <enum name="GL_TEXTURE9"/>
+            <enum name="GL_TEXTURE10"/>
+            <enum name="GL_TEXTURE11"/>
+            <enum name="GL_TEXTURE12"/>
+            <enum name="GL_TEXTURE13"/>
+            <enum name="GL_TEXTURE14"/>
+            <enum name="GL_TEXTURE15"/>
+            <enum name="GL_TEXTURE16"/>
+            <enum name="GL_TEXTURE17"/>
+            <enum name="GL_TEXTURE18"/>
+            <enum name="GL_TEXTURE19"/>
+            <enum name="GL_TEXTURE20"/>
+            <enum name="GL_TEXTURE21"/>
+            <enum name="GL_TEXTURE22"/>
+            <enum name="GL_TEXTURE23"/>
+            <enum name="GL_TEXTURE24"/>
+            <enum name="GL_TEXTURE25"/>
+            <enum name="GL_TEXTURE26"/>
+            <enum name="GL_TEXTURE27"/>
+            <enum name="GL_TEXTURE28"/>
+            <enum name="GL_TEXTURE29"/>
+            <enum name="GL_TEXTURE30"/>
+            <enum name="GL_TEXTURE31"/>
+            <enum name="GL_ACTIVE_TEXTURE"/>
+            <enum name="GL_REPEAT"/>
+            <enum name="GL_CLAMP_TO_EDGE"/>
+            <enum name="GL_MIRRORED_REPEAT"/>
+            <enum name="GL_SAMPLER_2D"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_POINTER"/>
+            <enum name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+            <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+            <enum name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_PROGRAM_BINARY_FORMATS"/>
+            <enum name="GL_LOW_FLOAT"/>
+            <enum name="GL_MEDIUM_FLOAT"/>
+            <enum name="GL_HIGH_FLOAT"/>
+            <enum name="GL_LOW_INT"/>
+            <enum name="GL_MEDIUM_INT"/>
+            <enum name="GL_HIGH_INT"/>
+            <enum name="GL_FRAMEBUFFER"/>
+            <enum name="GL_RENDERBUFFER"/>
+            <enum name="GL_R8"/>
+            <enum name="GL_RG8"/>
+            <enum name="GL_RGB8"/>
+            <enum name="GL_RGBA8"/>
+            <enum name="GL_RGBA4"/>
+            <enum name="GL_RGB5_A1"/>
+            <enum name="GL_RGB565"/>
+            <enum name="GL_DEPTH_COMPONENT16"/>
+            <enum name="GL_STENCIL_INDEX8"/>
+            <enum name="GL_RENDERBUFFER_WIDTH"/>
+            <enum name="GL_RENDERBUFFER_HEIGHT"/>
+            <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+            <enum name="GL_RENDERBUFFER_RED_SIZE"/>
+            <enum name="GL_RENDERBUFFER_GREEN_SIZE"/>
+            <enum name="GL_RENDERBUFFER_BLUE_SIZE"/>
+            <enum name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+            <enum name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+            <enum name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+            <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+            <enum name="GL_COLOR_ATTACHMENT0"/>
+            <enum name="GL_DEPTH_ATTACHMENT"/>
+            <enum name="GL_STENCIL_ATTACHMENT"/>
+            <enum name="GL_NONE"/>
+            <enum name="GL_FRAMEBUFFER_COMPLETE"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+            <enum name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS"/>
+            <enum name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+            <enum name="GL_FRAMEBUFFER_UNDEFINED"/>
+            <enum name="GL_FRAMEBUFFER_BINDING"/>
+            <enum name="GL_RENDERBUFFER_BINDING"/>
+            <enum name="GL_MAX_RENDERBUFFER_SIZE"/>
+            <enum name="GL_NO_ERROR"/>
+            <enum name="GL_GUILTY_CONTEXT_RESET"/>
+            <enum name="GL_INNOCENT_CONTEXT_RESET"/>
+            <enum name="GL_UNKNOWN_CONTEXT_RESET"/>
+            <enum name="GL_CONTEXT_ROBUST_ACCESS"/>
+            <enum name="GL_RESET_NOTIFICATION_STRATEGY"/>
+            <enum name="GL_LOSE_CONTEXT_ON_RESET"/>
+            <command name="glActiveTexture"/>
+            <command name="glBindBuffer"/>
+            <command name="glBindFramebuffer"/>
+            <command name="glBindRenderbuffer"/>
+            <command name="glBindTexture"/>
+            <command name="glBlendColor"/>
+            <command name="glBlendEquation"/>
+            <command name="glBlendEquationSeparate"/>
+            <command name="glBlendFunc"/>
+            <command name="glBlendFuncSeparate"/>
+            <command name="glBufferData"/>
+            <command name="glBufferSubData"/>
+            <command name="glCheckFramebufferStatus"/>
+            <command name="glClear"/>
+            <command name="glClearColor"/>
+            <command name="glClearDepthf"/>
+            <command name="glClearStencil"/>
+            <command name="glColorMask"/>
+            <command name="glCompressedTexSubImage2D"/>
+            <command name="glCreateProgram"/>
+            <command name="glCullFace"/>
+            <command name="glDepthFunc"/>
+            <command name="glDepthMask"/>
+            <command name="glDepthRangef"/>
+            <command name="glDisable"/>
+            <command name="glDisableVertexAttribArray"/>
+            <command name="glDrawArrays"/>
+            <command name="glDrawRangeElements"/>
+            <command name="glEnable"/>
+            <command name="glEnableVertexAttribArray"/>
+            <command name="glFinish"/>
+            <command name="glFlush"/>
+            <command name="glFramebufferRenderbuffer"/>
+            <command name="glFramebufferTexture2D"/>
+            <command name="glFrontFace"/>
+            <command name="glGenBuffers"/>
+            <command name="glGenerateMipmap"/>
+            <command name="glGenFramebuffers"/>
+            <command name="glGenRenderbuffers"/>
+            <command name="glGenTextures"/>
+            <command name="glGetAttribLocation"/>
+            <command name="glGetBooleanv"/>
+            <command name="glGetBufferParameteriv"/>
+            <command name="glGetError"/>
+            <command name="glGetFloatv"/>
+            <command name="glGetFramebufferAttachmentParameteriv"/>
+            <command name="glGetGraphicsResetStatus"/>
+            <command name="glGetIntegerv"/>
+            <command name="glGetProgramiv"/>
+            <command name="glGetRenderbufferParameteriv"/>
+            <command name="glGetString"/>
+            <command name="glGetTexParameterfv"/>
+            <command name="glGetTexParameteriv"/>
+            <command name="glGetnUniformfv"/>
+            <command name="glGetnUniformiv"/>
+            <command name="glGetUniformLocation"/>
+            <command name="glGetVertexAttribfv"/>
+            <command name="glGetVertexAttribiv"/>
+            <command name="glGetVertexAttribPointerv"/>
+            <command name="glHint"/>
+            <command name="glIsEnabled"/>
+            <command name="glLineWidth"/>
+            <command name="glPixelStorei"/>
+            <command name="glPolygonOffset"/>
+            <command name="glProgramBinary"/>
+            <command name="glReadnPixels"/>
+            <command name="glRenderbufferStorage"/>
+            <command name="glSampleCoverage"/>
+            <command name="glScissor"/>
+            <command name="glStencilFunc"/>
+            <command name="glStencilFuncSeparate"/>
+            <command name="glStencilMask"/>
+            <command name="glStencilMaskSeparate"/>
+            <command name="glStencilOp"/>
+            <command name="glStencilOpSeparate"/>
+            <command name="glTexStorage2D"/>
+            <command name="glTexParameterf"/>
+            <command name="glTexParameterfv"/>
+            <command name="glTexParameteri"/>
+            <command name="glTexParameteriv"/>
+            <command name="glTexSubImage2D"/>
+            <command name="glUniform1f"/>
+            <command name="glUniform1fv"/>
+            <command name="glUniform1i"/>
+            <command name="glUniform1iv"/>
+            <command name="glUniform2f"/>
+            <command name="glUniform2fv"/>
+            <command name="glUniform2i"/>
+            <command name="glUniform2iv"/>
+            <command name="glUniform3f"/>
+            <command name="glUniform3fv"/>
+            <command name="glUniform3i"/>
+            <command name="glUniform3iv"/>
+            <command name="glUniform4f"/>
+            <command name="glUniform4fv"/>
+            <command name="glUniform4i"/>
+            <command name="glUniform4iv"/>
+            <command name="glUniformMatrix2fv"/>
+            <command name="glUniformMatrix3fv"/>
+            <command name="glUniformMatrix4fv"/>
+            <command name="glUseProgram"/>
+            <command name="glVertexAttrib1f"/>
+            <command name="glVertexAttrib1fv"/>
+            <command name="glVertexAttrib2f"/>
+            <command name="glVertexAttrib2fv"/>
+            <command name="glVertexAttrib3f"/>
+            <command name="glVertexAttrib3fv"/>
+            <command name="glVertexAttrib4f"/>
+            <command name="glVertexAttrib4fv"/>
+            <command name="glVertexAttribPointer"/>
+            <command name="glViewport"/>
+        </require>
+    </feature>
+
+    <!-- SECTION: OpenGL / OpenGL ES extension interface definitions -->
+    <extensions>
+        <extension name="GL_3DFX_multisample" supported="gl">
+            <require>
+                <enum name="GL_MULTISAMPLE_3DFX"/>
+                <enum name="GL_SAMPLE_BUFFERS_3DFX"/>
+                <enum name="GL_SAMPLES_3DFX"/>
+                <enum name="GL_MULTISAMPLE_BIT_3DFX"/>
+            </require>
+        </extension>
+        <extension name="GL_3DFX_tbuffer" supported="gl">
+            <require>
+                <command name="glTbufferMask3DFX"/>
+            </require>
+        </extension>
+        <extension name="GL_3DFX_texture_compression_FXT1" supported="gl">
+            <require>
+                <enum name="GL_COMPRESSED_RGB_FXT1_3DFX"/>
+                <enum name="GL_COMPRESSED_RGBA_FXT1_3DFX"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_blend_minmax_factor" supported="gl">
+            <require>
+                <enum name="GL_FACTOR_MIN_AMD"/>
+                <enum name="GL_FACTOR_MAX_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_compressed_3DC_texture" supported="gles1|gles2">
+            <require>
+                <enum name="GL_3DC_X_AMD"/>
+                <enum name="GL_3DC_XY_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_compressed_ATC_texture" supported="gles1|gles2">
+            <require>
+                <enum name="GL_ATC_RGB_AMD"/>
+                <enum name="GL_ATC_RGBA_EXPLICIT_ALPHA_AMD"/>
+                <enum name="GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_conservative_depth" supported="gl"/>
+        <extension name="GL_AMD_debug_output" supported="gl">
+            <require>
+                <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH_AMD"/>
+                <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES_AMD"/>
+                <enum name="GL_DEBUG_LOGGED_MESSAGES_AMD"/>
+                <enum name="GL_DEBUG_SEVERITY_HIGH_AMD"/>
+                <enum name="GL_DEBUG_SEVERITY_MEDIUM_AMD"/>
+                <enum name="GL_DEBUG_SEVERITY_LOW_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_API_ERROR_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_DEPRECATION_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_PERFORMANCE_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_APPLICATION_AMD"/>
+                <enum name="GL_DEBUG_CATEGORY_OTHER_AMD"/>
+                <command name="glDebugMessageEnableAMD"/>
+                <command name="glDebugMessageInsertAMD"/>
+                <command name="glDebugMessageCallbackAMD"/>
+                <command name="glGetDebugMessageLogAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_depth_clamp_separate" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_CLAMP_NEAR_AMD"/>
+                <enum name="GL_DEPTH_CLAMP_FAR_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_draw_buffers_blend" supported="gl">
+            <require>
+                <command name="glBlendFuncIndexedAMD"/>
+                <command name="glBlendFuncSeparateIndexedAMD"/>
+                <command name="glBlendEquationIndexedAMD"/>
+                <command name="glBlendEquationSeparateIndexedAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_framebuffer_multisample_advanced" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER_STORAGE_SAMPLES_AMD"/>
+                <enum name="GL_MAX_COLOR_FRAMEBUFFER_SAMPLES_AMD"/>
+                <enum name="GL_MAX_COLOR_FRAMEBUFFER_STORAGE_SAMPLES_AMD"/>
+                <enum name="GL_MAX_DEPTH_STENCIL_FRAMEBUFFER_SAMPLES_AMD"/>
+                <enum name="GL_NUM_SUPPORTED_MULTISAMPLE_MODES_AMD"/>
+                <enum name="GL_SUPPORTED_MULTISAMPLE_MODES_AMD"/>
+                <command name="glRenderbufferStorageMultisampleAdvancedAMD"/>
+                <command name="glNamedRenderbufferStorageMultisampleAdvancedAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_framebuffer_sample_positions" supported="gl">
+            <require>
+                <enum name="GL_SUBSAMPLE_DISTANCE_AMD"/>
+                <enum name="GL_PIXELS_PER_SAMPLE_PATTERN_X_AMD"/>
+                <enum name="GL_PIXELS_PER_SAMPLE_PATTERN_Y_AMD"/>
+                <enum name="GL_ALL_PIXELS_AMD"/>
+                <command name="glFramebufferSamplePositionsfvAMD"/>
+                <command name="glNamedFramebufferSamplePositionsfvAMD"/>
+                <command name="glGetFramebufferParameterfvAMD"/>
+                <command name="glGetNamedFramebufferParameterfvAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_gcn_shader" supported="gl"/>
+        <extension name="GL_AMD_gpu_shader_half_float" supported="gl">
+            <require>
+                <enum name="GL_FLOAT16_NV"/>
+                <enum name="GL_FLOAT16_VEC2_NV"/>
+                <enum name="GL_FLOAT16_VEC3_NV"/>
+                <enum name="GL_FLOAT16_VEC4_NV"/>
+                <enum name="GL_FLOAT16_MAT2_AMD"/>
+                <enum name="GL_FLOAT16_MAT3_AMD"/>
+                <enum name="GL_FLOAT16_MAT4_AMD"/>
+                <enum name="GL_FLOAT16_MAT2x3_AMD"/>
+                <enum name="GL_FLOAT16_MAT2x4_AMD"/>
+                <enum name="GL_FLOAT16_MAT3x2_AMD"/>
+                <enum name="GL_FLOAT16_MAT3x4_AMD"/>
+                <enum name="GL_FLOAT16_MAT4x2_AMD"/>
+                <enum name="GL_FLOAT16_MAT4x3_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_gpu_shader_int16" supported="gl"/>
+        <extension name="GL_AMD_gpu_shader_int64" supported="gl">
+            <require>
+                <enum name="GL_INT64_NV"/>
+                <enum name="GL_UNSIGNED_INT64_NV"/>
+                <enum name="GL_INT8_NV"/>
+                <enum name="GL_INT8_VEC2_NV"/>
+                <enum name="GL_INT8_VEC3_NV"/>
+                <enum name="GL_INT8_VEC4_NV"/>
+                <enum name="GL_INT16_NV"/>
+                <enum name="GL_INT16_VEC2_NV"/>
+                <enum name="GL_INT16_VEC3_NV"/>
+                <enum name="GL_INT16_VEC4_NV"/>
+                <enum name="GL_INT64_VEC2_NV"/>
+                <enum name="GL_INT64_VEC3_NV"/>
+                <enum name="GL_INT64_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT8_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT16_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC4_NV"/>
+                <enum name="GL_FLOAT16_NV"/>
+                <enum name="GL_FLOAT16_VEC2_NV"/>
+                <enum name="GL_FLOAT16_VEC3_NV"/>
+                <enum name="GL_FLOAT16_VEC4_NV"/>
+                <command name="glUniform1i64NV"/>
+                <command name="glUniform2i64NV"/>
+                <command name="glUniform3i64NV"/>
+                <command name="glUniform4i64NV"/>
+                <command name="glUniform1i64vNV"/>
+                <command name="glUniform2i64vNV"/>
+                <command name="glUniform3i64vNV"/>
+                <command name="glUniform4i64vNV"/>
+                <command name="glUniform1ui64NV"/>
+                <command name="glUniform2ui64NV"/>
+                <command name="glUniform3ui64NV"/>
+                <command name="glUniform4ui64NV"/>
+                <command name="glUniform1ui64vNV"/>
+                <command name="glUniform2ui64vNV"/>
+                <command name="glUniform3ui64vNV"/>
+                <command name="glUniform4ui64vNV"/>
+                <command name="glGetUniformi64vNV"/>
+                <command name="glGetUniformui64vNV"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glProgramUniform1i64NV"/>
+                <command name="glProgramUniform2i64NV"/>
+                <command name="glProgramUniform3i64NV"/>
+                <command name="glProgramUniform4i64NV"/>
+                <command name="glProgramUniform1i64vNV"/>
+                <command name="glProgramUniform2i64vNV"/>
+                <command name="glProgramUniform3i64vNV"/>
+                <command name="glProgramUniform4i64vNV"/>
+                <command name="glProgramUniform1ui64NV"/>
+                <command name="glProgramUniform2ui64NV"/>
+                <command name="glProgramUniform3ui64NV"/>
+                <command name="glProgramUniform4ui64NV"/>
+                <command name="glProgramUniform1ui64vNV"/>
+                <command name="glProgramUniform2ui64vNV"/>
+                <command name="glProgramUniform3ui64vNV"/>
+                <command name="glProgramUniform4ui64vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_interleaved_elements" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ELEMENT_SWIZZLE_AMD"/>
+                <enum name="GL_VERTEX_ID_SWIZZLE_AMD"/>
+                <enum name="GL_RED"/>
+                <enum name="GL_GREEN"/>
+                <enum name="GL_BLUE"/>
+                <enum name="GL_ALPHA"/>
+                <enum name="GL_RG8UI"/>
+                <enum name="GL_RG16UI"/>
+                <enum name="GL_RGBA8UI"/>
+                <command name="glVertexAttribParameteriAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_multi_draw_indirect" supported="gl">
+            <require>
+                <command name="glMultiDrawArraysIndirectAMD"/>
+                <command name="glMultiDrawElementsIndirectAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_name_gen_delete" supported="gl">
+            <require>
+                <enum name="GL_DATA_BUFFER_AMD"/>
+                <enum name="GL_PERFORMANCE_MONITOR_AMD"/>
+                <enum name="GL_QUERY_OBJECT_AMD"/>
+                <enum name="GL_VERTEX_ARRAY_OBJECT_AMD"/>
+                <enum name="GL_SAMPLER_OBJECT_AMD"/>
+                <command name="glGenNamesAMD"/>
+                <command name="glDeleteNamesAMD"/>
+                <command name="glIsNameAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_occlusion_query_event" supported="gl">
+            <require>
+                <enum name="GL_OCCLUSION_QUERY_EVENT_MASK_AMD"/>
+                <enum name="GL_QUERY_DEPTH_PASS_EVENT_BIT_AMD"/>
+                <enum name="GL_QUERY_DEPTH_FAIL_EVENT_BIT_AMD"/>
+                <enum name="GL_QUERY_STENCIL_FAIL_EVENT_BIT_AMD"/>
+                <enum name="GL_QUERY_DEPTH_BOUNDS_FAIL_EVENT_BIT_AMD"/>
+                <enum name="GL_QUERY_ALL_EVENT_BITS_AMD"/>
+                <command name="glQueryObjectParameteruiAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_performance_monitor" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_COUNTER_TYPE_AMD"/>
+                <enum name="GL_COUNTER_RANGE_AMD"/>
+                <enum name="GL_UNSIGNED_INT64_AMD"/>
+                <enum name="GL_PERCENTAGE_AMD"/>
+                <enum name="GL_PERFMON_RESULT_AVAILABLE_AMD"/>
+                <enum name="GL_PERFMON_RESULT_SIZE_AMD"/>
+                <enum name="GL_PERFMON_RESULT_AMD"/>
+                <command name="glGetPerfMonitorGroupsAMD"/>
+                <command name="glGetPerfMonitorCountersAMD"/>
+                <command name="glGetPerfMonitorGroupStringAMD"/>
+                <command name="glGetPerfMonitorCounterStringAMD"/>
+                <command name="glGetPerfMonitorCounterInfoAMD"/>
+                <command name="glGenPerfMonitorsAMD"/>
+                <command name="glDeletePerfMonitorsAMD"/>
+                <command name="glSelectPerfMonitorCountersAMD"/>
+                <command name="glBeginPerfMonitorAMD"/>
+                <command name="glEndPerfMonitorAMD"/>
+                <command name="glGetPerfMonitorCounterDataAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_pinned_memory" supported="gl">
+            <require>
+                <enum name="GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_program_binary_Z400" supported="gles2">
+            <require>
+                <enum name="GL_Z400_BINARY_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_query_buffer_object" supported="gl">
+            <require>
+                <enum name="GL_QUERY_BUFFER_AMD"/>
+                <enum name="GL_QUERY_BUFFER_BINDING_AMD"/>
+                <enum name="GL_QUERY_RESULT_NO_WAIT_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_sample_positions" supported="gl">
+            <require>
+                <enum name="GL_SUBSAMPLE_DISTANCE_AMD"/>
+                <command name="glSetMultisamplefvAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_seamless_cubemap_per_texture" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_shader_atomic_counter_ops" supported="gl"/>
+        <extension name="GL_AMD_shader_ballot" supported="gl"/>
+        <extension name="GL_AMD_shader_gpu_shader_half_float_fetch" supported="gl"/>
+        <extension name="GL_AMD_shader_image_load_store_lod" supported="gl"/>
+        <extension name="GL_AMD_shader_stencil_export" supported="gl"/>
+        <extension name="GL_AMD_shader_trinary_minmax" supported="gl"/>
+        <extension name="GL_AMD_shader_explicit_vertex_parameter" supported="gl"/>
+        <extension name="GL_AMD_sparse_texture" supported="gl">
+            <require>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_X_AMD"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Y_AMD"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Z_AMD"/>
+                <enum name="GL_MAX_SPARSE_TEXTURE_SIZE_AMD"/>
+                <enum name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_AMD"/>
+                <enum name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS" comment="Should have an AMD suffix, but probably too late now"/>
+                <enum name="GL_MIN_SPARSE_LEVEL_AMD"/>
+                <enum name="GL_MIN_LOD_WARNING_AMD"/>
+                <enum name="GL_TEXTURE_STORAGE_SPARSE_BIT_AMD"/>
+                <command name="glTexStorageSparseAMD"/>
+                <command name="glTextureStorageSparseAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_stencil_operation_extended" supported="gl">
+            <require>
+                <enum name="GL_SET_AMD"/>
+                <enum name="GL_REPLACE_VALUE_AMD"/>
+                <enum name="GL_STENCIL_OP_VALUE_AMD"/>
+                <enum name="GL_STENCIL_BACK_OP_VALUE_AMD"/>
+                <command name="glStencilOpValueAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_texture_gather_bias_lod" supported="gl"/>
+        <extension name="GL_AMD_texture_texture4" supported="gl"/>
+        <extension name="GL_AMD_transform_feedback3_lines_triangles" supported="gl"/>
+        <extension name="GL_AMD_transform_feedback4" supported="gl">
+            <require>
+                <enum name="GL_STREAM_RASTERIZATION_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_vertex_shader_layer" supported="gl"/>
+        <extension name="GL_AMD_vertex_shader_tessellator" supported="gl">
+            <require>
+                <enum name="GL_SAMPLER_BUFFER_AMD"/>
+                <enum name="GL_INT_SAMPLER_BUFFER_AMD"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER_AMD"/>
+                <enum name="GL_TESSELLATION_MODE_AMD"/>
+                <enum name="GL_TESSELLATION_FACTOR_AMD"/>
+                <enum name="GL_DISCRETE_AMD"/>
+                <enum name="GL_CONTINUOUS_AMD"/>
+                <command name="glTessellationFactorAMD"/>
+                <command name="glTessellationModeAMD"/>
+            </require>
+        </extension>
+        <extension name="GL_AMD_vertex_shader_viewport_index" supported="gl"/>
+        <extension name="GL_ANDROID_extension_pack_es31a" supported="gles2">
+            <require comment="This is an alias for the following extensions. At present gl.xml doesn't actually replicate all their interfaces here.">
+                <!--
+                    KHR_debug
+                    KHR_texture_compression_astc_ldr
+                    KHR_blend_equation_advanced
+                    OES_sample_shading
+                    OES_sample_variables
+                    OES_shader_image_atomic
+                    OES_shader_multisample_interpolation
+                    OES_texture_stencil8
+                    OES_texture_storage_multisample_2d_array
+                    EXT_copy_image
+                    EXT_draw_buffers_indexed
+                    EXT_geometry_shader
+                    EXT_gpu_shader5
+                    EXT_primitive_bounding_box
+                    EXT_shader_io_blocks
+                    EXT_tessellation_shader
+                    EXT_texture_border_clamp
+                    EXT_texture_buffer
+                    EXT_texture_cube_map_array
+                    EXT_texture_srgb_decode
+                -->
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_depth_texture" supported="gles2">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT"/>
+                <enum name="GL_DEPTH_STENCIL_OES"/>
+                <enum name="GL_UNSIGNED_SHORT"/>
+                <enum name="GL_UNSIGNED_INT"/>
+                <enum name="GL_UNSIGNED_INT_24_8_OES"/>
+                <enum name="GL_DEPTH_COMPONENT16"/>
+                <enum name="GL_DEPTH_COMPONENT32_OES"/>
+                <enum name="GL_DEPTH24_STENCIL8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_framebuffer_blit" supported="gles2">
+            <require>
+                <enum name="GL_READ_FRAMEBUFFER_ANGLE"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_ANGLE"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_BINDING_ANGLE"/>
+                <enum name="GL_READ_FRAMEBUFFER_BINDING_ANGLE"/>
+                <command name="glBlitFramebufferANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_framebuffer_multisample" supported="gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER_SAMPLES_ANGLE"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE"/>
+                <enum name="GL_MAX_SAMPLES_ANGLE"/>
+                <command name="glRenderbufferStorageMultisampleANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_instanced_arrays" supported="gles2">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE"/>
+                <command name="glDrawArraysInstancedANGLE"/>
+                <command name="glDrawElementsInstancedANGLE"/>
+                <command name="glVertexAttribDivisorANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_pack_reverse_row_order" supported="gles2">
+            <require>
+                <enum name="GL_PACK_REVERSE_ROW_ORDER_ANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_program_binary" supported="gles2">
+            <require>
+                <enum name="GL_PROGRAM_BINARY_ANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_texture_compression_dxt3" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_texture_compression_dxt5" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_texture_usage" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_USAGE_ANGLE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_ANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ANGLE_translated_shader_source" supported="gles2">
+            <require>
+                <enum name="GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE"/>
+                <command name="glGetTranslatedShaderSourceANGLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_aux_depth_stencil" supported="gl">
+            <require>
+                <enum name="GL_AUX_DEPTH_STENCIL_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_client_storage" supported="gl">
+            <require>
+                <enum name="GL_UNPACK_CLIENT_STORAGE_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_clip_distance" supported="gles2">
+            <require>
+                <enum name="GL_MAX_CLIP_DISTANCES_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE0_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE1_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE2_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE3_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE4_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE5_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE6_APPLE"/>
+                <enum name="GL_CLIP_DISTANCE7_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_color_buffer_packed_float" supported="gles2"/>
+        <extension name="GL_APPLE_copy_texture_levels" supported="gles1|gles2">
+            <require>
+                <command name="glCopyTextureLevelsAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_element_array" supported="gl">
+            <require>
+                <enum name="GL_ELEMENT_ARRAY_APPLE"/>
+                <enum name="GL_ELEMENT_ARRAY_TYPE_APPLE"/>
+                <enum name="GL_ELEMENT_ARRAY_POINTER_APPLE"/>
+                <command name="glElementPointerAPPLE"/>
+                <command name="glDrawElementArrayAPPLE"/>
+                <command name="glDrawRangeElementArrayAPPLE"/>
+                <command name="glMultiDrawElementArrayAPPLE"/>
+                <command name="glMultiDrawRangeElementArrayAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_fence" supported="gl">
+            <require>
+                <enum name="GL_DRAW_PIXELS_APPLE"/>
+                <enum name="GL_FENCE_APPLE"/>
+                <command name="glGenFencesAPPLE"/>
+                <command name="glDeleteFencesAPPLE"/>
+                <command name="glSetFenceAPPLE"/>
+                <command name="glIsFenceAPPLE"/>
+                <command name="glTestFenceAPPLE"/>
+                <command name="glFinishFenceAPPLE"/>
+                <command name="glTestObjectAPPLE"/>
+                <command name="glFinishObjectAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_float_pixels" supported="gl">
+            <require>
+                <enum name="GL_HALF_APPLE"/>
+                <enum name="GL_RGBA_FLOAT32_APPLE"/>
+                <enum name="GL_RGB_FLOAT32_APPLE"/>
+                <enum name="GL_ALPHA_FLOAT32_APPLE"/>
+                <enum name="GL_INTENSITY_FLOAT32_APPLE"/>
+                <enum name="GL_LUMINANCE_FLOAT32_APPLE"/>
+                <enum name="GL_LUMINANCE_ALPHA_FLOAT32_APPLE"/>
+                <enum name="GL_RGBA_FLOAT16_APPLE"/>
+                <enum name="GL_RGB_FLOAT16_APPLE"/>
+                <enum name="GL_ALPHA_FLOAT16_APPLE"/>
+                <enum name="GL_INTENSITY_FLOAT16_APPLE"/>
+                <enum name="GL_LUMINANCE_FLOAT16_APPLE"/>
+                <enum name="GL_LUMINANCE_ALPHA_FLOAT16_APPLE"/>
+                <enum name="GL_COLOR_FLOAT_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_flush_buffer_range" supported="gl">
+            <require>
+                <enum name="GL_BUFFER_SERIALIZED_MODIFY_APPLE"/>
+                <enum name="GL_BUFFER_FLUSHING_UNMAP_APPLE"/>
+                <command name="glBufferParameteriAPPLE"/>
+                <command name="glFlushMappedBufferRangeAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_framebuffer_multisample" supported="gles1|gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER_SAMPLES_APPLE"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE"/>
+                <enum name="GL_MAX_SAMPLES_APPLE"/>
+                <enum name="GL_READ_FRAMEBUFFER_APPLE"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_APPLE"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_BINDING_APPLE"/>
+                <enum name="GL_READ_FRAMEBUFFER_BINDING_APPLE"/>
+                <command name="glRenderbufferStorageMultisampleAPPLE"/>
+                <command name="glResolveMultisampleFramebufferAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_object_purgeable" supported="gl">
+            <require>
+                <enum name="GL_BUFFER_OBJECT_APPLE"/>
+                <enum name="GL_RELEASED_APPLE"/>
+                <enum name="GL_VOLATILE_APPLE"/>
+                <enum name="GL_RETAINED_APPLE"/>
+                <enum name="GL_UNDEFINED_APPLE"/>
+                <enum name="GL_PURGEABLE_APPLE"/>
+                <command name="glObjectPurgeableAPPLE"/>
+                <command name="glObjectUnpurgeableAPPLE"/>
+                <command name="glGetObjectParameterivAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_rgb_422" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_RGB_422_APPLE"/>
+                <enum name="GL_UNSIGNED_SHORT_8_8_APPLE"/>
+                <enum name="GL_UNSIGNED_SHORT_8_8_REV_APPLE"/>
+            </require>
+            <require comment="Depends on TexStorage* (EXT_texture_storage / ES 3.0 / GL 4.4 / etc.)">
+                <enum name="GL_RGB_RAW_422_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_row_bytes" supported="gl">
+            <require>
+                <enum name="GL_PACK_ROW_BYTES_APPLE"/>
+                <enum name="GL_UNPACK_ROW_BYTES_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_specular_vector" supported="gl">
+            <require>
+                <enum name="GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_sync" supported="gles1|gles2">
+            <require>
+                <enum name="GL_SYNC_OBJECT_APPLE"/>
+                <enum name="GL_MAX_SERVER_WAIT_TIMEOUT_APPLE"/>
+                <enum name="GL_OBJECT_TYPE_APPLE"/>
+                <enum name="GL_SYNC_CONDITION_APPLE"/>
+                <enum name="GL_SYNC_STATUS_APPLE"/>
+                <enum name="GL_SYNC_FLAGS_APPLE"/>
+                <enum name="GL_SYNC_FENCE_APPLE"/>
+                <enum name="GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE"/>
+                <enum name="GL_UNSIGNALED_APPLE"/>
+                <enum name="GL_SIGNALED_APPLE"/>
+                <enum name="GL_ALREADY_SIGNALED_APPLE"/>
+                <enum name="GL_TIMEOUT_EXPIRED_APPLE"/>
+                <enum name="GL_CONDITION_SATISFIED_APPLE"/>
+                <enum name="GL_WAIT_FAILED_APPLE"/>
+                <enum name="GL_SYNC_FLUSH_COMMANDS_BIT_APPLE"/>
+                <enum name="GL_TIMEOUT_IGNORED_APPLE"/>
+                <command name="glFenceSyncAPPLE"/>
+                <command name="glIsSyncAPPLE"/>
+                <command name="glDeleteSyncAPPLE"/>
+                <command name="glClientWaitSyncAPPLE"/>
+                <command name="glWaitSyncAPPLE"/>
+                <command name="glGetInteger64vAPPLE"/>
+                <command name="glGetSyncivAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_texture_2D_limited_npot" supported="gles1"/>
+        <extension name="GL_APPLE_texture_format_BGRA8888" supported="gles1|gles2">
+            <require>
+                <enum name="GL_BGRA_EXT"/>
+            </require>
+            <require comment="Depends on TexStorage* (EXT_texture_storage / ES 3.0 / GL 4.4 / etc.)">
+                <enum name="GL_BGRA8_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_texture_max_level" supported="gles1|gles2">
+            <require>
+                <enum name="GL_TEXTURE_MAX_LEVEL_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_texture_packed_float" supported="gles2">
+            <require>
+                <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV_APPLE"/>
+                <enum name="GL_UNSIGNED_INT_5_9_9_9_REV_APPLE"/>
+                <enum name="GL_R11F_G11F_B10F_APPLE"/>
+                <enum name="GL_RGB9_E5_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_texture_range" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_RANGE_LENGTH_APPLE"/>
+                <enum name="GL_TEXTURE_RANGE_POINTER_APPLE"/>
+                <enum name="GL_TEXTURE_STORAGE_HINT_APPLE"/>
+                <enum name="GL_STORAGE_PRIVATE_APPLE"/>
+                <enum name="GL_STORAGE_CACHED_APPLE"/>
+                <enum name="GL_STORAGE_SHARED_APPLE"/>
+                <command name="glTextureRangeAPPLE"/>
+                <command name="glGetTexParameterPointervAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_transform_hint" supported="gl">
+            <require>
+                <enum name="GL_TRANSFORM_HINT_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_vertex_array_object" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_BINDING_APPLE"/>
+                <command name="glBindVertexArrayAPPLE"/>
+                <command name="glDeleteVertexArraysAPPLE"/>
+                <command name="glGenVertexArraysAPPLE"/>
+                <command name="glIsVertexArrayAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_vertex_array_range" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_RANGE_APPLE"/>
+                <enum name="GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE"/>
+                <enum name="GL_VERTEX_ARRAY_STORAGE_HINT_APPLE"/>
+                <enum name="GL_VERTEX_ARRAY_RANGE_POINTER_APPLE"/>
+                <enum name="GL_STORAGE_CLIENT_APPLE"/>
+                <enum name="GL_STORAGE_CACHED_APPLE"/>
+                <enum name="GL_STORAGE_SHARED_APPLE"/>
+                <command name="glVertexArrayRangeAPPLE"/>
+                <command name="glFlushVertexArrayRangeAPPLE"/>
+                <command name="glVertexArrayParameteriAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_vertex_program_evaluators" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_MAP1_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP2_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP1_SIZE_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP1_COEFF_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP1_ORDER_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP1_DOMAIN_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP2_SIZE_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP2_COEFF_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP2_ORDER_APPLE"/>
+                <enum name="GL_VERTEX_ATTRIB_MAP2_DOMAIN_APPLE"/>
+                <command name="glEnableVertexAttribAPPLE"/>
+                <command name="glDisableVertexAttribAPPLE"/>
+                <command name="glIsVertexAttribEnabledAPPLE"/>
+                <command name="glMapVertexAttrib1dAPPLE"/>
+                <command name="glMapVertexAttrib1fAPPLE"/>
+                <command name="glMapVertexAttrib2dAPPLE"/>
+                <command name="glMapVertexAttrib2fAPPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_APPLE_ycbcr_422" supported="gl">
+            <require>
+                <enum name="GL_YCBCR_422_APPLE"/>
+                <enum name="GL_UNSIGNED_SHORT_8_8_APPLE"/>
+                <enum name="GL_UNSIGNED_SHORT_8_8_REV_APPLE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_ES2_compatibility" supported="gl|glcore">
+            <require>
+                <enum name="GL_FIXED"/>
+                <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE"/>
+                <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT"/>
+                <enum name="GL_LOW_FLOAT"/>
+                <enum name="GL_MEDIUM_FLOAT"/>
+                <enum name="GL_HIGH_FLOAT"/>
+                <enum name="GL_LOW_INT"/>
+                <enum name="GL_MEDIUM_INT"/>
+                <enum name="GL_HIGH_INT"/>
+                <enum name="GL_SHADER_COMPILER"/>
+                <enum name="GL_SHADER_BINARY_FORMATS"/>
+                <enum name="GL_NUM_SHADER_BINARY_FORMATS"/>
+                <enum name="GL_MAX_VERTEX_UNIFORM_VECTORS"/>
+                <enum name="GL_MAX_VARYING_VECTORS"/>
+                <enum name="GL_MAX_FRAGMENT_UNIFORM_VECTORS"/>
+                <enum name="GL_RGB565"/>
+                <command name="glReleaseShaderCompiler"/>
+                <command name="glShaderBinary"/>
+                <command name="glGetShaderPrecisionFormat"/>
+                <command name="glDepthRangef"/>
+                <command name="glClearDepthf"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_ES3_1_compatibility" supported="gl|glcore">
+            <require>
+                <enum name="GL_BACK"/>
+                <command name="glMemoryBarrierByRegion"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_ES3_2_compatibility" supported="gl|glcore">
+            <require>
+                <enum name="GL_PRIMITIVE_BOUNDING_BOX_ARB"/>
+                <enum name="GL_MULTISAMPLE_LINE_WIDTH_RANGE_ARB"/>
+                <enum name="GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY_ARB"/>
+                <command name="glPrimitiveBoundingBoxARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_ES3_compatibility" supported="gl|glcore">
+            <require>
+                <enum name="GL_COMPRESSED_RGB8_ETC2"/>
+                <enum name="GL_COMPRESSED_SRGB8_ETC2"/>
+                <enum name="GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+                <enum name="GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"/>
+                <enum name="GL_COMPRESSED_RGBA8_ETC2_EAC"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"/>
+                <enum name="GL_COMPRESSED_R11_EAC"/>
+                <enum name="GL_COMPRESSED_SIGNED_R11_EAC"/>
+                <enum name="GL_COMPRESSED_RG11_EAC"/>
+                <enum name="GL_COMPRESSED_SIGNED_RG11_EAC"/>
+                <enum name="GL_PRIMITIVE_RESTART_FIXED_INDEX"/>
+                <enum name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE"/>
+                <enum name="GL_MAX_ELEMENT_INDEX"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_arrays_of_arrays" supported="gl|glcore"/>
+        <extension name="GL_ARB_base_instance" supported="gl|glcore">
+            <require>
+                <command name="glDrawArraysInstancedBaseInstance"/>
+                <command name="glDrawElementsInstancedBaseInstance"/>
+                <command name="glDrawElementsInstancedBaseVertexBaseInstance"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_bindless_texture" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNSIGNED_INT64_ARB"/>
+                <command name="glGetTextureHandleARB"/>
+                <command name="glGetTextureSamplerHandleARB"/>
+                <command name="glMakeTextureHandleResidentARB"/>
+                <command name="glMakeTextureHandleNonResidentARB"/>
+                <command name="glGetImageHandleARB"/>
+                <command name="glMakeImageHandleResidentARB"/>
+                <command name="glMakeImageHandleNonResidentARB"/>
+                <command name="glUniformHandleui64ARB"/>
+                <command name="glUniformHandleui64vARB"/>
+                <command name="glProgramUniformHandleui64ARB"/>
+                <command name="glProgramUniformHandleui64vARB"/>
+                <command name="glIsTextureHandleResidentARB"/>
+                <command name="glIsImageHandleResidentARB"/>
+                <command name="glVertexAttribL1ui64ARB"/>
+                <command name="glVertexAttribL1ui64vARB"/>
+                <command name="glGetVertexAttribLui64vARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_blend_func_extended" supported="gl|glcore">
+            <require>
+                <enum name="GL_SRC1_COLOR"/>
+                <enum name="GL_SRC1_ALPHA"/>
+                <enum name="GL_ONE_MINUS_SRC1_COLOR"/>
+                <enum name="GL_ONE_MINUS_SRC1_ALPHA"/>
+                <enum name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS"/>
+                <command name="glBindFragDataLocationIndexed"/>
+                <command name="glGetFragDataIndex"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_buffer_storage" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAP_READ_BIT"/>
+                <enum name="GL_MAP_WRITE_BIT"/>
+                <enum name="GL_MAP_PERSISTENT_BIT"/>
+                <enum name="GL_MAP_COHERENT_BIT"/>
+                <enum name="GL_DYNAMIC_STORAGE_BIT"/>
+                <enum name="GL_CLIENT_STORAGE_BIT"/>
+                <enum name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT"/>
+                <enum name="GL_BUFFER_IMMUTABLE_STORAGE"/>
+                <enum name="GL_BUFFER_STORAGE_FLAGS"/>
+                <command name="glBufferStorage"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_cl_event" supported="gl|glcore">
+            <require>
+                <enum name="GL_SYNC_CL_EVENT_ARB"/>
+                <enum name="GL_SYNC_CL_EVENT_COMPLETE_ARB"/>
+                <command name="glCreateSyncFromCLeventARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_clear_buffer_object" supported="gl|glcore">
+            <require>
+                <command name="glClearBufferData"/>
+                <command name="glClearBufferSubData"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_clear_texture" supported="gl|glcore">
+            <require>
+                <enum name="GL_CLEAR_TEXTURE"/>
+                <command name="glClearTexImage"/>
+                <command name="glClearTexSubImage"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_clip_control" supported="gl|glcore">
+            <require>
+                <command name="glClipControl"/>
+                <enum name="GL_LOWER_LEFT"/>
+                <enum name="GL_UPPER_LEFT"/>
+                <enum name="GL_NEGATIVE_ONE_TO_ONE"/>
+                <enum name="GL_ZERO_TO_ONE"/>
+                <enum name="GL_CLIP_ORIGIN"/>
+                <enum name="GL_CLIP_DEPTH_MODE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_color_buffer_float" supported="gl">
+            <require>
+                <enum name="GL_RGBA_FLOAT_MODE_ARB"/>
+                <enum name="GL_CLAMP_VERTEX_COLOR_ARB"/>
+                <enum name="GL_CLAMP_FRAGMENT_COLOR_ARB"/>
+                <enum name="GL_CLAMP_READ_COLOR_ARB"/>
+                <enum name="GL_FIXED_ONLY_ARB"/>
+                <command name="glClampColorARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_compatibility" supported="gl">
+            <require comment="Defines features from OpenGL 3.0 that were removed in OpenGL 3.1 - not enumerated here yet">
+            </require>
+        </extension>
+        <extension name="GL_ARB_compressed_texture_pixel_storage" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNPACK_COMPRESSED_BLOCK_WIDTH"/>
+                <enum name="GL_UNPACK_COMPRESSED_BLOCK_HEIGHT"/>
+                <enum name="GL_UNPACK_COMPRESSED_BLOCK_DEPTH"/>
+                <enum name="GL_UNPACK_COMPRESSED_BLOCK_SIZE"/>
+                <enum name="GL_PACK_COMPRESSED_BLOCK_WIDTH"/>
+                <enum name="GL_PACK_COMPRESSED_BLOCK_HEIGHT"/>
+                <enum name="GL_PACK_COMPRESSED_BLOCK_DEPTH"/>
+                <enum name="GL_PACK_COMPRESSED_BLOCK_SIZE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_compute_shader" supported="gl|glcore">
+            <require>
+                <enum name="GL_COMPUTE_SHADER"/>
+                <enum name="GL_MAX_COMPUTE_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS"/>
+                <enum name="GL_MAX_COMPUTE_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_COMPUTE_SHARED_MEMORY_SIZE"/>
+                <enum name="GL_MAX_COMPUTE_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_COMPUTE_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS"/>
+                <enum name="GL_MAX_COMPUTE_WORK_GROUP_COUNT"/>
+                <enum name="GL_MAX_COMPUTE_WORK_GROUP_SIZE"/>
+                <enum name="GL_COMPUTE_WORK_GROUP_SIZE"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER"/>
+                <enum name="GL_DISPATCH_INDIRECT_BUFFER"/>
+                <enum name="GL_DISPATCH_INDIRECT_BUFFER_BINDING"/>
+                <enum name="GL_COMPUTE_SHADER_BIT"/>
+                <command name="glDispatchCompute"/>
+                <command name="glDispatchComputeIndirect"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_compute_variable_group_size" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB"/>
+                <enum name="GL_MAX_COMPUTE_FIXED_GROUP_INVOCATIONS_ARB"/>
+                <enum name="GL_MAX_COMPUTE_VARIABLE_GROUP_SIZE_ARB"/>
+                <enum name="GL_MAX_COMPUTE_FIXED_GROUP_SIZE_ARB"/>
+                <command name="glDispatchComputeGroupSizeARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_conditional_render_inverted" supported="gl|glcore">
+            <require>
+                <enum name="GL_QUERY_WAIT_INVERTED"/>
+                <enum name="GL_QUERY_NO_WAIT_INVERTED"/>
+                <enum name="GL_QUERY_BY_REGION_WAIT_INVERTED"/>
+                <enum name="GL_QUERY_BY_REGION_NO_WAIT_INVERTED"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_conservative_depth" supported="gl|glcore"/>
+        <extension name="GL_ARB_copy_buffer" supported="gl|glcore">
+            <require>
+                <enum name="GL_COPY_READ_BUFFER"/>
+                <enum name="GL_COPY_WRITE_BUFFER"/>
+                <command name="glCopyBufferSubData"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_copy_image" supported="gl|glcore">
+            <require>
+                <command name="glCopyImageSubData"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_cull_distance" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_CULL_DISTANCES"/>
+                <enum name="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_debug_output" supported="gl|glcore">
+            <require>
+                <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB"/>
+                <enum name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_ARB"/>
+                <enum name="GL_DEBUG_CALLBACK_FUNCTION_ARB"/>
+                <enum name="GL_DEBUG_CALLBACK_USER_PARAM_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_API_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_THIRD_PARTY_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_APPLICATION_ARB"/>
+                <enum name="GL_DEBUG_SOURCE_OTHER_ARB"/>
+                <enum name="GL_DEBUG_TYPE_ERROR_ARB"/>
+                <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB"/>
+                <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB"/>
+                <enum name="GL_DEBUG_TYPE_PORTABILITY_ARB"/>
+                <enum name="GL_DEBUG_TYPE_PERFORMANCE_ARB"/>
+                <enum name="GL_DEBUG_TYPE_OTHER_ARB"/>
+                <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH_ARB"/>
+                <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES_ARB"/>
+                <enum name="GL_DEBUG_LOGGED_MESSAGES_ARB"/>
+                <enum name="GL_DEBUG_SEVERITY_HIGH_ARB"/>
+                <enum name="GL_DEBUG_SEVERITY_MEDIUM_ARB"/>
+                <enum name="GL_DEBUG_SEVERITY_LOW_ARB"/>
+                <command name="glDebugMessageControlARB"/>
+                <command name="glDebugMessageInsertARB"/>
+                <command name="glDebugMessageCallbackARB"/>
+                <command name="glGetDebugMessageLogARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_depth_buffer_float" supported="gl|glcore">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT32F"/>
+                <enum name="GL_DEPTH32F_STENCIL8"/>
+                <enum name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_depth_clamp" supported="gl|glcore">
+            <require>
+                <enum name="GL_DEPTH_CLAMP"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_depth_texture" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT16_ARB"/>
+                <enum name="GL_DEPTH_COMPONENT24_ARB"/>
+                <enum name="GL_DEPTH_COMPONENT32_ARB"/>
+                <enum name="GL_TEXTURE_DEPTH_SIZE_ARB"/>
+                <enum name="GL_DEPTH_TEXTURE_MODE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_derivative_control" supported="gl|glcore"/>
+        <extension name="GL_ARB_direct_state_access" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_TARGET"/>
+                <enum name="GL_QUERY_TARGET"/>
+                <enum name="GL_TEXTURE_BINDING_1D"/>
+                <enum name="GL_TEXTURE_BINDING_1D_ARRAY"/>
+                <enum name="GL_TEXTURE_BINDING_2D"/>
+                <enum name="GL_TEXTURE_BINDING_2D_ARRAY"/>
+                <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+                <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_TEXTURE_BINDING_3D"/>
+                <enum name="GL_TEXTURE_BINDING_BUFFER"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"/>
+                <enum name="GL_TEXTURE_BINDING_RECTANGLE"/>
+            </require>
+            <require comment="Transform Feedback object functions">
+                <command name="glCreateTransformFeedbacks"/>
+                <command name="glTransformFeedbackBufferBase"/>
+                <command name="glTransformFeedbackBufferRange"/>
+                <command name="glGetTransformFeedbackiv"/>
+                <command name="glGetTransformFeedbacki_v"/>
+                <command name="glGetTransformFeedbacki64_v"/>
+            </require>
+            <require comment="Buffer object functions">
+                <command name="glCreateBuffers"/>
+                <command name="glNamedBufferStorage"/>
+                <command name="glNamedBufferData"/>
+                <command name="glNamedBufferSubData"/>
+                <command name="glCopyNamedBufferSubData"/>
+                <command name="glClearNamedBufferData"/>
+                <command name="glClearNamedBufferSubData"/>
+                <command name="glMapNamedBuffer"/>
+                <command name="glMapNamedBufferRange"/>
+                <command name="glUnmapNamedBuffer"/>
+                <command name="glFlushMappedNamedBufferRange"/>
+                <command name="glGetNamedBufferParameteriv"/>
+                <command name="glGetNamedBufferParameteri64v"/>
+                <command name="glGetNamedBufferPointerv"/>
+                <command name="glGetNamedBufferSubData"/>
+            </require>
+            <require comment="Framebuffer object functions">
+                <command name="glCreateFramebuffers"/>
+                <command name="glNamedFramebufferRenderbuffer"/>
+                <command name="glNamedFramebufferParameteri"/>
+                <command name="glNamedFramebufferTexture"/>
+                <command name="glNamedFramebufferTextureLayer"/>
+                <command name="glNamedFramebufferDrawBuffer"/>
+                <command name="glNamedFramebufferDrawBuffers"/>
+                <command name="glNamedFramebufferReadBuffer"/>
+                <command name="glInvalidateNamedFramebufferData"/>
+                <command name="glInvalidateNamedFramebufferSubData"/>
+                <command name="glClearNamedFramebufferiv"/>
+                <command name="glClearNamedFramebufferuiv"/>
+                <command name="glClearNamedFramebufferfv"/>
+                <command name="glClearNamedFramebufferfi"/>
+                <command name="glBlitNamedFramebuffer"/>
+                <command name="glCheckNamedFramebufferStatus"/>
+                <command name="glGetNamedFramebufferParameteriv"/>
+                <command name="glGetNamedFramebufferAttachmentParameteriv"/>
+            </require>
+            <require comment="Renderbuffer object functions">
+                <command name="glCreateRenderbuffers"/>
+                <command name="glNamedRenderbufferStorage"/>
+                <command name="glNamedRenderbufferStorageMultisample"/>
+                <command name="glGetNamedRenderbufferParameteriv"/>
+            </require>
+            <require comment="Texture object functions">
+                <command name="glCreateTextures"/>
+                <command name="glTextureBuffer"/>
+                <command name="glTextureBufferRange"/>
+                <command name="glTextureStorage1D"/>
+                <command name="glTextureStorage2D"/>
+                <command name="glTextureStorage3D"/>
+                <command name="glTextureStorage2DMultisample"/>
+                <command name="glTextureStorage3DMultisample"/>
+                <command name="glTextureSubImage1D"/>
+                <command name="glTextureSubImage2D"/>
+                <command name="glTextureSubImage3D"/>
+                <command name="glCompressedTextureSubImage1D"/>
+                <command name="glCompressedTextureSubImage2D"/>
+                <command name="glCompressedTextureSubImage3D"/>
+                <command name="glCopyTextureSubImage1D"/>
+                <command name="glCopyTextureSubImage2D"/>
+                <command name="glCopyTextureSubImage3D"/>
+                <command name="glTextureParameterf"/>
+                <command name="glTextureParameterfv"/>
+                <command name="glTextureParameteri"/>
+                <command name="glTextureParameterIiv"/>
+                <command name="glTextureParameterIuiv"/>
+                <command name="glTextureParameteriv"/>
+                <command name="glGenerateTextureMipmap"/>
+                <command name="glBindTextureUnit"/>
+                <command name="glGetTextureImage"/>
+                <command name="glGetCompressedTextureImage"/>
+                <command name="glGetTextureLevelParameterfv"/>
+                <command name="glGetTextureLevelParameteriv"/>
+                <command name="glGetTextureParameterfv"/>
+                <command name="glGetTextureParameterIiv"/>
+                <command name="glGetTextureParameterIuiv"/>
+                <command name="glGetTextureParameteriv"/>
+            </require>
+            <require comment="Vertex Array object functions">
+                <command name="glCreateVertexArrays"/>
+                <command name="glDisableVertexArrayAttrib"/>
+                <command name="glEnableVertexArrayAttrib"/>
+                <command name="glVertexArrayElementBuffer"/>
+                <command name="glVertexArrayVertexBuffer"/>
+                <command name="glVertexArrayVertexBuffers"/>
+                <command name="glVertexArrayAttribBinding"/>
+                <command name="glVertexArrayAttribFormat"/>
+                <command name="glVertexArrayAttribIFormat"/>
+                <command name="glVertexArrayAttribLFormat"/>
+                <command name="glVertexArrayBindingDivisor"/>
+                <command name="glGetVertexArrayiv"/>
+                <command name="glGetVertexArrayIndexediv"/>
+                <command name="glGetVertexArrayIndexed64iv"/>
+            </require>
+            <require comment="Sampler object functions">
+                <command name="glCreateSamplers"/>
+            </require>
+            <require comment="Program Pipeline object functions">
+                <command name="glCreateProgramPipelines"/>
+            </require>
+            <require comment="Query object functions">
+                <command name="glCreateQueries"/>
+                <command name="glGetQueryBufferObjecti64v"/>
+                <command name="glGetQueryBufferObjectiv"/>
+                <command name="glGetQueryBufferObjectui64v"/>
+                <command name="glGetQueryBufferObjectuiv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_draw_buffers" supported="gl">
+            <require>
+                <enum name="GL_MAX_DRAW_BUFFERS_ARB"/>
+                <enum name="GL_DRAW_BUFFER0_ARB"/>
+                <enum name="GL_DRAW_BUFFER1_ARB"/>
+                <enum name="GL_DRAW_BUFFER2_ARB"/>
+                <enum name="GL_DRAW_BUFFER3_ARB"/>
+                <enum name="GL_DRAW_BUFFER4_ARB"/>
+                <enum name="GL_DRAW_BUFFER5_ARB"/>
+                <enum name="GL_DRAW_BUFFER6_ARB"/>
+                <enum name="GL_DRAW_BUFFER7_ARB"/>
+                <enum name="GL_DRAW_BUFFER8_ARB"/>
+                <enum name="GL_DRAW_BUFFER9_ARB"/>
+                <enum name="GL_DRAW_BUFFER10_ARB"/>
+                <enum name="GL_DRAW_BUFFER11_ARB"/>
+                <enum name="GL_DRAW_BUFFER12_ARB"/>
+                <enum name="GL_DRAW_BUFFER13_ARB"/>
+                <enum name="GL_DRAW_BUFFER14_ARB"/>
+                <enum name="GL_DRAW_BUFFER15_ARB"/>
+                <command name="glDrawBuffersARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_draw_buffers_blend" supported="gl|glcore">
+            <require>
+                <command name="glBlendEquationiARB"/>
+                <command name="glBlendEquationSeparateiARB"/>
+                <command name="glBlendFunciARB"/>
+                <command name="glBlendFuncSeparateiARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_draw_elements_base_vertex" supported="gl|glcore">
+            <require>
+                <command name="glDrawElementsBaseVertex"/>
+                <command name="glDrawRangeElementsBaseVertex"/>
+                <command name="glDrawElementsInstancedBaseVertex"/>
+                <command name="glMultiDrawElementsBaseVertex"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_draw_indirect" supported="gl|glcore">
+            <require>
+                <enum name="GL_DRAW_INDIRECT_BUFFER"/>
+                <enum name="GL_DRAW_INDIRECT_BUFFER_BINDING"/>
+                <command name="glDrawArraysIndirect"/>
+                <command name="glDrawElementsIndirect"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_draw_instanced" supported="gl|glcore">
+            <require>
+                <command name="glDrawArraysInstancedARB"/>
+                <command name="glDrawElementsInstancedARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_enhanced_layouts" supported="gl|glcore">
+            <require>
+                <enum name="GL_LOCATION_COMPONENT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_INDEX"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_explicit_attrib_location" supported="gl|glcore"/>
+        <extension name="GL_ARB_explicit_uniform_location" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_UNIFORM_LOCATIONS"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_fragment_coord_conventions" supported="gl|glcore"/>
+        <extension name="GL_ARB_fragment_layer_viewport" supported="gl|glcore"/>
+        <extension name="GL_ARB_fragment_program" supported="gl">
+            <require>
+                <enum name="GL_FRAGMENT_PROGRAM_ARB"/>
+                <enum name="GL_PROGRAM_FORMAT_ASCII_ARB"/>
+                <enum name="GL_PROGRAM_LENGTH_ARB"/>
+                <enum name="GL_PROGRAM_FORMAT_ARB"/>
+                <enum name="GL_PROGRAM_BINDING_ARB"/>
+                <enum name="GL_PROGRAM_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_TEMPORARIES_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEMPORARIES_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+                <enum name="GL_PROGRAM_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ATTRIBS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ENV_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB"/>
+                <enum name="GL_PROGRAM_ALU_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_TEX_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_TEX_INDIRECTIONS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB"/>
+                <enum name="GL_PROGRAM_STRING_ARB"/>
+                <enum name="GL_PROGRAM_ERROR_POSITION_ARB"/>
+                <enum name="GL_CURRENT_MATRIX_ARB"/>
+                <enum name="GL_TRANSPOSE_CURRENT_MATRIX_ARB"/>
+                <enum name="GL_CURRENT_MATRIX_STACK_DEPTH_ARB"/>
+                <enum name="GL_MAX_PROGRAM_MATRICES_ARB"/>
+                <enum name="GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB"/>
+                <enum name="GL_MAX_TEXTURE_COORDS_ARB"/>
+                <enum name="GL_MAX_TEXTURE_IMAGE_UNITS_ARB"/>
+                <enum name="GL_PROGRAM_ERROR_STRING_ARB"/>
+                <enum name="GL_MATRIX0_ARB"/>
+                <enum name="GL_MATRIX1_ARB"/>
+                <enum name="GL_MATRIX2_ARB"/>
+                <enum name="GL_MATRIX3_ARB"/>
+                <enum name="GL_MATRIX4_ARB"/>
+                <enum name="GL_MATRIX5_ARB"/>
+                <enum name="GL_MATRIX6_ARB"/>
+                <enum name="GL_MATRIX7_ARB"/>
+                <enum name="GL_MATRIX8_ARB"/>
+                <enum name="GL_MATRIX9_ARB"/>
+                <enum name="GL_MATRIX10_ARB"/>
+                <enum name="GL_MATRIX11_ARB"/>
+                <enum name="GL_MATRIX12_ARB"/>
+                <enum name="GL_MATRIX13_ARB"/>
+                <enum name="GL_MATRIX14_ARB"/>
+                <enum name="GL_MATRIX15_ARB"/>
+                <enum name="GL_MATRIX16_ARB"/>
+                <enum name="GL_MATRIX17_ARB"/>
+                <enum name="GL_MATRIX18_ARB"/>
+                <enum name="GL_MATRIX19_ARB"/>
+                <enum name="GL_MATRIX20_ARB"/>
+                <enum name="GL_MATRIX21_ARB"/>
+                <enum name="GL_MATRIX22_ARB"/>
+                <enum name="GL_MATRIX23_ARB"/>
+                <enum name="GL_MATRIX24_ARB"/>
+                <enum name="GL_MATRIX25_ARB"/>
+                <enum name="GL_MATRIX26_ARB"/>
+                <enum name="GL_MATRIX27_ARB"/>
+                <enum name="GL_MATRIX28_ARB"/>
+                <enum name="GL_MATRIX29_ARB"/>
+                <enum name="GL_MATRIX30_ARB"/>
+                <enum name="GL_MATRIX31_ARB"/>
+            </require>
+            <require comment="Shared with ARB_vertex_program">
+                <command name="glProgramStringARB"/>
+                <command name="glBindProgramARB"/>
+                <command name="glDeleteProgramsARB"/>
+                <command name="glGenProgramsARB"/>
+                <command name="glProgramEnvParameter4dARB"/>
+                <command name="glProgramEnvParameter4dvARB"/>
+                <command name="glProgramEnvParameter4fARB"/>
+                <command name="glProgramEnvParameter4fvARB"/>
+                <command name="glProgramLocalParameter4dARB"/>
+                <command name="glProgramLocalParameter4dvARB"/>
+                <command name="glProgramLocalParameter4fARB"/>
+                <command name="glProgramLocalParameter4fvARB"/>
+                <command name="glGetProgramEnvParameterdvARB"/>
+                <command name="glGetProgramEnvParameterfvARB"/>
+                <command name="glGetProgramLocalParameterdvARB"/>
+                <command name="glGetProgramLocalParameterfvARB"/>
+                <command name="glGetProgramivARB"/>
+                <command name="glGetProgramStringARB"/>
+                <command name="glIsProgramARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_fragment_program_shadow" supported="gl"/>
+        <extension name="GL_ARB_fragment_shader" supported="gl">
+            <require>
+                <enum name="GL_FRAGMENT_SHADER_ARB"/>
+                <enum name="GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB"/>
+                <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_fragment_shader_interlock" supported="gl|glcore"/>
+        <extension name="GL_ARB_framebuffer_no_attachments" supported="gl|glcore">
+            <require>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_WIDTH"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_HEIGHT"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_SAMPLES"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS"/>
+                <enum name="GL_MAX_FRAMEBUFFER_WIDTH"/>
+                <enum name="GL_MAX_FRAMEBUFFER_HEIGHT"/>
+                <enum name="GL_MAX_FRAMEBUFFER_LAYERS"/>
+                <enum name="GL_MAX_FRAMEBUFFER_SAMPLES"/>
+                <command name="glFramebufferParameteri"/>
+                <command name="glGetFramebufferParameteriv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_framebuffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_INVALID_FRAMEBUFFER_OPERATION"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT"/>
+                <enum name="GL_FRAMEBUFFER_UNDEFINED"/>
+                <enum name="GL_DEPTH_STENCIL_ATTACHMENT"/>
+                <enum name="GL_MAX_RENDERBUFFER_SIZE"/>
+                <enum name="GL_DEPTH_STENCIL"/>
+                <enum name="GL_UNSIGNED_INT_24_8"/>
+                <enum name="GL_DEPTH24_STENCIL8"/>
+                <enum name="GL_TEXTURE_STENCIL_SIZE"/>
+                <enum name="GL_UNSIGNED_NORMALIZED"/>
+                <enum name="GL_FRAMEBUFFER_BINDING"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_BINDING"/>
+                <enum name="GL_RENDERBUFFER_BINDING"/>
+                <enum name="GL_READ_FRAMEBUFFER"/>
+                <enum name="GL_DRAW_FRAMEBUFFER"/>
+                <enum name="GL_READ_FRAMEBUFFER_BINDING"/>
+                <enum name="GL_RENDERBUFFER_SAMPLES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+                <enum name="GL_FRAMEBUFFER_COMPLETE"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER"/>
+                <enum name="GL_FRAMEBUFFER_UNSUPPORTED"/>
+                <enum name="GL_MAX_COLOR_ATTACHMENTS"/>
+                <enum name="GL_COLOR_ATTACHMENT0"/>
+                <enum name="GL_COLOR_ATTACHMENT1"/>
+                <enum name="GL_COLOR_ATTACHMENT2"/>
+                <enum name="GL_COLOR_ATTACHMENT3"/>
+                <enum name="GL_COLOR_ATTACHMENT4"/>
+                <enum name="GL_COLOR_ATTACHMENT5"/>
+                <enum name="GL_COLOR_ATTACHMENT6"/>
+                <enum name="GL_COLOR_ATTACHMENT7"/>
+                <enum name="GL_COLOR_ATTACHMENT8"/>
+                <enum name="GL_COLOR_ATTACHMENT9"/>
+                <enum name="GL_COLOR_ATTACHMENT10"/>
+                <enum name="GL_COLOR_ATTACHMENT11"/>
+                <enum name="GL_COLOR_ATTACHMENT12"/>
+                <enum name="GL_COLOR_ATTACHMENT13"/>
+                <enum name="GL_COLOR_ATTACHMENT14"/>
+                <enum name="GL_COLOR_ATTACHMENT15"/>
+                <enum name="GL_DEPTH_ATTACHMENT"/>
+                <enum name="GL_STENCIL_ATTACHMENT"/>
+                <enum name="GL_FRAMEBUFFER"/>
+                <enum name="GL_RENDERBUFFER"/>
+                <enum name="GL_RENDERBUFFER_WIDTH"/>
+                <enum name="GL_RENDERBUFFER_HEIGHT"/>
+                <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT"/>
+                <enum name="GL_STENCIL_INDEX1"/>
+                <enum name="GL_STENCIL_INDEX4"/>
+                <enum name="GL_STENCIL_INDEX8"/>
+                <enum name="GL_STENCIL_INDEX16"/>
+                <enum name="GL_RENDERBUFFER_RED_SIZE"/>
+                <enum name="GL_RENDERBUFFER_GREEN_SIZE"/>
+                <enum name="GL_RENDERBUFFER_BLUE_SIZE"/>
+                <enum name="GL_RENDERBUFFER_ALPHA_SIZE"/>
+                <enum name="GL_RENDERBUFFER_DEPTH_SIZE"/>
+                <enum name="GL_RENDERBUFFER_STENCIL_SIZE"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"/>
+                <enum name="GL_MAX_SAMPLES"/>
+                <command name="glIsRenderbuffer"/>
+                <command name="glBindRenderbuffer"/>
+                <command name="glDeleteRenderbuffers"/>
+                <command name="glGenRenderbuffers"/>
+                <command name="glRenderbufferStorage"/>
+                <command name="glGetRenderbufferParameteriv"/>
+                <command name="glIsFramebuffer"/>
+                <command name="glBindFramebuffer"/>
+                <command name="glDeleteFramebuffers"/>
+                <command name="glGenFramebuffers"/>
+                <command name="glCheckFramebufferStatus"/>
+                <command name="glFramebufferTexture1D"/>
+                <command name="glFramebufferTexture2D"/>
+                <command name="glFramebufferTexture3D"/>
+                <command name="glFramebufferRenderbuffer"/>
+                <command name="glGetFramebufferAttachmentParameteriv"/>
+                <command name="glGenerateMipmap"/>
+                <command name="glBlitFramebuffer"/>
+                <command name="glRenderbufferStorageMultisample"/>
+                <command name="glFramebufferTextureLayer"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <enum name="GL_INDEX"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_framebuffer_sRGB" supported="gl|glcore">
+            <require>
+                <enum name="GL_FRAMEBUFFER_SRGB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_geometry_shader4" supported="gl|glcore">
+            <require>
+                <enum name="GL_LINES_ADJACENCY_ARB"/>
+                <enum name="GL_LINE_STRIP_ADJACENCY_ARB"/>
+                <enum name="GL_TRIANGLES_ADJACENCY_ARB"/>
+                <enum name="GL_TRIANGLE_STRIP_ADJACENCY_ARB"/>
+                <enum name="GL_PROGRAM_POINT_SIZE_ARB"/>
+                <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_ARB"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_ARB"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB"/>
+                <enum name="GL_GEOMETRY_SHADER_ARB"/>
+                <enum name="GL_GEOMETRY_VERTICES_OUT_ARB"/>
+                <enum name="GL_GEOMETRY_INPUT_TYPE_ARB"/>
+                <enum name="GL_GEOMETRY_OUTPUT_TYPE_ARB"/>
+                <enum name="GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB"/>
+                <enum name="GL_MAX_VERTEX_VARYING_COMPONENTS_ARB"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB"/>
+                <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB"/>
+                <enum name="GL_MAX_VARYING_COMPONENTS"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"/>
+                <command name="glProgramParameteriARB"/>
+                <command name="glFramebufferTextureARB"/>
+                <command name="glFramebufferTextureLayerARB"/>
+                <command name="glFramebufferTextureFaceARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_get_program_binary" supported="gl|glcore">
+            <require>
+                <enum name="GL_PROGRAM_BINARY_RETRIEVABLE_HINT"/>
+                <enum name="GL_PROGRAM_BINARY_LENGTH"/>
+                <enum name="GL_NUM_PROGRAM_BINARY_FORMATS"/>
+                <enum name="GL_PROGRAM_BINARY_FORMATS"/>
+                <command name="glGetProgramBinary"/>
+                <command name="glProgramBinary"/>
+                <command name="glProgramParameteri"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_get_texture_sub_image" supported="gl|glcore">
+            <require>
+                <command name="glGetTextureSubImage"/>
+                <command name="glGetCompressedTextureSubImage"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_gl_spirv" supported="gl|glcore">
+            <require>
+                <enum name="GL_SHADER_BINARY_FORMAT_SPIR_V_ARB"/>
+                <enum name="GL_SPIR_V_BINARY_ARB"/>
+                <command name="glSpecializeShaderARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_gpu_shader5" supported="gl|glcore">
+            <require>
+                <enum name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS"/>
+                <enum name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET"/>
+                <enum name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET"/>
+                <enum name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS"/>
+                <enum name="GL_MAX_VERTEX_STREAMS"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_gpu_shader_fp64" supported="gl|glcore">
+            <require>
+                <enum name="GL_DOUBLE"/>
+                <enum name="GL_DOUBLE_VEC2"/>
+                <enum name="GL_DOUBLE_VEC3"/>
+                <enum name="GL_DOUBLE_VEC4"/>
+                <enum name="GL_DOUBLE_MAT2"/>
+                <enum name="GL_DOUBLE_MAT3"/>
+                <enum name="GL_DOUBLE_MAT4"/>
+                <enum name="GL_DOUBLE_MAT2x3"/>
+                <enum name="GL_DOUBLE_MAT2x4"/>
+                <enum name="GL_DOUBLE_MAT3x2"/>
+                <enum name="GL_DOUBLE_MAT3x4"/>
+                <enum name="GL_DOUBLE_MAT4x2"/>
+                <enum name="GL_DOUBLE_MAT4x3"/>
+                <command name="glUniform1d"/>
+                <command name="glUniform2d"/>
+                <command name="glUniform3d"/>
+                <command name="glUniform4d"/>
+                <command name="glUniform1dv"/>
+                <command name="glUniform2dv"/>
+                <command name="glUniform3dv"/>
+                <command name="glUniform4dv"/>
+                <command name="glUniformMatrix2dv"/>
+                <command name="glUniformMatrix3dv"/>
+                <command name="glUniformMatrix4dv"/>
+                <command name="glUniformMatrix2x3dv"/>
+                <command name="glUniformMatrix2x4dv"/>
+                <command name="glUniformMatrix3x2dv"/>
+                <command name="glUniformMatrix3x4dv"/>
+                <command name="glUniformMatrix4x2dv"/>
+                <command name="glUniformMatrix4x3dv"/>
+                <command name="glGetUniformdv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_gpu_shader_int64" supported="gl|glcore">
+            <require>
+                <enum name="GL_INT64_ARB"/>
+                <enum name="GL_UNSIGNED_INT64_ARB"/>
+                <enum name="GL_INT64_VEC2_ARB"/>
+                <enum name="GL_INT64_VEC3_ARB"/>
+                <enum name="GL_INT64_VEC4_ARB"/>
+                <enum name="GL_UNSIGNED_INT64_VEC2_ARB"/>
+                <enum name="GL_UNSIGNED_INT64_VEC3_ARB"/>
+                <enum name="GL_UNSIGNED_INT64_VEC4_ARB"/>
+                <command name="glUniform1i64ARB"/>
+                <command name="glUniform2i64ARB"/>
+                <command name="glUniform3i64ARB"/>
+                <command name="glUniform4i64ARB"/>
+                <command name="glUniform1i64vARB"/>
+                <command name="glUniform2i64vARB"/>
+                <command name="glUniform3i64vARB"/>
+                <command name="glUniform4i64vARB"/>
+                <command name="glUniform1ui64ARB"/>
+                <command name="glUniform2ui64ARB"/>
+                <command name="glUniform3ui64ARB"/>
+                <command name="glUniform4ui64ARB"/>
+                <command name="glUniform1ui64vARB"/>
+                <command name="glUniform2ui64vARB"/>
+                <command name="glUniform3ui64vARB"/>
+                <command name="glUniform4ui64vARB"/>
+                <command name="glGetUniformi64vARB"/>
+                <command name="glGetUniformui64vARB"/>
+                <command name="glGetnUniformi64vARB"/>
+                <command name="glGetnUniformui64vARB"/>
+                <command name="glProgramUniform1i64ARB"/>
+                <command name="glProgramUniform2i64ARB"/>
+                <command name="glProgramUniform3i64ARB"/>
+                <command name="glProgramUniform4i64ARB"/>
+                <command name="glProgramUniform1i64vARB"/>
+                <command name="glProgramUniform2i64vARB"/>
+                <command name="glProgramUniform3i64vARB"/>
+                <command name="glProgramUniform4i64vARB"/>
+                <command name="glProgramUniform1ui64ARB"/>
+                <command name="glProgramUniform2ui64ARB"/>
+                <command name="glProgramUniform3ui64ARB"/>
+                <command name="glProgramUniform4ui64ARB"/>
+                <command name="glProgramUniform1ui64vARB"/>
+                <command name="glProgramUniform2ui64vARB"/>
+                <command name="glProgramUniform3ui64vARB"/>
+                <command name="glProgramUniform4ui64vARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_half_float_pixel" supported="gl">
+            <require>
+                <type name="GLhalfARB"/>
+                <enum name="GL_HALF_FLOAT_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_half_float_vertex" supported="gl|glcore">
+            <require>
+                <type name="GLhalf"/>
+                <enum name="GL_HALF_FLOAT"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_imaging" supported="gl|glcore" comment="Now treating ARB_imaging as an extension, not a GL API version">
+            <require>
+                <enum name="GL_BLEND_COLOR"/>
+                <enum name="GL_BLEND_EQUATION"/>
+                <enum name="GL_CONSTANT_COLOR"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+                <enum name="GL_CONSTANT_ALPHA"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+                <enum name="GL_FUNC_ADD"/>
+                <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+                <enum name="GL_FUNC_SUBTRACT"/>
+                <enum name="GL_MIN"/>
+                <enum name="GL_MAX"/>
+                <command name="glBlendColor"/>
+                <command name="glBlendEquation"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <enum name="GL_CONVOLUTION_1D"/>
+                <enum name="GL_CONVOLUTION_2D"/>
+                <enum name="GL_SEPARABLE_2D"/>
+                <enum name="GL_CONVOLUTION_BORDER_MODE"/>
+                <enum name="GL_CONVOLUTION_FILTER_SCALE"/>
+                <enum name="GL_CONVOLUTION_FILTER_BIAS"/>
+                <enum name="GL_REDUCE"/>
+                <enum name="GL_CONVOLUTION_FORMAT"/>
+                <enum name="GL_CONVOLUTION_WIDTH"/>
+                <enum name="GL_CONVOLUTION_HEIGHT"/>
+                <enum name="GL_MAX_CONVOLUTION_WIDTH"/>
+                <enum name="GL_MAX_CONVOLUTION_HEIGHT"/>
+                <enum name="GL_POST_CONVOLUTION_RED_SCALE"/>
+                <enum name="GL_POST_CONVOLUTION_GREEN_SCALE"/>
+                <enum name="GL_POST_CONVOLUTION_BLUE_SCALE"/>
+                <enum name="GL_POST_CONVOLUTION_ALPHA_SCALE"/>
+                <enum name="GL_POST_CONVOLUTION_RED_BIAS"/>
+                <enum name="GL_POST_CONVOLUTION_GREEN_BIAS"/>
+                <enum name="GL_POST_CONVOLUTION_BLUE_BIAS"/>
+                <enum name="GL_POST_CONVOLUTION_ALPHA_BIAS"/>
+                <enum name="GL_HISTOGRAM"/>
+                <enum name="GL_PROXY_HISTOGRAM"/>
+                <enum name="GL_HISTOGRAM_WIDTH"/>
+                <enum name="GL_HISTOGRAM_FORMAT"/>
+                <enum name="GL_HISTOGRAM_RED_SIZE"/>
+                <enum name="GL_HISTOGRAM_GREEN_SIZE"/>
+                <enum name="GL_HISTOGRAM_BLUE_SIZE"/>
+                <enum name="GL_HISTOGRAM_ALPHA_SIZE"/>
+                <enum name="GL_HISTOGRAM_LUMINANCE_SIZE"/>
+                <enum name="GL_HISTOGRAM_SINK"/>
+                <enum name="GL_MINMAX"/>
+                <enum name="GL_MINMAX_FORMAT"/>
+                <enum name="GL_MINMAX_SINK"/>
+                <enum name="GL_TABLE_TOO_LARGE"/>
+                <enum name="GL_COLOR_MATRIX"/>
+                <enum name="GL_COLOR_MATRIX_STACK_DEPTH"/>
+                <enum name="GL_MAX_COLOR_MATRIX_STACK_DEPTH"/>
+                <enum name="GL_POST_COLOR_MATRIX_RED_SCALE"/>
+                <enum name="GL_POST_COLOR_MATRIX_GREEN_SCALE"/>
+                <enum name="GL_POST_COLOR_MATRIX_BLUE_SCALE"/>
+                <enum name="GL_POST_COLOR_MATRIX_ALPHA_SCALE"/>
+                <enum name="GL_POST_COLOR_MATRIX_RED_BIAS"/>
+                <enum name="GL_POST_COLOR_MATRIX_GREEN_BIAS"/>
+                <enum name="GL_POST_COLOR_MATRIX_BLUE_BIAS"/>
+                <enum name="GL_POST_COLOR_MATRIX_ALPHA_BIAS"/>
+                <enum name="GL_COLOR_TABLE"/>
+                <enum name="GL_POST_CONVOLUTION_COLOR_TABLE"/>
+                <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE"/>
+                <enum name="GL_PROXY_COLOR_TABLE"/>
+                <enum name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE"/>
+                <enum name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE"/>
+                <enum name="GL_COLOR_TABLE_SCALE"/>
+                <enum name="GL_COLOR_TABLE_BIAS"/>
+                <enum name="GL_COLOR_TABLE_FORMAT"/>
+                <enum name="GL_COLOR_TABLE_WIDTH"/>
+                <enum name="GL_COLOR_TABLE_RED_SIZE"/>
+                <enum name="GL_COLOR_TABLE_GREEN_SIZE"/>
+                <enum name="GL_COLOR_TABLE_BLUE_SIZE"/>
+                <enum name="GL_COLOR_TABLE_ALPHA_SIZE"/>
+                <enum name="GL_COLOR_TABLE_LUMINANCE_SIZE"/>
+                <enum name="GL_COLOR_TABLE_INTENSITY_SIZE"/>
+                <enum name="GL_CONSTANT_BORDER"/>
+                <enum name="GL_REPLICATE_BORDER"/>
+                <enum name="GL_CONVOLUTION_BORDER_COLOR"/>
+                <command name="glColorTable"/>
+                <command name="glColorTableParameterfv"/>
+                <command name="glColorTableParameteriv"/>
+                <command name="glCopyColorTable"/>
+                <command name="glGetColorTable"/>
+                <command name="glGetColorTableParameterfv"/>
+                <command name="glGetColorTableParameteriv"/>
+                <command name="glColorSubTable"/>
+                <command name="glCopyColorSubTable"/>
+                <command name="glConvolutionFilter1D"/>
+                <command name="glConvolutionFilter2D"/>
+                <command name="glConvolutionParameterf"/>
+                <command name="glConvolutionParameterfv"/>
+                <command name="glConvolutionParameteri"/>
+                <command name="glConvolutionParameteriv"/>
+                <command name="glCopyConvolutionFilter1D"/>
+                <command name="glCopyConvolutionFilter2D"/>
+                <command name="glGetConvolutionFilter"/>
+                <command name="glGetConvolutionParameterfv"/>
+                <command name="glGetConvolutionParameteriv"/>
+                <command name="glGetSeparableFilter"/>
+                <command name="glSeparableFilter2D"/>
+                <command name="glGetHistogram"/>
+                <command name="glGetHistogramParameterfv"/>
+                <command name="glGetHistogramParameteriv"/>
+                <command name="glGetMinmax"/>
+                <command name="glGetMinmaxParameterfv"/>
+                <command name="glGetMinmaxParameteriv"/>
+                <command name="glHistogram"/>
+                <command name="glMinmax"/>
+                <command name="glResetHistogram"/>
+                <command name="glResetMinmax"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_indirect_parameters" supported="gl|glcore">
+            <require>
+                <enum name="GL_PARAMETER_BUFFER_ARB"/>
+                <enum name="GL_PARAMETER_BUFFER_BINDING_ARB"/>
+                <command name="glMultiDrawArraysIndirectCountARB"/>
+                <command name="glMultiDrawElementsIndirectCountARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_instanced_arrays" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ARB"/>
+                <command name="glVertexAttribDivisorARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_internalformat_query" supported="gl|glcore">
+            <require>
+                <enum name="GL_NUM_SAMPLE_COUNTS"/>
+                <command name="glGetInternalformativ"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_internalformat_query2" supported="gl|glcore">
+            <require>
+                <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+                <enum name="GL_NUM_SAMPLE_COUNTS"/>
+                <enum name="GL_RENDERBUFFER"/>
+                <enum name="GL_SAMPLES"/>
+                <enum name="GL_TEXTURE_1D"/>
+                <enum name="GL_TEXTURE_1D_ARRAY"/>
+                <enum name="GL_TEXTURE_2D"/>
+                <enum name="GL_TEXTURE_2D_ARRAY"/>
+                <enum name="GL_TEXTURE_3D"/>
+                <enum name="GL_TEXTURE_CUBE_MAP"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARRAY"/>
+                <enum name="GL_TEXTURE_RECTANGLE"/>
+                <enum name="GL_TEXTURE_BUFFER"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_TEXTURE_COMPRESSED"/>
+                <enum name="GL_INTERNALFORMAT_SUPPORTED"/>
+                <enum name="GL_INTERNALFORMAT_PREFERRED"/>
+                <enum name="GL_INTERNALFORMAT_RED_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_GREEN_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_BLUE_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_ALPHA_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_DEPTH_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_STENCIL_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_SHARED_SIZE"/>
+                <enum name="GL_INTERNALFORMAT_RED_TYPE"/>
+                <enum name="GL_INTERNALFORMAT_GREEN_TYPE"/>
+                <enum name="GL_INTERNALFORMAT_BLUE_TYPE"/>
+                <enum name="GL_INTERNALFORMAT_ALPHA_TYPE"/>
+                <enum name="GL_INTERNALFORMAT_DEPTH_TYPE"/>
+                <enum name="GL_INTERNALFORMAT_STENCIL_TYPE"/>
+                <enum name="GL_MAX_WIDTH"/>
+                <enum name="GL_MAX_HEIGHT"/>
+                <enum name="GL_MAX_DEPTH"/>
+                <enum name="GL_MAX_LAYERS"/>
+                <enum name="GL_MAX_COMBINED_DIMENSIONS"/>
+                <enum name="GL_COLOR_COMPONENTS"/>
+                <enum name="GL_DEPTH_COMPONENTS"/>
+                <enum name="GL_STENCIL_COMPONENTS"/>
+                <enum name="GL_COLOR_RENDERABLE"/>
+                <enum name="GL_DEPTH_RENDERABLE"/>
+                <enum name="GL_STENCIL_RENDERABLE"/>
+                <enum name="GL_FRAMEBUFFER_RENDERABLE"/>
+                <enum name="GL_FRAMEBUFFER_RENDERABLE_LAYERED"/>
+                <enum name="GL_FRAMEBUFFER_BLEND"/>
+                <enum name="GL_READ_PIXELS"/>
+                <enum name="GL_READ_PIXELS_FORMAT"/>
+                <enum name="GL_READ_PIXELS_TYPE"/>
+                <enum name="GL_TEXTURE_IMAGE_FORMAT"/>
+                <enum name="GL_TEXTURE_IMAGE_TYPE"/>
+                <enum name="GL_GET_TEXTURE_IMAGE_FORMAT"/>
+                <enum name="GL_GET_TEXTURE_IMAGE_TYPE"/>
+                <enum name="GL_MIPMAP"/>
+                <enum name="GL_MANUAL_GENERATE_MIPMAP"/>
+                <enum name="GL_AUTO_GENERATE_MIPMAP"/>
+                <enum name="GL_COLOR_ENCODING"/>
+                <enum name="GL_SRGB_READ"/>
+                <enum name="GL_SRGB_WRITE"/>
+                <enum name="GL_SRGB_DECODE_ARB"/>
+                <enum name="GL_FILTER"/>
+                <enum name="GL_VERTEX_TEXTURE"/>
+                <enum name="GL_TESS_CONTROL_TEXTURE"/>
+                <enum name="GL_TESS_EVALUATION_TEXTURE"/>
+                <enum name="GL_GEOMETRY_TEXTURE"/>
+                <enum name="GL_FRAGMENT_TEXTURE"/>
+                <enum name="GL_COMPUTE_TEXTURE"/>
+                <enum name="GL_TEXTURE_SHADOW"/>
+                <enum name="GL_TEXTURE_GATHER"/>
+                <enum name="GL_TEXTURE_GATHER_SHADOW"/>
+                <enum name="GL_SHADER_IMAGE_LOAD"/>
+                <enum name="GL_SHADER_IMAGE_STORE"/>
+                <enum name="GL_SHADER_IMAGE_ATOMIC"/>
+                <enum name="GL_IMAGE_TEXEL_SIZE"/>
+                <enum name="GL_IMAGE_COMPATIBILITY_CLASS"/>
+                <enum name="GL_IMAGE_PIXEL_FORMAT"/>
+                <enum name="GL_IMAGE_PIXEL_TYPE"/>
+                <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST"/>
+                <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST"/>
+                <enum name="GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE"/>
+                <enum name="GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE"/>
+                <enum name="GL_TEXTURE_COMPRESSED_BLOCK_WIDTH"/>
+                <enum name="GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT"/>
+                <enum name="GL_TEXTURE_COMPRESSED_BLOCK_SIZE"/>
+                <enum name="GL_CLEAR_BUFFER"/>
+                <enum name="GL_TEXTURE_VIEW"/>
+                <enum name="GL_VIEW_COMPATIBILITY_CLASS"/>
+                <enum name="GL_FULL_SUPPORT"/>
+                <enum name="GL_CAVEAT_SUPPORT"/>
+                <enum name="GL_IMAGE_CLASS_4_X_32"/>
+                <enum name="GL_IMAGE_CLASS_2_X_32"/>
+                <enum name="GL_IMAGE_CLASS_1_X_32"/>
+                <enum name="GL_IMAGE_CLASS_4_X_16"/>
+                <enum name="GL_IMAGE_CLASS_2_X_16"/>
+                <enum name="GL_IMAGE_CLASS_1_X_16"/>
+                <enum name="GL_IMAGE_CLASS_4_X_8"/>
+                <enum name="GL_IMAGE_CLASS_2_X_8"/>
+                <enum name="GL_IMAGE_CLASS_1_X_8"/>
+                <enum name="GL_IMAGE_CLASS_11_11_10"/>
+                <enum name="GL_IMAGE_CLASS_10_10_10_2"/>
+                <enum name="GL_VIEW_CLASS_128_BITS"/>
+                <enum name="GL_VIEW_CLASS_96_BITS"/>
+                <enum name="GL_VIEW_CLASS_64_BITS"/>
+                <enum name="GL_VIEW_CLASS_48_BITS"/>
+                <enum name="GL_VIEW_CLASS_32_BITS"/>
+                <enum name="GL_VIEW_CLASS_24_BITS"/>
+                <enum name="GL_VIEW_CLASS_16_BITS"/>
+                <enum name="GL_VIEW_CLASS_8_BITS"/>
+                <enum name="GL_VIEW_CLASS_S3TC_DXT1_RGB"/>
+                <enum name="GL_VIEW_CLASS_S3TC_DXT1_RGBA"/>
+                <enum name="GL_VIEW_CLASS_S3TC_DXT3_RGBA"/>
+                <enum name="GL_VIEW_CLASS_S3TC_DXT5_RGBA"/>
+                <enum name="GL_VIEW_CLASS_RGTC1_RED"/>
+                <enum name="GL_VIEW_CLASS_RGTC2_RG"/>
+                <enum name="GL_VIEW_CLASS_BPTC_UNORM"/>
+                <enum name="GL_VIEW_CLASS_BPTC_FLOAT"/>
+            </require>
+            <require comment="Supported only if GL_ARB_ES3_compatibility is supported">
+                <enum name="GL_VIEW_CLASS_EAC_R11"/>
+                <enum name="GL_VIEW_CLASS_EAC_RG11"/>
+                <enum name="GL_VIEW_CLASS_ETC2_RGB"/>
+                <enum name="GL_VIEW_CLASS_ETC2_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ETC2_EAC_RGBA"/>
+            </require>
+            <require comment="Supported only if GL_KHR_texture_compression_astc_ldr is supported">
+                <enum name="GL_VIEW_CLASS_ASTC_4x4_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_5x4_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_5x5_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_6x5_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_6x6_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_8x5_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_8x6_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_8x8_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_10x5_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_10x6_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_10x8_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_10x10_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_12x10_RGBA"/>
+                <enum name="GL_VIEW_CLASS_ASTC_12x12_RGBA"/>
+                <command name="glGetInternalformati64v"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_invalidate_subdata" supported="gl|glcore">
+            <require>
+                <command name="glInvalidateTexSubImage"/>
+                <command name="glInvalidateTexImage"/>
+                <command name="glInvalidateBufferSubData"/>
+                <command name="glInvalidateBufferData"/>
+                <command name="glInvalidateFramebuffer"/>
+                <command name="glInvalidateSubFramebuffer"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_map_buffer_alignment" supported="gl|glcore">
+            <require>
+                <enum name="GL_MIN_MAP_BUFFER_ALIGNMENT"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_map_buffer_range" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAP_READ_BIT"/>
+                <enum name="GL_MAP_WRITE_BIT"/>
+                <enum name="GL_MAP_INVALIDATE_RANGE_BIT"/>
+                <enum name="GL_MAP_INVALIDATE_BUFFER_BIT"/>
+                <enum name="GL_MAP_FLUSH_EXPLICIT_BIT"/>
+                <enum name="GL_MAP_UNSYNCHRONIZED_BIT"/>
+                <command name="glMapBufferRange"/>
+                <command name="glFlushMappedBufferRange"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_matrix_palette" supported="gl">
+            <require>
+                <enum name="GL_MATRIX_PALETTE_ARB"/>
+                <enum name="GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB"/>
+                <enum name="GL_MAX_PALETTE_MATRICES_ARB"/>
+                <enum name="GL_CURRENT_PALETTE_MATRIX_ARB"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_ARB"/>
+                <enum name="GL_CURRENT_MATRIX_INDEX_ARB"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_SIZE_ARB"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_TYPE_ARB"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_STRIDE_ARB"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_POINTER_ARB"/>
+                <command name="glCurrentPaletteMatrixARB"/>
+                <command name="glMatrixIndexubvARB"/>
+                <command name="glMatrixIndexusvARB"/>
+                <command name="glMatrixIndexuivARB"/>
+                <command name="glMatrixIndexPointerARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_multi_bind" supported="gl|glcore">
+            <require>
+                <command name="glBindBuffersBase"/>
+                <command name="glBindBuffersRange"/>
+                <command name="glBindTextures"/>
+                <command name="glBindSamplers"/>
+                <command name="glBindImageTextures"/>
+                <command name="glBindVertexBuffers"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_multi_draw_indirect" supported="gl|glcore">
+            <require>
+                <command name="glMultiDrawArraysIndirect"/>
+                <command name="glMultiDrawElementsIndirect"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_multisample" supported="gl">
+            <require>
+                <enum name="GL_MULTISAMPLE_ARB"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_COVERAGE_ARB"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_ONE_ARB"/>
+                <enum name="GL_SAMPLE_COVERAGE_ARB"/>
+                <enum name="GL_SAMPLE_BUFFERS_ARB"/>
+                <enum name="GL_SAMPLES_ARB"/>
+                <enum name="GL_SAMPLE_COVERAGE_VALUE_ARB"/>
+                <enum name="GL_SAMPLE_COVERAGE_INVERT_ARB"/>
+                <enum name="GL_MULTISAMPLE_BIT_ARB"/>
+                <command name="glSampleCoverageARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_multitexture" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE0_ARB"/>
+                <enum name="GL_TEXTURE1_ARB"/>
+                <enum name="GL_TEXTURE2_ARB"/>
+                <enum name="GL_TEXTURE3_ARB"/>
+                <enum name="GL_TEXTURE4_ARB"/>
+                <enum name="GL_TEXTURE5_ARB"/>
+                <enum name="GL_TEXTURE6_ARB"/>
+                <enum name="GL_TEXTURE7_ARB"/>
+                <enum name="GL_TEXTURE8_ARB"/>
+                <enum name="GL_TEXTURE9_ARB"/>
+                <enum name="GL_TEXTURE10_ARB"/>
+                <enum name="GL_TEXTURE11_ARB"/>
+                <enum name="GL_TEXTURE12_ARB"/>
+                <enum name="GL_TEXTURE13_ARB"/>
+                <enum name="GL_TEXTURE14_ARB"/>
+                <enum name="GL_TEXTURE15_ARB"/>
+                <enum name="GL_TEXTURE16_ARB"/>
+                <enum name="GL_TEXTURE17_ARB"/>
+                <enum name="GL_TEXTURE18_ARB"/>
+                <enum name="GL_TEXTURE19_ARB"/>
+                <enum name="GL_TEXTURE20_ARB"/>
+                <enum name="GL_TEXTURE21_ARB"/>
+                <enum name="GL_TEXTURE22_ARB"/>
+                <enum name="GL_TEXTURE23_ARB"/>
+                <enum name="GL_TEXTURE24_ARB"/>
+                <enum name="GL_TEXTURE25_ARB"/>
+                <enum name="GL_TEXTURE26_ARB"/>
+                <enum name="GL_TEXTURE27_ARB"/>
+                <enum name="GL_TEXTURE28_ARB"/>
+                <enum name="GL_TEXTURE29_ARB"/>
+                <enum name="GL_TEXTURE30_ARB"/>
+                <enum name="GL_TEXTURE31_ARB"/>
+                <enum name="GL_ACTIVE_TEXTURE_ARB"/>
+                <enum name="GL_CLIENT_ACTIVE_TEXTURE_ARB"/>
+                <enum name="GL_MAX_TEXTURE_UNITS_ARB"/>
+                <command name="glActiveTextureARB"/>
+                <command name="glClientActiveTextureARB"/>
+                <command name="glMultiTexCoord1dARB"/>
+                <command name="glMultiTexCoord1dvARB"/>
+                <command name="glMultiTexCoord1fARB"/>
+                <command name="glMultiTexCoord1fvARB"/>
+                <command name="glMultiTexCoord1iARB"/>
+                <command name="glMultiTexCoord1ivARB"/>
+                <command name="glMultiTexCoord1sARB"/>
+                <command name="glMultiTexCoord1svARB"/>
+                <command name="glMultiTexCoord2dARB"/>
+                <command name="glMultiTexCoord2dvARB"/>
+                <command name="glMultiTexCoord2fARB"/>
+                <command name="glMultiTexCoord2fvARB"/>
+                <command name="glMultiTexCoord2iARB"/>
+                <command name="glMultiTexCoord2ivARB"/>
+                <command name="glMultiTexCoord2sARB"/>
+                <command name="glMultiTexCoord2svARB"/>
+                <command name="glMultiTexCoord3dARB"/>
+                <command name="glMultiTexCoord3dvARB"/>
+                <command name="glMultiTexCoord3fARB"/>
+                <command name="glMultiTexCoord3fvARB"/>
+                <command name="glMultiTexCoord3iARB"/>
+                <command name="glMultiTexCoord3ivARB"/>
+                <command name="glMultiTexCoord3sARB"/>
+                <command name="glMultiTexCoord3svARB"/>
+                <command name="glMultiTexCoord4dARB"/>
+                <command name="glMultiTexCoord4dvARB"/>
+                <command name="glMultiTexCoord4fARB"/>
+                <command name="glMultiTexCoord4fvARB"/>
+                <command name="glMultiTexCoord4iARB"/>
+                <command name="glMultiTexCoord4ivARB"/>
+                <command name="glMultiTexCoord4sARB"/>
+                <command name="glMultiTexCoord4svARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_occlusion_query" supported="gl">
+            <require>
+                <enum name="GL_QUERY_COUNTER_BITS_ARB"/>
+                <enum name="GL_CURRENT_QUERY_ARB"/>
+                <enum name="GL_QUERY_RESULT_ARB"/>
+                <enum name="GL_QUERY_RESULT_AVAILABLE_ARB"/>
+                <enum name="GL_SAMPLES_PASSED_ARB"/>
+                <command name="glGenQueriesARB"/>
+                <command name="glDeleteQueriesARB"/>
+                <command name="glIsQueryARB"/>
+                <command name="glBeginQueryARB"/>
+                <command name="glEndQueryARB"/>
+                <command name="glGetQueryivARB"/>
+                <command name="glGetQueryObjectivARB"/>
+                <command name="glGetQueryObjectuivARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_occlusion_query2" supported="gl|glcore">
+            <require>
+                <enum name="GL_ANY_SAMPLES_PASSED"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_parallel_shader_compile" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_SHADER_COMPILER_THREADS_ARB"/>
+                <enum name="GL_COMPLETION_STATUS_ARB"/>
+                <command name="glMaxShaderCompilerThreadsARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_pipeline_statistics_query" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTICES_SUBMITTED_ARB"/>
+                <enum name="GL_PRIMITIVES_SUBMITTED_ARB"/>
+                <enum name="GL_VERTEX_SHADER_INVOCATIONS_ARB"/>
+                <enum name="GL_TESS_CONTROL_SHADER_PATCHES_ARB"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB"/>
+                <enum name="GL_GEOMETRY_SHADER_INVOCATIONS"/>
+                <enum name="GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB"/>
+                <enum name="GL_FRAGMENT_SHADER_INVOCATIONS_ARB"/>
+                <enum name="GL_COMPUTE_SHADER_INVOCATIONS_ARB"/>
+                <enum name="GL_CLIPPING_INPUT_PRIMITIVES_ARB"/>
+                <enum name="GL_CLIPPING_OUTPUT_PRIMITIVES_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_pixel_buffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_PIXEL_PACK_BUFFER_ARB"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_ARB"/>
+                <enum name="GL_PIXEL_PACK_BUFFER_BINDING_ARB"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_point_parameters" supported="gl">
+            <require>
+                <enum name="GL_POINT_SIZE_MIN_ARB"/>
+                <enum name="GL_POINT_SIZE_MAX_ARB"/>
+                <enum name="GL_POINT_FADE_THRESHOLD_SIZE_ARB"/>
+                <enum name="GL_POINT_DISTANCE_ATTENUATION_ARB"/>
+                <command name="glPointParameterfARB"/>
+                <command name="glPointParameterfvARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_point_sprite" supported="gl">
+            <require>
+                <enum name="GL_POINT_SPRITE_ARB"/>
+                <enum name="GL_COORD_REPLACE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_polygon_offset_clamp" supported="gl|glcore">
+            <require>
+                <enum name="GL_POLYGON_OFFSET_CLAMP"/>
+                <command name="glPolygonOffsetClamp"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_post_depth_coverage" supported="gl|glcore"/>
+        <extension name="GL_ARB_program_interface_query" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNIFORM"/>
+                <enum name="GL_UNIFORM_BLOCK"/>
+                <enum name="GL_PROGRAM_INPUT"/>
+                <enum name="GL_PROGRAM_OUTPUT"/>
+                <enum name="GL_BUFFER_VARIABLE"/>
+                <enum name="GL_SHADER_STORAGE_BLOCK"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+                <enum name="GL_VERTEX_SUBROUTINE"/>
+                <enum name="GL_TESS_CONTROL_SUBROUTINE"/>
+                <enum name="GL_TESS_EVALUATION_SUBROUTINE"/>
+                <enum name="GL_GEOMETRY_SUBROUTINE"/>
+                <enum name="GL_FRAGMENT_SUBROUTINE"/>
+                <enum name="GL_COMPUTE_SUBROUTINE"/>
+                <enum name="GL_VERTEX_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_TESS_CONTROL_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_TESS_EVALUATION_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_GEOMETRY_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_FRAGMENT_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_COMPUTE_SUBROUTINE_UNIFORM"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_VARYING"/>
+                <enum name="GL_ACTIVE_RESOURCES"/>
+                <enum name="GL_MAX_NAME_LENGTH"/>
+                <enum name="GL_MAX_NUM_ACTIVE_VARIABLES"/>
+                <enum name="GL_MAX_NUM_COMPATIBLE_SUBROUTINES"/>
+                <enum name="GL_NAME_LENGTH"/>
+                <enum name="GL_TYPE"/>
+                <enum name="GL_ARRAY_SIZE"/>
+                <enum name="GL_OFFSET"/>
+                <enum name="GL_BLOCK_INDEX"/>
+                <enum name="GL_ARRAY_STRIDE"/>
+                <enum name="GL_MATRIX_STRIDE"/>
+                <enum name="GL_IS_ROW_MAJOR"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_INDEX"/>
+                <enum name="GL_BUFFER_BINDING"/>
+                <enum name="GL_BUFFER_DATA_SIZE"/>
+                <enum name="GL_NUM_ACTIVE_VARIABLES"/>
+                <enum name="GL_ACTIVE_VARIABLES"/>
+                <enum name="GL_REFERENCED_BY_VERTEX_SHADER"/>
+                <enum name="GL_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+                <enum name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+                <enum name="GL_REFERENCED_BY_GEOMETRY_SHADER"/>
+                <enum name="GL_REFERENCED_BY_FRAGMENT_SHADER"/>
+                <enum name="GL_REFERENCED_BY_COMPUTE_SHADER"/>
+                <enum name="GL_TOP_LEVEL_ARRAY_SIZE"/>
+                <enum name="GL_TOP_LEVEL_ARRAY_STRIDE"/>
+                <enum name="GL_LOCATION"/>
+                <enum name="GL_LOCATION_INDEX"/>
+                <enum name="GL_IS_PER_PATCH"/>
+                <enum name="GL_NUM_COMPATIBLE_SUBROUTINES"/>
+                <enum name="GL_COMPATIBLE_SUBROUTINES"/>
+                <command name="glGetProgramInterfaceiv"/>
+                <command name="glGetProgramResourceIndex"/>
+                <command name="glGetProgramResourceName"/>
+                <command name="glGetProgramResourceiv"/>
+                <command name="glGetProgramResourceLocation"/>
+                <command name="glGetProgramResourceLocationIndex"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_provoking_vertex" supported="gl|glcore">
+            <require>
+                <enum name="GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION"/>
+                <enum name="GL_FIRST_VERTEX_CONVENTION"/>
+                <enum name="GL_LAST_VERTEX_CONVENTION"/>
+                <enum name="GL_PROVOKING_VERTEX"/>
+                <command name="glProvokingVertex"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_query_buffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_QUERY_BUFFER"/>
+                <enum name="GL_QUERY_BUFFER_BARRIER_BIT"/>
+                <enum name="GL_QUERY_BUFFER_BINDING"/>
+                <enum name="GL_QUERY_RESULT_NO_WAIT"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_robust_buffer_access_behavior" supported="gl|glcore"/>
+        <extension name="GL_ARB_robustness" supported="gl|glcore">
+            <require>
+                <enum name="GL_NO_ERROR"/>
+                <enum name="GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB"/>
+                <enum name="GL_LOSE_CONTEXT_ON_RESET_ARB"/>
+                <enum name="GL_GUILTY_CONTEXT_RESET_ARB"/>
+                <enum name="GL_INNOCENT_CONTEXT_RESET_ARB"/>
+                <enum name="GL_UNKNOWN_CONTEXT_RESET_ARB"/>
+                <enum name="GL_RESET_NOTIFICATION_STRATEGY_ARB"/>
+                <enum name="GL_NO_RESET_NOTIFICATION_ARB"/>
+                <command name="glGetGraphicsResetStatusARB"/>
+                <command name="glGetnTexImageARB"/>
+                <command name="glReadnPixelsARB"/>
+                <command name="glGetnCompressedTexImageARB"/>
+                <command name="glGetnUniformfvARB"/>
+                <command name="glGetnUniformivARB"/>
+                <command name="glGetnUniformuivARB"/>
+                <command name="glGetnUniformdvARB"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <command name="glGetnMapdvARB"/>
+                <command name="glGetnMapfvARB"/>
+                <command name="glGetnMapivARB"/>
+                <command name="glGetnPixelMapfvARB"/>
+                <command name="glGetnPixelMapuivARB"/>
+                <command name="glGetnPixelMapusvARB"/>
+                <command name="glGetnPolygonStippleARB"/>
+                <command name="glGetnColorTableARB"/>
+                <command name="glGetnConvolutionFilterARB"/>
+                <command name="glGetnSeparableFilterARB"/>
+                <command name="glGetnHistogramARB"/>
+                <command name="glGetnMinmaxARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_robustness_isolation" supported="gl|glcore"/>
+        <extension name="GL_ARB_sample_locations" supported="gl|glcore">
+            <require>
+                <enum name="GL_SAMPLE_LOCATION_SUBPIXEL_BITS_ARB"/>
+                <enum name="GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_ARB"/>
+                <enum name="GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_ARB"/>
+                <enum name="GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_ARB"/>
+                <enum name="GL_SAMPLE_LOCATION_ARB"/>
+                <enum name="GL_PROGRAMMABLE_SAMPLE_LOCATION_ARB"/>
+                <enum name="GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_ARB"/>
+                <enum name="GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_ARB"/>
+                <command name="glFramebufferSampleLocationsfvARB"/>
+                <command name="glNamedFramebufferSampleLocationsfvARB"/>
+                <command name="glEvaluateDepthValuesARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sample_shading" supported="gl|glcore">
+            <require>
+                <enum name="GL_SAMPLE_SHADING_ARB"/>
+                <enum name="GL_MIN_SAMPLE_SHADING_VALUE_ARB"/>
+                <command name="glMinSampleShadingARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sampler_objects" supported="gl|glcore">
+            <require>
+                <enum name="GL_SAMPLER_BINDING"/>
+                <command name="glGenSamplers"/>
+                <command name="glDeleteSamplers"/>
+                <command name="glIsSampler"/>
+                <command name="glBindSampler"/>
+                <command name="glSamplerParameteri"/>
+                <command name="glSamplerParameteriv"/>
+                <command name="glSamplerParameterf"/>
+                <command name="glSamplerParameterfv"/>
+                <command name="glSamplerParameterIiv"/>
+                <command name="glSamplerParameterIuiv"/>
+                <command name="glGetSamplerParameteriv"/>
+                <command name="glGetSamplerParameterIiv"/>
+                <command name="glGetSamplerParameterfv"/>
+                <command name="glGetSamplerParameterIuiv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_seamless_cube_map" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_seamless_cubemap_per_texture" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_SEAMLESS"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_separate_shader_objects" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_SHADER_BIT"/>
+                <enum name="GL_FRAGMENT_SHADER_BIT"/>
+                <enum name="GL_GEOMETRY_SHADER_BIT"/>
+                <enum name="GL_TESS_CONTROL_SHADER_BIT"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_BIT"/>
+                <enum name="GL_ALL_SHADER_BITS"/>
+                <enum name="GL_PROGRAM_SEPARABLE"/>
+                <enum name="GL_ACTIVE_PROGRAM"/>
+                <enum name="GL_PROGRAM_PIPELINE_BINDING"/>
+                <command name="glUseProgramStages"/>
+                <command name="glActiveShaderProgram"/>
+                <command name="glCreateShaderProgramv"/>
+                <command name="glBindProgramPipeline"/>
+                <command name="glDeleteProgramPipelines"/>
+                <command name="glGenProgramPipelines"/>
+                <command name="glIsProgramPipeline"/>
+                <command name="glGetProgramPipelineiv"/>
+                <command name="glProgramParameteri"/>
+                <command name="glProgramUniform1i"/>
+                <command name="glProgramUniform1iv"/>
+                <command name="glProgramUniform1f"/>
+                <command name="glProgramUniform1fv"/>
+                <command name="glProgramUniform1d"/>
+                <command name="glProgramUniform1dv"/>
+                <command name="glProgramUniform1ui"/>
+                <command name="glProgramUniform1uiv"/>
+                <command name="glProgramUniform2i"/>
+                <command name="glProgramUniform2iv"/>
+                <command name="glProgramUniform2f"/>
+                <command name="glProgramUniform2fv"/>
+                <command name="glProgramUniform2d"/>
+                <command name="glProgramUniform2dv"/>
+                <command name="glProgramUniform2ui"/>
+                <command name="glProgramUniform2uiv"/>
+                <command name="glProgramUniform3i"/>
+                <command name="glProgramUniform3iv"/>
+                <command name="glProgramUniform3f"/>
+                <command name="glProgramUniform3fv"/>
+                <command name="glProgramUniform3d"/>
+                <command name="glProgramUniform3dv"/>
+                <command name="glProgramUniform3ui"/>
+                <command name="glProgramUniform3uiv"/>
+                <command name="glProgramUniform4i"/>
+                <command name="glProgramUniform4iv"/>
+                <command name="glProgramUniform4f"/>
+                <command name="glProgramUniform4fv"/>
+                <command name="glProgramUniform4d"/>
+                <command name="glProgramUniform4dv"/>
+                <command name="glProgramUniform4ui"/>
+                <command name="glProgramUniform4uiv"/>
+                <command name="glProgramUniformMatrix2fv"/>
+                <command name="glProgramUniformMatrix3fv"/>
+                <command name="glProgramUniformMatrix4fv"/>
+                <command name="glProgramUniformMatrix2dv"/>
+                <command name="glProgramUniformMatrix3dv"/>
+                <command name="glProgramUniformMatrix4dv"/>
+                <command name="glProgramUniformMatrix2x3fv"/>
+                <command name="glProgramUniformMatrix3x2fv"/>
+                <command name="glProgramUniformMatrix2x4fv"/>
+                <command name="glProgramUniformMatrix4x2fv"/>
+                <command name="glProgramUniformMatrix3x4fv"/>
+                <command name="glProgramUniformMatrix4x3fv"/>
+                <command name="glProgramUniformMatrix2x3dv"/>
+                <command name="glProgramUniformMatrix3x2dv"/>
+                <command name="glProgramUniformMatrix2x4dv"/>
+                <command name="glProgramUniformMatrix4x2dv"/>
+                <command name="glProgramUniformMatrix3x4dv"/>
+                <command name="glProgramUniformMatrix4x3dv"/>
+                <command name="glValidateProgramPipeline"/>
+                <command name="glGetProgramPipelineInfoLog"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_atomic_counter_ops" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_atomic_counters" supported="gl|glcore">
+            <require>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_BINDING"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_START"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_SIZE"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER"/>
+                <enum name="GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_MAX_VERTEX_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_FRAGMENT_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_COMBINED_ATOMIC_COUNTERS"/>
+                <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE"/>
+                <enum name="GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS"/>
+                <enum name="GL_ACTIVE_ATOMIC_COUNTER_BUFFERS"/>
+                <enum name="GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX"/>
+                <enum name="GL_UNSIGNED_INT_ATOMIC_COUNTER"/>
+                <command name="glGetActiveAtomicCounterBufferiv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_ballot" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_bit_encoding" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_clock" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_draw_parameters" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_group_vote" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_image_load_store" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT"/>
+                <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT"/>
+                <enum name="GL_UNIFORM_BARRIER_BIT"/>
+                <enum name="GL_TEXTURE_FETCH_BARRIER_BIT"/>
+                <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT"/>
+                <enum name="GL_COMMAND_BARRIER_BIT"/>
+                <enum name="GL_PIXEL_BUFFER_BARRIER_BIT"/>
+                <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT"/>
+                <enum name="GL_BUFFER_UPDATE_BARRIER_BIT"/>
+                <enum name="GL_FRAMEBUFFER_BARRIER_BIT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT"/>
+                <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT"/>
+                <enum name="GL_ALL_BARRIER_BITS"/>
+                <enum name="GL_MAX_IMAGE_UNITS"/>
+                <enum name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"/>
+                <enum name="GL_IMAGE_BINDING_NAME"/>
+                <enum name="GL_IMAGE_BINDING_LEVEL"/>
+                <enum name="GL_IMAGE_BINDING_LAYERED"/>
+                <enum name="GL_IMAGE_BINDING_LAYER"/>
+                <enum name="GL_IMAGE_BINDING_ACCESS"/>
+                <enum name="GL_IMAGE_1D"/>
+                <enum name="GL_IMAGE_2D"/>
+                <enum name="GL_IMAGE_3D"/>
+                <enum name="GL_IMAGE_2D_RECT"/>
+                <enum name="GL_IMAGE_CUBE"/>
+                <enum name="GL_IMAGE_BUFFER"/>
+                <enum name="GL_IMAGE_1D_ARRAY"/>
+                <enum name="GL_IMAGE_2D_ARRAY"/>
+                <enum name="GL_IMAGE_CUBE_MAP_ARRAY"/>
+                <enum name="GL_IMAGE_2D_MULTISAMPLE"/>
+                <enum name="GL_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_INT_IMAGE_1D"/>
+                <enum name="GL_INT_IMAGE_2D"/>
+                <enum name="GL_INT_IMAGE_3D"/>
+                <enum name="GL_INT_IMAGE_2D_RECT"/>
+                <enum name="GL_INT_IMAGE_CUBE"/>
+                <enum name="GL_INT_IMAGE_BUFFER"/>
+                <enum name="GL_INT_IMAGE_1D_ARRAY"/>
+                <enum name="GL_INT_IMAGE_2D_ARRAY"/>
+                <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY"/>
+                <enum name="GL_INT_IMAGE_2D_MULTISAMPLE"/>
+                <enum name="GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_1D"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_3D"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_RECT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_1D_ARRAY"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_MAX_IMAGE_SAMPLES"/>
+                <enum name="GL_IMAGE_BINDING_FORMAT"/>
+                <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"/>
+                <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE"/>
+                <enum name="GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS"/>
+                <enum name="GL_MAX_VERTEX_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_FRAGMENT_IMAGE_UNIFORMS"/>
+                <enum name="GL_MAX_COMBINED_IMAGE_UNIFORMS"/>
+                <command name="glBindImageTexture"/>
+                <command name="glMemoryBarrier"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_image_size" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_objects" supported="gl">
+            <require>
+                <enum name="GL_PROGRAM_OBJECT_ARB"/>
+                <enum name="GL_SHADER_OBJECT_ARB"/>
+                <enum name="GL_OBJECT_TYPE_ARB"/>
+                <enum name="GL_OBJECT_SUBTYPE_ARB"/>
+                <enum name="GL_FLOAT_VEC2_ARB"/>
+                <enum name="GL_FLOAT_VEC3_ARB"/>
+                <enum name="GL_FLOAT_VEC4_ARB"/>
+                <enum name="GL_INT_VEC2_ARB"/>
+                <enum name="GL_INT_VEC3_ARB"/>
+                <enum name="GL_INT_VEC4_ARB"/>
+                <enum name="GL_BOOL_ARB"/>
+                <enum name="GL_BOOL_VEC2_ARB"/>
+                <enum name="GL_BOOL_VEC3_ARB"/>
+                <enum name="GL_BOOL_VEC4_ARB"/>
+                <enum name="GL_FLOAT_MAT2_ARB"/>
+                <enum name="GL_FLOAT_MAT3_ARB"/>
+                <enum name="GL_FLOAT_MAT4_ARB"/>
+                <enum name="GL_SAMPLER_1D_ARB"/>
+                <enum name="GL_SAMPLER_2D_ARB"/>
+                <enum name="GL_SAMPLER_3D_ARB"/>
+                <enum name="GL_SAMPLER_CUBE_ARB"/>
+                <enum name="GL_SAMPLER_1D_SHADOW_ARB"/>
+                <enum name="GL_SAMPLER_2D_SHADOW_ARB"/>
+                <enum name="GL_SAMPLER_2D_RECT_ARB"/>
+                <enum name="GL_SAMPLER_2D_RECT_SHADOW_ARB"/>
+                <enum name="GL_OBJECT_DELETE_STATUS_ARB"/>
+                <enum name="GL_OBJECT_COMPILE_STATUS_ARB"/>
+                <enum name="GL_OBJECT_LINK_STATUS_ARB"/>
+                <enum name="GL_OBJECT_VALIDATE_STATUS_ARB"/>
+                <enum name="GL_OBJECT_INFO_LOG_LENGTH_ARB"/>
+                <enum name="GL_OBJECT_ATTACHED_OBJECTS_ARB"/>
+                <enum name="GL_OBJECT_ACTIVE_UNIFORMS_ARB"/>
+                <enum name="GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB"/>
+                <enum name="GL_OBJECT_SHADER_SOURCE_LENGTH_ARB"/>
+                <command name="glDeleteObjectARB"/>
+                <command name="glGetHandleARB"/>
+                <command name="glDetachObjectARB"/>
+                <command name="glCreateShaderObjectARB"/>
+                <command name="glShaderSourceARB"/>
+                <command name="glCompileShaderARB"/>
+                <command name="glCreateProgramObjectARB"/>
+                <command name="glAttachObjectARB"/>
+                <command name="glLinkProgramARB"/>
+                <command name="glUseProgramObjectARB"/>
+                <command name="glValidateProgramARB"/>
+                <command name="glUniform1fARB"/>
+                <command name="glUniform2fARB"/>
+                <command name="glUniform3fARB"/>
+                <command name="glUniform4fARB"/>
+                <command name="glUniform1iARB"/>
+                <command name="glUniform2iARB"/>
+                <command name="glUniform3iARB"/>
+                <command name="glUniform4iARB"/>
+                <command name="glUniform1fvARB"/>
+                <command name="glUniform2fvARB"/>
+                <command name="glUniform3fvARB"/>
+                <command name="glUniform4fvARB"/>
+                <command name="glUniform1ivARB"/>
+                <command name="glUniform2ivARB"/>
+                <command name="glUniform3ivARB"/>
+                <command name="glUniform4ivARB"/>
+                <command name="glUniformMatrix2fvARB"/>
+                <command name="glUniformMatrix3fvARB"/>
+                <command name="glUniformMatrix4fvARB"/>
+                <command name="glGetObjectParameterfvARB"/>
+                <command name="glGetObjectParameterivARB"/>
+                <command name="glGetInfoLogARB"/>
+                <command name="glGetAttachedObjectsARB"/>
+                <command name="glGetUniformLocationARB"/>
+                <command name="glGetActiveUniformARB"/>
+                <command name="glGetUniformfvARB"/>
+                <command name="glGetUniformivARB"/>
+                <command name="glGetShaderSourceARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_precision" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_stencil_export" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_storage_buffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_SHADER_STORAGE_BUFFER"/>
+                <enum name="GL_SHADER_STORAGE_BUFFER_BINDING"/>
+                <enum name="GL_SHADER_STORAGE_BUFFER_START"/>
+                <enum name="GL_SHADER_STORAGE_BUFFER_SIZE"/>
+                <enum name="GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"/>
+                <enum name="GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"/>
+                <enum name="GL_MAX_SHADER_STORAGE_BLOCK_SIZE"/>
+                <enum name="GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"/>
+                <enum name="GL_SHADER_STORAGE_BARRIER_BIT"/>
+                <enum name="GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES"/>
+                <enum name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"/>
+                <command name="glShaderStorageBlockBinding"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_subroutine" supported="gl|glcore">
+            <require>
+                <enum name="GL_ACTIVE_SUBROUTINES"/>
+                <enum name="GL_ACTIVE_SUBROUTINE_UNIFORMS"/>
+                <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS"/>
+                <enum name="GL_ACTIVE_SUBROUTINE_MAX_LENGTH"/>
+                <enum name="GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH"/>
+                <enum name="GL_MAX_SUBROUTINES"/>
+                <enum name="GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS"/>
+                <enum name="GL_NUM_COMPATIBLE_SUBROUTINES"/>
+                <enum name="GL_COMPATIBLE_SUBROUTINES"/>
+                <enum name="GL_UNIFORM_SIZE"/>
+                <enum name="GL_UNIFORM_NAME_LENGTH"/>
+                <command name="glGetSubroutineUniformLocation"/>
+                <command name="glGetSubroutineIndex"/>
+                <command name="glGetActiveSubroutineUniformiv"/>
+                <command name="glGetActiveSubroutineUniformName"/>
+                <command name="glGetActiveSubroutineName"/>
+                <command name="glUniformSubroutinesuiv"/>
+                <command name="glGetUniformSubroutineuiv"/>
+                <command name="glGetProgramStageiv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shader_texture_image_samples" supported="gl|glcore"/>
+        <extension name="GL_ARB_shader_texture_lod" supported="gl"/>
+        <extension name="GL_ARB_shader_viewport_layer_array" supported="gl|glcore"/>
+        <extension name="GL_ARB_shading_language_100" supported="gl">
+            <require>
+                <enum name="GL_SHADING_LANGUAGE_VERSION_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shading_language_420pack" supported="gl|glcore"/>
+        <extension name="GL_ARB_shading_language_include" supported="gl|glcore">
+            <require>
+                <enum name="GL_SHADER_INCLUDE_ARB"/>
+                <enum name="GL_NAMED_STRING_LENGTH_ARB"/>
+                <enum name="GL_NAMED_STRING_TYPE_ARB"/>
+                <command name="glNamedStringARB"/>
+                <command name="glDeleteNamedStringARB"/>
+                <command name="glCompileShaderIncludeARB"/>
+                <command name="glIsNamedStringARB"/>
+                <command name="glGetNamedStringARB"/>
+                <command name="glGetNamedStringivARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shading_language_packing" supported="gl|glcore"/>
+        <extension name="GL_ARB_shadow" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COMPARE_MODE_ARB"/>
+                <enum name="GL_TEXTURE_COMPARE_FUNC_ARB"/>
+                <enum name="GL_COMPARE_R_TO_TEXTURE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_shadow_ambient" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COMPARE_FAIL_VALUE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sparse_buffer" supported="gl|glcore">
+            <require>
+                <enum name="GL_SPARSE_STORAGE_BIT_ARB"/>
+                <enum name="GL_SPARSE_BUFFER_PAGE_SIZE_ARB"/>
+                <command name="glBufferPageCommitmentARB"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glNamedBufferPageCommitmentEXT"/>
+            </require>
+            <require comment="Supported only if GL_ARB_direct_state_access or GL 4.5 is supported">
+                <command name="glNamedBufferPageCommitmentARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sparse_texture" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_SPARSE_ARB"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_INDEX_ARB"/>
+                <enum name="GL_NUM_SPARSE_LEVELS_ARB"/>
+                <enum name="GL_NUM_VIRTUAL_PAGE_SIZES_ARB"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_X_ARB"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Y_ARB"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Z_ARB"/>
+                <enum name="GL_MAX_SPARSE_TEXTURE_SIZE_ARB"/>
+                <enum name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_ARB"/>
+                <enum name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_ARB"/>
+                <enum name="GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_ARB"/>
+                <command name="glTexPageCommitmentARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sparse_texture2" supported="gl|glcore"/>
+        <extension name="GL_ARB_sparse_texture_clamp" supported="gl|glcore"/>
+        <extension name="GL_ARB_spirv_extensions" supported="gl|glcore">
+            <require>
+                <enum name="GL_SPIR_V_EXTENSIONS"/>
+                <enum name="GL_NUM_SPIR_V_EXTENSIONS"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_stencil_texturing" supported="gl|glcore">
+            <require>
+                <enum name="GL_DEPTH_STENCIL_TEXTURE_MODE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_sync" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_SERVER_WAIT_TIMEOUT"/>
+                <enum name="GL_OBJECT_TYPE"/>
+                <enum name="GL_SYNC_CONDITION"/>
+                <enum name="GL_SYNC_STATUS"/>
+                <enum name="GL_SYNC_FLAGS"/>
+                <enum name="GL_SYNC_FENCE"/>
+                <enum name="GL_SYNC_GPU_COMMANDS_COMPLETE"/>
+                <enum name="GL_UNSIGNALED"/>
+                <enum name="GL_SIGNALED"/>
+                <enum name="GL_ALREADY_SIGNALED"/>
+                <enum name="GL_TIMEOUT_EXPIRED"/>
+                <enum name="GL_CONDITION_SATISFIED"/>
+                <enum name="GL_WAIT_FAILED"/>
+                <enum name="GL_SYNC_FLUSH_COMMANDS_BIT"/>
+                <enum name="GL_TIMEOUT_IGNORED"/>
+                <command name="glFenceSync"/>
+                <command name="glIsSync"/>
+                <command name="glDeleteSync"/>
+                <command name="glClientWaitSync"/>
+                <command name="glWaitSync"/>
+                <command name="glGetInteger64v"/>
+                <command name="glGetSynciv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_tessellation_shader" supported="gl|glcore">
+            <require>
+                <enum name="GL_PATCHES"/>
+                <enum name="GL_PATCH_VERTICES"/>
+                <enum name="GL_PATCH_DEFAULT_INNER_LEVEL"/>
+                <enum name="GL_PATCH_DEFAULT_OUTER_LEVEL"/>
+                <enum name="GL_TESS_CONTROL_OUTPUT_VERTICES"/>
+                <enum name="GL_TESS_GEN_MODE"/>
+                <enum name="GL_TESS_GEN_SPACING"/>
+                <enum name="GL_TESS_GEN_VERTEX_ORDER"/>
+                <enum name="GL_TESS_GEN_POINT_MODE"/>
+                <enum name="GL_TRIANGLES"/>
+                <enum name="GL_ISOLINES"/>
+                <enum name="GL_QUADS"/>
+                <enum name="GL_EQUAL"/>
+                <enum name="GL_FRACTIONAL_ODD"/>
+                <enum name="GL_FRACTIONAL_EVEN"/>
+                <enum name="GL_CCW"/>
+                <enum name="GL_CW"/>
+                <enum name="GL_MAX_PATCH_VERTICES"/>
+                <enum name="GL_MAX_TESS_GEN_LEVEL"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS"/>
+                <enum name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_PATCH_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS"/>
+                <enum name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS"/>
+                <enum name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER"/>
+                <enum name="GL_TESS_EVALUATION_SHADER"/>
+                <enum name="GL_TESS_CONTROL_SHADER"/>
+                <command name="glPatchParameteri"/>
+                <command name="glPatchParameterfv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_barrier" supported="gl|glcore">
+            <require>
+                <command name="glTextureBarrier"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_border_clamp" supported="gl|glcore">
+            <require>
+                <enum name="GL_CLAMP_TO_BORDER_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_buffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_BUFFER_ARB"/>
+                <enum name="GL_MAX_TEXTURE_BUFFER_SIZE_ARB"/>
+                <enum name="GL_TEXTURE_BINDING_BUFFER_ARB"/>
+                <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB"/>
+                <enum name="GL_TEXTURE_BUFFER_FORMAT_ARB"/>
+                <command name="glTexBufferARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_buffer_object_rgb32" supported="gl|glcore">
+            <require>
+                <enum name="GL_RGB32F"/>
+                <enum name="GL_RGB32UI"/>
+                <enum name="GL_RGB32I"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_buffer_range" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET"/>
+                <enum name="GL_TEXTURE_BUFFER_SIZE"/>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT"/>
+                <command name="glTexBufferRange"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_compression" supported="gl">
+            <require>
+                <enum name="GL_COMPRESSED_ALPHA_ARB"/>
+                <enum name="GL_COMPRESSED_LUMINANCE_ARB"/>
+                <enum name="GL_COMPRESSED_LUMINANCE_ALPHA_ARB"/>
+                <enum name="GL_COMPRESSED_INTENSITY_ARB"/>
+                <enum name="GL_COMPRESSED_RGB_ARB"/>
+                <enum name="GL_COMPRESSED_RGBA_ARB"/>
+                <enum name="GL_TEXTURE_COMPRESSION_HINT_ARB"/>
+                <enum name="GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB"/>
+                <enum name="GL_TEXTURE_COMPRESSED_ARB"/>
+                <enum name="GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB"/>
+                <enum name="GL_COMPRESSED_TEXTURE_FORMATS_ARB"/>
+                <command name="glCompressedTexImage3DARB"/>
+                <command name="glCompressedTexImage2DARB"/>
+                <command name="glCompressedTexImage1DARB"/>
+                <command name="glCompressedTexSubImage3DARB"/>
+                <command name="glCompressedTexSubImage2DARB"/>
+                <command name="glCompressedTexSubImage1DARB"/>
+                <command name="glGetCompressedTexImageARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_compression_bptc" supported="gl|glcore">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_BPTC_UNORM_ARB"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB"/>
+                <enum name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB"/>
+                <enum name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_compression_rgtc" supported="gl|glcore">
+            <require>
+                <enum name="GL_COMPRESSED_RED_RGTC1"/>
+                <enum name="GL_COMPRESSED_SIGNED_RED_RGTC1"/>
+                <enum name="GL_COMPRESSED_RG_RGTC2"/>
+                <enum name="GL_COMPRESSED_SIGNED_RG_RGTC2"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_cube_map" supported="gl">
+            <require>
+                <enum name="GL_NORMAL_MAP_ARB"/>
+                <enum name="GL_REFLECTION_MAP_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARB"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB"/>
+                <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARB"/>
+                <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_cube_map_array" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB"/>
+                <enum name="GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB"/>
+                <enum name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_env_add" supported="gl"/>
+        <extension name="GL_ARB_texture_env_combine" supported="gl">
+            <require>
+                <enum name="GL_COMBINE_ARB"/>
+                <enum name="GL_COMBINE_RGB_ARB"/>
+                <enum name="GL_COMBINE_ALPHA_ARB"/>
+                <enum name="GL_SOURCE0_RGB_ARB"/>
+                <enum name="GL_SOURCE1_RGB_ARB"/>
+                <enum name="GL_SOURCE2_RGB_ARB"/>
+                <enum name="GL_SOURCE0_ALPHA_ARB"/>
+                <enum name="GL_SOURCE1_ALPHA_ARB"/>
+                <enum name="GL_SOURCE2_ALPHA_ARB"/>
+                <enum name="GL_OPERAND0_RGB_ARB"/>
+                <enum name="GL_OPERAND1_RGB_ARB"/>
+                <enum name="GL_OPERAND2_RGB_ARB"/>
+                <enum name="GL_OPERAND0_ALPHA_ARB"/>
+                <enum name="GL_OPERAND1_ALPHA_ARB"/>
+                <enum name="GL_OPERAND2_ALPHA_ARB"/>
+                <enum name="GL_RGB_SCALE_ARB"/>
+                <enum name="GL_ADD_SIGNED_ARB"/>
+                <enum name="GL_INTERPOLATE_ARB"/>
+                <enum name="GL_SUBTRACT_ARB"/>
+                <enum name="GL_CONSTANT_ARB"/>
+                <enum name="GL_PRIMARY_COLOR_ARB"/>
+                <enum name="GL_PREVIOUS_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_env_crossbar" supported="gl"/>
+        <extension name="GL_ARB_texture_env_dot3" supported="gl">
+            <require>
+                <enum name="GL_DOT3_RGB_ARB"/>
+                <enum name="GL_DOT3_RGBA_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_filter_anisotropic" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_MAX_ANISOTROPY"/>
+                <enum name="GL_MAX_TEXTURE_MAX_ANISOTROPY"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_filter_minmax" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_REDUCTION_MODE_ARB"/>
+                <enum name="GL_WEIGHTED_AVERAGE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_float" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_RED_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_GREEN_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_BLUE_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_ALPHA_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_LUMINANCE_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_INTENSITY_TYPE_ARB"/>
+                <enum name="GL_TEXTURE_DEPTH_TYPE_ARB"/>
+                <enum name="GL_UNSIGNED_NORMALIZED_ARB"/>
+                <enum name="GL_RGBA32F_ARB"/>
+                <enum name="GL_RGB32F_ARB"/>
+                <enum name="GL_ALPHA32F_ARB"/>
+                <enum name="GL_INTENSITY32F_ARB"/>
+                <enum name="GL_LUMINANCE32F_ARB"/>
+                <enum name="GL_LUMINANCE_ALPHA32F_ARB"/>
+                <enum name="GL_RGBA16F_ARB"/>
+                <enum name="GL_RGB16F_ARB"/>
+                <enum name="GL_ALPHA16F_ARB"/>
+                <enum name="GL_INTENSITY16F_ARB"/>
+                <enum name="GL_LUMINANCE16F_ARB"/>
+                <enum name="GL_LUMINANCE_ALPHA16F_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_gather" supported="gl|glcore">
+            <require>
+                <enum name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_mirror_clamp_to_edge" supported="gl|glcore">
+            <require>
+                <enum name="GL_MIRROR_CLAMP_TO_EDGE"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_mirrored_repeat" supported="gl|glcore">
+            <require>
+                <enum name="GL_MIRRORED_REPEAT_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_multisample" supported="gl|glcore">
+            <require>
+                <enum name="GL_SAMPLE_POSITION"/>
+                <enum name="GL_SAMPLE_MASK"/>
+                <enum name="GL_SAMPLE_MASK_VALUE"/>
+                <enum name="GL_MAX_SAMPLE_MASK_WORDS"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+                <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE"/>
+                <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_TEXTURE_SAMPLES"/>
+                <enum name="GL_TEXTURE_FIXED_SAMPLE_LOCATIONS"/>
+                <enum name="GL_SAMPLER_2D_MULTISAMPLE"/>
+                <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE"/>
+                <enum name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_MAX_COLOR_TEXTURE_SAMPLES"/>
+                <enum name="GL_MAX_DEPTH_TEXTURE_SAMPLES"/>
+                <enum name="GL_MAX_INTEGER_SAMPLES"/>
+                <command name="glTexImage2DMultisample"/>
+                <command name="glTexImage3DMultisample"/>
+                <command name="glGetMultisamplefv"/>
+                <command name="glSampleMaski"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_non_power_of_two" supported="gl|glcore"/>
+        <extension name="GL_ARB_texture_query_levels" supported="gl|glcore"/>
+        <extension name="GL_ARB_texture_query_lod" supported="gl|glcore"/>
+        <extension name="GL_ARB_texture_rectangle" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_RECTANGLE_ARB"/>
+                <enum name="GL_TEXTURE_BINDING_RECTANGLE_ARB"/>
+                <enum name="GL_PROXY_TEXTURE_RECTANGLE_ARB"/>
+                <enum name="GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_rg" supported="gl|glcore">
+            <require>
+                <enum name="GL_RG"/>
+                <enum name="GL_RG_INTEGER"/>
+                <enum name="GL_R8"/>
+                <enum name="GL_R16"/>
+                <enum name="GL_RG8"/>
+                <enum name="GL_RG16"/>
+                <enum name="GL_R16F"/>
+                <enum name="GL_R32F"/>
+                <enum name="GL_RG16F"/>
+                <enum name="GL_RG32F"/>
+                <enum name="GL_R8I"/>
+                <enum name="GL_R8UI"/>
+                <enum name="GL_R16I"/>
+                <enum name="GL_R16UI"/>
+                <enum name="GL_R32I"/>
+                <enum name="GL_R32UI"/>
+                <enum name="GL_RG8I"/>
+                <enum name="GL_RG8UI"/>
+                <enum name="GL_RG16I"/>
+                <enum name="GL_RG16UI"/>
+                <enum name="GL_RG32I"/>
+                <enum name="GL_RG32UI"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_rgb10_a2ui" supported="gl|glcore">
+            <require>
+                <enum name="GL_RGB10_A2UI"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_stencil8" supported="gl|glcore">
+            <require>
+                <enum name="GL_STENCIL_INDEX"/>
+                <enum name="GL_STENCIL_INDEX8"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_storage" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_IMMUTABLE_FORMAT"/>
+                <command name="glTexStorage1D"/>
+                <command name="glTexStorage2D"/>
+                <command name="glTexStorage3D"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_storage_multisample" supported="gl|glcore">
+            <require>
+                <command name="glTexStorage2DMultisample"/>
+                <command name="glTexStorage3DMultisample"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_swizzle" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_SWIZZLE_R"/>
+                <enum name="GL_TEXTURE_SWIZZLE_G"/>
+                <enum name="GL_TEXTURE_SWIZZLE_B"/>
+                <enum name="GL_TEXTURE_SWIZZLE_A"/>
+                <enum name="GL_TEXTURE_SWIZZLE_RGBA"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_texture_view" supported="gl|glcore">
+            <require>
+                <enum name="GL_TEXTURE_VIEW_MIN_LEVEL"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LEVELS"/>
+                <enum name="GL_TEXTURE_VIEW_MIN_LAYER"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LAYERS"/>
+                <enum name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+                <command name="glTextureView"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_timer_query" supported="gl|glcore">
+            <require>
+                <enum name="GL_TIME_ELAPSED"/>
+                <enum name="GL_TIMESTAMP"/>
+                <command name="glQueryCounter"/>
+                <command name="glGetQueryObjecti64v"/>
+                <command name="glGetQueryObjectui64v"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_transform_feedback2" supported="gl|glcore">
+            <require>
+                <enum name="GL_TRANSFORM_FEEDBACK"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BINDING"/>
+                <command name="glBindTransformFeedback"/>
+                <command name="glDeleteTransformFeedbacks"/>
+                <command name="glGenTransformFeedbacks"/>
+                <command name="glIsTransformFeedback"/>
+                <command name="glPauseTransformFeedback"/>
+                <command name="glResumeTransformFeedback"/>
+                <command name="glDrawTransformFeedback"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_transform_feedback3" supported="gl|glcore">
+            <require>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_BUFFERS"/>
+                <enum name="GL_MAX_VERTEX_STREAMS"/>
+                <command name="glDrawTransformFeedbackStream"/>
+                <command name="glBeginQueryIndexed"/>
+                <command name="glEndQueryIndexed"/>
+                <command name="glGetQueryIndexediv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_transform_feedback_instanced" supported="gl|glcore">
+            <require>
+                <command name="glDrawTransformFeedbackInstanced"/>
+                <command name="glDrawTransformFeedbackStreamInstanced"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_transform_feedback_overflow_query" supported="gl|glcore">
+            <require>
+                <enum name="GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_transpose_matrix" supported="gl">
+            <require>
+                <enum name="GL_TRANSPOSE_MODELVIEW_MATRIX_ARB"/>
+                <enum name="GL_TRANSPOSE_PROJECTION_MATRIX_ARB"/>
+                <enum name="GL_TRANSPOSE_TEXTURE_MATRIX_ARB"/>
+                <enum name="GL_TRANSPOSE_COLOR_MATRIX_ARB"/>
+                <command name="glLoadTransposeMatrixfARB"/>
+                <command name="glLoadTransposeMatrixdARB"/>
+                <command name="glMultTransposeMatrixfARB"/>
+                <command name="glMultTransposeMatrixdARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_uniform_buffer_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNIFORM_BUFFER"/>
+                <enum name="GL_UNIFORM_BUFFER_BINDING"/>
+                <enum name="GL_UNIFORM_BUFFER_START"/>
+                <enum name="GL_UNIFORM_BUFFER_SIZE"/>
+                <enum name="GL_MAX_VERTEX_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_FRAGMENT_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_COMBINED_UNIFORM_BLOCKS"/>
+                <enum name="GL_MAX_UNIFORM_BUFFER_BINDINGS"/>
+                <enum name="GL_MAX_UNIFORM_BLOCK_SIZE"/>
+                <enum name="GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS"/>
+                <enum name="GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS"/>
+                <enum name="GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"/>
+                <enum name="GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH"/>
+                <enum name="GL_ACTIVE_UNIFORM_BLOCKS"/>
+                <enum name="GL_UNIFORM_TYPE"/>
+                <enum name="GL_UNIFORM_SIZE"/>
+                <enum name="GL_UNIFORM_NAME_LENGTH"/>
+                <enum name="GL_UNIFORM_BLOCK_INDEX"/>
+                <enum name="GL_UNIFORM_OFFSET"/>
+                <enum name="GL_UNIFORM_ARRAY_STRIDE"/>
+                <enum name="GL_UNIFORM_MATRIX_STRIDE"/>
+                <enum name="GL_UNIFORM_IS_ROW_MAJOR"/>
+                <enum name="GL_UNIFORM_BLOCK_BINDING"/>
+                <enum name="GL_UNIFORM_BLOCK_DATA_SIZE"/>
+                <enum name="GL_UNIFORM_BLOCK_NAME_LENGTH"/>
+                <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS"/>
+                <enum name="GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER"/>
+                <enum name="GL_INVALID_INDEX"/>
+                <command name="glGetUniformIndices"/>
+                <command name="glGetActiveUniformsiv"/>
+                <command name="glGetActiveUniformName"/>
+                <command name="glGetUniformBlockIndex"/>
+                <command name="glGetActiveUniformBlockiv"/>
+                <command name="glGetActiveUniformBlockName"/>
+                <command name="glUniformBlockBinding"/>
+                <command name="glBindBufferRange"/>
+                <command name="glBindBufferBase"/>
+                <command name="glGetIntegeri_v"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_array_bgra" supported="gl|glcore">
+            <require>
+                <enum name="GL_BGRA"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_array_object" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_BINDING"/>
+                <command name="glBindVertexArray"/>
+                <command name="glDeleteVertexArrays"/>
+                <command name="glGenVertexArrays"/>
+                <command name="glIsVertexArray"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_attrib_64bit" supported="gl|glcore">
+            <require>
+                <enum name="GL_RGB32I"/>
+                <enum name="GL_DOUBLE_VEC2"/>
+                <enum name="GL_DOUBLE_VEC3"/>
+                <enum name="GL_DOUBLE_VEC4"/>
+                <enum name="GL_DOUBLE_MAT2"/>
+                <enum name="GL_DOUBLE_MAT3"/>
+                <enum name="GL_DOUBLE_MAT4"/>
+                <enum name="GL_DOUBLE_MAT2x3"/>
+                <enum name="GL_DOUBLE_MAT2x4"/>
+                <enum name="GL_DOUBLE_MAT3x2"/>
+                <enum name="GL_DOUBLE_MAT3x4"/>
+                <enum name="GL_DOUBLE_MAT4x2"/>
+                <enum name="GL_DOUBLE_MAT4x3"/>
+                <command name="glVertexAttribL1d"/>
+                <command name="glVertexAttribL2d"/>
+                <command name="glVertexAttribL3d"/>
+                <command name="glVertexAttribL4d"/>
+                <command name="glVertexAttribL1dv"/>
+                <command name="glVertexAttribL2dv"/>
+                <command name="glVertexAttribL3dv"/>
+                <command name="glVertexAttribL4dv"/>
+                <command name="glVertexAttribLPointer"/>
+                <command name="glGetVertexAttribLdv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_attrib_binding" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_BINDING"/>
+                <enum name="GL_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+                <enum name="GL_VERTEX_BINDING_DIVISOR"/>
+                <enum name="GL_VERTEX_BINDING_OFFSET"/>
+                <enum name="GL_VERTEX_BINDING_STRIDE"/>
+                <enum name="GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET"/>
+                <enum name="GL_MAX_VERTEX_ATTRIB_BINDINGS"/>
+                <command name="glBindVertexBuffer"/>
+                <command name="glVertexAttribFormat"/>
+                <command name="glVertexAttribIFormat"/>
+                <command name="glVertexAttribLFormat"/>
+                <command name="glVertexAttribBinding"/>
+                <command name="glVertexBindingDivisor"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_blend" supported="gl">
+            <require>
+                <enum name="GL_MAX_VERTEX_UNITS_ARB"/>
+                <enum name="GL_ACTIVE_VERTEX_UNITS_ARB"/>
+                <enum name="GL_WEIGHT_SUM_UNITY_ARB"/>
+                <enum name="GL_VERTEX_BLEND_ARB"/>
+                <enum name="GL_CURRENT_WEIGHT_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_TYPE_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_STRIDE_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_SIZE_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_POINTER_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_ARB"/>
+                <enum name="GL_MODELVIEW0_ARB"/>
+                <enum name="GL_MODELVIEW1_ARB"/>
+                <enum name="GL_MODELVIEW2_ARB"/>
+                <enum name="GL_MODELVIEW3_ARB"/>
+                <enum name="GL_MODELVIEW4_ARB"/>
+                <enum name="GL_MODELVIEW5_ARB"/>
+                <enum name="GL_MODELVIEW6_ARB"/>
+                <enum name="GL_MODELVIEW7_ARB"/>
+                <enum name="GL_MODELVIEW8_ARB"/>
+                <enum name="GL_MODELVIEW9_ARB"/>
+                <enum name="GL_MODELVIEW10_ARB"/>
+                <enum name="GL_MODELVIEW11_ARB"/>
+                <enum name="GL_MODELVIEW12_ARB"/>
+                <enum name="GL_MODELVIEW13_ARB"/>
+                <enum name="GL_MODELVIEW14_ARB"/>
+                <enum name="GL_MODELVIEW15_ARB"/>
+                <enum name="GL_MODELVIEW16_ARB"/>
+                <enum name="GL_MODELVIEW17_ARB"/>
+                <enum name="GL_MODELVIEW18_ARB"/>
+                <enum name="GL_MODELVIEW19_ARB"/>
+                <enum name="GL_MODELVIEW20_ARB"/>
+                <enum name="GL_MODELVIEW21_ARB"/>
+                <enum name="GL_MODELVIEW22_ARB"/>
+                <enum name="GL_MODELVIEW23_ARB"/>
+                <enum name="GL_MODELVIEW24_ARB"/>
+                <enum name="GL_MODELVIEW25_ARB"/>
+                <enum name="GL_MODELVIEW26_ARB"/>
+                <enum name="GL_MODELVIEW27_ARB"/>
+                <enum name="GL_MODELVIEW28_ARB"/>
+                <enum name="GL_MODELVIEW29_ARB"/>
+                <enum name="GL_MODELVIEW30_ARB"/>
+                <enum name="GL_MODELVIEW31_ARB"/>
+                <command name="glWeightbvARB"/>
+                <command name="glWeightsvARB"/>
+                <command name="glWeightivARB"/>
+                <command name="glWeightfvARB"/>
+                <command name="glWeightdvARB"/>
+                <command name="glWeightubvARB"/>
+                <command name="glWeightusvARB"/>
+                <command name="glWeightuivARB"/>
+                <command name="glWeightPointerARB"/>
+                <command name="glVertexBlendARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_buffer_object" supported="gl">
+            <require>
+                <enum name="GL_BUFFER_SIZE_ARB"/>
+                <enum name="GL_BUFFER_USAGE_ARB"/>
+                <enum name="GL_ARRAY_BUFFER_ARB"/>
+                <enum name="GL_ELEMENT_ARRAY_BUFFER_ARB"/>
+                <enum name="GL_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_VERTEX_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_NORMAL_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_COLOR_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_INDEX_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB"/>
+                <enum name="GL_READ_ONLY_ARB"/>
+                <enum name="GL_WRITE_ONLY_ARB"/>
+                <enum name="GL_READ_WRITE_ARB"/>
+                <enum name="GL_BUFFER_ACCESS_ARB"/>
+                <enum name="GL_BUFFER_MAPPED_ARB"/>
+                <enum name="GL_BUFFER_MAP_POINTER_ARB"/>
+                <enum name="GL_STREAM_DRAW_ARB"/>
+                <enum name="GL_STREAM_READ_ARB"/>
+                <enum name="GL_STREAM_COPY_ARB"/>
+                <enum name="GL_STATIC_DRAW_ARB"/>
+                <enum name="GL_STATIC_READ_ARB"/>
+                <enum name="GL_STATIC_COPY_ARB"/>
+                <enum name="GL_DYNAMIC_DRAW_ARB"/>
+                <enum name="GL_DYNAMIC_READ_ARB"/>
+                <enum name="GL_DYNAMIC_COPY_ARB"/>
+                <command name="glBindBufferARB"/>
+                <command name="glDeleteBuffersARB"/>
+                <command name="glGenBuffersARB"/>
+                <command name="glIsBufferARB"/>
+                <command name="glBufferDataARB"/>
+                <command name="glBufferSubDataARB"/>
+                <command name="glGetBufferSubDataARB"/>
+                <command name="glMapBufferARB"/>
+                <command name="glUnmapBufferARB"/>
+                <command name="glGetBufferParameterivARB"/>
+                <command name="glGetBufferPointervARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_program" supported="gl">
+            <require>
+                <enum name="GL_COLOR_SUM_ARB"/>
+                <enum name="GL_VERTEX_PROGRAM_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB"/>
+                <enum name="GL_CURRENT_VERTEX_ATTRIB_ARB"/>
+                <enum name="GL_PROGRAM_LENGTH_ARB"/>
+                <enum name="GL_PROGRAM_STRING_ARB"/>
+                <enum name="GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB"/>
+                <enum name="GL_MAX_PROGRAM_MATRICES_ARB"/>
+                <enum name="GL_CURRENT_MATRIX_STACK_DEPTH_ARB"/>
+                <enum name="GL_CURRENT_MATRIX_ARB"/>
+                <enum name="GL_VERTEX_PROGRAM_POINT_SIZE_ARB"/>
+                <enum name="GL_VERTEX_PROGRAM_TWO_SIDE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB"/>
+                <enum name="GL_PROGRAM_ERROR_POSITION_ARB"/>
+                <enum name="GL_PROGRAM_BINDING_ARB"/>
+                <enum name="GL_MAX_VERTEX_ATTRIBS_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB"/>
+                <enum name="GL_PROGRAM_ERROR_STRING_ARB"/>
+                <enum name="GL_PROGRAM_FORMAT_ASCII_ARB"/>
+                <enum name="GL_PROGRAM_FORMAT_ARB"/>
+                <enum name="GL_PROGRAM_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB"/>
+                <enum name="GL_PROGRAM_TEMPORARIES_ARB"/>
+                <enum name="GL_MAX_PROGRAM_TEMPORARIES_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB"/>
+                <enum name="GL_PROGRAM_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ATTRIBS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB"/>
+                <enum name="GL_PROGRAM_ADDRESS_REGISTERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB"/>
+                <enum name="GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB"/>
+                <enum name="GL_MAX_PROGRAM_ENV_PARAMETERS_ARB"/>
+                <enum name="GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB"/>
+                <enum name="GL_TRANSPOSE_CURRENT_MATRIX_ARB"/>
+                <enum name="GL_MATRIX0_ARB"/>
+                <enum name="GL_MATRIX1_ARB"/>
+                <enum name="GL_MATRIX2_ARB"/>
+                <enum name="GL_MATRIX3_ARB"/>
+                <enum name="GL_MATRIX4_ARB"/>
+                <enum name="GL_MATRIX5_ARB"/>
+                <enum name="GL_MATRIX6_ARB"/>
+                <enum name="GL_MATRIX7_ARB"/>
+                <enum name="GL_MATRIX8_ARB"/>
+                <enum name="GL_MATRIX9_ARB"/>
+                <enum name="GL_MATRIX10_ARB"/>
+                <enum name="GL_MATRIX11_ARB"/>
+                <enum name="GL_MATRIX12_ARB"/>
+                <enum name="GL_MATRIX13_ARB"/>
+                <enum name="GL_MATRIX14_ARB"/>
+                <enum name="GL_MATRIX15_ARB"/>
+                <enum name="GL_MATRIX16_ARB"/>
+                <enum name="GL_MATRIX17_ARB"/>
+                <enum name="GL_MATRIX18_ARB"/>
+                <enum name="GL_MATRIX19_ARB"/>
+                <enum name="GL_MATRIX20_ARB"/>
+                <enum name="GL_MATRIX21_ARB"/>
+                <enum name="GL_MATRIX22_ARB"/>
+                <enum name="GL_MATRIX23_ARB"/>
+                <enum name="GL_MATRIX24_ARB"/>
+                <enum name="GL_MATRIX25_ARB"/>
+                <enum name="GL_MATRIX26_ARB"/>
+                <enum name="GL_MATRIX27_ARB"/>
+                <enum name="GL_MATRIX28_ARB"/>
+                <enum name="GL_MATRIX29_ARB"/>
+                <enum name="GL_MATRIX30_ARB"/>
+                <enum name="GL_MATRIX31_ARB"/>
+                <command name="glVertexAttrib1dARB"/>
+                <command name="glVertexAttrib1dvARB"/>
+                <command name="glVertexAttrib1fARB"/>
+                <command name="glVertexAttrib1fvARB"/>
+                <command name="glVertexAttrib1sARB"/>
+                <command name="glVertexAttrib1svARB"/>
+                <command name="glVertexAttrib2dARB"/>
+                <command name="glVertexAttrib2dvARB"/>
+                <command name="glVertexAttrib2fARB"/>
+                <command name="glVertexAttrib2fvARB"/>
+                <command name="glVertexAttrib2sARB"/>
+                <command name="glVertexAttrib2svARB"/>
+                <command name="glVertexAttrib3dARB"/>
+                <command name="glVertexAttrib3dvARB"/>
+                <command name="glVertexAttrib3fARB"/>
+                <command name="glVertexAttrib3fvARB"/>
+                <command name="glVertexAttrib3sARB"/>
+                <command name="glVertexAttrib3svARB"/>
+                <command name="glVertexAttrib4NbvARB"/>
+                <command name="glVertexAttrib4NivARB"/>
+                <command name="glVertexAttrib4NsvARB"/>
+                <command name="glVertexAttrib4NubARB"/>
+                <command name="glVertexAttrib4NubvARB"/>
+                <command name="glVertexAttrib4NuivARB"/>
+                <command name="glVertexAttrib4NusvARB"/>
+                <command name="glVertexAttrib4bvARB"/>
+                <command name="glVertexAttrib4dARB"/>
+                <command name="glVertexAttrib4dvARB"/>
+                <command name="glVertexAttrib4fARB"/>
+                <command name="glVertexAttrib4fvARB"/>
+                <command name="glVertexAttrib4ivARB"/>
+                <command name="glVertexAttrib4sARB"/>
+                <command name="glVertexAttrib4svARB"/>
+                <command name="glVertexAttrib4ubvARB"/>
+                <command name="glVertexAttrib4uivARB"/>
+                <command name="glVertexAttrib4usvARB"/>
+                <command name="glVertexAttribPointerARB"/>
+                <command name="glEnableVertexAttribArrayARB"/>
+                <command name="glDisableVertexAttribArrayARB"/>
+                <command name="glProgramStringARB"/>
+                <command name="glBindProgramARB"/>
+                <command name="glDeleteProgramsARB"/>
+                <command name="glGenProgramsARB"/>
+                <command name="glProgramEnvParameter4dARB"/>
+                <command name="glProgramEnvParameter4dvARB"/>
+                <command name="glProgramEnvParameter4fARB"/>
+                <command name="glProgramEnvParameter4fvARB"/>
+                <command name="glProgramLocalParameter4dARB"/>
+                <command name="glProgramLocalParameter4dvARB"/>
+                <command name="glProgramLocalParameter4fARB"/>
+                <command name="glProgramLocalParameter4fvARB"/>
+                <command name="glGetProgramEnvParameterdvARB"/>
+                <command name="glGetProgramEnvParameterfvARB"/>
+                <command name="glGetProgramLocalParameterdvARB"/>
+                <command name="glGetProgramLocalParameterfvARB"/>
+                <command name="glGetProgramivARB"/>
+                <command name="glGetProgramStringARB"/>
+                <command name="glGetVertexAttribdvARB"/>
+                <command name="glGetVertexAttribfvARB"/>
+                <command name="glGetVertexAttribivARB"/>
+                <command name="glGetVertexAttribPointervARB"/>
+                <command name="glIsProgramARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_shader" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_SHADER_ARB"/>
+                <enum name="GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB"/>
+                <enum name="GL_MAX_VARYING_FLOATS_ARB"/>
+                <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB"/>
+                <enum name="GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB"/>
+                <enum name="GL_OBJECT_ACTIVE_ATTRIBUTES_ARB"/>
+                <enum name="GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB"/>
+                <enum name="GL_MAX_VERTEX_ATTRIBS_ARB"/>
+                <enum name="GL_MAX_TEXTURE_IMAGE_UNITS_ARB"/>
+                <enum name="GL_MAX_TEXTURE_COORDS_ARB"/>
+                <enum name="GL_VERTEX_PROGRAM_POINT_SIZE_ARB"/>
+                <enum name="GL_VERTEX_PROGRAM_TWO_SIDE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB"/>
+                <enum name="GL_CURRENT_VERTEX_ATTRIB_ARB"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB"/>
+                <enum name="GL_FLOAT"/>
+                <enum name="GL_FLOAT_VEC2_ARB"/>
+                <enum name="GL_FLOAT_VEC3_ARB"/>
+                <enum name="GL_FLOAT_VEC4_ARB"/>
+                <enum name="GL_FLOAT_MAT2_ARB"/>
+                <enum name="GL_FLOAT_MAT3_ARB"/>
+                <enum name="GL_FLOAT_MAT4_ARB"/>
+                <command name="glVertexAttrib1fARB"/>
+                <command name="glVertexAttrib1sARB"/>
+                <command name="glVertexAttrib1dARB"/>
+                <command name="glVertexAttrib2fARB"/>
+                <command name="glVertexAttrib2sARB"/>
+                <command name="glVertexAttrib2dARB"/>
+                <command name="glVertexAttrib3fARB"/>
+                <command name="glVertexAttrib3sARB"/>
+                <command name="glVertexAttrib3dARB"/>
+                <command name="glVertexAttrib4fARB"/>
+                <command name="glVertexAttrib4sARB"/>
+                <command name="glVertexAttrib4dARB"/>
+                <command name="glVertexAttrib4NubARB"/>
+                <command name="glVertexAttrib1fvARB"/>
+                <command name="glVertexAttrib1svARB"/>
+                <command name="glVertexAttrib1dvARB"/>
+                <command name="glVertexAttrib2fvARB"/>
+                <command name="glVertexAttrib2svARB"/>
+                <command name="glVertexAttrib2dvARB"/>
+                <command name="glVertexAttrib3fvARB"/>
+                <command name="glVertexAttrib3svARB"/>
+                <command name="glVertexAttrib3dvARB"/>
+                <command name="glVertexAttrib4fvARB"/>
+                <command name="glVertexAttrib4svARB"/>
+                <command name="glVertexAttrib4dvARB"/>
+                <command name="glVertexAttrib4ivARB"/>
+                <command name="glVertexAttrib4bvARB"/>
+                <command name="glVertexAttrib4ubvARB"/>
+                <command name="glVertexAttrib4usvARB"/>
+                <command name="glVertexAttrib4uivARB"/>
+                <command name="glVertexAttrib4NbvARB"/>
+                <command name="glVertexAttrib4NsvARB"/>
+                <command name="glVertexAttrib4NivARB"/>
+                <command name="glVertexAttrib4NubvARB"/>
+                <command name="glVertexAttrib4NusvARB"/>
+                <command name="glVertexAttrib4NuivARB"/>
+                <command name="glVertexAttribPointerARB"/>
+                <command name="glEnableVertexAttribArrayARB"/>
+                <command name="glDisableVertexAttribArrayARB"/>
+                <command name="glBindAttribLocationARB"/>
+                <command name="glGetActiveAttribARB"/>
+                <command name="glGetAttribLocationARB"/>
+                <command name="glGetVertexAttribdvARB"/>
+                <command name="glGetVertexAttribfvARB"/>
+                <command name="glGetVertexAttribivARB"/>
+                <command name="glGetVertexAttribPointervARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_type_10f_11f_11f_rev" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_vertex_type_2_10_10_10_rev" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNSIGNED_INT_2_10_10_10_REV"/>
+                <enum name="GL_INT_2_10_10_10_REV"/>
+                <command name="glVertexAttribP1ui"/>
+                <command name="glVertexAttribP1uiv"/>
+                <command name="glVertexAttribP2ui"/>
+                <command name="glVertexAttribP2uiv"/>
+                <command name="glVertexAttribP3ui"/>
+                <command name="glVertexAttribP3uiv"/>
+                <command name="glVertexAttribP4ui"/>
+                <command name="glVertexAttribP4uiv"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <command name="glVertexP2ui"/>
+                <command name="glVertexP2uiv"/>
+                <command name="glVertexP3ui"/>
+                <command name="glVertexP3uiv"/>
+                <command name="glVertexP4ui"/>
+                <command name="glVertexP4uiv"/>
+                <command name="glTexCoordP1ui"/>
+                <command name="glTexCoordP1uiv"/>
+                <command name="glTexCoordP2ui"/>
+                <command name="glTexCoordP2uiv"/>
+                <command name="glTexCoordP3ui"/>
+                <command name="glTexCoordP3uiv"/>
+                <command name="glTexCoordP4ui"/>
+                <command name="glTexCoordP4uiv"/>
+                <command name="glMultiTexCoordP1ui"/>
+                <command name="glMultiTexCoordP1uiv"/>
+                <command name="glMultiTexCoordP2ui"/>
+                <command name="glMultiTexCoordP2uiv"/>
+                <command name="glMultiTexCoordP3ui"/>
+                <command name="glMultiTexCoordP3uiv"/>
+                <command name="glMultiTexCoordP4ui"/>
+                <command name="glMultiTexCoordP4uiv"/>
+                <command name="glNormalP3ui"/>
+                <command name="glNormalP3uiv"/>
+                <command name="glColorP3ui"/>
+                <command name="glColorP3uiv"/>
+                <command name="glColorP4ui"/>
+                <command name="glColorP4uiv"/>
+                <command name="glSecondaryColorP3ui"/>
+                <command name="glSecondaryColorP3uiv"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_viewport_array" supported="gl|glcore">
+            <require>
+                <enum name="GL_SCISSOR_BOX"/>
+                <enum name="GL_VIEWPORT"/>
+                <enum name="GL_DEPTH_RANGE"/>
+                <enum name="GL_SCISSOR_TEST"/>
+                <enum name="GL_MAX_VIEWPORTS"/>
+                <enum name="GL_VIEWPORT_SUBPIXEL_BITS"/>
+                <enum name="GL_VIEWPORT_BOUNDS_RANGE"/>
+                <enum name="GL_LAYER_PROVOKING_VERTEX"/>
+                <enum name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX"/>
+                <enum name="GL_UNDEFINED_VERTEX"/>
+                <enum name="GL_FIRST_VERTEX_CONVENTION"/>
+                <enum name="GL_LAST_VERTEX_CONVENTION"/>
+                <enum name="GL_PROVOKING_VERTEX"/>
+                <command name="glViewportArrayv"/>
+                <command name="glViewportIndexedf"/>
+                <command name="glViewportIndexedfv"/>
+                <command name="glScissorArrayv"/>
+                <command name="glScissorIndexed"/>
+                <command name="glScissorIndexedv"/>
+                <command name="glDepthRangeArrayv"/>
+                <command name="glDepthRangeIndexed"/>
+                <command name="glGetFloati_v"/>
+                <command name="glGetDoublei_v"/>
+            </require>
+        </extension>
+        <extension name="GL_ARB_window_pos" supported="gl">
+            <require>
+                <command name="glWindowPos2dARB"/>
+                <command name="glWindowPos2dvARB"/>
+                <command name="glWindowPos2fARB"/>
+                <command name="glWindowPos2fvARB"/>
+                <command name="glWindowPos2iARB"/>
+                <command name="glWindowPos2ivARB"/>
+                <command name="glWindowPos2sARB"/>
+                <command name="glWindowPos2svARB"/>
+                <command name="glWindowPos3dARB"/>
+                <command name="glWindowPos3dvARB"/>
+                <command name="glWindowPos3fARB"/>
+                <command name="glWindowPos3fvARB"/>
+                <command name="glWindowPos3iARB"/>
+                <command name="glWindowPos3ivARB"/>
+                <command name="glWindowPos3sARB"/>
+                <command name="glWindowPos3svARB"/>
+            </require>
+        </extension>
+        <extension name="GL_ARM_mali_program_binary" supported="gles2">
+            <require>
+                <enum name="GL_MALI_PROGRAM_BINARY_ARM"/>
+            </require>
+        </extension>
+        <extension name="GL_ARM_mali_shader_binary" supported="gles2">
+            <require>
+                <enum name="GL_MALI_SHADER_BINARY_ARM"/>
+            </require>
+        </extension>
+        <extension name="GL_ARM_rgba8" supported="gles1|gles2"/>
+        <extension name="GL_ARM_shader_framebuffer_fetch" supported="gles2">
+            <require>
+                <enum name="GL_FETCH_PER_SAMPLE_ARM"/>
+                <enum name="GL_FRAGMENT_SHADER_FRAMEBUFFER_FETCH_MRT_ARM"/>
+            </require>
+        </extension>
+        <extension name="GL_ARM_shader_framebuffer_fetch_depth_stencil" supported="gles2"/>
+        <extension name="GL_ATI_draw_buffers" supported="gl">
+            <require>
+                <enum name="GL_MAX_DRAW_BUFFERS_ATI"/>
+                <enum name="GL_DRAW_BUFFER0_ATI"/>
+                <enum name="GL_DRAW_BUFFER1_ATI"/>
+                <enum name="GL_DRAW_BUFFER2_ATI"/>
+                <enum name="GL_DRAW_BUFFER3_ATI"/>
+                <enum name="GL_DRAW_BUFFER4_ATI"/>
+                <enum name="GL_DRAW_BUFFER5_ATI"/>
+                <enum name="GL_DRAW_BUFFER6_ATI"/>
+                <enum name="GL_DRAW_BUFFER7_ATI"/>
+                <enum name="GL_DRAW_BUFFER8_ATI"/>
+                <enum name="GL_DRAW_BUFFER9_ATI"/>
+                <enum name="GL_DRAW_BUFFER10_ATI"/>
+                <enum name="GL_DRAW_BUFFER11_ATI"/>
+                <enum name="GL_DRAW_BUFFER12_ATI"/>
+                <enum name="GL_DRAW_BUFFER13_ATI"/>
+                <enum name="GL_DRAW_BUFFER14_ATI"/>
+                <enum name="GL_DRAW_BUFFER15_ATI"/>
+                <command name="glDrawBuffersATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_element_array" supported="gl">
+            <require>
+                <enum name="GL_ELEMENT_ARRAY_ATI"/>
+                <enum name="GL_ELEMENT_ARRAY_TYPE_ATI"/>
+                <enum name="GL_ELEMENT_ARRAY_POINTER_ATI"/>
+                <command name="glElementPointerATI"/>
+                <command name="glDrawElementArrayATI"/>
+                <command name="glDrawRangeElementArrayATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_envmap_bumpmap" supported="gl">
+            <require>
+                <enum name="GL_BUMP_ROT_MATRIX_ATI"/>
+                <enum name="GL_BUMP_ROT_MATRIX_SIZE_ATI"/>
+                <enum name="GL_BUMP_NUM_TEX_UNITS_ATI"/>
+                <enum name="GL_BUMP_TEX_UNITS_ATI"/>
+                <enum name="GL_DUDV_ATI"/>
+                <enum name="GL_DU8DV8_ATI"/>
+                <enum name="GL_BUMP_ENVMAP_ATI"/>
+                <enum name="GL_BUMP_TARGET_ATI"/>
+                <command name="glTexBumpParameterivATI"/>
+                <command name="glTexBumpParameterfvATI"/>
+                <command name="glGetTexBumpParameterivATI"/>
+                <command name="glGetTexBumpParameterfvATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_fragment_shader" supported="gl">
+            <require>
+                <enum name="GL_FRAGMENT_SHADER_ATI"/>
+                <enum name="GL_REG_0_ATI"/>
+                <enum name="GL_REG_1_ATI"/>
+                <enum name="GL_REG_2_ATI"/>
+                <enum name="GL_REG_3_ATI"/>
+                <enum name="GL_REG_4_ATI"/>
+                <enum name="GL_REG_5_ATI"/>
+                <enum name="GL_REG_6_ATI"/>
+                <enum name="GL_REG_7_ATI"/>
+                <enum name="GL_REG_8_ATI"/>
+                <enum name="GL_REG_9_ATI"/>
+                <enum name="GL_REG_10_ATI"/>
+                <enum name="GL_REG_11_ATI"/>
+                <enum name="GL_REG_12_ATI"/>
+                <enum name="GL_REG_13_ATI"/>
+                <enum name="GL_REG_14_ATI"/>
+                <enum name="GL_REG_15_ATI"/>
+                <enum name="GL_REG_16_ATI"/>
+                <enum name="GL_REG_17_ATI"/>
+                <enum name="GL_REG_18_ATI"/>
+                <enum name="GL_REG_19_ATI"/>
+                <enum name="GL_REG_20_ATI"/>
+                <enum name="GL_REG_21_ATI"/>
+                <enum name="GL_REG_22_ATI"/>
+                <enum name="GL_REG_23_ATI"/>
+                <enum name="GL_REG_24_ATI"/>
+                <enum name="GL_REG_25_ATI"/>
+                <enum name="GL_REG_26_ATI"/>
+                <enum name="GL_REG_27_ATI"/>
+                <enum name="GL_REG_28_ATI"/>
+                <enum name="GL_REG_29_ATI"/>
+                <enum name="GL_REG_30_ATI"/>
+                <enum name="GL_REG_31_ATI"/>
+                <enum name="GL_CON_0_ATI"/>
+                <enum name="GL_CON_1_ATI"/>
+                <enum name="GL_CON_2_ATI"/>
+                <enum name="GL_CON_3_ATI"/>
+                <enum name="GL_CON_4_ATI"/>
+                <enum name="GL_CON_5_ATI"/>
+                <enum name="GL_CON_6_ATI"/>
+                <enum name="GL_CON_7_ATI"/>
+                <enum name="GL_CON_8_ATI"/>
+                <enum name="GL_CON_9_ATI"/>
+                <enum name="GL_CON_10_ATI"/>
+                <enum name="GL_CON_11_ATI"/>
+                <enum name="GL_CON_12_ATI"/>
+                <enum name="GL_CON_13_ATI"/>
+                <enum name="GL_CON_14_ATI"/>
+                <enum name="GL_CON_15_ATI"/>
+                <enum name="GL_CON_16_ATI"/>
+                <enum name="GL_CON_17_ATI"/>
+                <enum name="GL_CON_18_ATI"/>
+                <enum name="GL_CON_19_ATI"/>
+                <enum name="GL_CON_20_ATI"/>
+                <enum name="GL_CON_21_ATI"/>
+                <enum name="GL_CON_22_ATI"/>
+                <enum name="GL_CON_23_ATI"/>
+                <enum name="GL_CON_24_ATI"/>
+                <enum name="GL_CON_25_ATI"/>
+                <enum name="GL_CON_26_ATI"/>
+                <enum name="GL_CON_27_ATI"/>
+                <enum name="GL_CON_28_ATI"/>
+                <enum name="GL_CON_29_ATI"/>
+                <enum name="GL_CON_30_ATI"/>
+                <enum name="GL_CON_31_ATI"/>
+                <enum name="GL_MOV_ATI"/>
+                <enum name="GL_ADD_ATI"/>
+                <enum name="GL_MUL_ATI"/>
+                <enum name="GL_SUB_ATI"/>
+                <enum name="GL_DOT3_ATI"/>
+                <enum name="GL_DOT4_ATI"/>
+                <enum name="GL_MAD_ATI"/>
+                <enum name="GL_LERP_ATI"/>
+                <enum name="GL_CND_ATI"/>
+                <enum name="GL_CND0_ATI"/>
+                <enum name="GL_DOT2_ADD_ATI"/>
+                <enum name="GL_SECONDARY_INTERPOLATOR_ATI"/>
+                <enum name="GL_NUM_FRAGMENT_REGISTERS_ATI"/>
+                <enum name="GL_NUM_FRAGMENT_CONSTANTS_ATI"/>
+                <enum name="GL_NUM_PASSES_ATI"/>
+                <enum name="GL_NUM_INSTRUCTIONS_PER_PASS_ATI"/>
+                <enum name="GL_NUM_INSTRUCTIONS_TOTAL_ATI"/>
+                <enum name="GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI"/>
+                <enum name="GL_NUM_LOOPBACK_COMPONENTS_ATI"/>
+                <enum name="GL_COLOR_ALPHA_PAIRING_ATI"/>
+                <enum name="GL_SWIZZLE_STR_ATI"/>
+                <enum name="GL_SWIZZLE_STQ_ATI"/>
+                <enum name="GL_SWIZZLE_STR_DR_ATI"/>
+                <enum name="GL_SWIZZLE_STQ_DQ_ATI"/>
+                <enum name="GL_SWIZZLE_STRQ_ATI"/>
+                <enum name="GL_SWIZZLE_STRQ_DQ_ATI"/>
+                <enum name="GL_RED_BIT_ATI"/>
+                <enum name="GL_GREEN_BIT_ATI"/>
+                <enum name="GL_BLUE_BIT_ATI"/>
+                <enum name="GL_2X_BIT_ATI"/>
+                <enum name="GL_4X_BIT_ATI"/>
+                <enum name="GL_8X_BIT_ATI"/>
+                <enum name="GL_HALF_BIT_ATI"/>
+                <enum name="GL_QUARTER_BIT_ATI"/>
+                <enum name="GL_EIGHTH_BIT_ATI"/>
+                <enum name="GL_SATURATE_BIT_ATI"/>
+                <enum name="GL_COMP_BIT_ATI"/>
+                <enum name="GL_NEGATE_BIT_ATI"/>
+                <enum name="GL_BIAS_BIT_ATI"/>
+                <command name="glGenFragmentShadersATI"/>
+                <command name="glBindFragmentShaderATI"/>
+                <command name="glDeleteFragmentShaderATI"/>
+                <command name="glBeginFragmentShaderATI"/>
+                <command name="glEndFragmentShaderATI"/>
+                <command name="glPassTexCoordATI"/>
+                <command name="glSampleMapATI"/>
+                <command name="glColorFragmentOp1ATI"/>
+                <command name="glColorFragmentOp2ATI"/>
+                <command name="glColorFragmentOp3ATI"/>
+                <command name="glAlphaFragmentOp1ATI"/>
+                <command name="glAlphaFragmentOp2ATI"/>
+                <command name="glAlphaFragmentOp3ATI"/>
+                <command name="glSetFragmentShaderConstantATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_map_object_buffer" supported="gl">
+            <require>
+                <command name="glMapObjectBufferATI"/>
+                <command name="glUnmapObjectBufferATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_meminfo" supported="gl">
+            <require>
+                <enum name="GL_VBO_FREE_MEMORY_ATI"/>
+                <enum name="GL_TEXTURE_FREE_MEMORY_ATI"/>
+                <enum name="GL_RENDERBUFFER_FREE_MEMORY_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_pixel_format_float" supported="gl" comment="WGL extension defining some associated GL enums. ATI does not export this extension.">
+            <require>
+                <enum name="GL_RGBA_FLOAT_MODE_ATI"/>
+                <enum name="GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_pn_triangles" supported="gl">
+            <require>
+                <enum name="GL_PN_TRIANGLES_ATI"/>
+                <enum name="GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI"/>
+                <enum name="GL_PN_TRIANGLES_POINT_MODE_ATI"/>
+                <enum name="GL_PN_TRIANGLES_NORMAL_MODE_ATI"/>
+                <enum name="GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI"/>
+                <enum name="GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI"/>
+                <enum name="GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI"/>
+                <enum name="GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI"/>
+                <enum name="GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI"/>
+                <command name="glPNTrianglesiATI"/>
+                <command name="glPNTrianglesfATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_separate_stencil" supported="gl">
+            <require>
+                <enum name="GL_STENCIL_BACK_FUNC_ATI"/>
+                <enum name="GL_STENCIL_BACK_FAIL_ATI"/>
+                <enum name="GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI"/>
+                <enum name="GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI"/>
+                <command name="glStencilOpSeparateATI"/>
+                <command name="glStencilFuncSeparateATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_text_fragment_shader" supported="gl">
+            <require>
+                <enum name="GL_TEXT_FRAGMENT_SHADER_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_texture_env_combine3" supported="gl">
+            <require>
+                <enum name="GL_MODULATE_ADD_ATI"/>
+                <enum name="GL_MODULATE_SIGNED_ADD_ATI"/>
+                <enum name="GL_MODULATE_SUBTRACT_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_texture_float" supported="gl">
+            <require>
+                <enum name="GL_RGBA_FLOAT32_ATI"/>
+                <enum name="GL_RGB_FLOAT32_ATI"/>
+                <enum name="GL_ALPHA_FLOAT32_ATI"/>
+                <enum name="GL_INTENSITY_FLOAT32_ATI"/>
+                <enum name="GL_LUMINANCE_FLOAT32_ATI"/>
+                <enum name="GL_LUMINANCE_ALPHA_FLOAT32_ATI"/>
+                <enum name="GL_RGBA_FLOAT16_ATI"/>
+                <enum name="GL_RGB_FLOAT16_ATI"/>
+                <enum name="GL_ALPHA_FLOAT16_ATI"/>
+                <enum name="GL_INTENSITY_FLOAT16_ATI"/>
+                <enum name="GL_LUMINANCE_FLOAT16_ATI"/>
+                <enum name="GL_LUMINANCE_ALPHA_FLOAT16_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_texture_mirror_once" supported="gl">
+            <require>
+                <enum name="GL_MIRROR_CLAMP_ATI"/>
+                <enum name="GL_MIRROR_CLAMP_TO_EDGE_ATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_vertex_array_object" supported="gl">
+            <require>
+                <enum name="GL_STATIC_ATI"/>
+                <enum name="GL_DYNAMIC_ATI"/>
+                <enum name="GL_PRESERVE_ATI"/>
+                <enum name="GL_DISCARD_ATI"/>
+                <enum name="GL_OBJECT_BUFFER_SIZE_ATI"/>
+                <enum name="GL_OBJECT_BUFFER_USAGE_ATI"/>
+                <enum name="GL_ARRAY_OBJECT_BUFFER_ATI"/>
+                <enum name="GL_ARRAY_OBJECT_OFFSET_ATI"/>
+                <command name="glNewObjectBufferATI"/>
+                <command name="glIsObjectBufferATI"/>
+                <command name="glUpdateObjectBufferATI"/>
+                <command name="glGetObjectBufferfvATI"/>
+                <command name="glGetObjectBufferivATI"/>
+                <command name="glFreeObjectBufferATI"/>
+                <command name="glArrayObjectATI"/>
+                <command name="glGetArrayObjectfvATI"/>
+                <command name="glGetArrayObjectivATI"/>
+                <command name="glVariantArrayObjectATI"/>
+                <command name="glGetVariantArrayObjectfvATI"/>
+                <command name="glGetVariantArrayObjectivATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_vertex_attrib_array_object" supported="gl">
+            <require>
+                <command name="glVertexAttribArrayObjectATI"/>
+                <command name="glGetVertexAttribArrayObjectfvATI"/>
+                <command name="glGetVertexAttribArrayObjectivATI"/>
+            </require>
+        </extension>
+        <extension name="GL_ATI_vertex_streams" supported="gl">
+            <require>
+                <enum name="GL_MAX_VERTEX_STREAMS_ATI"/>
+                <enum name="GL_VERTEX_STREAM0_ATI"/>
+                <enum name="GL_VERTEX_STREAM1_ATI"/>
+                <enum name="GL_VERTEX_STREAM2_ATI"/>
+                <enum name="GL_VERTEX_STREAM3_ATI"/>
+                <enum name="GL_VERTEX_STREAM4_ATI"/>
+                <enum name="GL_VERTEX_STREAM5_ATI"/>
+                <enum name="GL_VERTEX_STREAM6_ATI"/>
+                <enum name="GL_VERTEX_STREAM7_ATI"/>
+                <enum name="GL_VERTEX_SOURCE_ATI"/>
+                <command name="glVertexStream1sATI"/>
+                <command name="glVertexStream1svATI"/>
+                <command name="glVertexStream1iATI"/>
+                <command name="glVertexStream1ivATI"/>
+                <command name="glVertexStream1fATI"/>
+                <command name="glVertexStream1fvATI"/>
+                <command name="glVertexStream1dATI"/>
+                <command name="glVertexStream1dvATI"/>
+                <command name="glVertexStream2sATI"/>
+                <command name="glVertexStream2svATI"/>
+                <command name="glVertexStream2iATI"/>
+                <command name="glVertexStream2ivATI"/>
+                <command name="glVertexStream2fATI"/>
+                <command name="glVertexStream2fvATI"/>
+                <command name="glVertexStream2dATI"/>
+                <command name="glVertexStream2dvATI"/>
+                <command name="glVertexStream3sATI"/>
+                <command name="glVertexStream3svATI"/>
+                <command name="glVertexStream3iATI"/>
+                <command name="glVertexStream3ivATI"/>
+                <command name="glVertexStream3fATI"/>
+                <command name="glVertexStream3fvATI"/>
+                <command name="glVertexStream3dATI"/>
+                <command name="glVertexStream3dvATI"/>
+                <command name="glVertexStream4sATI"/>
+                <command name="glVertexStream4svATI"/>
+                <command name="glVertexStream4iATI"/>
+                <command name="glVertexStream4ivATI"/>
+                <command name="glVertexStream4fATI"/>
+                <command name="glVertexStream4fvATI"/>
+                <command name="glVertexStream4dATI"/>
+                <command name="glVertexStream4dvATI"/>
+                <command name="glNormalStream3bATI"/>
+                <command name="glNormalStream3bvATI"/>
+                <command name="glNormalStream3sATI"/>
+                <command name="glNormalStream3svATI"/>
+                <command name="glNormalStream3iATI"/>
+                <command name="glNormalStream3ivATI"/>
+                <command name="glNormalStream3fATI"/>
+                <command name="glNormalStream3fvATI"/>
+                <command name="glNormalStream3dATI"/>
+                <command name="glNormalStream3dvATI"/>
+                <command name="glClientActiveVertexStreamATI"/>
+                <command name="glVertexBlendEnviATI"/>
+                <command name="glVertexBlendEnvfATI"/>
+            </require>
+        </extension>
+        <extension name="GL_DMP_program_binary" supported="gles2">
+            <require>
+                <enum name="GL_SMAPHS30_PROGRAM_BINARY_DMP"/>
+                <enum name="GL_SMAPHS_PROGRAM_BINARY_DMP"/>
+                <enum name="GL_DMP_PROGRAM_BINARY_DMP"/>
+            </require>
+        </extension>
+        <extension name="GL_DMP_shader_binary" supported="gles2">
+            <require>
+                <enum name="GL_SHADER_BINARY_DMP"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_422_pixels" supported="gl">
+            <require>
+                <enum name="GL_422_EXT"/>
+                <enum name="GL_422_REV_EXT"/>
+                <enum name="GL_422_AVERAGE_EXT"/>
+                <enum name="GL_422_REV_AVERAGE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_EGL_image_array" supported="gles2">
+        </extension>
+        <extension name="GL_EXT_EGL_image_storage" supported="gl|glcore|gles2">
+            <require>
+                <type name="GLeglImageOES"/>
+                <command name="glEGLImageTargetTexStorageEXT"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access, ARB_direct_state_access, or OpenGL 4.5 are supported">
+                <command name="glEGLImageTargetTextureStorageEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_YUV_target" supported="gles2">
+            <require>
+                <enum name="GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT"/>
+                <enum name="GL_TEXTURE_EXTERNAL_OES"/>
+                <enum name="GL_TEXTURE_BINDING_EXTERNAL_OES"/>
+                <enum name="GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_abgr" supported="gl">
+            <require>
+                <enum name="GL_ABGR_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_base_instance" supported="gles2">
+            <require>
+                <command name="glDrawArraysInstancedBaseInstanceEXT"/>
+                <command name="glDrawElementsInstancedBaseInstanceEXT"/>
+                <command name="glDrawElementsInstancedBaseVertexBaseInstanceEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_bgra" supported="gl">
+            <require>
+                <enum name="GL_BGR_EXT"/>
+                <enum name="GL_BGRA_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_bindable_uniform" supported="gl">
+            <require>
+                <enum name="GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_BINDABLE_UNIFORM_SIZE_EXT"/>
+                <enum name="GL_UNIFORM_BUFFER_EXT"/>
+                <enum name="GL_UNIFORM_BUFFER_BINDING_EXT"/>
+                <command name="glUniformBufferEXT"/>
+                <command name="glGetUniformBufferSizeEXT"/>
+                <command name="glGetUniformOffsetEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_color" supported="gl">
+            <require>
+                <enum name="GL_CONSTANT_COLOR_EXT"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_COLOR_EXT"/>
+                <enum name="GL_CONSTANT_ALPHA_EXT"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_ALPHA_EXT"/>
+                <enum name="GL_BLEND_COLOR_EXT"/>
+                <command name="glBlendColorEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_equation_separate" supported="gl">
+            <require>
+                <enum name="GL_BLEND_EQUATION_RGB_EXT"/>
+                <enum name="GL_BLEND_EQUATION_ALPHA_EXT"/>
+                <command name="glBlendEquationSeparateEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_func_extended" supported="gles2">
+            <require>
+                <enum name="GL_SRC1_COLOR_EXT"/>
+                <enum name="GL_SRC1_ALPHA_EXT"/>
+                <enum name="GL_ONE_MINUS_SRC1_COLOR_EXT"/>
+                <enum name="GL_ONE_MINUS_SRC1_ALPHA_EXT"/>
+                <enum name="GL_SRC_ALPHA_SATURATE_EXT"/>
+                <enum name="GL_LOCATION_INDEX_EXT"/>
+                <enum name="GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT"/>
+                <command name="glBindFragDataLocationIndexedEXT"/>
+                <command name="glBindFragDataLocationEXT"/>
+                <command name="glGetProgramResourceLocationIndexEXT"/>
+                <command name="glGetFragDataIndexEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_func_separate" supported="gl">
+            <require>
+                <enum name="GL_BLEND_DST_RGB_EXT"/>
+                <enum name="GL_BLEND_SRC_RGB_EXT"/>
+                <enum name="GL_BLEND_DST_ALPHA_EXT"/>
+                <enum name="GL_BLEND_SRC_ALPHA_EXT"/>
+                <command name="glBlendFuncSeparateEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_logic_op" supported="gl"/>
+        <extension name="GL_EXT_blend_minmax" supported="gl|gles1|gles2">
+            <require>
+                <enum name="GL_MIN_EXT"/>
+                <enum name="GL_MAX_EXT"/>
+            </require>
+            <require api="gl">
+                <enum name="GL_FUNC_ADD_EXT"/>
+                <enum name="GL_BLEND_EQUATION_EXT"/>
+                <command name="glBlendEquationEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_blend_subtract" supported="gl">
+            <require>
+                <enum name="GL_FUNC_SUBTRACT_EXT"/>
+                <enum name="GL_FUNC_REVERSE_SUBTRACT_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_buffer_storage" supported="gles2">
+            <require>
+                <enum name="GL_MAP_READ_BIT"/>
+                <enum name="GL_MAP_WRITE_BIT"/>
+                <enum name="GL_MAP_PERSISTENT_BIT_EXT"/>
+                <enum name="GL_MAP_COHERENT_BIT_EXT"/>
+                <enum name="GL_DYNAMIC_STORAGE_BIT_EXT"/>
+                <enum name="GL_CLIENT_STORAGE_BIT_EXT"/>
+                <enum name="GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT"/>
+                <enum name="GL_BUFFER_IMMUTABLE_STORAGE_EXT"/>
+                <enum name="GL_BUFFER_STORAGE_FLAGS_EXT"/>
+                <command name="glBufferStorageEXT"/>
+                <!-- <command name="glNamedBufferStorageEXT"/> -->
+            </require>
+        </extension>
+        <extension name="GL_EXT_clear_texture" supported="gles2">
+            <require>
+                <command name="glClearTexImageEXT"/>
+                <command name="glClearTexSubImageEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_clip_control" supported="gles2">
+            <require comment="Port of GL_ARB_clip_control">
+                <command name="glClipControlEXT"/>
+                <enum name="GL_LOWER_LEFT_EXT"/>
+                <enum name="GL_UPPER_LEFT_EXT"/>
+                <enum name="GL_NEGATIVE_ONE_TO_ONE_EXT"/>
+                <enum name="GL_ZERO_TO_ONE_EXT"/>
+                <enum name="GL_CLIP_ORIGIN_EXT"/>
+                <enum name="GL_CLIP_DEPTH_MODE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_clip_cull_distance" supported="gles2">
+            <require>
+                <enum name="GL_MAX_CLIP_DISTANCES_EXT"/>
+                <enum name="GL_MAX_CULL_DISTANCES_EXT"/>
+                <enum name="GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES_EXT"/>
+                <enum name="GL_CLIP_DISTANCE0_EXT"/>
+                <enum name="GL_CLIP_DISTANCE1_EXT"/>
+                <enum name="GL_CLIP_DISTANCE2_EXT"/>
+                <enum name="GL_CLIP_DISTANCE3_EXT"/>
+                <enum name="GL_CLIP_DISTANCE4_EXT"/>
+                <enum name="GL_CLIP_DISTANCE5_EXT"/>
+                <enum name="GL_CLIP_DISTANCE6_EXT"/>
+                <enum name="GL_CLIP_DISTANCE7_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_clip_volume_hint" supported="gl">
+            <require>
+                <enum name="GL_CLIP_VOLUME_CLIPPING_HINT_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_cmyka" supported="gl">
+            <require>
+                <enum name="GL_CMYK_EXT"/>
+                <enum name="GL_CMYKA_EXT"/>
+                <enum name="GL_PACK_CMYK_HINT_EXT"/>
+                <enum name="GL_UNPACK_CMYK_HINT_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_color_buffer_float" supported="gles2"/>
+        <extension name="GL_EXT_color_buffer_half_float" supported="gles2">
+            <require>
+                <enum name="GL_RGBA16F_EXT"/>
+                <enum name="GL_RGB16F_EXT"/>
+                <enum name="GL_RG16F_EXT"/>
+                <enum name="GL_R16F_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT"/>
+                <enum name="GL_UNSIGNED_NORMALIZED_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_color_subtable" supported="gl">
+            <require>
+                <command name="glColorSubTableEXT"/>
+                <command name="glCopyColorSubTableEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_compiled_vertex_array" supported="gl">
+            <require>
+                <enum name="GL_ARRAY_ELEMENT_LOCK_FIRST_EXT"/>
+                <enum name="GL_ARRAY_ELEMENT_LOCK_COUNT_EXT"/>
+                <command name="glLockArraysEXT"/>
+                <command name="glUnlockArraysEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_conservative_depth" supported="gles2"/>
+        <extension name="GL_EXT_convolution" supported="gl">
+            <require>
+                <enum name="GL_CONVOLUTION_1D_EXT"/>
+                <enum name="GL_CONVOLUTION_2D_EXT"/>
+                <enum name="GL_SEPARABLE_2D_EXT"/>
+                <enum name="GL_CONVOLUTION_BORDER_MODE_EXT"/>
+                <enum name="GL_CONVOLUTION_FILTER_SCALE_EXT"/>
+                <enum name="GL_CONVOLUTION_FILTER_BIAS_EXT"/>
+                <enum name="GL_REDUCE_EXT"/>
+                <enum name="GL_CONVOLUTION_FORMAT_EXT"/>
+                <enum name="GL_CONVOLUTION_WIDTH_EXT"/>
+                <enum name="GL_CONVOLUTION_HEIGHT_EXT"/>
+                <enum name="GL_MAX_CONVOLUTION_WIDTH_EXT"/>
+                <enum name="GL_MAX_CONVOLUTION_HEIGHT_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_RED_SCALE_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_GREEN_SCALE_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_BLUE_SCALE_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_ALPHA_SCALE_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_RED_BIAS_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_GREEN_BIAS_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_BLUE_BIAS_EXT"/>
+                <enum name="GL_POST_CONVOLUTION_ALPHA_BIAS_EXT"/>
+                <command name="glConvolutionFilter1DEXT"/>
+                <command name="glConvolutionFilter2DEXT"/>
+                <command name="glConvolutionParameterfEXT"/>
+                <command name="glConvolutionParameterfvEXT"/>
+                <command name="glConvolutionParameteriEXT"/>
+                <command name="glConvolutionParameterivEXT"/>
+                <command name="glCopyConvolutionFilter1DEXT"/>
+                <command name="glCopyConvolutionFilter2DEXT"/>
+                <command name="glGetConvolutionFilterEXT"/>
+                <command name="glGetConvolutionParameterfvEXT"/>
+                <command name="glGetConvolutionParameterivEXT"/>
+                <command name="glGetSeparableFilterEXT"/>
+                <command name="glSeparableFilter2DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_coordinate_frame" supported="gl">
+            <require>
+                <enum name="GL_TANGENT_ARRAY_EXT"/>
+                <enum name="GL_BINORMAL_ARRAY_EXT"/>
+                <enum name="GL_CURRENT_TANGENT_EXT"/>
+                <enum name="GL_CURRENT_BINORMAL_EXT"/>
+                <enum name="GL_TANGENT_ARRAY_TYPE_EXT"/>
+                <enum name="GL_TANGENT_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_BINORMAL_ARRAY_TYPE_EXT"/>
+                <enum name="GL_BINORMAL_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_TANGENT_ARRAY_POINTER_EXT"/>
+                <enum name="GL_BINORMAL_ARRAY_POINTER_EXT"/>
+                <enum name="GL_MAP1_TANGENT_EXT"/>
+                <enum name="GL_MAP2_TANGENT_EXT"/>
+                <enum name="GL_MAP1_BINORMAL_EXT"/>
+                <enum name="GL_MAP2_BINORMAL_EXT"/>
+                <command name="glTangent3bEXT"/>
+                <command name="glTangent3bvEXT"/>
+                <command name="glTangent3dEXT"/>
+                <command name="glTangent3dvEXT"/>
+                <command name="glTangent3fEXT"/>
+                <command name="glTangent3fvEXT"/>
+                <command name="glTangent3iEXT"/>
+                <command name="glTangent3ivEXT"/>
+                <command name="glTangent3sEXT"/>
+                <command name="glTangent3svEXT"/>
+                <command name="glBinormal3bEXT"/>
+                <command name="glBinormal3bvEXT"/>
+                <command name="glBinormal3dEXT"/>
+                <command name="glBinormal3dvEXT"/>
+                <command name="glBinormal3fEXT"/>
+                <command name="glBinormal3fvEXT"/>
+                <command name="glBinormal3iEXT"/>
+                <command name="glBinormal3ivEXT"/>
+                <command name="glBinormal3sEXT"/>
+                <command name="glBinormal3svEXT"/>
+                <command name="glTangentPointerEXT"/>
+                <command name="glBinormalPointerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_copy_image" supported="gles2">
+            <require>
+                <command name="glCopyImageSubDataEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_copy_texture" supported="gl">
+            <require>
+                <command name="glCopyTexImage1DEXT"/>
+                <command name="glCopyTexImage2DEXT"/>
+                <command name="glCopyTexSubImage1DEXT"/>
+                <command name="glCopyTexSubImage2DEXT"/>
+                <command name="glCopyTexSubImage3DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_cull_vertex" supported="gl">
+            <require>
+                <enum name="GL_CULL_VERTEX_EXT"/>
+                <enum name="GL_CULL_VERTEX_EYE_POSITION_EXT"/>
+                <enum name="GL_CULL_VERTEX_OBJECT_POSITION_EXT"/>
+                <command name="glCullParameterdvEXT"/>
+                <command name="glCullParameterfvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_debug_label" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_PROGRAM_PIPELINE_OBJECT_EXT"/>
+                <enum name="GL_PROGRAM_OBJECT_EXT"/>
+                <enum name="GL_SHADER_OBJECT_EXT"/>
+                <enum name="GL_BUFFER_OBJECT_EXT"/>
+                <enum name="GL_QUERY_OBJECT_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_OBJECT_EXT"/>
+                <command name="glLabelObjectEXT"/>
+                <command name="glGetObjectLabelEXT"/>
+            </require>
+            <require comment="Depends on OpenGL ES 3.0">
+                <enum name="GL_SAMPLER"/>
+                <enum name="GL_TRANSFORM_FEEDBACK"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_debug_marker" supported="gl|glcore|gles1|gles2">
+            <require>
+                <command name="glInsertEventMarkerEXT"/>
+                <command name="glPushGroupMarkerEXT"/>
+                <command name="glPopGroupMarkerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_depth_bounds_test" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_BOUNDS_TEST_EXT"/>
+                <enum name="GL_DEPTH_BOUNDS_EXT"/>
+                <command name="glDepthBoundsEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_depth_clamp" supported="gles2">
+            <require>
+                <enum name="GL_DEPTH_CLAMP_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_direct_state_access" supported="gl|glcore" comment="DSA extension doesn't identify which interfaces are core profile and keeps getting expanded. This is in sync with revision 34, 2010/09/07">
+            <require>
+                <enum name="GL_PROGRAM_MATRIX_EXT"/>
+                <enum name="GL_TRANSPOSE_PROGRAM_MATRIX_EXT"/>
+                <enum name="GL_PROGRAM_MATRIX_STACK_DEPTH_EXT"/>
+            </require>
+            <require comment="OpenGL 1.0: New matrix commands">
+                <command name="glMatrixLoadfEXT"/>
+                <command name="glMatrixLoaddEXT"/>
+                <command name="glMatrixMultfEXT"/>
+                <command name="glMatrixMultdEXT"/>
+                <command name="glMatrixLoadIdentityEXT"/>
+                <command name="glMatrixRotatefEXT"/>
+                <command name="glMatrixRotatedEXT"/>
+                <command name="glMatrixScalefEXT"/>
+                <command name="glMatrixScaledEXT"/>
+                <command name="glMatrixTranslatefEXT"/>
+                <command name="glMatrixTranslatedEXT"/>
+                <command name="glMatrixFrustumEXT"/>
+                <command name="glMatrixOrthoEXT"/>
+                <command name="glMatrixPopEXT"/>
+                <command name="glMatrixPushEXT"/>
+            </require>
+            <require comment="OpenGL 1.1: New client commands">
+                <command name="glClientAttribDefaultEXT"/>
+                <command name="glPushClientAttribDefaultEXT"/>
+            </require>
+            <require comment="OpenGL 1.1: New texture object commands">
+                <command name="glTextureParameterfEXT"/>
+                <command name="glTextureParameterfvEXT"/>
+                <command name="glTextureParameteriEXT"/>
+                <command name="glTextureParameterivEXT"/>
+                <command name="glTextureImage1DEXT"/>
+                <command name="glTextureImage2DEXT"/>
+                <command name="glTextureSubImage1DEXT"/>
+                <command name="glTextureSubImage2DEXT"/>
+                <command name="glCopyTextureImage1DEXT"/>
+                <command name="glCopyTextureImage2DEXT"/>
+                <command name="glCopyTextureSubImage1DEXT"/>
+                <command name="glCopyTextureSubImage2DEXT"/>
+                <command name="glGetTextureImageEXT"/>
+                <command name="glGetTextureParameterfvEXT"/>
+                <command name="glGetTextureParameterivEXT"/>
+                <command name="glGetTextureLevelParameterfvEXT"/>
+                <command name="glGetTextureLevelParameterivEXT"/>
+            </require>
+            <require comment="OpenGL 1.2: New 3D texture object commands">
+                <command name="glTextureImage3DEXT"/>
+                <command name="glTextureSubImage3DEXT"/>
+                <command name="glCopyTextureSubImage3DEXT"/>
+            </require>
+            <require comment="OpenGL 1.2.1: New multitexture commands">
+                <command name="glBindMultiTextureEXT"/>
+                <command name="glMultiTexCoordPointerEXT"/>
+                <command name="glMultiTexEnvfEXT"/>
+                <command name="glMultiTexEnvfvEXT"/>
+                <command name="glMultiTexEnviEXT"/>
+                <command name="glMultiTexEnvivEXT"/>
+                <command name="glMultiTexGendEXT"/>
+                <command name="glMultiTexGendvEXT"/>
+                <command name="glMultiTexGenfEXT"/>
+                <command name="glMultiTexGenfvEXT"/>
+                <command name="glMultiTexGeniEXT"/>
+                <command name="glMultiTexGenivEXT"/>
+                <command name="glGetMultiTexEnvfvEXT"/>
+                <command name="glGetMultiTexEnvivEXT"/>
+                <command name="glGetMultiTexGendvEXT"/>
+                <command name="glGetMultiTexGenfvEXT"/>
+                <command name="glGetMultiTexGenivEXT"/>
+                <command name="glMultiTexParameteriEXT"/>
+                <command name="glMultiTexParameterivEXT"/>
+                <command name="glMultiTexParameterfEXT"/>
+                <command name="glMultiTexParameterfvEXT"/>
+                <command name="glMultiTexImage1DEXT"/>
+                <command name="glMultiTexImage2DEXT"/>
+                <command name="glMultiTexSubImage1DEXT"/>
+                <command name="glMultiTexSubImage2DEXT"/>
+                <command name="glCopyMultiTexImage1DEXT"/>
+                <command name="glCopyMultiTexImage2DEXT"/>
+                <command name="glCopyMultiTexSubImage1DEXT"/>
+                <command name="glCopyMultiTexSubImage2DEXT"/>
+                <command name="glGetMultiTexImageEXT"/>
+                <command name="glGetMultiTexParameterfvEXT"/>
+                <command name="glGetMultiTexParameterivEXT"/>
+                <command name="glGetMultiTexLevelParameterfvEXT"/>
+                <command name="glGetMultiTexLevelParameterivEXT"/>
+                <command name="glMultiTexImage3DEXT"/>
+                <command name="glMultiTexSubImage3DEXT"/>
+                <command name="glCopyMultiTexSubImage3DEXT"/>
+            </require>
+            <require comment="OpenGL 1.2.1: New indexed texture commands">
+                <command name="glEnableClientStateIndexedEXT"/>
+                <command name="glDisableClientStateIndexedEXT"/>
+            </require>
+            <require comment="OpenGL 1.2.1: New indexed generic queries">
+                <command name="glGetFloatIndexedvEXT"/>
+                <command name="glGetDoubleIndexedvEXT"/>
+                <command name="glGetPointerIndexedvEXT"/>
+            </require>
+            <require comment="OpenGL 1.2.1: Extend EXT_draw_buffers2 commands">
+                <command name="glEnableIndexedEXT"/>
+                <command name="glDisableIndexedEXT"/>
+                <command name="glIsEnabledIndexedEXT"/>
+                <command name="glGetIntegerIndexedvEXT"/>
+                <command name="glGetBooleanIndexedvEXT"/>
+            </require>
+            <require comment="OpenGL 1.3: New compressed texture object commands">
+                <command name="glCompressedTextureImage3DEXT"/>
+                <command name="glCompressedTextureImage2DEXT"/>
+                <command name="glCompressedTextureImage1DEXT"/>
+                <command name="glCompressedTextureSubImage3DEXT"/>
+                <command name="glCompressedTextureSubImage2DEXT"/>
+                <command name="glCompressedTextureSubImage1DEXT"/>
+                <command name="glGetCompressedTextureImageEXT"/>
+            </require>
+            <require comment="OpenGL 1.3: New multitexture compressed texture commands">
+                <command name="glCompressedMultiTexImage3DEXT"/>
+                <command name="glCompressedMultiTexImage2DEXT"/>
+                <command name="glCompressedMultiTexImage1DEXT"/>
+                <command name="glCompressedMultiTexSubImage3DEXT"/>
+                <command name="glCompressedMultiTexSubImage2DEXT"/>
+                <command name="glCompressedMultiTexSubImage1DEXT"/>
+                <command name="glGetCompressedMultiTexImageEXT"/>
+            </require>
+            <require comment="OpenGL 1.3: New transpose matrix commands">
+                <command name="glMatrixLoadTransposefEXT"/>
+                <command name="glMatrixLoadTransposedEXT"/>
+                <command name="glMatrixMultTransposefEXT"/>
+                <command name="glMatrixMultTransposedEXT"/>
+            </require>
+            <require comment="OpenGL 1.5: New buffer commands">
+                <command name="glNamedBufferDataEXT"/>
+                <command name="glNamedBufferSubDataEXT"/>
+                <command name="glMapNamedBufferEXT"/>
+                <command name="glUnmapNamedBufferEXT"/>
+                <command name="glGetNamedBufferParameterivEXT"/>
+                <command name="glGetNamedBufferPointervEXT"/>
+                <command name="glGetNamedBufferSubDataEXT"/>
+            </require>
+            <require comment="OpenGL 2.0: New uniform commands">
+                <command name="glProgramUniform1fEXT"/>
+                <command name="glProgramUniform2fEXT"/>
+                <command name="glProgramUniform3fEXT"/>
+                <command name="glProgramUniform4fEXT"/>
+                <command name="glProgramUniform1iEXT"/>
+                <command name="glProgramUniform2iEXT"/>
+                <command name="glProgramUniform3iEXT"/>
+                <command name="glProgramUniform4iEXT"/>
+                <command name="glProgramUniform1fvEXT"/>
+                <command name="glProgramUniform2fvEXT"/>
+                <command name="glProgramUniform3fvEXT"/>
+                <command name="glProgramUniform4fvEXT"/>
+                <command name="glProgramUniform1ivEXT"/>
+                <command name="glProgramUniform2ivEXT"/>
+                <command name="glProgramUniform3ivEXT"/>
+                <command name="glProgramUniform4ivEXT"/>
+                <command name="glProgramUniformMatrix2fvEXT"/>
+                <command name="glProgramUniformMatrix3fvEXT"/>
+                <command name="glProgramUniformMatrix4fvEXT"/>
+            </require>
+            <require comment="OpenGL 2.1: New uniform matrix commands">
+                <command name="glProgramUniformMatrix2x3fvEXT"/>
+                <command name="glProgramUniformMatrix3x2fvEXT"/>
+                <command name="glProgramUniformMatrix2x4fvEXT"/>
+                <command name="glProgramUniformMatrix4x2fvEXT"/>
+                <command name="glProgramUniformMatrix3x4fvEXT"/>
+                <command name="glProgramUniformMatrix4x3fvEXT"/>
+            </require>
+            <require comment="Extend EXT_texture_buffer_object commands">
+                <command name="glTextureBufferEXT"/>
+                <command name="glMultiTexBufferEXT"/>
+            </require>
+            <require comment="Extend EXT_texture_integer commands">
+                <command name="glTextureParameterIivEXT"/>
+                <command name="glTextureParameterIuivEXT"/>
+                <command name="glGetTextureParameterIivEXT"/>
+                <command name="glGetTextureParameterIuivEXT"/>
+                <command name="glMultiTexParameterIivEXT"/>
+                <command name="glMultiTexParameterIuivEXT"/>
+                <command name="glGetMultiTexParameterIivEXT"/>
+                <command name="glGetMultiTexParameterIuivEXT"/>
+            </require>
+            <require comment="Extend EXT_gpu_shader4 commands">
+                <command name="glProgramUniform1uiEXT"/>
+                <command name="glProgramUniform2uiEXT"/>
+                <command name="glProgramUniform3uiEXT"/>
+                <command name="glProgramUniform4uiEXT"/>
+                <command name="glProgramUniform1uivEXT"/>
+                <command name="glProgramUniform2uivEXT"/>
+                <command name="glProgramUniform3uivEXT"/>
+                <command name="glProgramUniform4uivEXT"/>
+            </require>
+            <require comment="Extend EXT_gpu_program_parameters commands">
+                <command name="glNamedProgramLocalParameters4fvEXT"/>
+            </require>
+            <require comment="Extend NV_gpu_program4 commands">
+                <command name="glNamedProgramLocalParameterI4iEXT"/>
+                <command name="glNamedProgramLocalParameterI4ivEXT"/>
+                <command name="glNamedProgramLocalParametersI4ivEXT"/>
+                <command name="glNamedProgramLocalParameterI4uiEXT"/>
+                <command name="glNamedProgramLocalParameterI4uivEXT"/>
+                <command name="glNamedProgramLocalParametersI4uivEXT"/>
+                <command name="glGetNamedProgramLocalParameterIivEXT"/>
+                <command name="glGetNamedProgramLocalParameterIuivEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New indexed texture commands">
+                <command name="glEnableClientStateiEXT"/>
+                <command name="glDisableClientStateiEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New indexed generic queries">
+                <command name="glGetFloati_vEXT"/>
+                <command name="glGetDoublei_vEXT"/>
+                <command name="glGetPointeri_vEXT"/>
+            </require>
+            <require comment="Extend GL_ARB_vertex_program commands">
+                <command name="glNamedProgramStringEXT"/>
+                <command name="glNamedProgramLocalParameter4dEXT"/>
+                <command name="glNamedProgramLocalParameter4dvEXT"/>
+                <command name="glNamedProgramLocalParameter4fEXT"/>
+                <command name="glNamedProgramLocalParameter4fvEXT"/>
+                <command name="glGetNamedProgramLocalParameterdvEXT"/>
+                <command name="glGetNamedProgramLocalParameterfvEXT"/>
+                <command name="glGetNamedProgramivEXT"/>
+                <command name="glGetNamedProgramStringEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New renderbuffer commands">
+                <command name="glNamedRenderbufferStorageEXT"/>
+                <command name="glGetNamedRenderbufferParameterivEXT"/>
+                <command name="glNamedRenderbufferStorageMultisampleEXT"/>
+            </require>
+            <require comment="Extend NV_framebuffer_multisample_coverage">
+                <command name="glNamedRenderbufferStorageMultisampleCoverageEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New framebuffer commands">
+                <command name="glCheckNamedFramebufferStatusEXT"/>
+                <command name="glNamedFramebufferTexture1DEXT"/>
+                <command name="glNamedFramebufferTexture2DEXT"/>
+                <command name="glNamedFramebufferTexture3DEXT"/>
+                <command name="glNamedFramebufferRenderbufferEXT"/>
+                <command name="glGetNamedFramebufferAttachmentParameterivEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New texture commands">
+                <command name="glGenerateTextureMipmapEXT"/>
+                <command name="glGenerateMultiTexMipmapEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New framebuffer commands">
+                <command name="glFramebufferDrawBufferEXT"/>
+                <command name="glFramebufferDrawBuffersEXT"/>
+                <command name="glFramebufferReadBufferEXT"/>
+                <command name="glGetFramebufferParameterivEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New buffer data copy command">
+                <command name="glNamedCopyBufferSubDataEXT"/>
+            </require>
+            <require comment="Extend EXT_geometry_shader4 or NV_gpu_program4">
+                <command name="glNamedFramebufferTextureEXT"/>
+                <command name="glNamedFramebufferTextureLayerEXT"/>
+                <command name="glNamedFramebufferTextureFaceEXT"/>
+            </require>
+            <require comment="Extend NV_explicit_multisample">
+                <command name="glTextureRenderbufferEXT"/>
+                <command name="glMultiTexRenderbufferEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New vertex array specification commands for VAO">
+                <command name="glVertexArrayVertexOffsetEXT"/>
+                <command name="glVertexArrayColorOffsetEXT"/>
+                <command name="glVertexArrayEdgeFlagOffsetEXT"/>
+                <command name="glVertexArrayIndexOffsetEXT"/>
+                <command name="glVertexArrayNormalOffsetEXT"/>
+                <command name="glVertexArrayTexCoordOffsetEXT"/>
+                <command name="glVertexArrayMultiTexCoordOffsetEXT"/>
+                <command name="glVertexArrayFogCoordOffsetEXT"/>
+                <command name="glVertexArraySecondaryColorOffsetEXT"/>
+                <command name="glVertexArrayVertexAttribOffsetEXT"/>
+                <command name="glVertexArrayVertexAttribIOffsetEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New vertex array enable commands for VAO">
+                <command name="glEnableVertexArrayEXT"/>
+                <command name="glDisableVertexArrayEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New vertex attrib array enable commands for VAO">
+                <command name="glEnableVertexArrayAttribEXT"/>
+                <command name="glDisableVertexArrayAttribEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New queries for VAO">
+                <command name="glGetVertexArrayIntegervEXT"/>
+                <command name="glGetVertexArrayPointervEXT"/>
+                <command name="glGetVertexArrayIntegeri_vEXT"/>
+                <command name="glGetVertexArrayPointeri_vEXT"/>
+            </require>
+            <require comment="OpenGL 3.0: New buffer commands">
+                <command name="glMapNamedBufferRangeEXT"/>
+                <command name="glFlushMappedNamedBufferRangeEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_buffer_storage">
+                <command name="glNamedBufferStorageEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_clear_buffer_object">
+                <command name="glClearNamedBufferDataEXT"/>
+                <command name="glClearNamedBufferSubDataEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_framebuffer_no_attachments">
+                <command name="glNamedFramebufferParameteriEXT"/>
+                <command name="glGetNamedFramebufferParameterivEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_gpu_shader_fp64">
+                <command name="glProgramUniform1dEXT"/>
+                <command name="glProgramUniform2dEXT"/>
+                <command name="glProgramUniform3dEXT"/>
+                <command name="glProgramUniform4dEXT"/>
+                <command name="glProgramUniform1dvEXT"/>
+                <command name="glProgramUniform2dvEXT"/>
+                <command name="glProgramUniform3dvEXT"/>
+                <command name="glProgramUniform4dvEXT"/>
+                <command name="glProgramUniformMatrix2dvEXT"/>
+                <command name="glProgramUniformMatrix3dvEXT"/>
+                <command name="glProgramUniformMatrix4dvEXT"/>
+                <command name="glProgramUniformMatrix2x3dvEXT"/>
+                <command name="glProgramUniformMatrix2x4dvEXT"/>
+                <command name="glProgramUniformMatrix3x2dvEXT"/>
+                <command name="glProgramUniformMatrix3x4dvEXT"/>
+                <command name="glProgramUniformMatrix4x2dvEXT"/>
+                <command name="glProgramUniformMatrix4x3dvEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_texture_buffer_range">
+                <command name="glTextureBufferRangeEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_texture_storage">
+                <command name="glTextureStorage1DEXT"/>
+                <command name="glTextureStorage2DEXT"/>
+                <command name="glTextureStorage3DEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_texture_storage_multisample">
+                <command name="glTextureStorage2DMultisampleEXT"/>
+                <command name="glTextureStorage3DMultisampleEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_vertex_attrib_binding">
+                <command name="glVertexArrayBindVertexBufferEXT"/>
+                <command name="glVertexArrayVertexAttribFormatEXT"/>
+                <command name="glVertexArrayVertexAttribIFormatEXT"/>
+                <command name="glVertexArrayVertexAttribLFormatEXT"/>
+                <command name="glVertexArrayVertexAttribBindingEXT"/>
+                <command name="glVertexArrayVertexBindingDivisorEXT"/>
+            </require>
+            <require comment="Extended by GL_EXT_vertex_attrib_64bit">
+                <command name="glVertexArrayVertexAttribLOffsetEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_sparse_texture">
+                <command name="glTexturePageCommitmentEXT"/>
+            </require>
+            <require comment="Extended by GL_ARB_instanced_arrays">
+                <command name="glVertexArrayVertexAttribDivisorEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_discard_framebuffer" supported="gles1|gles2">
+            <require>
+                <enum name="GL_COLOR_EXT"/>
+                <enum name="GL_DEPTH_EXT"/>
+                <enum name="GL_STENCIL_EXT"/>
+                <command name="glDiscardFramebufferEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_disjoint_timer_query" supported="gles2">
+            <require>
+                <enum name="GL_QUERY_COUNTER_BITS_EXT"/>
+                <enum name="GL_CURRENT_QUERY_EXT"/>
+                <enum name="GL_QUERY_RESULT_EXT"/>
+                <enum name="GL_QUERY_RESULT_AVAILABLE_EXT"/>
+                <enum name="GL_TIME_ELAPSED_EXT"/>
+                <enum name="GL_TIMESTAMP_EXT"/>
+                <enum name="GL_GPU_DISJOINT_EXT"/>
+                <command name="glGenQueriesEXT"/>
+                <command name="glDeleteQueriesEXT"/>
+                <command name="glIsQueryEXT"/>
+                <command name="glBeginQueryEXT"/>
+                <command name="glEndQueryEXT"/>
+                <command name="glQueryCounterEXT"/>
+                <command name="glGetQueryivEXT"/>
+                <command name="glGetQueryObjectivEXT"/>
+                <command name="glGetQueryObjectuivEXT"/>
+                <command name="glGetQueryObjecti64vEXT"/>
+                <command name="glGetQueryObjectui64vEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_buffers" supported="gles2">
+            <require>
+                <enum name="GL_MAX_COLOR_ATTACHMENTS_EXT"/>
+                <enum name="GL_MAX_DRAW_BUFFERS_EXT"/>
+                <enum name="GL_DRAW_BUFFER0_EXT"/>
+                <enum name="GL_DRAW_BUFFER1_EXT"/>
+                <enum name="GL_DRAW_BUFFER2_EXT"/>
+                <enum name="GL_DRAW_BUFFER3_EXT"/>
+                <enum name="GL_DRAW_BUFFER4_EXT"/>
+                <enum name="GL_DRAW_BUFFER5_EXT"/>
+                <enum name="GL_DRAW_BUFFER6_EXT"/>
+                <enum name="GL_DRAW_BUFFER7_EXT"/>
+                <enum name="GL_DRAW_BUFFER8_EXT"/>
+                <enum name="GL_DRAW_BUFFER9_EXT"/>
+                <enum name="GL_DRAW_BUFFER10_EXT"/>
+                <enum name="GL_DRAW_BUFFER11_EXT"/>
+                <enum name="GL_DRAW_BUFFER12_EXT"/>
+                <enum name="GL_DRAW_BUFFER13_EXT"/>
+                <enum name="GL_DRAW_BUFFER14_EXT"/>
+                <enum name="GL_DRAW_BUFFER15_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT0_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT1_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT2_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT3_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT4_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT5_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT6_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT7_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT8_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT9_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT10_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT11_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT12_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT13_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT14_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT15_EXT"/>
+                <command name="glDrawBuffersEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_buffers2" supported="gl">
+            <require>
+                <command name="glColorMaskIndexedEXT"/>
+                <command name="glGetBooleanIndexedvEXT"/>
+                <command name="glGetIntegerIndexedvEXT"/>
+                <command name="glEnableIndexedEXT"/>
+                <command name="glDisableIndexedEXT"/>
+                <command name="glIsEnabledIndexedEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_buffers_indexed" supported="gles2">
+            <require>
+                <enum name="GL_BLEND_EQUATION_RGB"/>
+                <enum name="GL_BLEND_EQUATION_ALPHA"/>
+                <enum name="GL_BLEND_SRC_RGB"/>
+                <enum name="GL_BLEND_SRC_ALPHA"/>
+                <enum name="GL_BLEND_DST_RGB"/>
+                <enum name="GL_BLEND_DST_ALPHA"/>
+                <enum name="GL_COLOR_WRITEMASK"/>
+                <enum name="GL_BLEND"/>
+                <enum name="GL_FUNC_ADD"/>
+                <enum name="GL_FUNC_SUBTRACT"/>
+                <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+                <enum name="GL_MIN"/>
+                <enum name="GL_MAX"/>
+                <enum name="GL_ZERO"/>
+                <enum name="GL_ONE"/>
+                <enum name="GL_SRC_COLOR"/>
+                <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+                <enum name="GL_DST_COLOR"/>
+                <enum name="GL_ONE_MINUS_DST_COLOR"/>
+                <enum name="GL_SRC_ALPHA"/>
+                <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+                <enum name="GL_DST_ALPHA"/>
+                <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+                <enum name="GL_CONSTANT_COLOR"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+                <enum name="GL_CONSTANT_ALPHA"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+                <enum name="GL_SRC_ALPHA_SATURATE"/>
+                <command name="glEnableiEXT"/>
+                <command name="glDisableiEXT"/>
+                <command name="glBlendEquationiEXT"/>
+                <command name="glBlendEquationSeparateiEXT"/>
+                <command name="glBlendFunciEXT"/>
+                <command name="glBlendFuncSeparateiEXT"/>
+                <command name="glColorMaskiEXT"/>
+                <command name="glIsEnablediEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_elements_base_vertex" supported="gles2">
+            <require>
+                <command name="glDrawElementsBaseVertexEXT"/>
+                <command name="glDrawRangeElementsBaseVertexEXT" comment="Supported only if OpenGL ES 3.0 is supported"/>
+                <command name="glDrawElementsInstancedBaseVertexEXT" comment="Supported only if OpenGL ES 3.0 is supported"/>
+                <command name="glMultiDrawElementsBaseVertexEXT" comment="Supported only if GL_EXT_multi_draw_arrays is supported"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_instanced" supported="gl|glcore|gles2">
+            <require>
+                <command name="glDrawArraysInstancedEXT"/>
+                <command name="glDrawElementsInstancedEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_range_elements" supported="gl">
+            <require>
+                <enum name="GL_MAX_ELEMENTS_VERTICES_EXT"/>
+                <enum name="GL_MAX_ELEMENTS_INDICES_EXT"/>
+                <command name="glDrawRangeElementsEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_draw_transform_feedback" supported="gles2">
+            <require>
+                <command name="glDrawTransformFeedbackEXT"/>
+                <command name="glDrawTransformFeedbackInstancedEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_external_buffer" supported="gl|gles2">
+            <require>
+                <command name="glBufferStorageExternalEXT"/>
+                <command name="glNamedBufferStorageExternalEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_float_blend" supported="gles2"/>
+        <extension name="GL_EXT_fog_coord" supported="gl">
+            <require>
+                <enum name="GL_FOG_COORDINATE_SOURCE_EXT"/>
+                <enum name="GL_FOG_COORDINATE_EXT"/>
+                <enum name="GL_FRAGMENT_DEPTH_EXT"/>
+                <enum name="GL_CURRENT_FOG_COORDINATE_EXT"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_TYPE_EXT"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_POINTER_EXT"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_EXT"/>
+                <command name="glFogCoordfEXT"/>
+                <command name="glFogCoordfvEXT"/>
+                <command name="glFogCoorddEXT"/>
+                <command name="glFogCoorddvEXT"/>
+                <command name="glFogCoordPointerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_framebuffer_blit" supported="gl">
+            <require>
+                <enum name="GL_READ_FRAMEBUFFER_EXT"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_EXT"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_BINDING_EXT"/>
+                <enum name="GL_READ_FRAMEBUFFER_BINDING_EXT"/>
+                <command name="glBlitFramebufferEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_framebuffer_multisample" supported="gl">
+            <require>
+                <enum name="GL_RENDERBUFFER_SAMPLES_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT"/>
+                <enum name="GL_MAX_SAMPLES_EXT"/>
+                <command name="glRenderbufferStorageMultisampleEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_framebuffer_multisample_blit_scaled" supported="gl">
+            <require>
+                <enum name="GL_SCALED_RESOLVE_FASTEST_EXT"/>
+                <enum name="GL_SCALED_RESOLVE_NICEST_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_framebuffer_object" supported="gl">
+            <require>
+                <enum name="GL_INVALID_FRAMEBUFFER_OPERATION_EXT"/>
+                <enum name="GL_MAX_RENDERBUFFER_SIZE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_BINDING_EXT"/>
+                <enum name="GL_RENDERBUFFER_BINDING_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT"/>
+                <enum name="GL_FRAMEBUFFER_COMPLETE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT"/>
+                <enum name="GL_FRAMEBUFFER_UNSUPPORTED_EXT"/>
+                <enum name="GL_MAX_COLOR_ATTACHMENTS_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT0_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT1_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT2_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT3_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT4_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT5_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT6_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT7_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT8_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT9_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT10_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT11_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT12_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT13_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT14_EXT"/>
+                <enum name="GL_COLOR_ATTACHMENT15_EXT"/>
+                <enum name="GL_DEPTH_ATTACHMENT_EXT"/>
+                <enum name="GL_STENCIL_ATTACHMENT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_EXT"/>
+                <enum name="GL_RENDERBUFFER_EXT"/>
+                <enum name="GL_RENDERBUFFER_WIDTH_EXT"/>
+                <enum name="GL_RENDERBUFFER_HEIGHT_EXT"/>
+                <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT_EXT"/>
+                <enum name="GL_STENCIL_INDEX1_EXT"/>
+                <enum name="GL_STENCIL_INDEX4_EXT"/>
+                <enum name="GL_STENCIL_INDEX8_EXT"/>
+                <enum name="GL_STENCIL_INDEX16_EXT"/>
+                <enum name="GL_RENDERBUFFER_RED_SIZE_EXT"/>
+                <enum name="GL_RENDERBUFFER_GREEN_SIZE_EXT"/>
+                <enum name="GL_RENDERBUFFER_BLUE_SIZE_EXT"/>
+                <enum name="GL_RENDERBUFFER_ALPHA_SIZE_EXT"/>
+                <enum name="GL_RENDERBUFFER_DEPTH_SIZE_EXT"/>
+                <enum name="GL_RENDERBUFFER_STENCIL_SIZE_EXT"/>
+                <command name="glIsRenderbufferEXT"/>
+                <command name="glBindRenderbufferEXT"/>
+                <command name="glDeleteRenderbuffersEXT"/>
+                <command name="glGenRenderbuffersEXT"/>
+                <command name="glRenderbufferStorageEXT"/>
+                <command name="glGetRenderbufferParameterivEXT"/>
+                <command name="glIsFramebufferEXT"/>
+                <command name="glBindFramebufferEXT"/>
+                <command name="glDeleteFramebuffersEXT"/>
+                <command name="glGenFramebuffersEXT"/>
+                <command name="glCheckFramebufferStatusEXT"/>
+                <command name="glFramebufferTexture1DEXT"/>
+                <command name="glFramebufferTexture2DEXT"/>
+                <command name="glFramebufferTexture3DEXT"/>
+                <command name="glFramebufferRenderbufferEXT"/>
+                <command name="glGetFramebufferAttachmentParameterivEXT"/>
+                <command name="glGenerateMipmapEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_framebuffer_sRGB" supported="gl">
+            <require>
+                <enum name="GL_FRAMEBUFFER_SRGB_EXT"/>
+                <enum name="GL_FRAMEBUFFER_SRGB_CAPABLE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_geometry_point_size" supported="gles2"/>
+        <extension name="GL_EXT_geometry_shader" supported="gles2">
+            <require>
+                <enum name="GL_GEOMETRY_SHADER_EXT"/>
+                <enum name="GL_GEOMETRY_SHADER_BIT_EXT"/>
+                <enum name="GL_GEOMETRY_LINKED_VERTICES_OUT_EXT"/>
+                <enum name="GL_GEOMETRY_LINKED_INPUT_TYPE_EXT"/>
+                <enum name="GL_GEOMETRY_LINKED_OUTPUT_TYPE_EXT"/>
+                <enum name="GL_GEOMETRY_SHADER_INVOCATIONS_EXT"/>
+                <enum name="GL_LAYER_PROVOKING_VERTEX_EXT"/>
+                <enum name="GL_LINES_ADJACENCY_EXT"/>
+                <enum name="GL_LINE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLES_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS_EXT"/>
+                <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_INPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_EXT"/>
+                <enum name="GL_FIRST_VERTEX_CONVENTION_EXT"/>
+                <enum name="GL_LAST_VERTEX_CONVENTION_EXT"/>
+                <enum name="GL_UNDEFINED_VERTEX_EXT"/>
+                <enum name="GL_PRIMITIVES_GENERATED_EXT"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS_EXT"/>
+                <enum name="GL_MAX_FRAMEBUFFER_LAYERS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT"/>
+                <enum name="GL_REFERENCED_BY_GEOMETRY_SHADER_EXT"/>
+                <command name="glFramebufferTextureEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_geometry_shader4" supported="gl">
+            <require>
+                <enum name="GL_GEOMETRY_SHADER_EXT"/>
+                <enum name="GL_GEOMETRY_VERTICES_OUT_EXT"/>
+                <enum name="GL_GEOMETRY_INPUT_TYPE_EXT"/>
+                <enum name="GL_GEOMETRY_OUTPUT_TYPE_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_VARYING_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_VERTEX_VARYING_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_VARYING_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_LINES_ADJACENCY_EXT"/>
+                <enum name="GL_LINE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLES_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT"/>
+                <enum name="GL_PROGRAM_POINT_SIZE_EXT"/>
+                <command name="glProgramParameteriEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_gpu_program_parameters" supported="gl">
+            <require>
+                <command name="glProgramEnvParameters4fvEXT"/>
+                <command name="glProgramLocalParameters4fvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_gpu_shader4" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER_EXT"/>
+                <enum name="GL_SAMPLER_1D_ARRAY_EXT"/>
+                <enum name="GL_SAMPLER_2D_ARRAY_EXT"/>
+                <enum name="GL_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_SAMPLER_1D_ARRAY_SHADOW_EXT"/>
+                <enum name="GL_SAMPLER_2D_ARRAY_SHADOW_EXT"/>
+                <enum name="GL_SAMPLER_CUBE_SHADOW_EXT"/>
+                <enum name="GL_UNSIGNED_INT_VEC2_EXT"/>
+                <enum name="GL_UNSIGNED_INT_VEC3_EXT"/>
+                <enum name="GL_UNSIGNED_INT_VEC4_EXT"/>
+                <enum name="GL_INT_SAMPLER_1D_EXT"/>
+                <enum name="GL_INT_SAMPLER_2D_EXT"/>
+                <enum name="GL_INT_SAMPLER_3D_EXT"/>
+                <enum name="GL_INT_SAMPLER_CUBE_EXT"/>
+                <enum name="GL_INT_SAMPLER_2D_RECT_EXT"/>
+                <enum name="GL_INT_SAMPLER_1D_ARRAY_EXT"/>
+                <enum name="GL_INT_SAMPLER_2D_ARRAY_EXT"/>
+                <enum name="GL_INT_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_1D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_3D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_RECT_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_1D_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_MIN_PROGRAM_TEXEL_OFFSET_EXT"/>
+                <enum name="GL_MAX_PROGRAM_TEXEL_OFFSET_EXT"/>
+                <command name="glGetUniformuivEXT"/>
+                <command name="glBindFragDataLocationEXT"/>
+                <command name="glGetFragDataLocationEXT"/>
+                <command name="glUniform1uiEXT"/>
+                <command name="glUniform2uiEXT"/>
+                <command name="glUniform3uiEXT"/>
+                <command name="glUniform4uiEXT"/>
+                <command name="glUniform1uivEXT"/>
+                <command name="glUniform2uivEXT"/>
+                <command name="glUniform3uivEXT"/>
+                <command name="glUniform4uivEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_gpu_shader5" supported="gles2"/>
+        <extension name="GL_EXT_histogram" supported="gl">
+            <require>
+                <enum name="GL_HISTOGRAM_EXT"/>
+                <enum name="GL_PROXY_HISTOGRAM_EXT"/>
+                <enum name="GL_HISTOGRAM_WIDTH_EXT"/>
+                <enum name="GL_HISTOGRAM_FORMAT_EXT"/>
+                <enum name="GL_HISTOGRAM_RED_SIZE_EXT"/>
+                <enum name="GL_HISTOGRAM_GREEN_SIZE_EXT"/>
+                <enum name="GL_HISTOGRAM_BLUE_SIZE_EXT"/>
+                <enum name="GL_HISTOGRAM_ALPHA_SIZE_EXT"/>
+                <enum name="GL_HISTOGRAM_LUMINANCE_SIZE_EXT"/>
+                <enum name="GL_HISTOGRAM_SINK_EXT"/>
+                <enum name="GL_MINMAX_EXT"/>
+                <enum name="GL_MINMAX_FORMAT_EXT"/>
+                <enum name="GL_MINMAX_SINK_EXT"/>
+                <enum name="GL_TABLE_TOO_LARGE_EXT"/>
+                <command name="glGetHistogramEXT"/>
+                <command name="glGetHistogramParameterfvEXT"/>
+                <command name="glGetHistogramParameterivEXT"/>
+                <command name="glGetMinmaxEXT"/>
+                <command name="glGetMinmaxParameterfvEXT"/>
+                <command name="glGetMinmaxParameterivEXT"/>
+                <command name="glHistogramEXT"/>
+                <command name="glMinmaxEXT"/>
+                <command name="glResetHistogramEXT"/>
+                <command name="glResetMinmaxEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_index_array_formats" supported="gl">
+            <require>
+                <enum name="GL_IUI_V2F_EXT"/>
+                <enum name="GL_IUI_V3F_EXT"/>
+                <enum name="GL_IUI_N3F_V2F_EXT"/>
+                <enum name="GL_IUI_N3F_V3F_EXT"/>
+                <enum name="GL_T2F_IUI_V2F_EXT"/>
+                <enum name="GL_T2F_IUI_V3F_EXT"/>
+                <enum name="GL_T2F_IUI_N3F_V2F_EXT"/>
+                <enum name="GL_T2F_IUI_N3F_V3F_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_index_func" supported="gl">
+            <require>
+                <enum name="GL_INDEX_TEST_EXT"/>
+                <enum name="GL_INDEX_TEST_FUNC_EXT"/>
+                <enum name="GL_INDEX_TEST_REF_EXT"/>
+                <command name="glIndexFuncEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_index_material" supported="gl">
+            <require>
+                <enum name="GL_INDEX_MATERIAL_EXT"/>
+                <enum name="GL_INDEX_MATERIAL_PARAMETER_EXT"/>
+                <enum name="GL_INDEX_MATERIAL_FACE_EXT"/>
+                <command name="glIndexMaterialEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_index_texture" supported="gl"/>
+        <extension name="GL_EXT_instanced_arrays" supported="gles2">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_EXT"/>
+                <command name="glDrawArraysInstancedEXT"/>
+                <command name="glDrawElementsInstancedEXT"/>
+                <command name="glVertexAttribDivisorEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_light_texture" supported="gl">
+            <require>
+                <enum name="GL_FRAGMENT_MATERIAL_EXT"/>
+                <enum name="GL_FRAGMENT_NORMAL_EXT"/>
+                <enum name="GL_FRAGMENT_COLOR_EXT"/>
+                <enum name="GL_ATTENUATION_EXT"/>
+                <enum name="GL_SHADOW_ATTENUATION_EXT"/>
+                <enum name="GL_TEXTURE_APPLICATION_MODE_EXT"/>
+                <enum name="GL_TEXTURE_LIGHT_EXT"/>
+                <enum name="GL_TEXTURE_MATERIAL_FACE_EXT"/>
+                <enum name="GL_TEXTURE_MATERIAL_PARAMETER_EXT"/>
+                <enum name="GL_FRAGMENT_DEPTH_EXT"/>
+                <command name="glApplyTextureEXT"/>
+                <command name="glTextureLightEXT"/>
+                <command name="glTextureMaterialEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_map_buffer_range" supported="gles1|gles2">
+            <require>
+                <enum name="GL_MAP_READ_BIT_EXT"/>
+                <enum name="GL_MAP_WRITE_BIT_EXT"/>
+                <enum name="GL_MAP_INVALIDATE_RANGE_BIT_EXT"/>
+                <enum name="GL_MAP_INVALIDATE_BUFFER_BIT_EXT"/>
+                <enum name="GL_MAP_FLUSH_EXPLICIT_BIT_EXT"/>
+                <enum name="GL_MAP_UNSYNCHRONIZED_BIT_EXT"/>
+                <command name="glMapBufferRangeEXT"/>
+                <command name="glFlushMappedBufferRangeEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_memory_object" supported="gl|gles2">
+            <require>
+                <enum name="GL_TEXTURE_TILING_EXT"/>
+                <enum name="GL_DEDICATED_MEMORY_OBJECT_EXT"/>
+                <enum name="GL_PROTECTED_MEMORY_OBJECT_EXT"/>
+                <enum name="GL_NUM_TILING_TYPES_EXT"/>
+                <enum name="GL_TILING_TYPES_EXT"/>
+                <enum name="GL_OPTIMAL_TILING_EXT"/>
+                <enum name="GL_LINEAR_TILING_EXT"/>
+                <enum name="GL_NUM_DEVICE_UUIDS_EXT"/>
+                <enum name="GL_DEVICE_UUID_EXT"/>
+                <enum name="GL_DRIVER_UUID_EXT"/>
+                <enum name="GL_UUID_SIZE_EXT"/>
+                <command name="glGetUnsignedBytevEXT"/>
+                <command name="glGetUnsignedBytei_vEXT"/>
+                <command name="glDeleteMemoryObjectsEXT"/>
+                <command name="glIsMemoryObjectEXT"/>
+                <command name="glCreateMemoryObjectsEXT"/>
+                <command name="glMemoryObjectParameterivEXT"/>
+                <command name="glGetMemoryObjectParameterivEXT"/>
+                <command name="glTexStorageMem2DEXT"/>
+                <command name="glTexStorageMem2DMultisampleEXT"/>
+                <command name="glTexStorageMem3DEXT"/>
+                <command name="glTexStorageMem3DMultisampleEXT"/>
+                <command name="glBufferStorageMemEXT"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glTextureStorageMem2DEXT"/>
+                <command name="glTextureStorageMem2DMultisampleEXT"/>
+                <command name="glTextureStorageMem3DEXT"/>
+                <command name="glTextureStorageMem3DMultisampleEXT"/>
+                <command name="glNamedBufferStorageMemEXT"/>
+            </require>
+            <require api="gl">
+                <command name="glTexStorageMem1DEXT"/>
+            </require>
+            <require api="gl" comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glTextureStorageMem1DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_memory_object_fd" supported="gl|gles2">
+            <require>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_FD_EXT"/>
+                <command name="glImportMemoryFdEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_memory_object_win32" supported="gl|gles2">
+            <require>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_EXT"/>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT"/>
+                <enum name="GL_DEVICE_LUID_EXT"/>
+                <enum name="GL_DEVICE_NODE_MASK_EXT"/>
+                <enum name="GL_LUID_SIZE_EXT"/>
+                <enum name="GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT"/>
+                <enum name="GL_HANDLE_TYPE_D3D12_RESOURCE_EXT"/>
+                <enum name="GL_HANDLE_TYPE_D3D11_IMAGE_EXT"/>
+                <enum name="GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT"/>
+                <command name="glImportMemoryWin32HandleEXT"/>
+                <command name="glImportMemoryWin32NameEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_misc_attribute" supported="gl"/>
+        <extension name="GL_EXT_multi_draw_arrays" supported="gl|gles1|gles2">
+            <require>
+                <command name="glMultiDrawArraysEXT"/>
+                <command name="glMultiDrawElementsEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multi_draw_indirect" supported="gles2">
+            <require>
+                <command name="glMultiDrawArraysIndirectEXT"/>
+                <command name="glMultiDrawElementsIndirectEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multisample" supported="gl">
+            <require>
+                <enum name="GL_MULTISAMPLE_EXT"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_MASK_EXT"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_ONE_EXT"/>
+                <enum name="GL_SAMPLE_MASK_EXT"/>
+                <enum name="GL_1PASS_EXT"/>
+                <enum name="GL_2PASS_0_EXT"/>
+                <enum name="GL_2PASS_1_EXT"/>
+                <enum name="GL_4PASS_0_EXT"/>
+                <enum name="GL_4PASS_1_EXT"/>
+                <enum name="GL_4PASS_2_EXT"/>
+                <enum name="GL_4PASS_3_EXT"/>
+                <enum name="GL_SAMPLE_BUFFERS_EXT"/>
+                <enum name="GL_SAMPLES_EXT"/>
+                <enum name="GL_SAMPLE_MASK_VALUE_EXT"/>
+                <enum name="GL_SAMPLE_MASK_INVERT_EXT"/>
+                <enum name="GL_SAMPLE_PATTERN_EXT"/>
+                <enum name="GL_MULTISAMPLE_BIT_EXT"/>
+                <command name="glSampleMaskEXT"/>
+                <command name="glSamplePatternEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multisampled_compatibility" supported="gles2">
+            <require>
+                <enum name="GL_MULTISAMPLE_EXT"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_ONE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multisampled_render_to_texture" supported="gles1|gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT"/>
+                <enum name="GL_RENDERBUFFER_SAMPLES_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT"/>
+                <enum name="GL_MAX_SAMPLES_EXT"/>
+                <command name="glRenderbufferStorageMultisampleEXT"/>
+                <command name="glFramebufferTexture2DMultisampleEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multiview_draw_buffers" supported="gles2">
+            <require>
+                <enum name="GL_COLOR_ATTACHMENT_EXT"/>
+                <enum name="GL_MULTIVIEW_EXT"/>
+                <enum name="GL_DRAW_BUFFER_EXT"/>
+                <enum name="GL_READ_BUFFER_EXT"/>
+                <enum name="GL_MAX_MULTIVIEW_BUFFERS_EXT"/>
+                <command name="glReadBufferIndexedEXT"/>
+                <command name="glDrawBuffersIndexedEXT"/>
+                <command name="glGetIntegeri_vEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_multiview_tessellation_geometry_shader" supported="gl|glcore|gles2"/>
+        <extension name="GL_EXT_multiview_texture_multisample" supported="gl|glcore|gles2"/>
+        <extension name="GL_EXT_multiview_timer_query" supported="gl|glcore|gles2"/>
+        <extension name="GL_EXT_occlusion_query_boolean" supported="gles2">
+            <require>
+                <enum name="GL_ANY_SAMPLES_PASSED_EXT"/>
+                <enum name="GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT"/>
+                <enum name="GL_CURRENT_QUERY_EXT"/>
+                <enum name="GL_QUERY_RESULT_EXT"/>
+                <enum name="GL_QUERY_RESULT_AVAILABLE_EXT"/>
+                <command name="glGenQueriesEXT"/>
+                <command name="glDeleteQueriesEXT"/>
+                <command name="glIsQueryEXT"/>
+                <command name="glBeginQueryEXT"/>
+                <command name="glEndQueryEXT"/>
+                <command name="glGetQueryivEXT"/>
+                <command name="glGetQueryObjectuivEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_packed_depth_stencil" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_STENCIL_EXT"/>
+                <enum name="GL_UNSIGNED_INT_24_8_EXT"/>
+                <enum name="GL_DEPTH24_STENCIL8_EXT"/>
+                <enum name="GL_TEXTURE_STENCIL_SIZE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_packed_float" supported="gl">
+            <require>
+                <enum name="GL_R11F_G11F_B10F_EXT"/>
+                <enum name="GL_UNSIGNED_INT_10F_11F_11F_REV_EXT"/>
+                <enum name="GL_RGBA_SIGNED_COMPONENTS_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_packed_pixels" supported="gl">
+            <require>
+                <enum name="GL_UNSIGNED_BYTE_3_3_2_EXT"/>
+                <enum name="GL_UNSIGNED_SHORT_4_4_4_4_EXT"/>
+                <enum name="GL_UNSIGNED_SHORT_5_5_5_1_EXT"/>
+                <enum name="GL_UNSIGNED_INT_8_8_8_8_EXT"/>
+                <enum name="GL_UNSIGNED_INT_10_10_10_2_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_paletted_texture" supported="gl">
+            <require>
+                <enum name="GL_COLOR_INDEX1_EXT"/>
+                <enum name="GL_COLOR_INDEX2_EXT"/>
+                <enum name="GL_COLOR_INDEX4_EXT"/>
+                <enum name="GL_COLOR_INDEX8_EXT"/>
+                <enum name="GL_COLOR_INDEX12_EXT"/>
+                <enum name="GL_COLOR_INDEX16_EXT"/>
+                <enum name="GL_TEXTURE_INDEX_SIZE_EXT"/>
+                <command name="glColorTableEXT"/>
+                <command name="glGetColorTableEXT"/>
+                <command name="glGetColorTableParameterivEXT"/>
+                <command name="glGetColorTableParameterfvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_pixel_buffer_object" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_PACK_BUFFER_EXT"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_EXT"/>
+                <enum name="GL_PIXEL_PACK_BUFFER_BINDING_EXT"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_pixel_transform" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_TRANSFORM_2D_EXT"/>
+                <enum name="GL_PIXEL_MAG_FILTER_EXT"/>
+                <enum name="GL_PIXEL_MIN_FILTER_EXT"/>
+                <enum name="GL_PIXEL_CUBIC_WEIGHT_EXT"/>
+                <enum name="GL_CUBIC_EXT"/>
+                <enum name="GL_AVERAGE_EXT"/>
+                <enum name="GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT"/>
+                <enum name="GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT"/>
+                <enum name="GL_PIXEL_TRANSFORM_2D_MATRIX_EXT"/>
+                <command name="glPixelTransformParameteriEXT"/>
+                <command name="glPixelTransformParameterfEXT"/>
+                <command name="glPixelTransformParameterivEXT"/>
+                <command name="glPixelTransformParameterfvEXT"/>
+                <command name="glGetPixelTransformParameterivEXT"/>
+                <command name="glGetPixelTransformParameterfvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_pixel_transform_color_table" supported="gl"/>
+        <extension name="GL_EXT_point_parameters" supported="gl">
+            <require>
+                <enum name="GL_POINT_SIZE_MIN_EXT"/>
+                <enum name="GL_POINT_SIZE_MAX_EXT"/>
+                <enum name="GL_POINT_FADE_THRESHOLD_SIZE_EXT"/>
+                <enum name="GL_DISTANCE_ATTENUATION_EXT"/>
+                <command name="glPointParameterfEXT"/>
+                <command name="glPointParameterfvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_polygon_offset" supported="gl">
+            <require>
+                <enum name="GL_POLYGON_OFFSET_EXT"/>
+                <enum name="GL_POLYGON_OFFSET_FACTOR_EXT"/>
+                <enum name="GL_POLYGON_OFFSET_BIAS_EXT"/>
+                <command name="glPolygonOffsetEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_polygon_offset_clamp" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_POLYGON_OFFSET_CLAMP_EXT"/>
+                <command name="glPolygonOffsetClampEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_post_depth_coverage" supported="gl|glcore|gles2"/>
+        <extension name="GL_EXT_primitive_bounding_box" supported="gles2">
+            <require>
+                <enum name="GL_PRIMITIVE_BOUNDING_BOX_EXT"/>
+                <command name="glPrimitiveBoundingBoxEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_protected_textures" supported="gles2">
+            <require>
+                <enum name="GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT"/>
+                <enum name="GL_TEXTURE_PROTECTED_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_provoking_vertex" supported="gl">
+            <require>
+                <enum name="GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT"/>
+                <enum name="GL_FIRST_VERTEX_CONVENTION_EXT"/>
+                <enum name="GL_LAST_VERTEX_CONVENTION_EXT"/>
+                <enum name="GL_PROVOKING_VERTEX_EXT"/>
+                <command name="glProvokingVertexEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_pvrtc_sRGB" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_raster_multisample" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_RASTER_MULTISAMPLE_EXT"/>
+                <enum name="GL_RASTER_SAMPLES_EXT"/>
+                <enum name="GL_MAX_RASTER_SAMPLES_EXT"/>
+                <enum name="GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT"/>
+                <enum name="GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT"/>
+                <enum name="GL_EFFECTIVE_RASTER_SAMPLES_EXT"/>
+                <command name="glRasterSamplesEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_read_format_bgra" supported="gles1|gles2">
+            <require>
+                <enum name="GL_BGRA_EXT"/>
+                <enum name="GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT"/>
+                <enum name="GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_render_snorm" supported="gles2">
+            <require>
+                <enum name="GL_BYTE"/>
+                <enum name="GL_SHORT"/>
+                <enum name="GL_R8_SNORM"/>
+                <enum name="GL_RG8_SNORM"/>
+                <enum name="GL_RGBA8_SNORM"/>
+                <enum name="GL_R16_SNORM_EXT"/>
+                <enum name="GL_RG16_SNORM_EXT"/>
+                <enum name="GL_RGBA16_SNORM_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_rescale_normal" supported="gl">
+            <require>
+                <enum name="GL_RESCALE_NORMAL_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_robustness" supported="gles1|gles2">
+            <require>
+                <enum name="GL_NO_ERROR"/>
+                <enum name="GL_GUILTY_CONTEXT_RESET_EXT"/>
+                <enum name="GL_INNOCENT_CONTEXT_RESET_EXT"/>
+                <enum name="GL_UNKNOWN_CONTEXT_RESET_EXT"/>
+                <enum name="GL_CONTEXT_ROBUST_ACCESS_EXT"/>
+                <enum name="GL_RESET_NOTIFICATION_STRATEGY_EXT"/>
+                <enum name="GL_LOSE_CONTEXT_ON_RESET_EXT"/>
+                <enum name="GL_NO_RESET_NOTIFICATION_EXT"/>
+                <command name="glGetGraphicsResetStatusEXT"/>
+                <command name="glReadnPixelsEXT"/>
+                <command name="glGetnUniformfvEXT"/>
+                <command name="glGetnUniformivEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_semaphore" supported="gl|gles2">
+            <require>
+                <enum name="GL_NUM_DEVICE_UUIDS_EXT"/>
+                <enum name="GL_DEVICE_UUID_EXT"/>
+                <enum name="GL_DRIVER_UUID_EXT"/>
+                <enum name="GL_UUID_SIZE_EXT"/>
+                <enum name="GL_LAYOUT_GENERAL_EXT"/>
+                <enum name="GL_LAYOUT_COLOR_ATTACHMENT_EXT"/>
+                <enum name="GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT"/>
+                <enum name="GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT"/>
+                <enum name="GL_LAYOUT_SHADER_READ_ONLY_EXT"/>
+                <enum name="GL_LAYOUT_TRANSFER_SRC_EXT"/>
+                <enum name="GL_LAYOUT_TRANSFER_DST_EXT"/>
+                <enum name="GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT"/>
+                <enum name="GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT"/>
+                <command name="glGetUnsignedBytevEXT"/>
+                <command name="glGetUnsignedBytei_vEXT"/>
+                <command name="glGenSemaphoresEXT"/>
+                <command name="glDeleteSemaphoresEXT"/>
+                <command name="glIsSemaphoreEXT"/>
+                <command name="glSemaphoreParameterui64vEXT"/>
+                <command name="glGetSemaphoreParameterui64vEXT"/>
+                <command name="glWaitSemaphoreEXT"/>
+                <command name="glSignalSemaphoreEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_semaphore_fd" supported="gl|gles2">
+            <require>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_FD_EXT"/>
+                <command name="glImportSemaphoreFdEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_semaphore_win32" supported="gl|gles2">
+            <require>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_EXT"/>
+                <enum name="GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT"/>
+                <enum name="GL_DEVICE_LUID_EXT"/>
+                <enum name="GL_DEVICE_NODE_MASK_EXT"/>
+                <enum name="GL_LUID_SIZE_EXT"/>
+                <enum name="GL_HANDLE_TYPE_D3D12_FENCE_EXT"/>
+                <enum name="GL_D3D12_FENCE_VALUE_EXT"/>
+                <command name="glImportSemaphoreWin32HandleEXT"/>
+                <command name="glImportSemaphoreWin32NameEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_sRGB" supported="gles1|gles2">
+            <require>
+                <enum name="GL_SRGB_EXT"/>
+                <enum name="GL_SRGB_ALPHA_EXT"/>
+                <enum name="GL_SRGB8_ALPHA8_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_sRGB_write_control" supported="gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_SRGB_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_secondary_color" supported="gl">
+            <require>
+                <enum name="GL_COLOR_SUM_EXT"/>
+                <enum name="GL_CURRENT_SECONDARY_COLOR_EXT"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_SIZE_EXT"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_TYPE_EXT"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_POINTER_EXT"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_EXT"/>
+                <command name="glSecondaryColor3bEXT"/>
+                <command name="glSecondaryColor3bvEXT"/>
+                <command name="glSecondaryColor3dEXT"/>
+                <command name="glSecondaryColor3dvEXT"/>
+                <command name="glSecondaryColor3fEXT"/>
+                <command name="glSecondaryColor3fvEXT"/>
+                <command name="glSecondaryColor3iEXT"/>
+                <command name="glSecondaryColor3ivEXT"/>
+                <command name="glSecondaryColor3sEXT"/>
+                <command name="glSecondaryColor3svEXT"/>
+                <command name="glSecondaryColor3ubEXT"/>
+                <command name="glSecondaryColor3ubvEXT"/>
+                <command name="glSecondaryColor3uiEXT"/>
+                <command name="glSecondaryColor3uivEXT"/>
+                <command name="glSecondaryColor3usEXT"/>
+                <command name="glSecondaryColor3usvEXT"/>
+                <command name="glSecondaryColorPointerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_separate_shader_objects" supported="gl|glcore|gles2">
+            <require api="gl" comment="The OpenGL version of this extension is completely unrelated to the OpenGL ES version">
+                <enum name="GL_ACTIVE_PROGRAM_EXT"/>
+                <command name="glUseShaderProgramEXT"/>
+                <command name="glActiveProgramEXT"/>
+                <command name="glCreateShaderProgramEXT"/>
+            </require>
+            <require api="gles2" comment="The OpenGL ES version of this extension is completely unrelated to the OpenGL version">
+                <enum name="GL_VERTEX_SHADER_BIT_EXT"/>
+                <enum name="GL_FRAGMENT_SHADER_BIT_EXT"/>
+                <enum name="GL_ALL_SHADER_BITS_EXT"/>
+                <enum name="GL_PROGRAM_SEPARABLE_EXT"/>
+                <enum name="GL_ACTIVE_PROGRAM_EXT"/>
+                <enum name="GL_PROGRAM_PIPELINE_BINDING_EXT"/>
+                <command name="glActiveShaderProgramEXT"/>
+                <command name="glBindProgramPipelineEXT"/>
+                <command name="glCreateShaderProgramvEXT"/>
+                <command name="glDeleteProgramPipelinesEXT"/>
+                <command name="glGenProgramPipelinesEXT"/>
+                <command name="glGetProgramPipelineInfoLogEXT"/>
+                <command name="glGetProgramPipelineivEXT"/>
+                <command name="glIsProgramPipelineEXT"/>
+                <command name="glProgramParameteriEXT"/>
+                <command name="glProgramUniform1fEXT"/>
+                <command name="glProgramUniform1fvEXT"/>
+                <command name="glProgramUniform1iEXT"/>
+                <command name="glProgramUniform1ivEXT"/>
+                <command name="glProgramUniform2fEXT"/>
+                <command name="glProgramUniform2fvEXT"/>
+                <command name="glProgramUniform2iEXT"/>
+                <command name="glProgramUniform2ivEXT"/>
+                <command name="glProgramUniform3fEXT"/>
+                <command name="glProgramUniform3fvEXT"/>
+                <command name="glProgramUniform3iEXT"/>
+                <command name="glProgramUniform3ivEXT"/>
+                <command name="glProgramUniform4fEXT"/>
+                <command name="glProgramUniform4fvEXT"/>
+                <command name="glProgramUniform4iEXT"/>
+                <command name="glProgramUniform4ivEXT"/>
+                <command name="glProgramUniformMatrix2fvEXT"/>
+                <command name="glProgramUniformMatrix3fvEXT"/>
+                <command name="glProgramUniformMatrix4fvEXT"/>
+                <command name="glUseProgramStagesEXT"/>
+                <command name="glValidateProgramPipelineEXT"/>
+            </require>
+            <require api="gles2" comment="Depends on OpenGL ES 3.0 or GL_NV_non_square_matrices">
+                <command name="glProgramUniform1uiEXT"/>
+                <command name="glProgramUniform2uiEXT"/>
+                <command name="glProgramUniform3uiEXT"/>
+                <command name="glProgramUniform4uiEXT"/>
+                <command name="glProgramUniform1uivEXT"/>
+                <command name="glProgramUniform2uivEXT"/>
+                <command name="glProgramUniform3uivEXT"/>
+                <command name="glProgramUniform4uivEXT"/>
+                <command name="glProgramUniformMatrix4fvEXT"/>
+                <command name="glProgramUniformMatrix2x3fvEXT"/>
+                <command name="glProgramUniformMatrix3x2fvEXT"/>
+                <command name="glProgramUniformMatrix2x4fvEXT"/>
+                <command name="glProgramUniformMatrix4x2fvEXT"/>
+                <command name="glProgramUniformMatrix3x4fvEXT"/>
+                <command name="glProgramUniformMatrix4x3fvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_separate_specular_color" supported="gl">
+            <require>
+                <enum name="GL_LIGHT_MODEL_COLOR_CONTROL_EXT"/>
+                <enum name="GL_SINGLE_COLOR_EXT"/>
+                <enum name="GL_SEPARATE_SPECULAR_COLOR_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_framebuffer_fetch" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_framebuffer_fetch_non_coherent" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT"/>
+                <command name="glFramebufferFetchBarrierEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_group_vote" supported="gles2"/>
+        <extension name="GL_EXT_shader_image_load_formatted" supported="gl"/>
+        <extension name="GL_EXT_shader_image_load_store" supported="gl">
+            <require>
+                <enum name="GL_MAX_IMAGE_UNITS_EXT"/>
+                <enum name="GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS_EXT"/>
+                <enum name="GL_IMAGE_BINDING_NAME_EXT"/>
+                <enum name="GL_IMAGE_BINDING_LEVEL_EXT"/>
+                <enum name="GL_IMAGE_BINDING_LAYERED_EXT"/>
+                <enum name="GL_IMAGE_BINDING_LAYER_EXT"/>
+                <enum name="GL_IMAGE_BINDING_ACCESS_EXT"/>
+                <enum name="GL_IMAGE_1D_EXT"/>
+                <enum name="GL_IMAGE_2D_EXT"/>
+                <enum name="GL_IMAGE_3D_EXT"/>
+                <enum name="GL_IMAGE_2D_RECT_EXT"/>
+                <enum name="GL_IMAGE_CUBE_EXT"/>
+                <enum name="GL_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_IMAGE_1D_ARRAY_EXT"/>
+                <enum name="GL_IMAGE_2D_ARRAY_EXT"/>
+                <enum name="GL_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_IMAGE_2D_MULTISAMPLE_EXT"/>
+                <enum name="GL_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+                <enum name="GL_INT_IMAGE_1D_EXT"/>
+                <enum name="GL_INT_IMAGE_2D_EXT"/>
+                <enum name="GL_INT_IMAGE_3D_EXT"/>
+                <enum name="GL_INT_IMAGE_2D_RECT_EXT"/>
+                <enum name="GL_INT_IMAGE_CUBE_EXT"/>
+                <enum name="GL_INT_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_INT_IMAGE_1D_ARRAY_EXT"/>
+                <enum name="GL_INT_IMAGE_2D_ARRAY_EXT"/>
+                <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_INT_IMAGE_2D_MULTISAMPLE_EXT"/>
+                <enum name="GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_1D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_3D_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_RECT_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_1D_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT"/>
+                <enum name="GL_MAX_IMAGE_SAMPLES_EXT"/>
+                <enum name="GL_IMAGE_BINDING_FORMAT_EXT"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT_EXT"/>
+                <enum name="GL_ELEMENT_ARRAY_BARRIER_BIT_EXT"/>
+                <enum name="GL_UNIFORM_BARRIER_BIT_EXT"/>
+                <enum name="GL_TEXTURE_FETCH_BARRIER_BIT_EXT"/>
+                <enum name="GL_SHADER_IMAGE_ACCESS_BARRIER_BIT_EXT"/>
+                <enum name="GL_COMMAND_BARRIER_BIT_EXT"/>
+                <enum name="GL_PIXEL_BUFFER_BARRIER_BIT_EXT"/>
+                <enum name="GL_TEXTURE_UPDATE_BARRIER_BIT_EXT"/>
+                <enum name="GL_BUFFER_UPDATE_BARRIER_BIT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_BARRIER_BIT_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BARRIER_BIT_EXT"/>
+                <enum name="GL_ATOMIC_COUNTER_BARRIER_BIT_EXT"/>
+                <enum name="GL_ALL_BARRIER_BITS_EXT"/>
+                <command name="glBindImageTextureEXT"/>
+                <command name="glMemoryBarrierEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_implicit_conversions" supported="gles2"/>
+        <extension name="GL_EXT_shader_integer_mix" supported="gl|glcore|gles2"/>
+        <extension name="GL_EXT_shader_io_blocks" supported="gles2"/>
+        <extension name="GL_EXT_shader_non_constant_global_initializers" supported="gles2"/>
+        <extension name="GL_EXT_shader_pixel_local_storage" supported="gles2">
+            <require>
+                <enum name="GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT"/>
+                <enum name="GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_SIZE_EXT"/>
+                <enum name="GL_SHADER_PIXEL_LOCAL_STORAGE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_pixel_local_storage2" supported="gles2">
+            <require>
+                <enum name="GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_FAST_SIZE_EXT"/>
+                <enum name="GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_SIZE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_INSUFFICIENT_SHADER_COMBINED_LOCAL_STORAGE_EXT"/>
+                <command name="glFramebufferPixelLocalStorageSizeEXT"/>
+                <command name="glGetFramebufferPixelLocalStorageSizeEXT"/>
+                <command name="glClearPixelLocalStorageuiEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shader_texture_lod" supported="gles2"/>
+        <extension name="GL_EXT_shadow_funcs" supported="gl"/>
+        <extension name="GL_EXT_shadow_samplers" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_COMPARE_MODE_EXT"/>
+                <enum name="GL_TEXTURE_COMPARE_FUNC_EXT"/>
+                <enum name="GL_COMPARE_REF_TO_TEXTURE_EXT"/>
+                <enum name="GL_SAMPLER_2D_SHADOW_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_shared_texture_palette" supported="gl">
+            <require>
+                <enum name="GL_SHARED_TEXTURE_PALETTE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_sparse_texture" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_SPARSE_EXT"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_INDEX_EXT"/>
+                <enum name="GL_NUM_SPARSE_LEVELS_EXT"/>
+                <enum name="GL_NUM_VIRTUAL_PAGE_SIZES_EXT"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_X_EXT"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Y_EXT"/>
+                <enum name="GL_VIRTUAL_PAGE_SIZE_Z_EXT"/>
+                <enum name="GL_TEXTURE_2D"/>
+                <enum name="GL_TEXTURE_2D_ARRAY"/>
+                <enum name="GL_TEXTURE_CUBE_MAP"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_TEXTURE_3D"/>
+                <enum name="GL_MAX_SPARSE_TEXTURE_SIZE_EXT"/>
+                <enum name="GL_MAX_SPARSE_3D_TEXTURE_SIZE_EXT"/>
+                <enum name="GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_EXT"/>
+                <enum name="GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT"/>
+                <command name="glTexPageCommitmentEXT"/>
+                <!-- <command name="glTexturePageCommitmentEXT"/> -->
+            </require>
+        </extension>
+        <extension name="GL_EXT_sparse_texture2" supported="gl|gles2"/>
+        <extension name="GL_EXT_stencil_clear_tag" supported="gl">
+            <require>
+                <enum name="GL_STENCIL_TAG_BITS_EXT"/>
+                <enum name="GL_STENCIL_CLEAR_TAG_VALUE_EXT"/>
+                <command name="glStencilClearTagEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_stencil_two_side" supported="gl">
+            <require>
+                <enum name="GL_STENCIL_TEST_TWO_SIDE_EXT"/>
+                <enum name="GL_ACTIVE_STENCIL_FACE_EXT"/>
+                <command name="glActiveStencilFaceEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_stencil_wrap" supported="gl">
+            <require>
+                <enum name="GL_INCR_WRAP_EXT"/>
+                <enum name="GL_DECR_WRAP_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_subtexture" supported="gl">
+            <require>
+                <command name="glTexSubImage1DEXT"/>
+                <command name="glTexSubImage2DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_tessellation_point_size" supported="gles2"/>
+        <extension name="GL_EXT_tessellation_shader" supported="gles2">
+            <require>
+                <enum name="GL_PATCHES_EXT"/>
+                <enum name="GL_PATCH_VERTICES_EXT"/>
+                <enum name="GL_TESS_CONTROL_OUTPUT_VERTICES_EXT"/>
+                <enum name="GL_TESS_GEN_MODE_EXT"/>
+                <enum name="GL_TESS_GEN_SPACING_EXT"/>
+                <enum name="GL_TESS_GEN_VERTEX_ORDER_EXT"/>
+                <enum name="GL_TESS_GEN_POINT_MODE_EXT"/>
+                <enum name="GL_TRIANGLES"/>
+                <enum name="GL_ISOLINES_EXT"/>
+                <enum name="GL_QUADS_EXT"/>
+                <enum name="GL_EQUAL"/>
+                <enum name="GL_FRACTIONAL_ODD_EXT"/>
+                <enum name="GL_FRACTIONAL_EVEN_EXT"/>
+                <enum name="GL_CCW"/>
+                <enum name="GL_CW"/>
+                <enum name="GL_MAX_PATCH_VERTICES_EXT"/>
+                <enum name="GL_MAX_TESS_GEN_LEVEL_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_PATCH_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_EXT"/>
+                <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_EXT"/>
+                <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_EXT"/>
+                <enum name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED"/>
+                <enum name="GL_IS_PER_PATCH_EXT"/>
+                <enum name="GL_REFERENCED_BY_TESS_CONTROL_SHADER_EXT"/>
+                <enum name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER_EXT"/>
+                <enum name="GL_TESS_CONTROL_SHADER_EXT"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_EXT"/>
+                <enum name="GL_TESS_CONTROL_SHADER_BIT_EXT"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_BIT_EXT"/>
+                <command name="glPatchParameteriEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture" supported="gl">
+            <require>
+                <enum name="GL_ALPHA4_EXT"/>
+                <enum name="GL_ALPHA8_EXT"/>
+                <enum name="GL_ALPHA12_EXT"/>
+                <enum name="GL_ALPHA16_EXT"/>
+                <enum name="GL_LUMINANCE4_EXT"/>
+                <enum name="GL_LUMINANCE8_EXT"/>
+                <enum name="GL_LUMINANCE12_EXT"/>
+                <enum name="GL_LUMINANCE16_EXT"/>
+                <enum name="GL_LUMINANCE4_ALPHA4_EXT"/>
+                <enum name="GL_LUMINANCE6_ALPHA2_EXT"/>
+                <enum name="GL_LUMINANCE8_ALPHA8_EXT"/>
+                <enum name="GL_LUMINANCE12_ALPHA4_EXT"/>
+                <enum name="GL_LUMINANCE12_ALPHA12_EXT"/>
+                <enum name="GL_LUMINANCE16_ALPHA16_EXT"/>
+                <enum name="GL_INTENSITY_EXT"/>
+                <enum name="GL_INTENSITY4_EXT"/>
+                <enum name="GL_INTENSITY8_EXT"/>
+                <enum name="GL_INTENSITY12_EXT"/>
+                <enum name="GL_INTENSITY16_EXT"/>
+                <enum name="GL_RGB2_EXT"/>
+                <enum name="GL_RGB4_EXT"/>
+                <enum name="GL_RGB5_EXT"/>
+                <enum name="GL_RGB8_EXT"/>
+                <enum name="GL_RGB10_EXT"/>
+                <enum name="GL_RGB12_EXT"/>
+                <enum name="GL_RGB16_EXT"/>
+                <enum name="GL_RGBA2_EXT"/>
+                <enum name="GL_RGBA4_EXT"/>
+                <enum name="GL_RGB5_A1_EXT"/>
+                <enum name="GL_RGBA8_EXT"/>
+                <enum name="GL_RGB10_A2_EXT"/>
+                <enum name="GL_RGBA12_EXT"/>
+                <enum name="GL_RGBA16_EXT"/>
+                <enum name="GL_TEXTURE_RED_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_GREEN_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_BLUE_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_ALPHA_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_LUMINANCE_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_INTENSITY_SIZE_EXT"/>
+                <enum name="GL_REPLACE_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_1D_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_2D_EXT"/>
+                <enum name="GL_TEXTURE_TOO_LARGE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture3D" supported="gl">
+            <require>
+                <enum name="GL_PACK_SKIP_IMAGES_EXT"/>
+                <enum name="GL_PACK_IMAGE_HEIGHT_EXT"/>
+                <enum name="GL_UNPACK_SKIP_IMAGES_EXT"/>
+                <enum name="GL_UNPACK_IMAGE_HEIGHT_EXT"/>
+                <enum name="GL_TEXTURE_3D_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_3D_EXT"/>
+                <enum name="GL_TEXTURE_DEPTH_EXT"/>
+                <enum name="GL_TEXTURE_WRAP_R_EXT"/>
+                <enum name="GL_MAX_3D_TEXTURE_SIZE_EXT"/>
+                <command name="glTexImage3DEXT"/>
+                <command name="glTexSubImage3DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_array" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_1D_ARRAY_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_1D_ARRAY_EXT"/>
+                <enum name="GL_TEXTURE_2D_ARRAY_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_2D_ARRAY_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_1D_ARRAY_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_2D_ARRAY_EXT"/>
+                <enum name="GL_MAX_ARRAY_TEXTURE_LAYERS_EXT"/>
+                <enum name="GL_COMPARE_REF_DEPTH_TO_TEXTURE_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT"/>
+                <command name="glFramebufferTextureLayerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_border_clamp" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_BORDER_COLOR_EXT"/>
+                <enum name="GL_CLAMP_TO_BORDER_EXT"/>
+                <command name="glTexParameterIivEXT"/>
+                <command name="glTexParameterIuivEXT"/>
+                <command name="glGetTexParameterIivEXT"/>
+                <command name="glGetTexParameterIuivEXT"/>
+                <command name="glSamplerParameterIivEXT"/>
+                <command name="glSamplerParameterIuivEXT"/>
+                <command name="glGetSamplerParameterIivEXT"/>
+                <command name="glGetSamplerParameterIuivEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_buffer" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_BUFFER_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_BINDING_EXT"/>
+                <enum name="GL_MAX_TEXTURE_BUFFER_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_BUFFER_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_EXT"/>
+                <enum name="GL_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_INT_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT"/>
+                <enum name="GL_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_INT_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_SIZE_EXT"/>
+                <command name="glTexBufferEXT"/>
+                <command name="glTexBufferRangeEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_buffer_object" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_BUFFER_EXT"/>
+                <enum name="GL_MAX_TEXTURE_BUFFER_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_BUFFER_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT"/>
+                <enum name="GL_TEXTURE_BUFFER_FORMAT_EXT"/>
+                <command name="glTexBufferEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_astc_decode_mode" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_ASTC_DECODE_PRECISION_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_bptc" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_BPTC_UNORM_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT"/>
+                <enum name="GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT"/>
+                <enum name="GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_dxt1" supported="gles1|gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGB_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_latc" supported="gl">
+            <require>
+                <enum name="GL_COMPRESSED_LUMINANCE_LATC1_EXT"/>
+                <enum name="GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT"/>
+                <enum name="GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT"/>
+                <enum name="GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_rgtc" supported="gl|gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RED_RGTC1_EXT"/>
+                <enum name="GL_COMPRESSED_SIGNED_RED_RGTC1_EXT"/>
+                <enum name="GL_COMPRESSED_RED_GREEN_RGTC2_EXT"/>
+                <enum name="GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_s3tc" supported="gl|glcore|gles2|glsc2">
+            <require>
+                <enum name="GL_COMPRESSED_RGB_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT3_EXT"/>
+                <enum name="GL_COMPRESSED_RGBA_S3TC_DXT5_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_compression_s3tc_srgb" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_SRGB_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_cube_map" supported="gl" comment="Replaced by ARB_texture_cube_map, but was apparently shipped anyway?">
+            <require>
+                <enum name="GL_NORMAL_MAP_EXT"/>
+                <enum name="GL_REFLECTION_MAP_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT"/>
+                <enum name="GL_PROXY_TEXTURE_CUBE_MAP_EXT"/>
+                <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_cube_map_array" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_EXT"/>
+                <enum name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_env_add" supported="gl"/>
+        <extension name="GL_EXT_texture_env_combine" supported="gl">
+            <require>
+                <enum name="GL_COMBINE_EXT"/>
+                <enum name="GL_COMBINE_RGB_EXT"/>
+                <enum name="GL_COMBINE_ALPHA_EXT"/>
+                <enum name="GL_RGB_SCALE_EXT"/>
+                <enum name="GL_ADD_SIGNED_EXT"/>
+                <enum name="GL_INTERPOLATE_EXT"/>
+                <enum name="GL_CONSTANT_EXT"/>
+                <enum name="GL_PRIMARY_COLOR_EXT"/>
+                <enum name="GL_PREVIOUS_EXT"/>
+                <enum name="GL_SOURCE0_RGB_EXT"/>
+                <enum name="GL_SOURCE1_RGB_EXT"/>
+                <enum name="GL_SOURCE2_RGB_EXT"/>
+                <enum name="GL_SOURCE0_ALPHA_EXT"/>
+                <enum name="GL_SOURCE1_ALPHA_EXT"/>
+                <enum name="GL_SOURCE2_ALPHA_EXT"/>
+                <enum name="GL_OPERAND0_RGB_EXT"/>
+                <enum name="GL_OPERAND1_RGB_EXT"/>
+                <enum name="GL_OPERAND2_RGB_EXT"/>
+                <enum name="GL_OPERAND0_ALPHA_EXT"/>
+                <enum name="GL_OPERAND1_ALPHA_EXT"/>
+                <enum name="GL_OPERAND2_ALPHA_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_env_dot3" supported="gl">
+            <require>
+                <enum name="GL_DOT3_RGB_EXT"/>
+                <enum name="GL_DOT3_RGBA_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_filter_anisotropic" supported="gl|gles1|gles2">
+            <require>
+                <enum name="GL_TEXTURE_MAX_ANISOTROPY_EXT"/>
+                <enum name="GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_filter_minmax" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_TEXTURE_REDUCTION_MODE_EXT"/>
+                <enum name="GL_WEIGHTED_AVERAGE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_format_BGRA8888" supported="gles1|gles2">
+            <require>
+                <enum name="GL_BGRA_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_format_sRGB_override" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_FORMAT_SRGB_OVERRIDE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_integer" supported="gl">
+            <require>
+                <enum name="GL_RGBA32UI_EXT"/>
+                <enum name="GL_RGB32UI_EXT"/>
+                <enum name="GL_ALPHA32UI_EXT"/>
+                <enum name="GL_INTENSITY32UI_EXT"/>
+                <enum name="GL_LUMINANCE32UI_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA32UI_EXT"/>
+                <enum name="GL_RGBA16UI_EXT"/>
+                <enum name="GL_RGB16UI_EXT"/>
+                <enum name="GL_ALPHA16UI_EXT"/>
+                <enum name="GL_INTENSITY16UI_EXT"/>
+                <enum name="GL_LUMINANCE16UI_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA16UI_EXT"/>
+                <enum name="GL_RGBA8UI_EXT"/>
+                <enum name="GL_RGB8UI_EXT"/>
+                <enum name="GL_ALPHA8UI_EXT"/>
+                <enum name="GL_INTENSITY8UI_EXT"/>
+                <enum name="GL_LUMINANCE8UI_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA8UI_EXT"/>
+                <enum name="GL_RGBA32I_EXT"/>
+                <enum name="GL_RGB32I_EXT"/>
+                <enum name="GL_ALPHA32I_EXT"/>
+                <enum name="GL_INTENSITY32I_EXT"/>
+                <enum name="GL_LUMINANCE32I_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA32I_EXT"/>
+                <enum name="GL_RGBA16I_EXT"/>
+                <enum name="GL_RGB16I_EXT"/>
+                <enum name="GL_ALPHA16I_EXT"/>
+                <enum name="GL_INTENSITY16I_EXT"/>
+                <enum name="GL_LUMINANCE16I_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA16I_EXT"/>
+                <enum name="GL_RGBA8I_EXT"/>
+                <enum name="GL_RGB8I_EXT"/>
+                <enum name="GL_ALPHA8I_EXT"/>
+                <enum name="GL_INTENSITY8I_EXT"/>
+                <enum name="GL_LUMINANCE8I_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA8I_EXT"/>
+                <enum name="GL_RED_INTEGER_EXT"/>
+                <enum name="GL_GREEN_INTEGER_EXT"/>
+                <enum name="GL_BLUE_INTEGER_EXT"/>
+                <enum name="GL_ALPHA_INTEGER_EXT"/>
+                <enum name="GL_RGB_INTEGER_EXT"/>
+                <enum name="GL_RGBA_INTEGER_EXT"/>
+                <enum name="GL_BGR_INTEGER_EXT"/>
+                <enum name="GL_BGRA_INTEGER_EXT"/>
+                <enum name="GL_LUMINANCE_INTEGER_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA_INTEGER_EXT"/>
+                <enum name="GL_RGBA_INTEGER_MODE_EXT"/>
+                <command name="glTexParameterIivEXT"/>
+                <command name="glTexParameterIuivEXT"/>
+                <command name="glGetTexParameterIivEXT"/>
+                <command name="glGetTexParameterIuivEXT"/>
+                <command name="glClearColorIiEXT"/>
+                <command name="glClearColorIuiEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_lod_bias" supported="gl|gles1">
+            <require>
+                <enum name="GL_MAX_TEXTURE_LOD_BIAS_EXT"/>
+                <enum name="GL_TEXTURE_FILTER_CONTROL_EXT"/>
+                <enum name="GL_TEXTURE_LOD_BIAS_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_mirror_clamp" supported="gl">
+            <require>
+                <enum name="GL_MIRROR_CLAMP_EXT"/>
+                <enum name="GL_MIRROR_CLAMP_TO_EDGE_EXT"/>
+                <enum name="GL_MIRROR_CLAMP_TO_BORDER_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_mirror_clamp_to_edge" supported="gles2">
+            <require>
+                <enum name="GL_MIRROR_CLAMP_TO_EDGE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_norm16" supported="gles2">
+            <require>
+                <enum name="GL_R16_EXT"/>
+                <enum name="GL_RG16_EXT"/>
+                <enum name="GL_RGBA16_EXT"/>
+                <enum name="GL_RGB16_EXT"/>
+                <enum name="GL_R16_SNORM_EXT"/>
+                <enum name="GL_RG16_SNORM_EXT"/>
+                <enum name="GL_RGB16_SNORM_EXT"/>
+                <enum name="GL_RGBA16_SNORM_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_object" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_PRIORITY_EXT"/>
+                <enum name="GL_TEXTURE_RESIDENT_EXT"/>
+                <enum name="GL_TEXTURE_1D_BINDING_EXT"/>
+                <enum name="GL_TEXTURE_2D_BINDING_EXT"/>
+                <enum name="GL_TEXTURE_3D_BINDING_EXT"/>
+                <command name="glAreTexturesResidentEXT"/>
+                <command name="glBindTextureEXT"/>
+                <command name="glDeleteTexturesEXT"/>
+                <command name="glGenTexturesEXT"/>
+                <command name="glIsTextureEXT"/>
+                <command name="glPrioritizeTexturesEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_perturb_normal" supported="gl">
+            <require>
+                <enum name="GL_PERTURB_EXT"/>
+                <enum name="GL_TEXTURE_NORMAL_EXT"/>
+                <command name="glTextureNormalEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_query_lod" supported="gles2"/>
+        <extension name="GL_EXT_texture_rg" supported="gles2">
+            <require>
+                <enum name="GL_RED_EXT"/>
+                <enum name="GL_RG_EXT"/>
+                <enum name="GL_R8_EXT"/>
+                <enum name="GL_RG8_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_sRGB" supported="gl">
+            <require>
+                <enum name="GL_SRGB_EXT"/>
+                <enum name="GL_SRGB8_EXT"/>
+                <enum name="GL_SRGB_ALPHA_EXT"/>
+                <enum name="GL_SRGB8_ALPHA8_EXT"/>
+                <enum name="GL_SLUMINANCE_ALPHA_EXT"/>
+                <enum name="GL_SLUMINANCE8_ALPHA8_EXT"/>
+                <enum name="GL_SLUMINANCE_EXT"/>
+                <enum name="GL_SLUMINANCE8_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_EXT"/>
+                <enum name="GL_COMPRESSED_SLUMINANCE_EXT"/>
+                <enum name="GL_COMPRESSED_SLUMINANCE_ALPHA_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_sRGB_R8" supported="gles2|gl|glcore">
+            <require>
+                <enum name="GL_SR8_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_sRGB_RG8" supported="gles2">
+            <require>
+                <enum name="GL_SRG8_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_sRGB_decode" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_TEXTURE_SRGB_DECODE_EXT"/>
+                <enum name="GL_DECODE_EXT"/>
+                <enum name="GL_SKIP_DECODE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_shared_exponent" supported="gl">
+            <require>
+                <enum name="GL_RGB9_E5_EXT"/>
+                <enum name="GL_UNSIGNED_INT_5_9_9_9_REV_EXT"/>
+                <enum name="GL_TEXTURE_SHARED_SIZE_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_snorm" supported="gl">
+            <require>
+                <enum name="GL_ALPHA_SNORM"/>
+                <enum name="GL_LUMINANCE_SNORM"/>
+                <enum name="GL_LUMINANCE_ALPHA_SNORM"/>
+                <enum name="GL_INTENSITY_SNORM"/>
+                <enum name="GL_ALPHA8_SNORM"/>
+                <enum name="GL_LUMINANCE8_SNORM"/>
+                <enum name="GL_LUMINANCE8_ALPHA8_SNORM"/>
+                <enum name="GL_INTENSITY8_SNORM"/>
+                <enum name="GL_ALPHA16_SNORM"/>
+                <enum name="GL_LUMINANCE16_SNORM"/>
+                <enum name="GL_LUMINANCE16_ALPHA16_SNORM"/>
+                <enum name="GL_INTENSITY16_SNORM"/>
+                <enum name="GL_RED_SNORM"/>
+                <enum name="GL_RG_SNORM"/>
+                <enum name="GL_RGB_SNORM"/>
+                <enum name="GL_RGBA_SNORM"/>
+                <enum name="GL_R8_SNORM"/>
+                <enum name="GL_RG8_SNORM"/>
+                <enum name="GL_RGB8_SNORM"/>
+                <enum name="GL_RGBA8_SNORM"/>
+                <enum name="GL_R16_SNORM"/>
+                <enum name="GL_RG16_SNORM"/>
+                <enum name="GL_RGB16_SNORM"/>
+                <enum name="GL_RGBA16_SNORM"/>
+                <enum name="GL_SIGNED_NORMALIZED"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_storage" supported="gles1|gles2">
+            <require comment="Not clear all of these enums should be here for OpenGL ES. Many are only defined if other extensions also requiring them are supported">
+                <enum name="GL_TEXTURE_IMMUTABLE_FORMAT_EXT"/>
+                <enum name="GL_ALPHA8_EXT"/>
+                <enum name="GL_LUMINANCE8_EXT"/>
+                <enum name="GL_LUMINANCE8_ALPHA8_EXT"/>
+                <enum name="GL_RGBA32F_EXT"/>
+                <enum name="GL_RGB32F_EXT"/>
+                <enum name="GL_ALPHA32F_EXT"/>
+                <enum name="GL_LUMINANCE32F_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA32F_EXT"/>
+                <enum name="GL_RGBA16F_EXT"/>
+                <enum name="GL_RGB16F_EXT"/>
+                <enum name="GL_ALPHA16F_EXT"/>
+                <enum name="GL_LUMINANCE16F_EXT"/>
+                <enum name="GL_LUMINANCE_ALPHA16F_EXT"/>
+                <enum name="GL_RGB10_A2_EXT"/>
+                <enum name="GL_RGB10_EXT"/>
+                <enum name="GL_BGRA8_EXT"/>
+                <enum name="GL_R8_EXT"/>
+                <enum name="GL_RG8_EXT"/>
+                <enum name="GL_R32F_EXT"/>
+                <enum name="GL_RG32F_EXT"/>
+                <enum name="GL_R16F_EXT"/>
+                <enum name="GL_RG16F_EXT"/>
+                <command name="glTexStorage1DEXT"/>
+                <command name="glTexStorage2DEXT"/>
+                <command name="glTexStorage3DEXT"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glTextureStorage1DEXT"/>
+                <command name="glTextureStorage2DEXT"/>
+                <command name="glTextureStorage3DEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_swizzle" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_SWIZZLE_R_EXT"/>
+                <enum name="GL_TEXTURE_SWIZZLE_G_EXT"/>
+                <enum name="GL_TEXTURE_SWIZZLE_B_EXT"/>
+                <enum name="GL_TEXTURE_SWIZZLE_A_EXT"/>
+                <enum name="GL_TEXTURE_SWIZZLE_RGBA_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_type_2_10_10_10_REV" supported="gles2">
+            <require>
+                <enum name="GL_UNSIGNED_INT_2_10_10_10_REV_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_view" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_VIEW_MIN_LEVEL_EXT"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LEVELS_EXT"/>
+                <enum name="GL_TEXTURE_VIEW_MIN_LAYER_EXT"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LAYERS_EXT"/>
+                <enum name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+                <command name="glTextureViewEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_timer_query" supported="gl">
+            <require>
+                <enum name="GL_TIME_ELAPSED_EXT"/>
+                <command name="glGetQueryObjecti64vEXT"/>
+                <command name="glGetQueryObjectui64vEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_transform_feedback" supported="gl">
+            <require>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_EXT"/>
+                <enum name="GL_INTERLEAVED_ATTRIBS_EXT"/>
+                <enum name="GL_SEPARATE_ATTRIBS_EXT"/>
+                <enum name="GL_PRIMITIVES_GENERATED_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_EXT"/>
+                <enum name="GL_RASTERIZER_DISCARD_EXT"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_EXT"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_VARYINGS_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE_EXT"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT"/>
+                <command name="glBeginTransformFeedbackEXT"/>
+                <command name="glEndTransformFeedbackEXT"/>
+                <command name="glBindBufferRangeEXT"/>
+                <command name="glBindBufferOffsetEXT"/>
+                <command name="glBindBufferBaseEXT"/>
+                <command name="glTransformFeedbackVaryingsEXT"/>
+                <command name="glGetTransformFeedbackVaryingEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_unpack_subimage" supported="gles2">
+            <require>
+                <enum name="GL_UNPACK_ROW_LENGTH_EXT"/>
+                <enum name="GL_UNPACK_SKIP_ROWS_EXT"/>
+                <enum name="GL_UNPACK_SKIP_PIXELS_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_vertex_array" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_EXT"/>
+                <enum name="GL_NORMAL_ARRAY_EXT"/>
+                <enum name="GL_COLOR_ARRAY_EXT"/>
+                <enum name="GL_INDEX_ARRAY_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_EXT"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_SIZE_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_TYPE_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_COUNT_EXT"/>
+                <enum name="GL_NORMAL_ARRAY_TYPE_EXT"/>
+                <enum name="GL_NORMAL_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_NORMAL_ARRAY_COUNT_EXT"/>
+                <enum name="GL_COLOR_ARRAY_SIZE_EXT"/>
+                <enum name="GL_COLOR_ARRAY_TYPE_EXT"/>
+                <enum name="GL_COLOR_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_COLOR_ARRAY_COUNT_EXT"/>
+                <enum name="GL_INDEX_ARRAY_TYPE_EXT"/>
+                <enum name="GL_INDEX_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_INDEX_ARRAY_COUNT_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_SIZE_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_TYPE_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_COUNT_EXT"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_COUNT_EXT"/>
+                <enum name="GL_VERTEX_ARRAY_POINTER_EXT"/>
+                <enum name="GL_NORMAL_ARRAY_POINTER_EXT"/>
+                <enum name="GL_COLOR_ARRAY_POINTER_EXT"/>
+                <enum name="GL_INDEX_ARRAY_POINTER_EXT"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_POINTER_EXT"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_POINTER_EXT"/>
+                <command name="glArrayElementEXT"/>
+                <command name="glColorPointerEXT"/>
+                <command name="glDrawArraysEXT"/>
+                <command name="glEdgeFlagPointerEXT"/>
+                <command name="glGetPointervEXT"/>
+                <command name="glIndexPointerEXT"/>
+                <command name="glNormalPointerEXT"/>
+                <command name="glTexCoordPointerEXT"/>
+                <command name="glVertexPointerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_vertex_array_bgra" supported="gl">
+            <require>
+                <enum name="GL_BGRA"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_vertex_attrib_64bit" supported="gl">
+            <require>
+                <enum name="GL_DOUBLE"/>
+                <enum name="GL_DOUBLE_VEC2_EXT"/>
+                <enum name="GL_DOUBLE_VEC3_EXT"/>
+                <enum name="GL_DOUBLE_VEC4_EXT"/>
+                <enum name="GL_DOUBLE_MAT2_EXT"/>
+                <enum name="GL_DOUBLE_MAT3_EXT"/>
+                <enum name="GL_DOUBLE_MAT4_EXT"/>
+                <enum name="GL_DOUBLE_MAT2x3_EXT"/>
+                <enum name="GL_DOUBLE_MAT2x4_EXT"/>
+                <enum name="GL_DOUBLE_MAT3x2_EXT"/>
+                <enum name="GL_DOUBLE_MAT3x4_EXT"/>
+                <enum name="GL_DOUBLE_MAT4x2_EXT"/>
+                <enum name="GL_DOUBLE_MAT4x3_EXT"/>
+                <command name="glVertexAttribL1dEXT"/>
+                <command name="glVertexAttribL2dEXT"/>
+                <command name="glVertexAttribL3dEXT"/>
+                <command name="glVertexAttribL4dEXT"/>
+                <command name="glVertexAttribL1dvEXT"/>
+                <command name="glVertexAttribL2dvEXT"/>
+                <command name="glVertexAttribL3dvEXT"/>
+                <command name="glVertexAttribL4dvEXT"/>
+                <command name="glVertexAttribLPointerEXT"/>
+                <command name="glGetVertexAttribLdvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_vertex_shader" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_SHADER_EXT"/>
+                <enum name="GL_VERTEX_SHADER_BINDING_EXT"/>
+                <enum name="GL_OP_INDEX_EXT"/>
+                <enum name="GL_OP_NEGATE_EXT"/>
+                <enum name="GL_OP_DOT3_EXT"/>
+                <enum name="GL_OP_DOT4_EXT"/>
+                <enum name="GL_OP_MUL_EXT"/>
+                <enum name="GL_OP_ADD_EXT"/>
+                <enum name="GL_OP_MADD_EXT"/>
+                <enum name="GL_OP_FRAC_EXT"/>
+                <enum name="GL_OP_MAX_EXT"/>
+                <enum name="GL_OP_MIN_EXT"/>
+                <enum name="GL_OP_SET_GE_EXT"/>
+                <enum name="GL_OP_SET_LT_EXT"/>
+                <enum name="GL_OP_CLAMP_EXT"/>
+                <enum name="GL_OP_FLOOR_EXT"/>
+                <enum name="GL_OP_ROUND_EXT"/>
+                <enum name="GL_OP_EXP_BASE_2_EXT"/>
+                <enum name="GL_OP_LOG_BASE_2_EXT"/>
+                <enum name="GL_OP_POWER_EXT"/>
+                <enum name="GL_OP_RECIP_EXT"/>
+                <enum name="GL_OP_RECIP_SQRT_EXT"/>
+                <enum name="GL_OP_SUB_EXT"/>
+                <enum name="GL_OP_CROSS_PRODUCT_EXT"/>
+                <enum name="GL_OP_MULTIPLY_MATRIX_EXT"/>
+                <enum name="GL_OP_MOV_EXT"/>
+                <enum name="GL_OUTPUT_VERTEX_EXT"/>
+                <enum name="GL_OUTPUT_COLOR0_EXT"/>
+                <enum name="GL_OUTPUT_COLOR1_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD0_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD1_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD2_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD3_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD4_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD5_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD6_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD7_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD8_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD9_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD10_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD11_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD12_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD13_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD14_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD15_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD16_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD17_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD18_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD19_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD20_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD21_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD22_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD23_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD24_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD25_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD26_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD27_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD28_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD29_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD30_EXT"/>
+                <enum name="GL_OUTPUT_TEXTURE_COORD31_EXT"/>
+                <enum name="GL_OUTPUT_FOG_EXT"/>
+                <enum name="GL_SCALAR_EXT"/>
+                <enum name="GL_VECTOR_EXT"/>
+                <enum name="GL_MATRIX_EXT"/>
+                <enum name="GL_VARIANT_EXT"/>
+                <enum name="GL_INVARIANT_EXT"/>
+                <enum name="GL_LOCAL_CONSTANT_EXT"/>
+                <enum name="GL_LOCAL_EXT"/>
+                <enum name="GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+                <enum name="GL_MAX_VERTEX_SHADER_VARIANTS_EXT"/>
+                <enum name="GL_MAX_VERTEX_SHADER_INVARIANTS_EXT"/>
+                <enum name="GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+                <enum name="GL_MAX_VERTEX_SHADER_LOCALS_EXT"/>
+                <enum name="GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+                <enum name="GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT"/>
+                <enum name="GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+                <enum name="GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT"/>
+                <enum name="GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_INSTRUCTIONS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_VARIANTS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_INVARIANTS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_LOCALS_EXT"/>
+                <enum name="GL_VERTEX_SHADER_OPTIMIZED_EXT"/>
+                <enum name="GL_X_EXT"/>
+                <enum name="GL_Y_EXT"/>
+                <enum name="GL_Z_EXT"/>
+                <enum name="GL_W_EXT"/>
+                <enum name="GL_NEGATIVE_X_EXT"/>
+                <enum name="GL_NEGATIVE_Y_EXT"/>
+                <enum name="GL_NEGATIVE_Z_EXT"/>
+                <enum name="GL_NEGATIVE_W_EXT"/>
+                <enum name="GL_ZERO_EXT"/>
+                <enum name="GL_ONE_EXT"/>
+                <enum name="GL_NEGATIVE_ONE_EXT"/>
+                <enum name="GL_NORMALIZED_RANGE_EXT"/>
+                <enum name="GL_FULL_RANGE_EXT"/>
+                <enum name="GL_CURRENT_VERTEX_EXT"/>
+                <enum name="GL_MVP_MATRIX_EXT"/>
+                <enum name="GL_VARIANT_VALUE_EXT"/>
+                <enum name="GL_VARIANT_DATATYPE_EXT"/>
+                <enum name="GL_VARIANT_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_VARIANT_ARRAY_TYPE_EXT"/>
+                <enum name="GL_VARIANT_ARRAY_EXT"/>
+                <enum name="GL_VARIANT_ARRAY_POINTER_EXT"/>
+                <enum name="GL_INVARIANT_VALUE_EXT"/>
+                <enum name="GL_INVARIANT_DATATYPE_EXT"/>
+                <enum name="GL_LOCAL_CONSTANT_VALUE_EXT"/>
+                <enum name="GL_LOCAL_CONSTANT_DATATYPE_EXT"/>
+                <command name="glBeginVertexShaderEXT"/>
+                <command name="glEndVertexShaderEXT"/>
+                <command name="glBindVertexShaderEXT"/>
+                <command name="glGenVertexShadersEXT"/>
+                <command name="glDeleteVertexShaderEXT"/>
+                <command name="glShaderOp1EXT"/>
+                <command name="glShaderOp2EXT"/>
+                <command name="glShaderOp3EXT"/>
+                <command name="glSwizzleEXT"/>
+                <command name="glWriteMaskEXT"/>
+                <command name="glInsertComponentEXT"/>
+                <command name="glExtractComponentEXT"/>
+                <command name="glGenSymbolsEXT"/>
+                <command name="glSetInvariantEXT"/>
+                <command name="glSetLocalConstantEXT"/>
+                <command name="glVariantbvEXT"/>
+                <command name="glVariantsvEXT"/>
+                <command name="glVariantivEXT"/>
+                <command name="glVariantfvEXT"/>
+                <command name="glVariantdvEXT"/>
+                <command name="glVariantubvEXT"/>
+                <command name="glVariantusvEXT"/>
+                <command name="glVariantuivEXT"/>
+                <command name="glVariantPointerEXT"/>
+                <command name="glEnableVariantClientStateEXT"/>
+                <command name="glDisableVariantClientStateEXT"/>
+                <command name="glBindLightParameterEXT"/>
+                <command name="glBindMaterialParameterEXT"/>
+                <command name="glBindTexGenParameterEXT"/>
+                <command name="glBindTextureUnitParameterEXT"/>
+                <command name="glBindParameterEXT"/>
+                <command name="glIsVariantEnabledEXT"/>
+                <command name="glGetVariantBooleanvEXT"/>
+                <command name="glGetVariantIntegervEXT"/>
+                <command name="glGetVariantFloatvEXT"/>
+                <command name="glGetVariantPointervEXT"/>
+                <command name="glGetInvariantBooleanvEXT"/>
+                <command name="glGetInvariantIntegervEXT"/>
+                <command name="glGetInvariantFloatvEXT"/>
+                <command name="glGetLocalConstantBooleanvEXT"/>
+                <command name="glGetLocalConstantIntegervEXT"/>
+                <command name="glGetLocalConstantFloatvEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_vertex_weighting" supported="gl">
+            <require>
+                <enum name="GL_MODELVIEW0_STACK_DEPTH_EXT"/>
+                <enum name="GL_MODELVIEW1_STACK_DEPTH_EXT"/>
+                <enum name="GL_MODELVIEW0_MATRIX_EXT"/>
+                <enum name="GL_MODELVIEW1_MATRIX_EXT"/>
+                <enum name="GL_VERTEX_WEIGHTING_EXT"/>
+                <enum name="GL_MODELVIEW0_EXT"/>
+                <enum name="GL_MODELVIEW1_EXT"/>
+                <enum name="GL_CURRENT_VERTEX_WEIGHT_EXT"/>
+                <enum name="GL_VERTEX_WEIGHT_ARRAY_EXT"/>
+                <enum name="GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT"/>
+                <enum name="GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT"/>
+                <enum name="GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT"/>
+                <enum name="GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT"/>
+                <command name="glVertexWeightfEXT"/>
+                <command name="glVertexWeightfvEXT"/>
+                <command name="glVertexWeightPointerEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_win32_keyed_mutex" supported="gl|gles2">
+            <require>
+                <command name="glAcquireKeyedMutexWin32EXT"/>
+                <command name="glReleaseKeyedMutexWin32EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_window_rectangles" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_INCLUSIVE_EXT"/>
+                <enum name="GL_EXCLUSIVE_EXT"/>
+                <enum name="GL_WINDOW_RECTANGLE_EXT"/>
+                <enum name="GL_WINDOW_RECTANGLE_MODE_EXT"/>
+                <enum name="GL_MAX_WINDOW_RECTANGLES_EXT"/>
+                <enum name="GL_NUM_WINDOW_RECTANGLES_EXT"/>
+                <command name="glWindowRectanglesEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_x11_sync_object" supported="gl">
+            <require>
+                <enum name="GL_SYNC_X11_FENCE_EXT"/>
+                <command name="glImportSyncEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_FJ_shader_binary_GCCSO" supported="gles2">
+            <require>
+                <enum name="GL_GCCSO_SHADER_BINARY_FJ"/>
+            </require>
+        </extension>
+        <extension name="GL_GREMEDY_frame_terminator" supported="gl">
+            <require>
+                <command name="glFrameTerminatorGREMEDY"/>
+            </require>
+        </extension>
+        <extension name="GL_GREMEDY_string_marker" supported="gl">
+            <require>
+                <command name="glStringMarkerGREMEDY"/>
+            </require>
+        </extension>
+        <extension name="GL_HP_convolution_border_modes" supported="gl">
+            <require>
+                <enum name="GL_IGNORE_BORDER_HP"/>
+                <enum name="GL_CONSTANT_BORDER_HP"/>
+                <enum name="GL_REPLICATE_BORDER_HP"/>
+                <enum name="GL_CONVOLUTION_BORDER_COLOR_HP"/>
+            </require>
+        </extension>
+        <extension name="GL_HP_image_transform" supported="gl">
+            <require>
+                <enum name="GL_IMAGE_SCALE_X_HP"/>
+                <enum name="GL_IMAGE_SCALE_Y_HP"/>
+                <enum name="GL_IMAGE_TRANSLATE_X_HP"/>
+                <enum name="GL_IMAGE_TRANSLATE_Y_HP"/>
+                <enum name="GL_IMAGE_ROTATE_ANGLE_HP"/>
+                <enum name="GL_IMAGE_ROTATE_ORIGIN_X_HP"/>
+                <enum name="GL_IMAGE_ROTATE_ORIGIN_Y_HP"/>
+                <enum name="GL_IMAGE_MAG_FILTER_HP"/>
+                <enum name="GL_IMAGE_MIN_FILTER_HP"/>
+                <enum name="GL_IMAGE_CUBIC_WEIGHT_HP"/>
+                <enum name="GL_CUBIC_HP"/>
+                <enum name="GL_AVERAGE_HP"/>
+                <enum name="GL_IMAGE_TRANSFORM_2D_HP"/>
+                <enum name="GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP"/>
+                <enum name="GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP"/>
+                <command name="glImageTransformParameteriHP"/>
+                <command name="glImageTransformParameterfHP"/>
+                <command name="glImageTransformParameterivHP"/>
+                <command name="glImageTransformParameterfvHP"/>
+                <command name="glGetImageTransformParameterivHP"/>
+                <command name="glGetImageTransformParameterfvHP"/>
+            </require>
+        </extension>
+        <extension name="GL_HP_occlusion_test" supported="gl">
+            <require>
+                <enum name="GL_OCCLUSION_TEST_HP"/>
+                <enum name="GL_OCCLUSION_TEST_RESULT_HP"/>
+            </require>
+        </extension>
+        <extension name="GL_HP_texture_lighting" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_LIGHTING_MODE_HP"/>
+                <enum name="GL_TEXTURE_POST_SPECULAR_HP"/>
+                <enum name="GL_TEXTURE_PRE_SPECULAR_HP"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_cull_vertex" supported="gl">
+            <require>
+                <enum name="GL_CULL_VERTEX_IBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_multimode_draw_arrays" supported="gl">
+            <require>
+                <command name="glMultiModeDrawArraysIBM"/>
+                <command name="glMultiModeDrawElementsIBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_rasterpos_clip" supported="gl">
+            <require>
+                <enum name="GL_RASTER_POSITION_UNCLIPPED_IBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_static_data" supported="gl">
+            <require>
+                <enum name="GL_ALL_STATIC_DATA_IBM"/>
+                <enum name="GL_STATIC_VERTEX_ARRAY_IBM"/>
+                <command name="glFlushStaticDataIBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_texture_mirrored_repeat" supported="gl">
+            <require>
+                <enum name="GL_MIRRORED_REPEAT_IBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IBM_vertex_array_lists" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_LIST_IBM"/>
+                <enum name="GL_NORMAL_ARRAY_LIST_IBM"/>
+                <enum name="GL_COLOR_ARRAY_LIST_IBM"/>
+                <enum name="GL_INDEX_ARRAY_LIST_IBM"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_LIST_IBM"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_LIST_IBM"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_LIST_IBM"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_LIST_IBM"/>
+                <enum name="GL_VERTEX_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_NORMAL_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_COLOR_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_INDEX_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM"/>
+                <command name="glColorPointerListIBM"/>
+                <command name="glSecondaryColorPointerListIBM"/>
+                <command name="glEdgeFlagPointerListIBM"/>
+                <command name="glFogCoordPointerListIBM"/>
+                <command name="glIndexPointerListIBM"/>
+                <command name="glNormalPointerListIBM"/>
+                <command name="glTexCoordPointerListIBM"/>
+                <command name="glVertexPointerListIBM"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_bindless_texture" supported="gles2">
+            <require>
+                <command name="glGetTextureHandleIMG"/>
+                <command name="glGetTextureSamplerHandleIMG"/>
+                <command name="glUniformHandleui64IMG"/>
+                <command name="glUniformHandleui64vIMG"/>
+                <command name="glProgramUniformHandleui64IMG"/>
+                <command name="glProgramUniformHandleui64vIMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_framebuffer_downsample" supported="gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_AND_DOWNSAMPLE_IMG"/>
+                <enum name="GL_NUM_DOWNSAMPLE_SCALES_IMG"/>
+                <enum name="GL_DOWNSAMPLE_SCALES_IMG"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SCALE_IMG"/>
+                <command name="glFramebufferTexture2DDownsampleIMG"/>
+                <command name="glFramebufferTextureLayerDownsampleIMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_multisampled_render_to_texture" supported="gles1|gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER_SAMPLES_IMG"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG"/>
+                <enum name="GL_MAX_SAMPLES_IMG"/>
+                <enum name="GL_TEXTURE_SAMPLES_IMG"/>
+                <command name="glRenderbufferStorageMultisampleIMG"/>
+                <command name="glFramebufferTexture2DMultisampleIMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_program_binary" supported="gles2">
+            <require>
+                <enum name="GL_SGX_PROGRAM_BINARY_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_read_format" supported="gles1|gles2">
+            <require>
+                <enum name="GL_BGRA_IMG"/>
+                <enum name="GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_shader_binary" supported="gles2">
+            <require>
+                <enum name="GL_SGX_BINARY_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_texture_compression_pvrtc" supported="gles1|gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG"/>
+                <enum name="GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG"/>
+                <enum name="GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG"/>
+                <enum name="GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_texture_compression_pvrtc2" supported="gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG"/>
+                <enum name="GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_texture_env_enhanced_fixed_function" supported="gles1">
+            <require>
+                <enum name="GL_MODULATE_COLOR_IMG"/>
+                <enum name="GL_RECIP_ADD_SIGNED_ALPHA_IMG"/>
+                <enum name="GL_TEXTURE_ALPHA_MODULATE_IMG"/>
+                <enum name="GL_FACTOR_ALPHA_MODULATE_IMG"/>
+                <enum name="GL_FRAGMENT_ALPHA_MODULATE_IMG"/>
+                <enum name="GL_ADD_BLEND_IMG"/>
+                <enum name="GL_DOT3_RGBA_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_texture_filter_cubic" supported="gles2">
+            <require>
+                <enum name="GL_CUBIC_IMG"/>
+                <enum name="GL_CUBIC_MIPMAP_NEAREST_IMG"/>
+                <enum name="GL_CUBIC_MIPMAP_LINEAR_IMG"/>
+            </require>
+        </extension>
+        <extension name="GL_IMG_user_clip_plane" supported="gles1">
+            <require>
+                <enum name="GL_CLIP_PLANE0_IMG"/>
+                <enum name="GL_CLIP_PLANE1_IMG"/>
+                <enum name="GL_CLIP_PLANE2_IMG"/>
+                <enum name="GL_CLIP_PLANE3_IMG"/>
+                <enum name="GL_CLIP_PLANE4_IMG"/>
+                <enum name="GL_CLIP_PLANE5_IMG"/>
+                <enum name="GL_MAX_CLIP_PLANES_IMG"/>
+                <command name="glClipPlanefIMG"/>
+                <command name="glClipPlanexIMG"/>
+            </require>
+        </extension>
+        <extension name="GL_INGR_blend_func_separate" supported="gl">
+            <require>
+                <command name="glBlendFuncSeparateINGR"/>
+            </require>
+        </extension>
+        <extension name="GL_INGR_color_clamp" supported="gl">
+            <require>
+                <enum name="GL_RED_MIN_CLAMP_INGR"/>
+                <enum name="GL_GREEN_MIN_CLAMP_INGR"/>
+                <enum name="GL_BLUE_MIN_CLAMP_INGR"/>
+                <enum name="GL_ALPHA_MIN_CLAMP_INGR"/>
+                <enum name="GL_RED_MAX_CLAMP_INGR"/>
+                <enum name="GL_GREEN_MAX_CLAMP_INGR"/>
+                <enum name="GL_BLUE_MAX_CLAMP_INGR"/>
+                <enum name="GL_ALPHA_MAX_CLAMP_INGR"/>
+            </require>
+        </extension>
+        <extension name="GL_INGR_interlace_read" supported="gl">
+            <require>
+                <enum name="GL_INTERLACE_READ_INGR"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_conservative_rasterization" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_CONSERVATIVE_RASTERIZATION_INTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_fragment_shader_ordering" supported="gl"/>
+        <extension name="GL_INTEL_framebuffer_CMAA" supported="gl|glcore|gles2">
+            <require>
+                <command name="glApplyFramebufferAttachmentCMAAINTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_map_texture" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_MEMORY_LAYOUT_INTEL"/>
+                <enum name="GL_LAYOUT_DEFAULT_INTEL"/>
+                <enum name="GL_LAYOUT_LINEAR_INTEL"/>
+                <enum name="GL_LAYOUT_LINEAR_CPU_CACHED_INTEL"/>
+                <command name="glSyncTextureINTEL"/>
+                <command name="glUnmapTexture2DINTEL"/>
+                <command name="glMapTexture2DINTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_blackhole_render" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_BLACKHOLE_RENDER_INTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_parallel_arrays" supported="gl">
+            <require>
+                <enum name="GL_PARALLEL_ARRAYS_INTEL"/>
+                <enum name="GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL"/>
+                <enum name="GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL"/>
+                <enum name="GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL"/>
+                <command name="glVertexPointervINTEL"/>
+                <command name="glNormalPointervINTEL"/>
+                <command name="glColorPointervINTEL"/>
+                <command name="glTexCoordPointervINTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_INTEL_performance_query" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_PERFQUERY_SINGLE_CONTEXT_INTEL"/>
+                <enum name="GL_PERFQUERY_GLOBAL_CONTEXT_INTEL"/>
+                <enum name="GL_PERFQUERY_WAIT_INTEL"/>
+                <enum name="GL_PERFQUERY_FLUSH_INTEL"/>
+                <enum name="GL_PERFQUERY_DONOT_FLUSH_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_EVENT_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_RAW_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL"/>
+                <enum name="GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL"/>
+                <enum name="GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL"/>
+                <enum name="GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL"/>
+                <command name="glBeginPerfQueryINTEL"/>
+                <command name="glCreatePerfQueryINTEL"/>
+                <command name="glDeletePerfQueryINTEL"/>
+                <command name="glEndPerfQueryINTEL"/>
+                <command name="glGetFirstPerfQueryIdINTEL"/>
+                <command name="glGetNextPerfQueryIdINTEL"/>
+                <command name="glGetPerfCounterInfoINTEL"/>
+                <command name="glGetPerfQueryDataINTEL"/>
+                <command name="glGetPerfQueryIdByNameINTEL"/>
+                <command name="glGetPerfQueryInfoINTEL"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_blend_equation_advanced" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_MULTIPLY_KHR"/>
+                <enum name="GL_SCREEN_KHR"/>
+                <enum name="GL_OVERLAY_KHR"/>
+                <enum name="GL_DARKEN_KHR"/>
+                <enum name="GL_LIGHTEN_KHR"/>
+                <enum name="GL_COLORDODGE_KHR"/>
+                <enum name="GL_COLORBURN_KHR"/>
+                <enum name="GL_HARDLIGHT_KHR"/>
+                <enum name="GL_SOFTLIGHT_KHR"/>
+                <enum name="GL_DIFFERENCE_KHR"/>
+                <enum name="GL_EXCLUSION_KHR"/>
+                <enum name="GL_HSL_HUE_KHR"/>
+                <enum name="GL_HSL_SATURATION_KHR"/>
+                <enum name="GL_HSL_COLOR_KHR"/>
+                <enum name="GL_HSL_LUMINOSITY_KHR"/>
+                <command name="glBlendBarrierKHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_blend_equation_advanced_coherent" supported="gl|glcore|gles2">
+            <require comment="Otherwise identical to GL_KHR_blend_equation_advanced, just different semantic behavior">
+                <enum name="GL_BLEND_ADVANCED_COHERENT_KHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_context_flush_control" supported="gl|glcore|gles2">
+            <require api="gl" comment="KHR extensions *mandate* suffixes for ES, unlike for GL">
+                <enum name="GL_CONTEXT_RELEASE_BEHAVIOR"/>
+                <enum name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH"/>
+                <enum name="GL_NONE"/>
+            </require>
+            <require api="gles2">
+                <enum name="GL_CONTEXT_RELEASE_BEHAVIOR_KHR"/>
+                <enum name="GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR"/>
+                <enum name="GL_NONE"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_debug" supported="gl|glcore|gles1|gles2">
+            <require api="gl" comment="KHR extensions *mandate* suffixes for ES, unlike for GL">
+                <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS"/>
+                <enum name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH"/>
+                <enum name="GL_DEBUG_CALLBACK_FUNCTION"/>
+                <enum name="GL_DEBUG_CALLBACK_USER_PARAM"/>
+                <enum name="GL_DEBUG_SOURCE_API"/>
+                <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM"/>
+                <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER"/>
+                <enum name="GL_DEBUG_SOURCE_THIRD_PARTY"/>
+                <enum name="GL_DEBUG_SOURCE_APPLICATION"/>
+                <enum name="GL_DEBUG_SOURCE_OTHER"/>
+                <enum name="GL_DEBUG_TYPE_ERROR"/>
+                <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR"/>
+                <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR"/>
+                <enum name="GL_DEBUG_TYPE_PORTABILITY"/>
+                <enum name="GL_DEBUG_TYPE_PERFORMANCE"/>
+                <enum name="GL_DEBUG_TYPE_OTHER"/>
+                <enum name="GL_DEBUG_TYPE_MARKER"/>
+                <enum name="GL_DEBUG_TYPE_PUSH_GROUP"/>
+                <enum name="GL_DEBUG_TYPE_POP_GROUP"/>
+                <enum name="GL_DEBUG_SEVERITY_NOTIFICATION"/>
+                <enum name="GL_MAX_DEBUG_GROUP_STACK_DEPTH"/>
+                <enum name="GL_DEBUG_GROUP_STACK_DEPTH"/>
+                <enum name="GL_BUFFER"/>
+                <enum name="GL_SHADER"/>
+                <enum name="GL_PROGRAM"/>
+                <enum name="GL_VERTEX_ARRAY"/>
+                <enum name="GL_QUERY"/>
+                <enum name="GL_PROGRAM_PIPELINE"/>
+                <enum name="GL_SAMPLER"/>
+                <enum name="GL_MAX_LABEL_LENGTH"/>
+                <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH"/>
+                <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES"/>
+                <enum name="GL_DEBUG_LOGGED_MESSAGES"/>
+                <enum name="GL_DEBUG_SEVERITY_HIGH"/>
+                <enum name="GL_DEBUG_SEVERITY_MEDIUM"/>
+                <enum name="GL_DEBUG_SEVERITY_LOW"/>
+                <enum name="GL_DEBUG_OUTPUT"/>
+                <enum name="GL_CONTEXT_FLAG_DEBUG_BIT"/>
+                <enum name="GL_STACK_OVERFLOW"/>
+                <enum name="GL_STACK_UNDERFLOW"/>
+                <command name="glDebugMessageControl"/>
+                <command name="glDebugMessageInsert"/>
+                <command name="glDebugMessageCallback"/>
+                <command name="glGetDebugMessageLog"/>
+                <command name="glPushDebugGroup"/>
+                <command name="glPopDebugGroup"/>
+                <command name="glObjectLabel"/>
+                <command name="glGetObjectLabel"/>
+                <command name="glObjectPtrLabel"/>
+                <command name="glGetObjectPtrLabel"/>
+                <command name="glGetPointerv"/>
+            </require>
+            <require api="gles2">
+                <enum name="GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR"/>
+                <enum name="GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR"/>
+                <enum name="GL_DEBUG_CALLBACK_FUNCTION_KHR"/>
+                <enum name="GL_DEBUG_CALLBACK_USER_PARAM_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_API_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_SHADER_COMPILER_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_THIRD_PARTY_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_APPLICATION_KHR"/>
+                <enum name="GL_DEBUG_SOURCE_OTHER_KHR"/>
+                <enum name="GL_DEBUG_TYPE_ERROR_KHR"/>
+                <enum name="GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR"/>
+                <enum name="GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR"/>
+                <enum name="GL_DEBUG_TYPE_PORTABILITY_KHR"/>
+                <enum name="GL_DEBUG_TYPE_PERFORMANCE_KHR"/>
+                <enum name="GL_DEBUG_TYPE_OTHER_KHR"/>
+                <enum name="GL_DEBUG_TYPE_MARKER_KHR"/>
+                <enum name="GL_DEBUG_TYPE_PUSH_GROUP_KHR"/>
+                <enum name="GL_DEBUG_TYPE_POP_GROUP_KHR"/>
+                <enum name="GL_DEBUG_SEVERITY_NOTIFICATION_KHR"/>
+                <enum name="GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR"/>
+                <enum name="GL_DEBUG_GROUP_STACK_DEPTH_KHR"/>
+                <enum name="GL_BUFFER_KHR"/>
+                <enum name="GL_SHADER_KHR"/>
+                <enum name="GL_PROGRAM_KHR"/>
+                <enum name="GL_VERTEX_ARRAY_KHR"/>
+                <enum name="GL_QUERY_KHR"/>
+                <enum name="GL_PROGRAM_PIPELINE_KHR"/>
+                <enum name="GL_SAMPLER_KHR"/>
+                <enum name="GL_MAX_LABEL_LENGTH_KHR"/>
+                <enum name="GL_MAX_DEBUG_MESSAGE_LENGTH_KHR"/>
+                <enum name="GL_MAX_DEBUG_LOGGED_MESSAGES_KHR"/>
+                <enum name="GL_DEBUG_LOGGED_MESSAGES_KHR"/>
+                <enum name="GL_DEBUG_SEVERITY_HIGH_KHR"/>
+                <enum name="GL_DEBUG_SEVERITY_MEDIUM_KHR"/>
+                <enum name="GL_DEBUG_SEVERITY_LOW_KHR"/>
+                <enum name="GL_DEBUG_OUTPUT_KHR"/>
+                <enum name="GL_CONTEXT_FLAG_DEBUG_BIT_KHR"/>
+                <enum name="GL_STACK_OVERFLOW_KHR"/>
+                <enum name="GL_STACK_UNDERFLOW_KHR"/>
+                <command name="glDebugMessageControlKHR"/>
+                <command name="glDebugMessageInsertKHR"/>
+                <command name="glDebugMessageCallbackKHR"/>
+                <command name="glGetDebugMessageLogKHR"/>
+                <command name="glPushDebugGroupKHR"/>
+                <command name="glPopDebugGroupKHR"/>
+                <command name="glObjectLabelKHR"/>
+                <command name="glGetObjectLabelKHR"/>
+                <command name="glObjectPtrLabelKHR"/>
+                <command name="glGetObjectPtrLabelKHR"/>
+                <command name="glGetPointervKHR"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <enum name="GL_DISPLAY_LIST"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_no_error" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_robust_buffer_access_behavior" supported="gl|glcore|gles2"/>
+        <extension name="GL_KHR_robustness" supported="gl|glcore|gles2">
+            <require api="gl" comment="KHR extensions *mandate* suffixes for ES, unlike for GL">
+                <enum name="GL_NO_ERROR"/>
+                <enum name="GL_CONTEXT_ROBUST_ACCESS"/>
+                <enum name="GL_LOSE_CONTEXT_ON_RESET"/>
+                <enum name="GL_GUILTY_CONTEXT_RESET"/>
+                <enum name="GL_INNOCENT_CONTEXT_RESET"/>
+                <enum name="GL_UNKNOWN_CONTEXT_RESET"/>
+                <enum name="GL_RESET_NOTIFICATION_STRATEGY"/>
+                <enum name="GL_NO_RESET_NOTIFICATION"/>
+                <enum name="GL_CONTEXT_LOST"/>
+                <command name="glGetGraphicsResetStatus"/>
+                <command name="glReadnPixels"/>
+                <command name="glGetnUniformfv"/>
+                <command name="glGetnUniformiv"/>
+                <command name="glGetnUniformuiv"/>
+            </require>
+            <require api="gles2">
+                <enum name="GL_NO_ERROR"/>
+                <enum name="GL_CONTEXT_ROBUST_ACCESS_KHR"/>
+                <enum name="GL_LOSE_CONTEXT_ON_RESET_KHR"/>
+                <enum name="GL_GUILTY_CONTEXT_RESET_KHR"/>
+                <enum name="GL_INNOCENT_CONTEXT_RESET_KHR"/>
+                <enum name="GL_UNKNOWN_CONTEXT_RESET_KHR"/>
+                <enum name="GL_RESET_NOTIFICATION_STRATEGY_KHR"/>
+                <enum name="GL_NO_RESET_NOTIFICATION_KHR"/>
+                <enum name="GL_CONTEXT_LOST_KHR"/>
+                <command name="glGetGraphicsResetStatusKHR"/>
+                <command name="glReadnPixelsKHR"/>
+                <command name="glGetnUniformfvKHR"/>
+                <command name="glGetnUniformivKHR"/>
+                <command name="glGetnUniformuivKHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_texture_compression_astc_hdr" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x12_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_texture_compression_astc_ldr" supported="gl|glcore|gles2" comment="API is identical to GL_KHR_texture_compression_astc_hdr extension">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x12_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR"/>
+            </require>
+        </extension>
+        <extension name="GL_KHR_texture_compression_astc_sliced_3d" supported="gl|glcore|gles2"/>
+        <extension name="GL_KHR_parallel_shader_compile" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_MAX_SHADER_COMPILER_THREADS_KHR"/>
+                <enum name="GL_COMPLETION_STATUS_KHR"/>
+                <command name="glMaxShaderCompilerThreadsKHR"/>
+            </require>
+        </extension>
+        <extension name="GL_MESAX_texture_stack" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_1D_STACK_MESAX"/>
+                <enum name="GL_TEXTURE_2D_STACK_MESAX"/>
+                <enum name="GL_PROXY_TEXTURE_1D_STACK_MESAX"/>
+                <enum name="GL_PROXY_TEXTURE_2D_STACK_MESAX"/>
+                <enum name="GL_TEXTURE_1D_STACK_BINDING_MESAX"/>
+                <enum name="GL_TEXTURE_2D_STACK_BINDING_MESAX"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_framebuffer_flip_y" supported="gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_FLIP_Y_MESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_pack_invert" supported="gl">
+            <require>
+                <enum name="GL_PACK_INVERT_MESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_program_binary_formats" supported="gl|gles2">
+            <require>
+                <enum name="GL_PROGRAM_BINARY_FORMAT_MESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_resize_buffers" supported="gl">
+            <require>
+                <command name="glResizeBuffersMESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_shader_integer_functions" supported="gl|gles2"/>
+        <extension name="GL_MESA_tile_raster_order" supported="gl">
+            <require>
+                <enum name="GL_TILE_RASTER_ORDER_FIXED_MESA"/>
+                <enum name="GL_TILE_RASTER_ORDER_INCREASING_X_MESA"/>
+                <enum name="GL_TILE_RASTER_ORDER_INCREASING_Y_MESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_window_pos" supported="gl">
+            <require>
+                <command name="glWindowPos2dMESA"/>
+                <command name="glWindowPos2dvMESA"/>
+                <command name="glWindowPos2fMESA"/>
+                <command name="glWindowPos2fvMESA"/>
+                <command name="glWindowPos2iMESA"/>
+                <command name="glWindowPos2ivMESA"/>
+                <command name="glWindowPos2sMESA"/>
+                <command name="glWindowPos2svMESA"/>
+                <command name="glWindowPos3dMESA"/>
+                <command name="glWindowPos3dvMESA"/>
+                <command name="glWindowPos3fMESA"/>
+                <command name="glWindowPos3fvMESA"/>
+                <command name="glWindowPos3iMESA"/>
+                <command name="glWindowPos3ivMESA"/>
+                <command name="glWindowPos3sMESA"/>
+                <command name="glWindowPos3svMESA"/>
+                <command name="glWindowPos4dMESA"/>
+                <command name="glWindowPos4dvMESA"/>
+                <command name="glWindowPos4fMESA"/>
+                <command name="glWindowPos4fvMESA"/>
+                <command name="glWindowPos4iMESA"/>
+                <command name="glWindowPos4ivMESA"/>
+                <command name="glWindowPos4sMESA"/>
+                <command name="glWindowPos4svMESA"/>
+            </require>
+        </extension>
+        <extension name="GL_MESA_ycbcr_texture" supported="gl">
+            <require>
+                <enum name="GL_UNSIGNED_SHORT_8_8_MESA"/>
+                <enum name="GL_UNSIGNED_SHORT_8_8_REV_MESA"/>
+                <enum name="GL_YCBCR_MESA"/>
+            </require>
+        </extension>
+        <extension name="GL_NVX_blend_equation_advanced_multi_draw_buffers" supported="gl|gles2"/>
+        <extension name="GL_NVX_cross_process_interop" supported="disabled">
+            <require comment="unpublished experimental extension">
+                <enum name="GL_EXTERNAL_STORAGE_BIT_NVX"/>
+            </require>
+        </extension>
+        <extension name="GL_NVX_conditional_render" supported="gl">
+            <require>
+                <command name="glBeginConditionalRenderNVX"/>
+                <command name="glEndConditionalRenderNVX"/>
+            </require>
+        </extension>
+        <extension name="GL_NVX_gpu_memory_info" supported="gl">
+            <require>
+                <enum name="GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX"/>
+                <enum name="GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX"/>
+                <enum name="GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX"/>
+                <enum name="GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX"/>
+                <enum name="GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX"/>
+            </require>
+        </extension>
+        <extension name="GL_NVX_linked_gpu_multicast" supported="gl">
+            <require>
+                <enum name="GL_LGPU_SEPARATE_STORAGE_BIT_NVX"/>
+                <enum name="GL_MAX_LGPU_GPUS_NVX"/>
+                <command name="glLGPUNamedBufferSubDataNVX"/>
+                <command name="glLGPUCopyImageSubDataNVX"/>
+                <command name="glLGPUInterlockNVX"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_alpha_to_coverage_dither_control" supported="gl">
+            <require>
+                <enum name="GL_ALPHA_TO_COVERAGE_DITHER_DEFAULT_NV"/>
+                <enum name="GL_ALPHA_TO_COVERAGE_DITHER_ENABLE_NV"/>
+                <enum name="GL_ALPHA_TO_COVERAGE_DITHER_DISABLE_NV"/>
+                <enum name="GL_ALPHA_TO_COVERAGE_DITHER_MODE_NV"/>
+                <command name="glAlphaToCoverageDitherControlNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_bindless_multi_draw_indirect" supported="gl|glcore">
+            <require>
+                <command name="glMultiDrawArraysIndirectBindlessNV"/>
+                <command name="glMultiDrawElementsIndirectBindlessNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_bindless_multi_draw_indirect_count" supported="gl|glcore">
+            <require>
+                <command name="glMultiDrawArraysIndirectBindlessCountNV"/>
+                <command name="glMultiDrawElementsIndirectBindlessCountNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_bindless_texture" supported="gl|glcore|gles2">
+            <require>
+                <command name="glGetTextureHandleNV"/>
+                <command name="glGetTextureSamplerHandleNV"/>
+                <command name="glMakeTextureHandleResidentNV"/>
+                <command name="glMakeTextureHandleNonResidentNV"/>
+                <command name="glGetImageHandleNV"/>
+                <command name="glMakeImageHandleResidentNV"/>
+                <command name="glMakeImageHandleNonResidentNV"/>
+                <command name="glUniformHandleui64NV"/>
+                <command name="glUniformHandleui64vNV"/>
+                <command name="glProgramUniformHandleui64NV"/>
+                <command name="glProgramUniformHandleui64vNV"/>
+                <command name="glIsTextureHandleResidentNV"/>
+                <command name="glIsImageHandleResidentNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_blend_equation_advanced" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_BLEND_OVERLAP_NV"/>
+                <enum name="GL_BLEND_PREMULTIPLIED_SRC_NV"/>
+                <enum name="GL_BLUE_NV"/>
+                <enum name="GL_COLORBURN_NV"/>
+                <enum name="GL_COLORDODGE_NV"/>
+                <enum name="GL_CONJOINT_NV"/>
+                <enum name="GL_CONTRAST_NV"/>
+                <enum name="GL_DARKEN_NV"/>
+                <enum name="GL_DIFFERENCE_NV"/>
+                <enum name="GL_DISJOINT_NV"/>
+                <enum name="GL_DST_ATOP_NV"/>
+                <enum name="GL_DST_IN_NV"/>
+                <enum name="GL_DST_NV"/>
+                <enum name="GL_DST_OUT_NV"/>
+                <enum name="GL_DST_OVER_NV"/>
+                <enum name="GL_EXCLUSION_NV"/>
+                <enum name="GL_GREEN_NV"/>
+                <enum name="GL_HARDLIGHT_NV"/>
+                <enum name="GL_HARDMIX_NV"/>
+                <enum name="GL_HSL_COLOR_NV"/>
+                <enum name="GL_HSL_HUE_NV"/>
+                <enum name="GL_HSL_LUMINOSITY_NV"/>
+                <enum name="GL_HSL_SATURATION_NV"/>
+                <enum name="GL_INVERT"/>
+                <enum name="GL_INVERT_OVG_NV"/>
+                <enum name="GL_INVERT_RGB_NV"/>
+                <enum name="GL_LIGHTEN_NV"/>
+                <enum name="GL_LINEARBURN_NV"/>
+                <enum name="GL_LINEARDODGE_NV"/>
+                <enum name="GL_LINEARLIGHT_NV"/>
+                <enum name="GL_MINUS_CLAMPED_NV"/>
+                <enum name="GL_MINUS_NV"/>
+                <enum name="GL_MULTIPLY_NV"/>
+                <enum name="GL_OVERLAY_NV"/>
+                <enum name="GL_PINLIGHT_NV"/>
+                <enum name="GL_PLUS_CLAMPED_ALPHA_NV"/>
+                <enum name="GL_PLUS_CLAMPED_NV"/>
+                <enum name="GL_PLUS_DARKER_NV"/>
+                <enum name="GL_PLUS_NV"/>
+                <enum name="GL_RED_NV"/>
+                <enum name="GL_SCREEN_NV"/>
+                <enum name="GL_SOFTLIGHT_NV"/>
+                <enum name="GL_SRC_ATOP_NV"/>
+                <enum name="GL_SRC_IN_NV"/>
+                <enum name="GL_SRC_NV"/>
+                <enum name="GL_SRC_OUT_NV"/>
+                <enum name="GL_SRC_OVER_NV"/>
+                <enum name="GL_UNCORRELATED_NV"/>
+                <enum name="GL_VIVIDLIGHT_NV"/>
+                <enum name="GL_XOR_NV"/>
+                <enum name="GL_ZERO"/>
+                <command name="glBlendParameteriNV"/>
+                <command name="glBlendBarrierNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_blend_equation_advanced_coherent" supported="gl|glcore|gles2">
+            <require comment="Otherwise identical to GL_NV_blend_equation_advanced, just different semantic behavior">
+                <enum name="GL_BLEND_ADVANCED_COHERENT_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_blend_minmax_factor" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FACTOR_MIN_AMD"/>
+                <enum name="GL_FACTOR_MAX_AMD"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_blend_square" supported="gl"/>
+        <extension name="GL_NV_clip_space_w_scaling" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_VIEWPORT_POSITION_W_SCALE_NV"/>
+                <enum name="GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV"/>
+                <enum name="GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV"/>
+                <command name="glViewportPositionWScaleNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_command_list" supported="gl|glcore">
+            <require>
+                <enum name="GL_TERMINATE_SEQUENCE_COMMAND_NV"/>
+                <enum name="GL_NOP_COMMAND_NV"/>
+                <enum name="GL_DRAW_ELEMENTS_COMMAND_NV"/>
+                <enum name="GL_DRAW_ARRAYS_COMMAND_NV"/>
+                <enum name="GL_DRAW_ELEMENTS_STRIP_COMMAND_NV"/>
+                <enum name="GL_DRAW_ARRAYS_STRIP_COMMAND_NV"/>
+                <enum name="GL_DRAW_ELEMENTS_INSTANCED_COMMAND_NV"/>
+                <enum name="GL_DRAW_ARRAYS_INSTANCED_COMMAND_NV"/>
+                <enum name="GL_ELEMENT_ADDRESS_COMMAND_NV"/>
+                <enum name="GL_ATTRIBUTE_ADDRESS_COMMAND_NV"/>
+                <enum name="GL_UNIFORM_ADDRESS_COMMAND_NV"/>
+                <enum name="GL_BLEND_COLOR_COMMAND_NV"/>
+                <enum name="GL_STENCIL_REF_COMMAND_NV"/>
+                <enum name="GL_LINE_WIDTH_COMMAND_NV"/>
+                <enum name="GL_POLYGON_OFFSET_COMMAND_NV"/>
+                <enum name="GL_ALPHA_REF_COMMAND_NV"/>
+                <enum name="GL_VIEWPORT_COMMAND_NV"/>
+                <enum name="GL_SCISSOR_COMMAND_NV"/>
+                <enum name="GL_FRONT_FACE_COMMAND_NV"/>
+                <command name="glCreateStatesNV"/>
+                <command name="glDeleteStatesNV"/>
+                <command name="glIsStateNV"/>
+                <command name="glStateCaptureNV"/>
+                <command name="glGetCommandHeaderNV"/>
+                <command name="glGetStageIndexNV"/>
+                <command name="glDrawCommandsNV"/>
+                <command name="glDrawCommandsAddressNV"/>
+                <command name="glDrawCommandsStatesNV"/>
+                <command name="glDrawCommandsStatesAddressNV"/>
+                <command name="glCreateCommandListsNV"/>
+                <command name="glDeleteCommandListsNV"/>
+                <command name="glIsCommandListNV"/>
+                <command name="glListDrawCommandsStatesClientNV"/>
+                <command name="glCommandListSegmentsNV"/>
+                <command name="glCompileCommandListNV"/>
+                <command name="glCallCommandListNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_compute_program5" supported="gl">
+            <require>
+                <enum name="GL_COMPUTE_PROGRAM_NV"/>
+                <enum name="GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_compute_shader_derivatives" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_conditional_render" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_QUERY_WAIT_NV"/>
+                <enum name="GL_QUERY_NO_WAIT_NV"/>
+                <enum name="GL_QUERY_BY_REGION_WAIT_NV"/>
+                <enum name="GL_QUERY_BY_REGION_NO_WAIT_NV"/>
+                <command name="glBeginConditionalRenderNV"/>
+                <command name="glEndConditionalRenderNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_conservative_raster" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_CONSERVATIVE_RASTERIZATION_NV"/>
+                <enum name="GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV"/>
+                <enum name="GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV"/>
+                <enum name="GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV"/>
+                <command name="glSubpixelPrecisionBiasNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_conservative_raster_dilate" supported="gl|glcore">
+            <require>
+                <enum name="GL_CONSERVATIVE_RASTER_DILATE_NV"/>
+                <enum name="GL_CONSERVATIVE_RASTER_DILATE_RANGE_NV"/>
+                <enum name="GL_CONSERVATIVE_RASTER_DILATE_GRANULARITY_NV"/>
+                <command name="glConservativeRasterParameterfNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_conservative_raster_pre_snap" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_conservative_raster_pre_snap_triangles" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_CONSERVATIVE_RASTER_MODE_NV"/>
+                <enum name="GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV"/>
+                <enum name="GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV"/>
+                <enum name="GL_CONSERVATIVE_RASTER_MODE_NV"/>
+                <command name="glConservativeRasterParameteriNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_conservative_raster_underestimation" supported="gl|glcore"/>
+        <extension name="GL_NV_copy_buffer" supported="gles2">
+            <require>
+                <enum name="GL_COPY_READ_BUFFER_NV"/>
+                <enum name="GL_COPY_WRITE_BUFFER_NV"/>
+                <command name="glCopyBufferSubDataNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_copy_depth_to_color" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_STENCIL_TO_RGBA_NV"/>
+                <enum name="GL_DEPTH_STENCIL_TO_BGRA_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_copy_image" supported="gl">
+            <require>
+                <command name="glCopyImageSubDataNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_coverage_sample" supported="gles2">
+            <require>
+                <enum name="GL_COVERAGE_COMPONENT_NV"/>
+                <enum name="GL_COVERAGE_COMPONENT4_NV"/>
+                <enum name="GL_COVERAGE_ATTACHMENT_NV"/>
+                <enum name="GL_COVERAGE_BUFFERS_NV"/>
+                <enum name="GL_COVERAGE_SAMPLES_NV"/>
+                <enum name="GL_COVERAGE_ALL_FRAGMENTS_NV"/>
+                <enum name="GL_COVERAGE_EDGE_FRAGMENTS_NV"/>
+                <enum name="GL_COVERAGE_AUTOMATIC_NV"/>
+                <enum name="GL_COVERAGE_BUFFER_BIT_NV"/>
+                <command name="glCoverageMaskNV"/>
+                <command name="glCoverageOperationNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_deep_texture3D" supported="gl">
+            <require>
+                <enum name="GL_MAX_DEEP_3D_TEXTURE_WIDTH_HEIGHT_NV"/>
+                <enum name="GL_MAX_DEEP_3D_TEXTURE_DEPTH_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_depth_buffer_float" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT32F_NV"/>
+                <enum name="GL_DEPTH32F_STENCIL8_NV"/>
+                <enum name="GL_FLOAT_32_UNSIGNED_INT_24_8_REV_NV"/>
+                <enum name="GL_DEPTH_BUFFER_FLOAT_MODE_NV"/>
+                <command name="glDepthRangedNV"/>
+                <command name="glClearDepthdNV"/>
+                <command name="glDepthBoundsdNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_depth_clamp" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_CLAMP_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_depth_nonlinear" supported="gles2">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT16_NONLINEAR_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_draw_buffers" supported="gles2">
+            <require>
+                <enum name="GL_MAX_DRAW_BUFFERS_NV"/>
+                <enum name="GL_DRAW_BUFFER0_NV"/>
+                <enum name="GL_DRAW_BUFFER1_NV"/>
+                <enum name="GL_DRAW_BUFFER2_NV"/>
+                <enum name="GL_DRAW_BUFFER3_NV"/>
+                <enum name="GL_DRAW_BUFFER4_NV"/>
+                <enum name="GL_DRAW_BUFFER5_NV"/>
+                <enum name="GL_DRAW_BUFFER6_NV"/>
+                <enum name="GL_DRAW_BUFFER7_NV"/>
+                <enum name="GL_DRAW_BUFFER8_NV"/>
+                <enum name="GL_DRAW_BUFFER9_NV"/>
+                <enum name="GL_DRAW_BUFFER10_NV"/>
+                <enum name="GL_DRAW_BUFFER11_NV"/>
+                <enum name="GL_DRAW_BUFFER12_NV"/>
+                <enum name="GL_DRAW_BUFFER13_NV"/>
+                <enum name="GL_DRAW_BUFFER14_NV"/>
+                <enum name="GL_DRAW_BUFFER15_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT0_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT1_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT2_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT3_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT4_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT5_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT6_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT7_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT8_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT9_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT10_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT11_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT12_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT13_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT14_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT15_NV"/>
+                <command name="glDrawBuffersNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_draw_instanced" supported="gles2">
+            <require>
+                <command name="glDrawArraysInstancedNV"/>
+                <command name="glDrawElementsInstancedNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_draw_texture" supported="gl">
+            <require>
+                <command name="glDrawTextureNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_draw_vulkan_image" supported="gl|glcore|gles2">
+            <require>
+                <command name="glDrawVkImageNV"/>
+                <command name="glGetVkProcAddrNV"/>
+                <command name="glWaitVkSemaphoreNV"/>
+                <command name="glSignalVkSemaphoreNV"/>
+                <command name="glSignalVkFenceNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_evaluators" supported="gl">
+            <require>
+                <enum name="GL_EVAL_2D_NV"/>
+                <enum name="GL_EVAL_TRIANGULAR_2D_NV"/>
+                <enum name="GL_MAP_TESSELLATION_NV"/>
+                <enum name="GL_MAP_ATTRIB_U_ORDER_NV"/>
+                <enum name="GL_MAP_ATTRIB_V_ORDER_NV"/>
+                <enum name="GL_EVAL_FRACTIONAL_TESSELLATION_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB0_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB1_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB2_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB3_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB4_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB5_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB6_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB7_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB8_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB9_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB10_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB11_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB12_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB13_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB14_NV"/>
+                <enum name="GL_EVAL_VERTEX_ATTRIB15_NV"/>
+                <enum name="GL_MAX_MAP_TESSELLATION_NV"/>
+                <enum name="GL_MAX_RATIONAL_EVAL_ORDER_NV"/>
+                <command name="glMapControlPointsNV"/>
+                <command name="glMapParameterivNV"/>
+                <command name="glMapParameterfvNV"/>
+                <command name="glGetMapControlPointsNV"/>
+                <command name="glGetMapParameterivNV"/>
+                <command name="glGetMapParameterfvNV"/>
+                <command name="glGetMapAttribParameterivNV"/>
+                <command name="glGetMapAttribParameterfvNV"/>
+                <command name="glEvalMapsNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_explicit_attrib_location" supported="gles2"/>
+        <extension name="GL_NV_explicit_multisample" supported="gl">
+            <require>
+                <enum name="GL_SAMPLE_POSITION_NV"/>
+                <enum name="GL_SAMPLE_MASK_NV"/>
+                <enum name="GL_SAMPLE_MASK_VALUE_NV"/>
+                <enum name="GL_TEXTURE_BINDING_RENDERBUFFER_NV"/>
+                <enum name="GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV"/>
+                <enum name="GL_TEXTURE_RENDERBUFFER_NV"/>
+                <enum name="GL_SAMPLER_RENDERBUFFER_NV"/>
+                <enum name="GL_INT_SAMPLER_RENDERBUFFER_NV"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV"/>
+                <enum name="GL_MAX_SAMPLE_MASK_WORDS_NV"/>
+                <command name="glGetMultisamplefvNV"/>
+                <command name="glSampleMaskIndexedNV"/>
+                <command name="glTexRenderbufferNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fbo_color_attachments" supported="gles2">
+            <require>
+                <enum name="GL_MAX_COLOR_ATTACHMENTS_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT0_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT1_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT2_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT3_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT4_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT5_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT6_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT7_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT8_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT9_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT10_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT11_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT12_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT13_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT14_NV"/>
+                <enum name="GL_COLOR_ATTACHMENT15_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fence" supported="gl|gles1|gles2">
+            <require>
+                <enum name="GL_ALL_COMPLETED_NV"/>
+                <enum name="GL_FENCE_STATUS_NV"/>
+                <enum name="GL_FENCE_CONDITION_NV"/>
+                <command name="glDeleteFencesNV"/>
+                <command name="glGenFencesNV"/>
+                <command name="glIsFenceNV"/>
+                <command name="glTestFenceNV"/>
+                <command name="glGetFenceivNV"/>
+                <command name="glFinishFenceNV"/>
+                <command name="glSetFenceNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fill_rectangle" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FILL_RECTANGLE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_float_buffer" supported="gl">
+            <require>
+                <enum name="GL_FLOAT_R_NV"/>
+                <enum name="GL_FLOAT_RG_NV"/>
+                <enum name="GL_FLOAT_RGB_NV"/>
+                <enum name="GL_FLOAT_RGBA_NV"/>
+                <enum name="GL_FLOAT_R16_NV"/>
+                <enum name="GL_FLOAT_R32_NV"/>
+                <enum name="GL_FLOAT_RG16_NV"/>
+                <enum name="GL_FLOAT_RG32_NV"/>
+                <enum name="GL_FLOAT_RGB16_NV"/>
+                <enum name="GL_FLOAT_RGB32_NV"/>
+                <enum name="GL_FLOAT_RGBA16_NV"/>
+                <enum name="GL_FLOAT_RGBA32_NV"/>
+                <enum name="GL_TEXTURE_FLOAT_COMPONENTS_NV"/>
+                <enum name="GL_FLOAT_CLEAR_COLOR_VALUE_NV"/>
+                <enum name="GL_FLOAT_RGBA_MODE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fog_distance" supported="gl">
+            <require>
+                <enum name="GL_FOG_DISTANCE_MODE_NV"/>
+                <enum name="GL_EYE_RADIAL_NV"/>
+                <enum name="GL_EYE_PLANE_ABSOLUTE_NV"/>
+                <enum name="GL_EYE_PLANE"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fragment_coverage_to_color" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FRAGMENT_COVERAGE_TO_COLOR_NV"/>
+                <enum name="GL_FRAGMENT_COVERAGE_COLOR_NV"/>
+                <command name="glFragmentCoverageColorNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fragment_program" supported="gl">
+            <require>
+                <enum name="GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV"/>
+                <enum name="GL_FRAGMENT_PROGRAM_NV"/>
+                <enum name="GL_MAX_TEXTURE_COORDS_NV"/>
+                <enum name="GL_MAX_TEXTURE_IMAGE_UNITS_NV"/>
+                <enum name="GL_FRAGMENT_PROGRAM_BINDING_NV"/>
+                <enum name="GL_PROGRAM_ERROR_STRING_NV"/>
+            </require>
+            <require comment="Some NV_fragment_program entry points are shared with ARB_vertex_program">
+                <command name="glProgramNamedParameter4fNV"/>
+                <command name="glProgramNamedParameter4fvNV"/>
+                <command name="glProgramNamedParameter4dNV"/>
+                <command name="glProgramNamedParameter4dvNV"/>
+                <command name="glGetProgramNamedParameterfvNV"/>
+                <command name="glGetProgramNamedParameterdvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fragment_program2" supported="gl">
+            <require>
+                <enum name="GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV"/>
+                <enum name="GL_MAX_PROGRAM_CALL_DEPTH_NV"/>
+                <enum name="GL_MAX_PROGRAM_IF_DEPTH_NV"/>
+                <enum name="GL_MAX_PROGRAM_LOOP_DEPTH_NV"/>
+                <enum name="GL_MAX_PROGRAM_LOOP_COUNT_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_fragment_program4" supported="gl"/>
+        <extension name="GL_NV_fragment_program_option" supported="gl"/>
+        <extension name="GL_NV_fragment_shader_barycentric" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_fragment_shader_interlock" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_framebuffer_blit" supported="gles2">
+            <require>
+                <enum name="GL_READ_FRAMEBUFFER_NV"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_NV"/>
+                <enum name="GL_DRAW_FRAMEBUFFER_BINDING_NV"/>
+                <enum name="GL_READ_FRAMEBUFFER_BINDING_NV"/>
+                <command name="glBlitFramebufferNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_framebuffer_mixed_samples" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_RASTER_MULTISAMPLE_EXT"/>
+                <enum name="GL_COVERAGE_MODULATION_TABLE_NV"/>
+                <enum name="GL_RASTER_SAMPLES_EXT"/>
+                <enum name="GL_MAX_RASTER_SAMPLES_EXT"/>
+                <enum name="GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT"/>
+                <enum name="GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT"/>
+                <enum name="GL_EFFECTIVE_RASTER_SAMPLES_EXT"/>
+                <enum name="GL_COLOR_SAMPLES_NV"/>
+                <enum name="GL_DEPTH_SAMPLES_NV"/>
+                <enum name="GL_STENCIL_SAMPLES_NV"/>
+                <enum name="GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV"/>
+                <enum name="GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV"/>
+                <enum name="GL_COVERAGE_MODULATION_NV"/>
+                <enum name="GL_COVERAGE_MODULATION_TABLE_SIZE_NV"/>
+                <command name="glRasterSamplesEXT"/>
+                <command name="glCoverageModulationTableNV"/>
+                <command name="glGetCoverageModulationTableNV"/>
+                <command name="glCoverageModulationNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_framebuffer_multisample" supported="gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER_SAMPLES_NV"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_NV"/>
+                <enum name="GL_MAX_SAMPLES_NV"/>
+                <command name="glRenderbufferStorageMultisampleNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_framebuffer_multisample_coverage" supported="gl|glcore">
+            <require>
+                <enum name="GL_RENDERBUFFER_COVERAGE_SAMPLES_NV"/>
+                <enum name="GL_RENDERBUFFER_COLOR_SAMPLES_NV"/>
+                <enum name="GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV"/>
+                <enum name="GL_MULTISAMPLE_COVERAGE_MODES_NV"/>
+                <command name="glRenderbufferStorageMultisampleCoverageNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_generate_mipmap_sRGB" supported="gles2"/>
+        <extension name="GL_NV_geometry_program4" supported="gl">
+            <require>
+                <enum name="GL_LINES_ADJACENCY_EXT"/>
+                <enum name="GL_LINE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLES_ADJACENCY_EXT"/>
+                <enum name="GL_TRIANGLE_STRIP_ADJACENCY_EXT"/>
+                <enum name="GL_GEOMETRY_PROGRAM_NV"/>
+                <enum name="GL_MAX_PROGRAM_OUTPUT_VERTICES_NV"/>
+                <enum name="GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV"/>
+                <enum name="GL_GEOMETRY_VERTICES_OUT_EXT"/>
+                <enum name="GL_GEOMETRY_INPUT_TYPE_EXT"/>
+                <enum name="GL_GEOMETRY_OUTPUT_TYPE_EXT"/>
+                <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT"/>
+                <enum name="GL_PROGRAM_POINT_SIZE_EXT"/>
+                <command name="glProgramVertexLimitNV"/>
+                <command name="glFramebufferTextureEXT"/>
+                <command name="glFramebufferTextureLayerEXT"/>
+                <command name="glFramebufferTextureFaceEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_geometry_shader4" supported="gl"/>
+        <extension name="GL_NV_geometry_shader_passthrough" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_gpu_program4" supported="gl">
+            <require>
+                <enum name="GL_MIN_PROGRAM_TEXEL_OFFSET_NV"/>
+                <enum name="GL_MAX_PROGRAM_TEXEL_OFFSET_NV"/>
+                <enum name="GL_PROGRAM_ATTRIB_COMPONENTS_NV"/>
+                <enum name="GL_PROGRAM_RESULT_COMPONENTS_NV"/>
+                <enum name="GL_MAX_PROGRAM_ATTRIB_COMPONENTS_NV"/>
+                <enum name="GL_MAX_PROGRAM_RESULT_COMPONENTS_NV"/>
+                <enum name="GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV"/>
+                <enum name="GL_MAX_PROGRAM_GENERIC_RESULTS_NV"/>
+                <command name="glProgramLocalParameterI4iNV"/>
+                <command name="glProgramLocalParameterI4ivNV"/>
+                <command name="glProgramLocalParametersI4ivNV"/>
+                <command name="glProgramLocalParameterI4uiNV"/>
+                <command name="glProgramLocalParameterI4uivNV"/>
+                <command name="glProgramLocalParametersI4uivNV"/>
+                <command name="glProgramEnvParameterI4iNV"/>
+                <command name="glProgramEnvParameterI4ivNV"/>
+                <command name="glProgramEnvParametersI4ivNV"/>
+                <command name="glProgramEnvParameterI4uiNV"/>
+                <command name="glProgramEnvParameterI4uivNV"/>
+                <command name="glProgramEnvParametersI4uivNV"/>
+                <command name="glGetProgramLocalParameterIivNV"/>
+                <command name="glGetProgramLocalParameterIuivNV"/>
+                <command name="glGetProgramEnvParameterIivNV"/>
+                <command name="glGetProgramEnvParameterIuivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_gpu_program5" supported="gl">
+            <require>
+                <enum name="GL_MAX_GEOMETRY_PROGRAM_INVOCATIONS_NV"/>
+                <enum name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_NV"/>
+                <enum name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_NV"/>
+                <enum name="GL_FRAGMENT_PROGRAM_INTERPOLATION_OFFSET_BITS_NV"/>
+                <enum name="GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_NV"/>
+                <enum name="GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_NV"/>
+                <enum name="GL_MAX_PROGRAM_SUBROUTINE_PARAMETERS_NV"/>
+                <enum name="GL_MAX_PROGRAM_SUBROUTINE_NUM_NV"/>
+                <command name="glProgramSubroutineParametersuivNV"/>
+                <command name="glGetProgramSubroutineParameteruivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_gpu_program5_mem_extended" supported="gl"/>
+        <extension name="GL_NV_gpu_shader5" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_INT64_NV"/>
+                <enum name="GL_UNSIGNED_INT64_NV"/>
+                <enum name="GL_INT8_NV"/>
+                <enum name="GL_INT8_VEC2_NV"/>
+                <enum name="GL_INT8_VEC3_NV"/>
+                <enum name="GL_INT8_VEC4_NV"/>
+                <enum name="GL_INT16_NV"/>
+                <enum name="GL_INT16_VEC2_NV"/>
+                <enum name="GL_INT16_VEC3_NV"/>
+                <enum name="GL_INT16_VEC4_NV"/>
+                <enum name="GL_INT64_VEC2_NV"/>
+                <enum name="GL_INT64_VEC3_NV"/>
+                <enum name="GL_INT64_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT8_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT8_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT16_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT16_VEC4_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC2_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC3_NV"/>
+                <enum name="GL_UNSIGNED_INT64_VEC4_NV"/>
+                <enum name="GL_FLOAT16_NV"/>
+                <enum name="GL_FLOAT16_VEC2_NV"/>
+                <enum name="GL_FLOAT16_VEC3_NV"/>
+                <enum name="GL_FLOAT16_VEC4_NV"/>
+                <enum name="GL_PATCHES"/>
+                <command name="glUniform1i64NV"/>
+                <command name="glUniform2i64NV"/>
+                <command name="glUniform3i64NV"/>
+                <command name="glUniform4i64NV"/>
+                <command name="glUniform1i64vNV"/>
+                <command name="glUniform2i64vNV"/>
+                <command name="glUniform3i64vNV"/>
+                <command name="glUniform4i64vNV"/>
+                <command name="glUniform1ui64NV"/>
+                <command name="glUniform2ui64NV"/>
+                <command name="glUniform3ui64NV"/>
+                <command name="glUniform4ui64NV"/>
+                <command name="glUniform1ui64vNV"/>
+                <command name="glUniform2ui64vNV"/>
+                <command name="glUniform3ui64vNV"/>
+                <command name="glUniform4ui64vNV"/>
+                <command name="glGetUniformi64vNV"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glProgramUniform1i64NV"/>
+                <command name="glProgramUniform2i64NV"/>
+                <command name="glProgramUniform3i64NV"/>
+                <command name="glProgramUniform4i64NV"/>
+                <command name="glProgramUniform1i64vNV"/>
+                <command name="glProgramUniform2i64vNV"/>
+                <command name="glProgramUniform3i64vNV"/>
+                <command name="glProgramUniform4i64vNV"/>
+                <command name="glProgramUniform1ui64NV"/>
+                <command name="glProgramUniform2ui64NV"/>
+                <command name="glProgramUniform3ui64NV"/>
+                <command name="glProgramUniform4ui64NV"/>
+                <command name="glProgramUniform1ui64vNV"/>
+                <command name="glProgramUniform2ui64vNV"/>
+                <command name="glProgramUniform3ui64vNV"/>
+                <command name="glProgramUniform4ui64vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_half_float" supported="gl">
+            <require>
+                <enum name="GL_HALF_FLOAT_NV"/>
+                <command name="glVertex2hNV"/>
+                <command name="glVertex2hvNV"/>
+                <command name="glVertex3hNV"/>
+                <command name="glVertex3hvNV"/>
+                <command name="glVertex4hNV"/>
+                <command name="glVertex4hvNV"/>
+                <command name="glNormal3hNV"/>
+                <command name="glNormal3hvNV"/>
+                <command name="glColor3hNV"/>
+                <command name="glColor3hvNV"/>
+                <command name="glColor4hNV"/>
+                <command name="glColor4hvNV"/>
+                <command name="glTexCoord1hNV"/>
+                <command name="glTexCoord1hvNV"/>
+                <command name="glTexCoord2hNV"/>
+                <command name="glTexCoord2hvNV"/>
+                <command name="glTexCoord3hNV"/>
+                <command name="glTexCoord3hvNV"/>
+                <command name="glTexCoord4hNV"/>
+                <command name="glTexCoord4hvNV"/>
+                <command name="glMultiTexCoord1hNV"/>
+                <command name="glMultiTexCoord1hvNV"/>
+                <command name="glMultiTexCoord2hNV"/>
+                <command name="glMultiTexCoord2hvNV"/>
+                <command name="glMultiTexCoord3hNV"/>
+                <command name="glMultiTexCoord3hvNV"/>
+                <command name="glMultiTexCoord4hNV"/>
+                <command name="glMultiTexCoord4hvNV"/>
+                <command name="glFogCoordhNV"/>
+                <command name="glFogCoordhvNV"/>
+                <command name="glSecondaryColor3hNV"/>
+                <command name="glSecondaryColor3hvNV"/>
+                <command name="glVertexWeighthNV"/>
+                <command name="glVertexWeighthvNV"/>
+                <command name="glVertexAttrib1hNV"/>
+                <command name="glVertexAttrib1hvNV"/>
+                <command name="glVertexAttrib2hNV"/>
+                <command name="glVertexAttrib2hvNV"/>
+                <command name="glVertexAttrib3hNV"/>
+                <command name="glVertexAttrib3hvNV"/>
+                <command name="glVertexAttrib4hNV"/>
+                <command name="glVertexAttrib4hvNV"/>
+                <command name="glVertexAttribs1hvNV"/>
+                <command name="glVertexAttribs2hvNV"/>
+                <command name="glVertexAttribs3hvNV"/>
+                <command name="glVertexAttribs4hvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_image_formats" supported="gles2"/>
+        <extension name="GL_NV_instanced_arrays" supported="gles2">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_DIVISOR_NV"/>
+                <command name="glVertexAttribDivisorNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_internalformat_sample_query" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_RENDERBUFFER"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE"/>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY"/>
+                <enum name="GL_MULTISAMPLES_NV"/>
+                <enum name="GL_SUPERSAMPLE_SCALE_X_NV"/>
+                <enum name="GL_SUPERSAMPLE_SCALE_Y_NV"/>
+                <enum name="GL_CONFORMANT_NV"/>
+                <command name="glGetInternalformatSampleivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_light_max_exponent" supported="gl">
+            <require>
+                <enum name="GL_MAX_SHININESS_NV"/>
+                <enum name="GL_MAX_SPOT_EXPONENT_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_gpu_multicast" supported="gl">
+            <require>
+                <enum name="GL_PER_GPU_STORAGE_BIT_NV"/>
+                <enum name="GL_MULTICAST_GPUS_NV"/>
+                <enum name="GL_RENDER_GPU_MASK_NV"/>
+                <enum name="GL_PER_GPU_STORAGE_NV"/>
+                <enum name="GL_MULTICAST_PROGRAMMABLE_SAMPLE_LOCATION_NV"/>
+                <command name="glRenderGpuMaskNV"/>
+                <command name="glMulticastBufferSubDataNV"/>
+                <command name="glMulticastCopyBufferSubDataNV"/>
+                <command name="glMulticastCopyImageSubDataNV"/>
+                <command name="glMulticastBlitFramebufferNV"/>
+                <command name="glMulticastFramebufferSampleLocationsfvNV"/>
+                <command name="glMulticastBarrierNV"/>
+                <command name="glMulticastWaitSyncNV"/>
+                <command name="glMulticastGetQueryObjectivNV"/>
+                <command name="glMulticastGetQueryObjectuivNV"/>
+                <command name="glMulticastGetQueryObjecti64vNV"/>
+                <command name="glMulticastGetQueryObjectui64vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_memory_attachment" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_ATTACHED_MEMORY_OBJECT_NV"/>
+                <enum name="GL_ATTACHED_MEMORY_OFFSET_NV"/>
+                <enum name="GL_MEMORY_ATTACHABLE_ALIGNMENT_NV"/>
+                <enum name="GL_MEMORY_ATTACHABLE_SIZE_NV"/>
+                <enum name="GL_MEMORY_ATTACHABLE_NV"/>
+                <enum name="GL_DETACHED_MEMORY_INCARNATION_NV"/>
+                <enum name="GL_DETACHED_TEXTURES_NV"/>
+                <enum name="GL_DETACHED_BUFFERS_NV"/>
+                <enum name="GL_MAX_DETACHED_TEXTURES_NV"/>
+                <enum name="GL_MAX_DETACHED_BUFFERS_NV"/>
+                <command name="glGetMemoryObjectDetachedResourcesuivNV"/>
+                <command name="glResetMemoryObjectParameterNV"/>
+                <command name="glTexAttachMemoryNV"/>
+                <command name="glBufferAttachMemoryNV"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glTextureAttachMemoryNV"/>
+                <command name="glNamedBufferAttachMemoryNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_mesh_shader" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_MESH_SHADER_NV"/>
+                <enum name="GL_TASK_SHADER_NV"/>
+                <enum name="GL_MAX_MESH_UNIFORM_BLOCKS_NV"/>
+                <enum name="GL_MAX_MESH_TEXTURE_IMAGE_UNITS_NV"/>
+                <enum name="GL_MAX_MESH_IMAGE_UNIFORMS_NV"/>
+                <enum name="GL_MAX_MESH_UNIFORM_COMPONENTS_NV"/>
+                <enum name="GL_MAX_MESH_ATOMIC_COUNTER_BUFFERS_NV"/>
+                <enum name="GL_MAX_MESH_ATOMIC_COUNTERS_NV"/>
+                <enum name="GL_MAX_MESH_SHADER_STORAGE_BLOCKS_NV"/>
+                <enum name="GL_MAX_COMBINED_MESH_UNIFORM_COMPONENTS_NV"/>
+                <enum name="GL_MAX_TASK_UNIFORM_BLOCKS_NV"/>
+                <enum name="GL_MAX_TASK_TEXTURE_IMAGE_UNITS_NV"/>
+                <enum name="GL_MAX_TASK_IMAGE_UNIFORMS_NV"/>
+                <enum name="GL_MAX_TASK_UNIFORM_COMPONENTS_NV"/>
+                <enum name="GL_MAX_TASK_ATOMIC_COUNTER_BUFFERS_NV"/>
+                <enum name="GL_MAX_TASK_ATOMIC_COUNTERS_NV"/>
+                <enum name="GL_MAX_TASK_SHADER_STORAGE_BLOCKS_NV"/>
+                <enum name="GL_MAX_COMBINED_TASK_UNIFORM_COMPONENTS_NV"/>
+                <enum name="GL_MAX_MESH_WORK_GROUP_INVOCATIONS_NV"/>
+                <enum name="GL_MAX_TASK_WORK_GROUP_INVOCATIONS_NV"/>
+                <enum name="GL_MAX_MESH_TOTAL_MEMORY_SIZE_NV"/>
+                <enum name="GL_MAX_TASK_TOTAL_MEMORY_SIZE_NV"/>
+                <enum name="GL_MAX_MESH_OUTPUT_VERTICES_NV"/>
+                <enum name="GL_MAX_MESH_OUTPUT_PRIMITIVES_NV"/>
+                <enum name="GL_MAX_TASK_OUTPUT_COUNT_NV"/>
+                <enum name="GL_MAX_DRAW_MESH_TASKS_COUNT_NV"/>
+                <enum name="GL_MAX_MESH_VIEWS_NV"/>
+                <enum name="GL_MESH_OUTPUT_PER_VERTEX_GRANULARITY_NV"/>
+                <enum name="GL_MESH_OUTPUT_PER_PRIMITIVE_GRANULARITY_NV"/>
+                <enum name="GL_MAX_MESH_WORK_GROUP_SIZE_NV"/>
+                <enum name="GL_MAX_TASK_WORK_GROUP_SIZE_NV"/>
+                <enum name="GL_MESH_WORK_GROUP_SIZE_NV"/>
+                <enum name="GL_TASK_WORK_GROUP_SIZE_NV"/>
+                <enum name="GL_MESH_VERTICES_OUT_NV"/>
+                <enum name="GL_MESH_PRIMITIVES_OUT_NV"/>
+                <enum name="GL_MESH_OUTPUT_TYPE_NV"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_MESH_SHADER_NV"/>
+                <enum name="GL_UNIFORM_BLOCK_REFERENCED_BY_TASK_SHADER_NV"/>
+                <enum name="GL_REFERENCED_BY_MESH_SHADER_NV"/>
+                <enum name="GL_REFERENCED_BY_TASK_SHADER_NV"/>
+                <enum name="GL_MESH_SHADER_BIT_NV"/>
+                <enum name="GL_TASK_SHADER_BIT_NV"/>
+                <command name="glDrawMeshTasksNV"/>
+                <command name="glDrawMeshTasksIndirectNV"/>
+                <command name="glMultiDrawMeshTasksIndirectNV"/>
+                <command name="glMultiDrawMeshTasksIndirectCountNV"/>
+            </require>
+            <require comment="Supported only in OpenGL">
+                <enum name="GL_MESH_SUBROUTINE_NV"/>
+                <enum name="GL_TASK_SUBROUTINE_NV"/>
+                <enum name="GL_MESH_SUBROUTINE_UNIFORM_NV"/>
+                <enum name="GL_TASK_SUBROUTINE_UNIFORM_NV"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_MESH_SHADER_NV"/>
+                <enum name="GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TASK_SHADER_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_multisample_coverage" supported="gl">
+            <require>
+                <enum name="GL_SAMPLES_ARB"/>
+                <enum name="GL_COLOR_SAMPLES_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_multisample_filter_hint" supported="gl">
+            <require>
+                <enum name="GL_MULTISAMPLE_FILTER_HINT_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_non_square_matrices" supported="gles2">
+            <require>
+                <enum name="GL_FLOAT_MAT2x3_NV"/>
+                <enum name="GL_FLOAT_MAT2x4_NV"/>
+                <enum name="GL_FLOAT_MAT3x2_NV"/>
+                <enum name="GL_FLOAT_MAT3x4_NV"/>
+                <enum name="GL_FLOAT_MAT4x2_NV"/>
+                <enum name="GL_FLOAT_MAT4x3_NV"/>
+                <command name="glUniformMatrix2x3fvNV"/>
+                <command name="glUniformMatrix3x2fvNV"/>
+                <command name="glUniformMatrix2x4fvNV"/>
+                <command name="glUniformMatrix4x2fvNV"/>
+                <command name="glUniformMatrix3x4fvNV"/>
+                <command name="glUniformMatrix4x3fvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_occlusion_query" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_COUNTER_BITS_NV"/>
+                <enum name="GL_CURRENT_OCCLUSION_QUERY_ID_NV"/>
+                <enum name="GL_PIXEL_COUNT_NV"/>
+                <enum name="GL_PIXEL_COUNT_AVAILABLE_NV"/>
+                <command name="glGenOcclusionQueriesNV"/>
+                <command name="glDeleteOcclusionQueriesNV"/>
+                <command name="glIsOcclusionQueryNV"/>
+                <command name="glBeginOcclusionQueryNV"/>
+                <command name="glEndOcclusionQueryNV"/>
+                <command name="glGetOcclusionQueryivNV"/>
+                <command name="glGetOcclusionQueryuivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_packed_depth_stencil" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_STENCIL_NV"/>
+                <enum name="GL_UNSIGNED_INT_24_8_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_parameter_buffer_object" supported="gl">
+            <require>
+                <enum name="GL_MAX_PROGRAM_PARAMETER_BUFFER_BINDINGS_NV"/>
+                <enum name="GL_MAX_PROGRAM_PARAMETER_BUFFER_SIZE_NV"/>
+                <enum name="GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV"/>
+                <enum name="GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV"/>
+                <enum name="GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV"/>
+                <command name="glProgramBufferParametersfvNV"/>
+                <command name="glProgramBufferParametersIivNV"/>
+                <command name="glProgramBufferParametersIuivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_parameter_buffer_object2" supported="gl"/>
+        <extension name="GL_NV_path_rendering" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_PATH_FORMAT_SVG_NV"/>
+                <enum name="GL_PATH_FORMAT_PS_NV"/>
+                <enum name="GL_STANDARD_FONT_NAME_NV"/>
+                <enum name="GL_SYSTEM_FONT_NAME_NV"/>
+                <enum name="GL_FILE_NAME_NV"/>
+                <enum name="GL_PATH_STROKE_WIDTH_NV"/>
+                <enum name="GL_PATH_END_CAPS_NV"/>
+                <enum name="GL_PATH_INITIAL_END_CAP_NV"/>
+                <enum name="GL_PATH_TERMINAL_END_CAP_NV"/>
+                <enum name="GL_PATH_JOIN_STYLE_NV"/>
+                <enum name="GL_PATH_MITER_LIMIT_NV"/>
+                <enum name="GL_PATH_DASH_CAPS_NV"/>
+                <enum name="GL_PATH_INITIAL_DASH_CAP_NV"/>
+                <enum name="GL_PATH_TERMINAL_DASH_CAP_NV"/>
+                <enum name="GL_PATH_DASH_OFFSET_NV"/>
+                <enum name="GL_PATH_CLIENT_LENGTH_NV"/>
+                <enum name="GL_PATH_FILL_MODE_NV"/>
+                <enum name="GL_PATH_FILL_MASK_NV"/>
+                <enum name="GL_PATH_FILL_COVER_MODE_NV"/>
+                <enum name="GL_PATH_STROKE_COVER_MODE_NV"/>
+                <enum name="GL_PATH_STROKE_MASK_NV"/>
+                <enum name="GL_COUNT_UP_NV"/>
+                <enum name="GL_COUNT_DOWN_NV"/>
+                <enum name="GL_PATH_OBJECT_BOUNDING_BOX_NV"/>
+                <enum name="GL_CONVEX_HULL_NV"/>
+                <enum name="GL_BOUNDING_BOX_NV"/>
+                <enum name="GL_TRANSLATE_X_NV"/>
+                <enum name="GL_TRANSLATE_Y_NV"/>
+                <enum name="GL_TRANSLATE_2D_NV"/>
+                <enum name="GL_TRANSLATE_3D_NV"/>
+                <enum name="GL_AFFINE_2D_NV"/>
+                <enum name="GL_AFFINE_3D_NV"/>
+                <enum name="GL_TRANSPOSE_AFFINE_2D_NV"/>
+                <enum name="GL_TRANSPOSE_AFFINE_3D_NV"/>
+                <enum name="GL_UTF8_NV"/>
+                <enum name="GL_UTF16_NV"/>
+                <enum name="GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV"/>
+                <enum name="GL_PATH_COMMAND_COUNT_NV"/>
+                <enum name="GL_PATH_COORD_COUNT_NV"/>
+                <enum name="GL_PATH_DASH_ARRAY_COUNT_NV"/>
+                <enum name="GL_PATH_COMPUTED_LENGTH_NV"/>
+                <enum name="GL_PATH_FILL_BOUNDING_BOX_NV"/>
+                <enum name="GL_PATH_STROKE_BOUNDING_BOX_NV"/>
+                <enum name="GL_SQUARE_NV"/>
+                <enum name="GL_ROUND_NV"/>
+                <enum name="GL_TRIANGULAR_NV"/>
+                <enum name="GL_BEVEL_NV"/>
+                <enum name="GL_MITER_REVERT_NV"/>
+                <enum name="GL_MITER_TRUNCATE_NV"/>
+                <enum name="GL_SKIP_MISSING_GLYPH_NV"/>
+                <enum name="GL_USE_MISSING_GLYPH_NV"/>
+                <enum name="GL_PATH_ERROR_POSITION_NV"/>
+                <enum name="GL_ACCUM_ADJACENT_PAIRS_NV"/>
+                <enum name="GL_ADJACENT_PAIRS_NV"/>
+                <enum name="GL_FIRST_TO_REST_NV"/>
+                <enum name="GL_PATH_GEN_MODE_NV"/>
+                <enum name="GL_PATH_GEN_COEFF_NV"/>
+                <enum name="GL_PATH_GEN_COMPONENTS_NV"/>
+                <enum name="GL_PATH_STENCIL_FUNC_NV"/>
+                <enum name="GL_PATH_STENCIL_REF_NV"/>
+                <enum name="GL_PATH_STENCIL_VALUE_MASK_NV"/>
+                <enum name="GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV"/>
+                <enum name="GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV"/>
+                <enum name="GL_PATH_COVER_DEPTH_FUNC_NV"/>
+                <enum name="GL_PATH_DASH_OFFSET_RESET_NV"/>
+                <enum name="GL_MOVE_TO_RESETS_NV"/>
+                <enum name="GL_MOVE_TO_CONTINUES_NV"/>
+                <enum name="GL_CLOSE_PATH_NV"/>
+                <enum name="GL_MOVE_TO_NV"/>
+                <enum name="GL_RELATIVE_MOVE_TO_NV"/>
+                <enum name="GL_LINE_TO_NV"/>
+                <enum name="GL_RELATIVE_LINE_TO_NV"/>
+                <enum name="GL_HORIZONTAL_LINE_TO_NV"/>
+                <enum name="GL_RELATIVE_HORIZONTAL_LINE_TO_NV"/>
+                <enum name="GL_VERTICAL_LINE_TO_NV"/>
+                <enum name="GL_RELATIVE_VERTICAL_LINE_TO_NV"/>
+                <enum name="GL_QUADRATIC_CURVE_TO_NV"/>
+                <enum name="GL_RELATIVE_QUADRATIC_CURVE_TO_NV"/>
+                <enum name="GL_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_RELATIVE_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+                <enum name="GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV"/>
+                <enum name="GL_SMOOTH_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_SMALL_CCW_ARC_TO_NV"/>
+                <enum name="GL_RELATIVE_SMALL_CCW_ARC_TO_NV"/>
+                <enum name="GL_SMALL_CW_ARC_TO_NV"/>
+                <enum name="GL_RELATIVE_SMALL_CW_ARC_TO_NV"/>
+                <enum name="GL_LARGE_CCW_ARC_TO_NV"/>
+                <enum name="GL_RELATIVE_LARGE_CCW_ARC_TO_NV"/>
+                <enum name="GL_LARGE_CW_ARC_TO_NV"/>
+                <enum name="GL_RELATIVE_LARGE_CW_ARC_TO_NV"/>
+                <enum name="GL_RESTART_PATH_NV"/>
+                <enum name="GL_DUP_FIRST_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_DUP_LAST_CUBIC_CURVE_TO_NV"/>
+                <enum name="GL_RECT_NV"/>
+                <enum name="GL_CIRCULAR_CCW_ARC_TO_NV"/>
+                <enum name="GL_CIRCULAR_CW_ARC_TO_NV"/>
+                <enum name="GL_CIRCULAR_TANGENT_ARC_TO_NV"/>
+                <enum name="GL_ARC_TO_NV"/>
+                <enum name="GL_RELATIVE_ARC_TO_NV"/>
+                <enum name="GL_BOLD_BIT_NV"/>
+                <enum name="GL_ITALIC_BIT_NV"/>
+                <enum name="GL_GLYPH_WIDTH_BIT_NV"/>
+                <enum name="GL_GLYPH_HEIGHT_BIT_NV"/>
+                <enum name="GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV"/>
+                <enum name="GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV"/>
+                <enum name="GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV"/>
+                <enum name="GL_GLYPH_VERTICAL_BEARING_X_BIT_NV"/>
+                <enum name="GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV"/>
+                <enum name="GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV"/>
+                <enum name="GL_GLYPH_HAS_KERNING_BIT_NV"/>
+                <enum name="GL_FONT_X_MIN_BOUNDS_BIT_NV"/>
+                <enum name="GL_FONT_Y_MIN_BOUNDS_BIT_NV"/>
+                <enum name="GL_FONT_X_MAX_BOUNDS_BIT_NV"/>
+                <enum name="GL_FONT_Y_MAX_BOUNDS_BIT_NV"/>
+                <enum name="GL_FONT_UNITS_PER_EM_BIT_NV"/>
+                <enum name="GL_FONT_ASCENDER_BIT_NV"/>
+                <enum name="GL_FONT_DESCENDER_BIT_NV"/>
+                <enum name="GL_FONT_HEIGHT_BIT_NV"/>
+                <enum name="GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV"/>
+                <enum name="GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV"/>
+                <enum name="GL_FONT_UNDERLINE_POSITION_BIT_NV"/>
+                <enum name="GL_FONT_UNDERLINE_THICKNESS_BIT_NV"/>
+                <enum name="GL_FONT_HAS_KERNING_BIT_NV"/>
+                <command name="glGenPathsNV"/>
+                <command name="glDeletePathsNV"/>
+                <command name="glIsPathNV"/>
+                <command name="glPathCommandsNV"/>
+                <command name="glPathCoordsNV"/>
+                <command name="glPathSubCommandsNV"/>
+                <command name="glPathSubCoordsNV"/>
+                <command name="glPathStringNV"/>
+                <command name="glPathGlyphsNV"/>
+                <command name="glPathGlyphRangeNV"/>
+                <command name="glWeightPathsNV"/>
+                <command name="glCopyPathNV"/>
+                <command name="glInterpolatePathsNV"/>
+                <command name="glTransformPathNV"/>
+                <command name="glPathParameterivNV"/>
+                <command name="glPathParameteriNV"/>
+                <command name="glPathParameterfvNV"/>
+                <command name="glPathParameterfNV"/>
+                <command name="glPathDashArrayNV"/>
+                <command name="glPathStencilFuncNV"/>
+                <command name="glPathStencilDepthOffsetNV"/>
+                <command name="glStencilFillPathNV"/>
+                <command name="glStencilStrokePathNV"/>
+                <command name="glStencilFillPathInstancedNV"/>
+                <command name="glStencilStrokePathInstancedNV"/>
+                <command name="glPathCoverDepthFuncNV"/>
+                <command name="glCoverFillPathNV"/>
+                <command name="glCoverStrokePathNV"/>
+                <command name="glCoverFillPathInstancedNV"/>
+                <command name="glCoverStrokePathInstancedNV"/>
+                <command name="glGetPathParameterivNV"/>
+                <command name="glGetPathParameterfvNV"/>
+                <command name="glGetPathCommandsNV"/>
+                <command name="glGetPathCoordsNV"/>
+                <command name="glGetPathDashArrayNV"/>
+                <command name="glGetPathMetricsNV"/>
+                <command name="glGetPathMetricRangeNV"/>
+                <command name="glGetPathSpacingNV"/>
+                <command name="glIsPointInFillPathNV"/>
+                <command name="glIsPointInStrokePathNV"/>
+                <command name="glGetPathLengthNV"/>
+                <command name="glPointAlongPathNV"/>
+            </require>
+            <require comment="API revision 1.2">
+                <enum name="GL_ROUNDED_RECT_NV"/>
+                <enum name="GL_RELATIVE_ROUNDED_RECT_NV"/>
+                <enum name="GL_ROUNDED_RECT2_NV"/>
+                <enum name="GL_RELATIVE_ROUNDED_RECT2_NV"/>
+                <enum name="GL_ROUNDED_RECT4_NV"/>
+                <enum name="GL_RELATIVE_ROUNDED_RECT4_NV"/>
+                <enum name="GL_ROUNDED_RECT8_NV"/>
+                <enum name="GL_RELATIVE_ROUNDED_RECT8_NV"/>
+                <enum name="GL_RELATIVE_RECT_NV"/>
+                <enum name="GL_FONT_GLYPHS_AVAILABLE_NV"/>
+                <enum name="GL_FONT_TARGET_UNAVAILABLE_NV"/>
+                <enum name="GL_FONT_UNAVAILABLE_NV"/>
+                <enum name="GL_FONT_UNINTELLIGIBLE_NV"/>
+                <command name="glMatrixLoad3x2fNV"/>
+                <command name="glMatrixLoad3x3fNV"/>
+                <command name="glMatrixLoadTranspose3x3fNV"/>
+                <command name="glMatrixMult3x2fNV"/>
+                <command name="glMatrixMult3x3fNV"/>
+                <command name="glMatrixMultTranspose3x3fNV"/>
+                <command name="glStencilThenCoverFillPathNV"/>
+                <command name="glStencilThenCoverStrokePathNV"/>
+                <command name="glStencilThenCoverFillPathInstancedNV"/>
+                <command name="glStencilThenCoverStrokePathInstancedNV"/>
+                <command name="glPathGlyphIndexRangeNV"/>
+            </require>
+            <require comment="API revision 1.3">
+                <enum name="GL_CONIC_CURVE_TO_NV"/>
+                <enum name="GL_RELATIVE_CONIC_CURVE_TO_NV"/>
+                <enum name="GL_FONT_NUM_GLYPH_INDICES_BIT_NV"/>
+                <enum name="GL_STANDARD_FONT_FORMAT_NV"/>
+                <command name="glPathGlyphIndexArrayNV"/>
+                <command name="glPathMemoryGlyphIndexArrayNV"/>
+                <command name="glProgramPathFragmentInputGenNV"/>
+                <command name="glGetProgramResourcefvNV"/>
+            </require>
+            <require api="gl" profile="compatibility">
+                <enum name="GL_2_BYTES_NV"/>
+                <enum name="GL_3_BYTES_NV"/>
+                <enum name="GL_4_BYTES_NV"/>
+                <enum name="GL_EYE_LINEAR_NV"/>
+                <enum name="GL_OBJECT_LINEAR_NV"/>
+                <enum name="GL_CONSTANT_NV"/>
+                <enum name="GL_PATH_FOG_GEN_MODE_NV"/>
+                <enum name="GL_PRIMARY_COLOR"/>
+                <enum name="GL_PRIMARY_COLOR_NV"/>
+                <enum name="GL_SECONDARY_COLOR_NV"/>
+                <enum name="GL_PATH_GEN_COLOR_FORMAT_NV"/>
+                <command name="glPathColorGenNV"/>
+                <command name="glPathTexGenNV"/>
+                <command name="glPathFogGenNV"/>
+                <command name="glGetPathColorGenivNV"/>
+                <command name="glGetPathColorGenfvNV"/>
+                <command name="glGetPathTexGenivNV"/>
+                <command name="glGetPathTexGenfvNV"/>
+            </require>
+            <require comment="Other API additions of unknown history">
+                <enum name="GL_PATH_PROJECTION_NV"/>
+                <enum name="GL_PATH_MODELVIEW_NV"/>
+                <enum name="GL_PATH_MODELVIEW_STACK_DEPTH_NV"/>
+                <enum name="GL_PATH_MODELVIEW_MATRIX_NV"/>
+                <enum name="GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV"/>
+                <enum name="GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV"/>
+                <enum name="GL_PATH_PROJECTION_STACK_DEPTH_NV"/>
+                <enum name="GL_PATH_PROJECTION_MATRIX_NV"/>
+                <enum name="GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV"/>
+                <enum name="GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV"/>
+                <enum name="GL_FRAGMENT_INPUT_NV"/>
+                <command name="glMatrixFrustumEXT"/>
+                <command name="glMatrixLoadIdentityEXT"/>
+                <command name="glMatrixLoadTransposefEXT"/>
+                <command name="glMatrixLoadTransposedEXT"/>
+                <command name="glMatrixLoadfEXT"/>
+                <command name="glMatrixLoaddEXT"/>
+                <command name="glMatrixMultTransposefEXT"/>
+                <command name="glMatrixMultTransposedEXT"/>
+                <command name="glMatrixMultfEXT"/>
+                <command name="glMatrixMultdEXT"/>
+                <command name="glMatrixOrthoEXT"/>
+                <command name="glMatrixPopEXT"/>
+                <command name="glMatrixPushEXT"/>
+                <command name="glMatrixRotatefEXT"/>
+                <command name="glMatrixRotatedEXT"/>
+                <command name="glMatrixScalefEXT"/>
+                <command name="glMatrixScaledEXT"/>
+                <command name="glMatrixTranslatefEXT"/>
+                <command name="glMatrixTranslatedEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_path_rendering_shared_edge" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_SHARED_EDGE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_pixel_data_range" supported="gl">
+            <require>
+                <enum name="GL_WRITE_PIXEL_DATA_RANGE_NV"/>
+                <enum name="GL_READ_PIXEL_DATA_RANGE_NV"/>
+                <enum name="GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV"/>
+                <enum name="GL_READ_PIXEL_DATA_RANGE_LENGTH_NV"/>
+                <enum name="GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV"/>
+                <enum name="GL_READ_PIXEL_DATA_RANGE_POINTER_NV"/>
+                <command name="glPixelDataRangeNV"/>
+                <command name="glFlushPixelDataRangeNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_pixel_buffer_object" supported="gles2">
+            <require>
+                <enum name="GL_PIXEL_PACK_BUFFER_NV"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_NV"/>
+                <enum name="GL_PIXEL_PACK_BUFFER_BINDING_NV"/>
+                <enum name="GL_PIXEL_UNPACK_BUFFER_BINDING_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_point_sprite" supported="gl">
+            <require>
+                <enum name="GL_POINT_SPRITE_NV"/>
+                <enum name="GL_COORD_REPLACE_NV"/>
+                <enum name="GL_POINT_SPRITE_R_MODE_NV"/>
+                <command name="glPointParameteriNV"/>
+                <command name="glPointParameterivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_polygon_mode" supported="gles2">
+            <require>
+                <enum name="GL_POLYGON_MODE_NV"/>
+                <enum name="GL_POLYGON_OFFSET_POINT_NV"/>
+                <enum name="GL_POLYGON_OFFSET_LINE_NV"/>
+                <enum name="GL_POINT_NV"/>
+                <enum name="GL_LINE_NV"/>
+                <enum name="GL_FILL_NV"/>
+                <command name="glPolygonModeNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_present_video" supported="gl">
+            <require>
+                <enum name="GL_FRAME_NV"/>
+                <enum name="GL_FIELDS_NV"/>
+                <enum name="GL_CURRENT_TIME_NV"/>
+                <enum name="GL_NUM_FILL_STREAMS_NV"/>
+                <enum name="GL_PRESENT_TIME_NV"/>
+                <enum name="GL_PRESENT_DURATION_NV"/>
+                <command name="glPresentFrameKeyedNV"/>
+                <command name="glPresentFrameDualFillNV"/>
+                <command name="glGetVideoivNV"/>
+                <command name="glGetVideouivNV"/>
+                <command name="glGetVideoi64vNV"/>
+                <command name="glGetVideoui64vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_primitive_restart" supported="gl">
+            <require>
+                <enum name="GL_PRIMITIVE_RESTART_NV"/>
+                <enum name="GL_PRIMITIVE_RESTART_INDEX_NV"/>
+                <command name="glPrimitiveRestartNV"/>
+                <command name="glPrimitiveRestartIndexNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_query_resource" supported="gl">
+            <require>
+                <enum name="GL_QUERY_RESOURCE_TYPE_VIDMEM_ALLOC_NV"/>
+                <enum name="GL_QUERY_RESOURCE_MEMTYPE_VIDMEM_NV"/>
+                <enum name="GL_QUERY_RESOURCE_SYS_RESERVED_NV"/>
+                <enum name="GL_QUERY_RESOURCE_TEXTURE_NV"/>
+                <enum name="GL_QUERY_RESOURCE_RENDERBUFFER_NV"/>
+                <enum name="GL_QUERY_RESOURCE_BUFFEROBJECT_NV"/>
+                <command name="glQueryResourceNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_query_resource_tag" supported="gl">
+            <require>
+                <command name="glGenQueryResourceTagNV"/>
+                <command name="glDeleteQueryResourceTagNV"/>
+                <command name="glQueryResourceTagNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_read_buffer" supported="gles2">
+            <require>
+                <enum name="GL_READ_BUFFER_NV"/>
+                <command name="glReadBufferNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_read_buffer_front" supported="gles2"/>
+        <extension name="GL_NV_read_depth" supported="gles2"/>
+        <extension name="GL_NV_read_depth_stencil" supported="gles2"/>
+        <extension name="GL_NV_read_stencil" supported="gles2"/>
+        <extension name="GL_NV_register_combiners" supported="gl">
+            <require>
+                <enum name="GL_REGISTER_COMBINERS_NV"/>
+                <enum name="GL_VARIABLE_A_NV"/>
+                <enum name="GL_VARIABLE_B_NV"/>
+                <enum name="GL_VARIABLE_C_NV"/>
+                <enum name="GL_VARIABLE_D_NV"/>
+                <enum name="GL_VARIABLE_E_NV"/>
+                <enum name="GL_VARIABLE_F_NV"/>
+                <enum name="GL_VARIABLE_G_NV"/>
+                <enum name="GL_CONSTANT_COLOR0_NV"/>
+                <enum name="GL_CONSTANT_COLOR1_NV"/>
+                <enum name="GL_PRIMARY_COLOR_NV"/>
+                <enum name="GL_SECONDARY_COLOR_NV"/>
+                <enum name="GL_SPARE0_NV"/>
+                <enum name="GL_SPARE1_NV"/>
+                <enum name="GL_DISCARD_NV"/>
+                <enum name="GL_E_TIMES_F_NV"/>
+                <enum name="GL_SPARE0_PLUS_SECONDARY_COLOR_NV"/>
+                <enum name="GL_UNSIGNED_IDENTITY_NV"/>
+                <enum name="GL_UNSIGNED_INVERT_NV"/>
+                <enum name="GL_EXPAND_NORMAL_NV"/>
+                <enum name="GL_EXPAND_NEGATE_NV"/>
+                <enum name="GL_HALF_BIAS_NORMAL_NV"/>
+                <enum name="GL_HALF_BIAS_NEGATE_NV"/>
+                <enum name="GL_SIGNED_IDENTITY_NV"/>
+                <enum name="GL_SIGNED_NEGATE_NV"/>
+                <enum name="GL_SCALE_BY_TWO_NV"/>
+                <enum name="GL_SCALE_BY_FOUR_NV"/>
+                <enum name="GL_SCALE_BY_ONE_HALF_NV"/>
+                <enum name="GL_BIAS_BY_NEGATIVE_ONE_HALF_NV"/>
+                <enum name="GL_COMBINER_INPUT_NV"/>
+                <enum name="GL_COMBINER_MAPPING_NV"/>
+                <enum name="GL_COMBINER_COMPONENT_USAGE_NV"/>
+                <enum name="GL_COMBINER_AB_DOT_PRODUCT_NV"/>
+                <enum name="GL_COMBINER_CD_DOT_PRODUCT_NV"/>
+                <enum name="GL_COMBINER_MUX_SUM_NV"/>
+                <enum name="GL_COMBINER_SCALE_NV"/>
+                <enum name="GL_COMBINER_BIAS_NV"/>
+                <enum name="GL_COMBINER_AB_OUTPUT_NV"/>
+                <enum name="GL_COMBINER_CD_OUTPUT_NV"/>
+                <enum name="GL_COMBINER_SUM_OUTPUT_NV"/>
+                <enum name="GL_MAX_GENERAL_COMBINERS_NV"/>
+                <enum name="GL_NUM_GENERAL_COMBINERS_NV"/>
+                <enum name="GL_COLOR_SUM_CLAMP_NV"/>
+                <enum name="GL_COMBINER0_NV"/>
+                <enum name="GL_COMBINER1_NV"/>
+                <enum name="GL_COMBINER2_NV"/>
+                <enum name="GL_COMBINER3_NV"/>
+                <enum name="GL_COMBINER4_NV"/>
+                <enum name="GL_COMBINER5_NV"/>
+                <enum name="GL_COMBINER6_NV"/>
+                <enum name="GL_COMBINER7_NV"/>
+                <enum name="GL_TEXTURE0_ARB"/>
+                <enum name="GL_TEXTURE1_ARB"/>
+                <enum name="GL_ZERO"/>
+                <enum name="GL_NONE"/>
+                <enum name="GL_FOG"/>
+                <command name="glCombinerParameterfvNV"/>
+                <command name="glCombinerParameterfNV"/>
+                <command name="glCombinerParameterivNV"/>
+                <command name="glCombinerParameteriNV"/>
+                <command name="glCombinerInputNV"/>
+                <command name="glCombinerOutputNV"/>
+                <command name="glFinalCombinerInputNV"/>
+                <command name="glGetCombinerInputParameterfvNV"/>
+                <command name="glGetCombinerInputParameterivNV"/>
+                <command name="glGetCombinerOutputParameterfvNV"/>
+                <command name="glGetCombinerOutputParameterivNV"/>
+                <command name="glGetFinalCombinerInputParameterfvNV"/>
+                <command name="glGetFinalCombinerInputParameterivNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_register_combiners2" supported="gl">
+            <require>
+                <enum name="GL_PER_STAGE_CONSTANTS_NV"/>
+                <command name="glCombinerStageParameterfvNV"/>
+                <command name="glGetCombinerStageParameterfvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_representative_fragment_test" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_REPRESENTATIVE_FRAGMENT_TEST_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_robustness_video_memory_purge" supported="gl">
+            <require>
+                <enum name="GL_PURGED_CONTEXT_RESET_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_sRGB_formats" supported="gles2">
+            <require>
+                <enum name="GL_SLUMINANCE_NV"/>
+                <enum name="GL_SLUMINANCE_ALPHA_NV"/>
+                <enum name="GL_SRGB8_NV"/>
+                <enum name="GL_SLUMINANCE8_NV"/>
+                <enum name="GL_SLUMINANCE8_ALPHA8_NV"/>
+                <enum name="GL_COMPRESSED_SRGB_S3TC_DXT1_NV"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV"/>
+                <enum name="GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV"/>
+                <enum name="GL_ETC1_SRGB8_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_sample_locations" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV"/>
+                <enum name="GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV"/>
+                <enum name="GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV"/>
+                <enum name="GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV"/>
+                <enum name="GL_SAMPLE_LOCATION_NV"/>
+                <enum name="GL_PROGRAMMABLE_SAMPLE_LOCATION_NV"/>
+                <enum name="GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV"/>
+                <enum name="GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV"/>
+                <command name="glFramebufferSampleLocationsfvNV"/>
+                <command name="glNamedFramebufferSampleLocationsfvNV"/>
+                <command name="glResolveDepthValuesNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_sample_mask_override_coverage" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_scissor_exclusive" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_SCISSOR_TEST_EXCLUSIVE_NV"/>
+                <enum name="GL_SCISSOR_BOX_EXCLUSIVE_NV"/>
+                <command name="glScissorExclusiveNV"/>
+                <command name="glScissorExclusiveArrayvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shader_atomic_counters" supported="gl|glcore"/>
+        <extension name="GL_NV_shader_atomic_float" supported="gl|glcore"/>
+        <extension name="GL_NV_shader_atomic_float64" supported="gl|glcore"/>
+        <extension name="GL_NV_shader_atomic_fp16_vector" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_shader_atomic_int64" supported="gl|glcore"/>
+        <extension name="GL_NV_shader_buffer_load" supported="gl|glcore">
+            <require>
+                <enum name="GL_BUFFER_GPU_ADDRESS_NV"/>
+                <enum name="GL_GPU_ADDRESS_NV"/>
+                <enum name="GL_MAX_SHADER_BUFFER_ADDRESS_NV"/>
+                <command name="glMakeBufferResidentNV"/>
+                <command name="glMakeBufferNonResidentNV"/>
+                <command name="glIsBufferResidentNV"/>
+                <command name="glMakeNamedBufferResidentNV"/>
+                <command name="glMakeNamedBufferNonResidentNV"/>
+                <command name="glIsNamedBufferResidentNV"/>
+                <command name="glGetBufferParameterui64vNV"/>
+                <command name="glGetNamedBufferParameterui64vNV"/>
+                <command name="glGetIntegerui64vNV"/>
+                <command name="glUniformui64NV"/>
+                <command name="glUniformui64vNV"/>
+                <command name="glGetUniformui64vNV"/>
+                <command name="glProgramUniformui64NV"/>
+                <command name="glProgramUniformui64vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shader_buffer_store" supported="gl|glcore">
+            <require>
+                <enum name="GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV"/>
+                <enum name="GL_READ_WRITE"/>
+                <enum name="GL_WRITE_ONLY"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shader_noperspective_interpolation" supported="gles2"/>
+        <extension name="GL_NV_shader_storage_buffer_object" supported="gl"/>
+        <extension name="GL_NV_shader_texture_footprint" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_shader_thread_group" supported="gl|glcore">
+            <require>
+                <enum name="GL_WARP_SIZE_NV"/>
+                <enum name="GL_WARPS_PER_SM_NV"/>
+                <enum name="GL_SM_COUNT_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shader_thread_shuffle" supported="gl|glcore"/>
+        <extension name="GL_NV_shading_rate_image" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_SHADING_RATE_IMAGE_NV"/>
+                <enum name="GL_SHADING_RATE_NO_INVOCATIONS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV"/>
+                <enum name="GL_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV"/>
+                <enum name="GL_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV"/>
+                <enum name="GL_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV"/>
+                <enum name="GL_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV"/>
+                <enum name="GL_SHADING_RATE_IMAGE_BINDING_NV"/>
+                <enum name="GL_SHADING_RATE_IMAGE_TEXEL_WIDTH_NV"/>
+                <enum name="GL_SHADING_RATE_IMAGE_TEXEL_HEIGHT_NV"/>
+                <enum name="GL_SHADING_RATE_IMAGE_PALETTE_SIZE_NV"/>
+                <enum name="GL_MAX_COARSE_FRAGMENT_SAMPLES_NV"/>
+                <enum name="GL_SHADING_RATE_SAMPLE_ORDER_DEFAULT_NV"/>
+                <enum name="GL_SHADING_RATE_SAMPLE_ORDER_PIXEL_MAJOR_NV"/>
+                <enum name="GL_SHADING_RATE_SAMPLE_ORDER_SAMPLE_MAJOR_NV"/>
+                <command name="glBindShadingRateImageNV"/>
+                <command name="glGetShadingRateImagePaletteNV"/>
+                <command name="glGetShadingRateSampleLocationivNV"/>
+                <command name="glShadingRateImageBarrierNV"/>
+                <command name="glShadingRateImageBarrierNV"/>
+                <command name="glShadingRateImagePaletteNV"/>
+                <command name="glShadingRateSampleOrderNV"/>
+                <command name="glShadingRateSampleOrderCustomNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shadow_samplers_array" supported="gles2">
+            <require>
+                <enum name="GL_SAMPLER_2D_ARRAY_SHADOW_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_shadow_samplers_cube" supported="gles2">
+            <require>
+                <enum name="GL_SAMPLER_CUBE_SHADOW_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_stereo_view_rendering" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_tessellation_program5" supported="gl">
+            <require>
+                <enum name="GL_MAX_PROGRAM_PATCH_ATTRIBS_NV"/>
+                <enum name="GL_TESS_CONTROL_PROGRAM_NV"/>
+                <enum name="GL_TESS_EVALUATION_PROGRAM_NV"/>
+                <enum name="GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV"/>
+                <enum name="GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texgen_emboss" supported="gl">
+            <require>
+                <enum name="GL_EMBOSS_LIGHT_NV"/>
+                <enum name="GL_EMBOSS_CONSTANT_NV"/>
+                <enum name="GL_EMBOSS_MAP_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texgen_reflection" supported="gl">
+            <require>
+                <enum name="GL_NORMAL_MAP_NV"/>
+                <enum name="GL_REFLECTION_MAP_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_barrier" supported="gl|glcore">
+            <require>
+                <command name="glTextureBarrierNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_border_clamp" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_BORDER_COLOR_NV"/>
+                <enum name="GL_CLAMP_TO_BORDER_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_compression_s3tc_update" supported="gles2"/>
+        <extension name="GL_NV_texture_compression_vtc" supported="gl"/>
+        <extension name="GL_NV_texture_env_combine4" supported="gl">
+            <require>
+                <enum name="GL_COMBINE4_NV"/>
+                <enum name="GL_SOURCE3_RGB_NV"/>
+                <enum name="GL_SOURCE3_ALPHA_NV"/>
+                <enum name="GL_OPERAND3_RGB_NV"/>
+                <enum name="GL_OPERAND3_ALPHA_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_expand_normal" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_UNSIGNED_REMAP_MODE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_multisample" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COVERAGE_SAMPLES_NV"/>
+                <enum name="GL_TEXTURE_COLOR_SAMPLES_NV"/>
+                <command name="glTexImage2DMultisampleCoverageNV"/>
+                <command name="glTexImage3DMultisampleCoverageNV"/>
+            </require>
+            <require comment="Supported only if GL_EXT_direct_state_access is supported">
+                <command name="glTextureImage2DMultisampleNV"/>
+                <command name="glTextureImage3DMultisampleNV"/>
+                <command name="glTextureImage2DMultisampleCoverageNV"/>
+                <command name="glTextureImage3DMultisampleCoverageNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_npot_2D_mipmap" supported="gles2"/>
+        <extension name="GL_NV_texture_rectangle" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_TEXTURE_BINDING_RECTANGLE_NV"/>
+                <enum name="GL_PROXY_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_MAX_RECTANGLE_TEXTURE_SIZE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_rectangle_compressed" supported="gl|glcore"/>
+        <extension name="GL_NV_texture_shader" supported="gl">
+            <require>
+                <enum name="GL_OFFSET_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV"/>
+                <enum name="GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV"/>
+                <enum name="GL_UNSIGNED_INT_S8_S8_8_8_NV"/>
+                <enum name="GL_UNSIGNED_INT_8_8_S8_S8_REV_NV"/>
+                <enum name="GL_DSDT_MAG_INTENSITY_NV"/>
+                <enum name="GL_SHADER_CONSISTENT_NV"/>
+                <enum name="GL_TEXTURE_SHADER_NV"/>
+                <enum name="GL_SHADER_OPERATION_NV"/>
+                <enum name="GL_CULL_MODES_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_MATRIX_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_SCALE_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_BIAS_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_2D_MATRIX_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_2D_SCALE_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_2D_BIAS_NV"/>
+                <enum name="GL_PREVIOUS_TEXTURE_INPUT_NV"/>
+                <enum name="GL_CONST_EYE_NV"/>
+                <enum name="GL_PASS_THROUGH_NV"/>
+                <enum name="GL_CULL_FRAGMENT_NV"/>
+                <enum name="GL_OFFSET_TEXTURE_2D_NV"/>
+                <enum name="GL_DEPENDENT_AR_TEXTURE_2D_NV"/>
+                <enum name="GL_DEPENDENT_GB_TEXTURE_2D_NV"/>
+                <enum name="GL_DOT_PRODUCT_NV"/>
+                <enum name="GL_DOT_PRODUCT_DEPTH_REPLACE_NV"/>
+                <enum name="GL_DOT_PRODUCT_TEXTURE_2D_NV"/>
+                <enum name="GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV"/>
+                <enum name="GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV"/>
+                <enum name="GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV"/>
+                <enum name="GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV"/>
+                <enum name="GL_HILO_NV"/>
+                <enum name="GL_DSDT_NV"/>
+                <enum name="GL_DSDT_MAG_NV"/>
+                <enum name="GL_DSDT_MAG_VIB_NV"/>
+                <enum name="GL_HILO16_NV"/>
+                <enum name="GL_SIGNED_HILO_NV"/>
+                <enum name="GL_SIGNED_HILO16_NV"/>
+                <enum name="GL_SIGNED_RGBA_NV"/>
+                <enum name="GL_SIGNED_RGBA8_NV"/>
+                <enum name="GL_SIGNED_RGB_NV"/>
+                <enum name="GL_SIGNED_RGB8_NV"/>
+                <enum name="GL_SIGNED_LUMINANCE_NV"/>
+                <enum name="GL_SIGNED_LUMINANCE8_NV"/>
+                <enum name="GL_SIGNED_LUMINANCE_ALPHA_NV"/>
+                <enum name="GL_SIGNED_LUMINANCE8_ALPHA8_NV"/>
+                <enum name="GL_SIGNED_ALPHA_NV"/>
+                <enum name="GL_SIGNED_ALPHA8_NV"/>
+                <enum name="GL_SIGNED_INTENSITY_NV"/>
+                <enum name="GL_SIGNED_INTENSITY8_NV"/>
+                <enum name="GL_DSDT8_NV"/>
+                <enum name="GL_DSDT8_MAG8_NV"/>
+                <enum name="GL_DSDT8_MAG8_INTENSITY8_NV"/>
+                <enum name="GL_SIGNED_RGB_UNSIGNED_ALPHA_NV"/>
+                <enum name="GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV"/>
+                <enum name="GL_HI_SCALE_NV"/>
+                <enum name="GL_LO_SCALE_NV"/>
+                <enum name="GL_DS_SCALE_NV"/>
+                <enum name="GL_DT_SCALE_NV"/>
+                <enum name="GL_MAGNITUDE_SCALE_NV"/>
+                <enum name="GL_VIBRANCE_SCALE_NV"/>
+                <enum name="GL_HI_BIAS_NV"/>
+                <enum name="GL_LO_BIAS_NV"/>
+                <enum name="GL_DS_BIAS_NV"/>
+                <enum name="GL_DT_BIAS_NV"/>
+                <enum name="GL_MAGNITUDE_BIAS_NV"/>
+                <enum name="GL_VIBRANCE_BIAS_NV"/>
+                <enum name="GL_TEXTURE_BORDER_VALUES_NV"/>
+                <enum name="GL_TEXTURE_HI_SIZE_NV"/>
+                <enum name="GL_TEXTURE_LO_SIZE_NV"/>
+                <enum name="GL_TEXTURE_DS_SIZE_NV"/>
+                <enum name="GL_TEXTURE_DT_SIZE_NV"/>
+                <enum name="GL_TEXTURE_MAG_SIZE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_shader2" supported="gl">
+            <require>
+                <enum name="GL_DOT_PRODUCT_TEXTURE_3D_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_texture_shader3" supported="gl">
+            <require>
+                <enum name="GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV"/>
+                <enum name="GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV"/>
+                <enum name="GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV"/>
+                <enum name="GL_OFFSET_HILO_TEXTURE_2D_NV"/>
+                <enum name="GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV"/>
+                <enum name="GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV"/>
+                <enum name="GL_DEPENDENT_HILO_TEXTURE_2D_NV"/>
+                <enum name="GL_DEPENDENT_RGB_TEXTURE_3D_NV"/>
+                <enum name="GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV"/>
+                <enum name="GL_DOT_PRODUCT_PASS_THROUGH_NV"/>
+                <enum name="GL_DOT_PRODUCT_TEXTURE_1D_NV"/>
+                <enum name="GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV"/>
+                <enum name="GL_HILO8_NV"/>
+                <enum name="GL_SIGNED_HILO8_NV"/>
+                <enum name="GL_FORCE_BLUE_TO_ONE_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_transform_feedback" supported="gl">
+            <require>
+                <enum name="GL_BACK_PRIMARY_COLOR_NV"/>
+                <enum name="GL_BACK_SECONDARY_COLOR_NV"/>
+                <enum name="GL_TEXTURE_COORD_NV"/>
+                <enum name="GL_CLIP_DISTANCE_NV"/>
+                <enum name="GL_VERTEX_ID_NV"/>
+                <enum name="GL_PRIMITIVE_ID_NV"/>
+                <enum name="GL_GENERIC_ATTRIB_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_ATTRIBS_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV"/>
+                <enum name="GL_ACTIVE_VARYINGS_NV"/>
+                <enum name="GL_ACTIVE_VARYING_MAX_LENGTH_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_VARYINGS_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_START_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_RECORD_NV"/>
+                <enum name="GL_PRIMITIVES_GENERATED_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV"/>
+                <enum name="GL_RASTERIZER_DISCARD_NV"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV"/>
+                <enum name="GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV"/>
+                <enum name="GL_INTERLEAVED_ATTRIBS_NV"/>
+                <enum name="GL_SEPARATE_ATTRIBS_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV"/>
+                <enum name="GL_LAYER_NV"/>
+                <command name="glBeginTransformFeedbackNV"/>
+                <command name="glEndTransformFeedbackNV"/>
+                <command name="glTransformFeedbackAttribsNV"/>
+                <command name="glBindBufferRangeNV"/>
+                <command name="glBindBufferOffsetNV"/>
+                <command name="glBindBufferBaseNV"/>
+                <command name="glTransformFeedbackVaryingsNV"/>
+                <command name="glActiveVaryingNV"/>
+                <command name="glGetVaryingLocationNV"/>
+                <command name="glGetActiveVaryingNV"/>
+                <command name="glGetTransformFeedbackVaryingNV"/>
+            </require>
+            <require comment="Extended by GL_ARB_transform_feedback3">
+                <enum name="GL_NEXT_BUFFER_NV"/>
+                <enum name="GL_SKIP_COMPONENTS4_NV"/>
+                <enum name="GL_SKIP_COMPONENTS3_NV"/>
+                <enum name="GL_SKIP_COMPONENTS2_NV"/>
+                <enum name="GL_SKIP_COMPONENTS1_NV"/>
+                <command name="glTransformFeedbackStreamAttribsNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_transform_feedback2" supported="gl">
+            <require>
+                <enum name="GL_TRANSFORM_FEEDBACK_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE_NV"/>
+                <enum name="GL_TRANSFORM_FEEDBACK_BINDING_NV"/>
+                <command name="glBindTransformFeedbackNV"/>
+                <command name="glDeleteTransformFeedbacksNV"/>
+                <command name="glGenTransformFeedbacksNV"/>
+                <command name="glIsTransformFeedbackNV"/>
+                <command name="glPauseTransformFeedbackNV"/>
+                <command name="glResumeTransformFeedbackNV"/>
+                <command name="glDrawTransformFeedbackNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_uniform_buffer_unified_memory" supported="gl|glcore">
+            <require>
+                <enum name="GL_UNIFORM_BUFFER_UNIFIED_NV"/>
+                <enum name="GL_UNIFORM_BUFFER_ADDRESS_NV"/>
+                <enum name="GL_UNIFORM_BUFFER_LENGTH_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vdpau_interop" supported="gl">
+            <require>
+                <enum name="GL_SURFACE_STATE_NV"/>
+                <enum name="GL_SURFACE_REGISTERED_NV"/>
+                <enum name="GL_SURFACE_MAPPED_NV"/>
+                <enum name="GL_WRITE_DISCARD_NV"/>
+                <command name="glVDPAUInitNV"/>
+                <command name="glVDPAUFiniNV"/>
+                <command name="glVDPAURegisterVideoSurfaceNV"/>
+                <command name="glVDPAURegisterOutputSurfaceNV"/>
+                <command name="glVDPAUIsSurfaceNV"/>
+                <command name="glVDPAUUnregisterSurfaceNV"/>
+                <command name="glVDPAUGetSurfaceivNV"/>
+                <command name="glVDPAUSurfaceAccessNV"/>
+                <command name="glVDPAUMapSurfacesNV"/>
+                <command name="glVDPAUUnmapSurfacesNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vdpau_interop2" supported="gl">
+            <require>
+                <command name="glVDPAURegisterVideoSurfaceWithPictureStructureNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_array_range" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_RANGE_NV"/>
+                <enum name="GL_VERTEX_ARRAY_RANGE_LENGTH_NV"/>
+                <enum name="GL_VERTEX_ARRAY_RANGE_VALID_NV"/>
+                <enum name="GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV"/>
+                <enum name="GL_VERTEX_ARRAY_RANGE_POINTER_NV"/>
+                <command name="glFlushVertexArrayRangeNV"/>
+                <command name="glVertexArrayRangeNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_array_range2" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_attrib_integer_64bit" supported="gl|glcore">
+            <require>
+                <enum name="GL_INT64_NV"/>
+                <enum name="GL_UNSIGNED_INT64_NV"/>
+                <command name="glVertexAttribL1i64NV"/>
+                <command name="glVertexAttribL2i64NV"/>
+                <command name="glVertexAttribL3i64NV"/>
+                <command name="glVertexAttribL4i64NV"/>
+                <command name="glVertexAttribL1i64vNV"/>
+                <command name="glVertexAttribL2i64vNV"/>
+                <command name="glVertexAttribL3i64vNV"/>
+                <command name="glVertexAttribL4i64vNV"/>
+                <command name="glVertexAttribL1ui64NV"/>
+                <command name="glVertexAttribL2ui64NV"/>
+                <command name="glVertexAttribL3ui64NV"/>
+                <command name="glVertexAttribL4ui64NV"/>
+                <command name="glVertexAttribL1ui64vNV"/>
+                <command name="glVertexAttribL2ui64vNV"/>
+                <command name="glVertexAttribL3ui64vNV"/>
+                <command name="glVertexAttribL4ui64vNV"/>
+                <command name="glGetVertexAttribLi64vNV"/>
+                <command name="glGetVertexAttribLui64vNV"/>
+                <command name="glVertexAttribLFormatNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_buffer_unified_memory" supported="gl|glcore">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV"/>
+                <enum name="GL_ELEMENT_ARRAY_UNIFIED_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_VERTEX_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_NORMAL_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_COLOR_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_INDEX_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_FOG_COORD_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_ELEMENT_ARRAY_ADDRESS_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_LENGTH_NV"/>
+                <enum name="GL_VERTEX_ARRAY_LENGTH_NV"/>
+                <enum name="GL_NORMAL_ARRAY_LENGTH_NV"/>
+                <enum name="GL_COLOR_ARRAY_LENGTH_NV"/>
+                <enum name="GL_INDEX_ARRAY_LENGTH_NV"/>
+                <enum name="GL_TEXTURE_COORD_ARRAY_LENGTH_NV"/>
+                <enum name="GL_EDGE_FLAG_ARRAY_LENGTH_NV"/>
+                <enum name="GL_SECONDARY_COLOR_ARRAY_LENGTH_NV"/>
+                <enum name="GL_FOG_COORD_ARRAY_LENGTH_NV"/>
+                <enum name="GL_ELEMENT_ARRAY_LENGTH_NV"/>
+                <enum name="GL_DRAW_INDIRECT_UNIFIED_NV"/>
+                <enum name="GL_DRAW_INDIRECT_ADDRESS_NV"/>
+                <enum name="GL_DRAW_INDIRECT_LENGTH_NV"/>
+                <command name="glBufferAddressRangeNV"/>
+                <command name="glVertexFormatNV"/>
+                <command name="glNormalFormatNV"/>
+                <command name="glColorFormatNV"/>
+                <command name="glIndexFormatNV"/>
+                <command name="glTexCoordFormatNV"/>
+                <command name="glEdgeFlagFormatNV"/>
+                <command name="glSecondaryColorFormatNV"/>
+                <command name="glFogCoordFormatNV"/>
+                <command name="glVertexAttribFormatNV"/>
+                <command name="glVertexAttribIFormatNV"/>
+                <command name="glGetIntegerui64i_vNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_program" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_PROGRAM_NV"/>
+                <enum name="GL_VERTEX_STATE_PROGRAM_NV"/>
+                <enum name="GL_ATTRIB_ARRAY_SIZE_NV"/>
+                <enum name="GL_ATTRIB_ARRAY_STRIDE_NV"/>
+                <enum name="GL_ATTRIB_ARRAY_TYPE_NV"/>
+                <enum name="GL_CURRENT_ATTRIB_NV"/>
+                <enum name="GL_PROGRAM_LENGTH_NV"/>
+                <enum name="GL_PROGRAM_STRING_NV"/>
+                <enum name="GL_MODELVIEW_PROJECTION_NV"/>
+                <enum name="GL_IDENTITY_NV"/>
+                <enum name="GL_INVERSE_NV"/>
+                <enum name="GL_TRANSPOSE_NV"/>
+                <enum name="GL_INVERSE_TRANSPOSE_NV"/>
+                <enum name="GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV"/>
+                <enum name="GL_MAX_TRACK_MATRICES_NV"/>
+                <enum name="GL_MATRIX0_NV"/>
+                <enum name="GL_MATRIX1_NV"/>
+                <enum name="GL_MATRIX2_NV"/>
+                <enum name="GL_MATRIX3_NV"/>
+                <enum name="GL_MATRIX4_NV"/>
+                <enum name="GL_MATRIX5_NV"/>
+                <enum name="GL_MATRIX6_NV"/>
+                <enum name="GL_MATRIX7_NV"/>
+                <enum name="GL_CURRENT_MATRIX_STACK_DEPTH_NV"/>
+                <enum name="GL_CURRENT_MATRIX_NV"/>
+                <enum name="GL_VERTEX_PROGRAM_POINT_SIZE_NV"/>
+                <enum name="GL_VERTEX_PROGRAM_TWO_SIDE_NV"/>
+                <enum name="GL_PROGRAM_PARAMETER_NV"/>
+                <enum name="GL_ATTRIB_ARRAY_POINTER_NV"/>
+                <enum name="GL_PROGRAM_TARGET_NV"/>
+                <enum name="GL_PROGRAM_RESIDENT_NV"/>
+                <enum name="GL_TRACK_MATRIX_NV"/>
+                <enum name="GL_TRACK_MATRIX_TRANSFORM_NV"/>
+                <enum name="GL_VERTEX_PROGRAM_BINDING_NV"/>
+                <enum name="GL_PROGRAM_ERROR_POSITION_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY0_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY1_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY2_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY3_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY4_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY5_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY6_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY7_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY8_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY9_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY10_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY11_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY12_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY13_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY14_NV"/>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY15_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB0_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB1_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB2_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB3_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB4_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB5_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB6_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB7_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB8_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB9_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB10_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB11_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB12_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB13_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB14_4_NV"/>
+                <enum name="GL_MAP1_VERTEX_ATTRIB15_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB0_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB1_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB2_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB3_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB4_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB5_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB6_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB7_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB8_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB9_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB10_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB11_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB12_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB13_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB14_4_NV"/>
+                <enum name="GL_MAP2_VERTEX_ATTRIB15_4_NV"/>
+                <command name="glAreProgramsResidentNV"/>
+                <command name="glBindProgramNV"/>
+                <command name="glDeleteProgramsNV"/>
+                <command name="glExecuteProgramNV"/>
+                <command name="glGenProgramsNV"/>
+                <command name="glGetProgramParameterdvNV"/>
+                <command name="glGetProgramParameterfvNV"/>
+                <command name="glGetProgramivNV"/>
+                <command name="glGetProgramStringNV"/>
+                <command name="glGetTrackMatrixivNV"/>
+                <command name="glGetVertexAttribdvNV"/>
+                <command name="glGetVertexAttribfvNV"/>
+                <command name="glGetVertexAttribivNV"/>
+                <command name="glGetVertexAttribPointervNV"/>
+                <command name="glIsProgramNV"/>
+                <command name="glLoadProgramNV"/>
+                <command name="glProgramParameter4dNV"/>
+                <command name="glProgramParameter4dvNV"/>
+                <command name="glProgramParameter4fNV"/>
+                <command name="glProgramParameter4fvNV"/>
+                <command name="glProgramParameters4dvNV"/>
+                <command name="glProgramParameters4fvNV"/>
+                <command name="glRequestResidentProgramsNV"/>
+                <command name="glTrackMatrixNV"/>
+                <command name="glVertexAttribPointerNV"/>
+                <command name="glVertexAttrib1dNV"/>
+                <command name="glVertexAttrib1dvNV"/>
+                <command name="glVertexAttrib1fNV"/>
+                <command name="glVertexAttrib1fvNV"/>
+                <command name="glVertexAttrib1sNV"/>
+                <command name="glVertexAttrib1svNV"/>
+                <command name="glVertexAttrib2dNV"/>
+                <command name="glVertexAttrib2dvNV"/>
+                <command name="glVertexAttrib2fNV"/>
+                <command name="glVertexAttrib2fvNV"/>
+                <command name="glVertexAttrib2sNV"/>
+                <command name="glVertexAttrib2svNV"/>
+                <command name="glVertexAttrib3dNV"/>
+                <command name="glVertexAttrib3dvNV"/>
+                <command name="glVertexAttrib3fNV"/>
+                <command name="glVertexAttrib3fvNV"/>
+                <command name="glVertexAttrib3sNV"/>
+                <command name="glVertexAttrib3svNV"/>
+                <command name="glVertexAttrib4dNV"/>
+                <command name="glVertexAttrib4dvNV"/>
+                <command name="glVertexAttrib4fNV"/>
+                <command name="glVertexAttrib4fvNV"/>
+                <command name="glVertexAttrib4sNV"/>
+                <command name="glVertexAttrib4svNV"/>
+                <command name="glVertexAttrib4ubNV"/>
+                <command name="glVertexAttrib4ubvNV"/>
+                <command name="glVertexAttribs1dvNV"/>
+                <command name="glVertexAttribs1fvNV"/>
+                <command name="glVertexAttribs1svNV"/>
+                <command name="glVertexAttribs2dvNV"/>
+                <command name="glVertexAttribs2fvNV"/>
+                <command name="glVertexAttribs2svNV"/>
+                <command name="glVertexAttribs3dvNV"/>
+                <command name="glVertexAttribs3fvNV"/>
+                <command name="glVertexAttribs3svNV"/>
+                <command name="glVertexAttribs4dvNV"/>
+                <command name="glVertexAttribs4fvNV"/>
+                <command name="glVertexAttribs4svNV"/>
+                <command name="glVertexAttribs4ubvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_program1_1" supported="gl"/>
+        <extension name="GL_NV_vertex_program2" supported="gl"/>
+        <extension name="GL_NV_vertex_program2_option" supported="gl">
+            <require>
+                <enum name="GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV"/>
+                <enum name="GL_MAX_PROGRAM_CALL_DEPTH_NV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_program3" supported="gl">
+            <require>
+                <enum name="GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_vertex_program4" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_ATTRIB_ARRAY_INTEGER_NV"/>
+                <command name="glVertexAttribI1iEXT"/>
+                <command name="glVertexAttribI2iEXT"/>
+                <command name="glVertexAttribI3iEXT"/>
+                <command name="glVertexAttribI4iEXT"/>
+                <command name="glVertexAttribI1uiEXT"/>
+                <command name="glVertexAttribI2uiEXT"/>
+                <command name="glVertexAttribI3uiEXT"/>
+                <command name="glVertexAttribI4uiEXT"/>
+                <command name="glVertexAttribI1ivEXT"/>
+                <command name="glVertexAttribI2ivEXT"/>
+                <command name="glVertexAttribI3ivEXT"/>
+                <command name="glVertexAttribI4ivEXT"/>
+                <command name="glVertexAttribI1uivEXT"/>
+                <command name="glVertexAttribI2uivEXT"/>
+                <command name="glVertexAttribI3uivEXT"/>
+                <command name="glVertexAttribI4uivEXT"/>
+                <command name="glVertexAttribI4bvEXT"/>
+                <command name="glVertexAttribI4svEXT"/>
+                <command name="glVertexAttribI4ubvEXT"/>
+                <command name="glVertexAttribI4usvEXT"/>
+                <command name="glVertexAttribIPointerEXT"/>
+                <command name="glGetVertexAttribIivEXT"/>
+                <command name="glGetVertexAttribIuivEXT"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_video_capture" supported="gl">
+            <require>
+                <enum name="GL_VIDEO_BUFFER_NV"/>
+                <enum name="GL_VIDEO_BUFFER_BINDING_NV"/>
+                <enum name="GL_FIELD_UPPER_NV"/>
+                <enum name="GL_FIELD_LOWER_NV"/>
+                <enum name="GL_NUM_VIDEO_CAPTURE_STREAMS_NV"/>
+                <enum name="GL_NEXT_VIDEO_CAPTURE_BUFFER_STATUS_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_TO_422_SUPPORTED_NV"/>
+                <enum name="GL_LAST_VIDEO_CAPTURE_STATUS_NV"/>
+                <enum name="GL_VIDEO_BUFFER_PITCH_NV"/>
+                <enum name="GL_VIDEO_COLOR_CONVERSION_MATRIX_NV"/>
+                <enum name="GL_VIDEO_COLOR_CONVERSION_MAX_NV"/>
+                <enum name="GL_VIDEO_COLOR_CONVERSION_MIN_NV"/>
+                <enum name="GL_VIDEO_COLOR_CONVERSION_OFFSET_NV"/>
+                <enum name="GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV"/>
+                <enum name="GL_PARTIAL_SUCCESS_NV"/>
+                <enum name="GL_SUCCESS_NV"/>
+                <enum name="GL_FAILURE_NV"/>
+                <enum name="GL_YCBYCR8_422_NV"/>
+                <enum name="GL_YCBAYCR8A_4224_NV"/>
+                <enum name="GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV"/>
+                <enum name="GL_Z6Y10Z6CB10Z6A10Z6Y10Z6CR10Z6A10_4224_NV"/>
+                <enum name="GL_Z4Y12Z4CB12Z4Y12Z4CR12_422_NV"/>
+                <enum name="GL_Z4Y12Z4CB12Z4A12Z4Y12Z4CR12Z4A12_4224_NV"/>
+                <enum name="GL_Z4Y12Z4CB12Z4CR12_444_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_FRAME_WIDTH_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_FRAME_HEIGHT_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_FIELD_UPPER_HEIGHT_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_FIELD_LOWER_HEIGHT_NV"/>
+                <enum name="GL_VIDEO_CAPTURE_SURFACE_ORIGIN_NV"/>
+                <command name="glBeginVideoCaptureNV"/>
+                <command name="glBindVideoCaptureStreamBufferNV"/>
+                <command name="glBindVideoCaptureStreamTextureNV"/>
+                <command name="glEndVideoCaptureNV"/>
+                <command name="glGetVideoCaptureivNV"/>
+                <command name="glGetVideoCaptureStreamivNV"/>
+                <command name="glGetVideoCaptureStreamfvNV"/>
+                <command name="glGetVideoCaptureStreamdvNV"/>
+                <command name="glVideoCaptureNV"/>
+                <command name="glVideoCaptureStreamParameterivNV"/>
+                <command name="glVideoCaptureStreamParameterfvNV"/>
+                <command name="glVideoCaptureStreamParameterdvNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_viewport_array" supported="gles2">
+            <require>
+                <enum name="GL_MAX_VIEWPORTS_NV"/>
+                <enum name="GL_VIEWPORT_SUBPIXEL_BITS_NV"/>
+                <enum name="GL_VIEWPORT_BOUNDS_RANGE_NV"/>
+                <enum name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV"/>
+                <enum name="GL_SCISSOR_BOX"/>
+                <enum name="GL_VIEWPORT"/>
+                <enum name="GL_DEPTH_RANGE"/>
+                <enum name="GL_SCISSOR_TEST"/>
+                <command name="glViewportArrayvNV"/>
+                <command name="glViewportIndexedfNV"/>
+                <command name="glViewportIndexedfvNV"/>
+                <command name="glScissorArrayvNV"/>
+                <command name="glScissorIndexedNV"/>
+                <command name="glScissorIndexedvNV"/>
+                <command name="glDepthRangeArrayfvNV"/>
+                <command name="glDepthRangeIndexedfNV"/>
+                <command name="glGetFloati_vNV"/>
+                <command name="glEnableiNV"/>
+                <command name="glDisableiNV"/>
+                <command name="glIsEnablediNV"/>
+            </require>
+        </extension>
+        <extension name="GL_NV_viewport_array2" supported="gl|glcore|gles2"/>
+        <extension name="GL_NV_viewport_swizzle" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_X_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_Y_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_Z_NV"/>
+                <enum name="GL_VIEWPORT_SWIZZLE_W_NV"/>
+                <command name="glViewportSwizzleNV"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_EGL_image" supported="gles1|gles2">
+            <require>
+                <type name="GLeglImageOES"/>
+                <command name="glEGLImageTargetTexture2DOES"/>
+                <command name="glEGLImageTargetRenderbufferStorageOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_EGL_image_external" supported="gles1|gles2">
+            <require>
+                <type name="GLeglImageOES"/>
+                <enum name="GL_TEXTURE_EXTERNAL_OES"/>
+                <enum name="GL_TEXTURE_BINDING_EXTERNAL_OES"/>
+                <enum name="GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES"/>
+            </require>
+            <require api="gles2">
+                <enum name="GL_SAMPLER_EXTERNAL_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_EGL_image_external_essl3" supported="gles2"/>
+        <extension name="GL_OES_blend_equation_separate" supported="gles1">
+            <require>
+                <enum name="GL_BLEND_EQUATION_RGB_OES"/>
+                <enum name="GL_BLEND_EQUATION_ALPHA_OES"/>
+                <command name="glBlendEquationSeparateOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_blend_func_separate" supported="gles1">
+            <require>
+                <enum name="GL_BLEND_DST_RGB_OES"/>
+                <enum name="GL_BLEND_SRC_RGB_OES"/>
+                <enum name="GL_BLEND_DST_ALPHA_OES"/>
+                <enum name="GL_BLEND_SRC_ALPHA_OES"/>
+                <command name="glBlendFuncSeparateOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_blend_subtract" supported="gles1">
+            <require>
+                <enum name="GL_BLEND_EQUATION_OES"/>
+                <enum name="GL_FUNC_ADD_OES"/>
+                <enum name="GL_FUNC_SUBTRACT_OES"/>
+                <enum name="GL_FUNC_REVERSE_SUBTRACT_OES"/>
+                <command name="glBlendEquationOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_byte_coordinates" supported="gl|gles1">
+            <require>
+                <type name="GLbyte"/>
+                <enum name="GL_BYTE"/>
+            </require>
+            <require api="gl" comment="Immediate-mode entry points don't exist in ES 1.x">
+                <command name="glMultiTexCoord1bOES"/>
+                <command name="glMultiTexCoord1bvOES"/>
+                <command name="glMultiTexCoord2bOES"/>
+                <command name="glMultiTexCoord2bvOES"/>
+                <command name="glMultiTexCoord3bOES"/>
+                <command name="glMultiTexCoord3bvOES"/>
+                <command name="glMultiTexCoord4bOES"/>
+                <command name="glMultiTexCoord4bvOES"/>
+                <command name="glTexCoord1bOES"/>
+                <command name="glTexCoord1bvOES"/>
+                <command name="glTexCoord2bOES"/>
+                <command name="glTexCoord2bvOES"/>
+                <command name="glTexCoord3bOES"/>
+                <command name="glTexCoord3bvOES"/>
+                <command name="glTexCoord4bOES"/>
+                <command name="glTexCoord4bvOES"/>
+                <command name="glVertex2bOES"/>
+                <command name="glVertex2bvOES"/>
+                <command name="glVertex3bOES"/>
+                <command name="glVertex3bvOES"/>
+                <command name="glVertex4bOES"/>
+                <command name="glVertex4bvOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_compressed_ETC1_RGB8_sub_texture" supported="gles1|gles2"/>
+        <extension name="GL_OES_compressed_ETC1_RGB8_texture" supported="gles1|gles2">
+            <require>
+                <enum name="GL_ETC1_RGB8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_compressed_paletted_texture" supported="gl|gles1|gles2">
+            <require>
+                <enum name="GL_PALETTE4_RGB8_OES"/>
+                <enum name="GL_PALETTE4_RGBA8_OES"/>
+                <enum name="GL_PALETTE4_R5_G6_B5_OES"/>
+                <enum name="GL_PALETTE4_RGBA4_OES"/>
+                <enum name="GL_PALETTE4_RGB5_A1_OES"/>
+                <enum name="GL_PALETTE8_RGB8_OES"/>
+                <enum name="GL_PALETTE8_RGBA8_OES"/>
+                <enum name="GL_PALETTE8_R5_G6_B5_OES"/>
+                <enum name="GL_PALETTE8_RGBA4_OES"/>
+                <enum name="GL_PALETTE8_RGB5_A1_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_copy_image" supported="gles2">
+            <require>
+                <command name="glCopyImageSubDataOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_depth24" supported="gles1|gles2|glsc2">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT24_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_depth32" supported="gles1|gles2|glsc2">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT32_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_depth_texture" supported="gles2">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT"/>
+                <enum name="GL_UNSIGNED_SHORT"/>
+                <enum name="GL_UNSIGNED_INT"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_draw_buffers_indexed" supported="gles2">
+            <require>
+                <enum name="GL_BLEND_EQUATION_RGB"/>
+                <enum name="GL_BLEND_EQUATION_ALPHA"/>
+                <enum name="GL_BLEND_SRC_RGB"/>
+                <enum name="GL_BLEND_SRC_ALPHA"/>
+                <enum name="GL_BLEND_DST_RGB"/>
+                <enum name="GL_BLEND_DST_ALPHA"/>
+                <enum name="GL_COLOR_WRITEMASK"/>
+                <enum name="GL_BLEND"/>
+                <enum name="GL_FUNC_ADD"/>
+                <enum name="GL_FUNC_SUBTRACT"/>
+                <enum name="GL_FUNC_REVERSE_SUBTRACT"/>
+                <enum name="GL_MIN"/>
+                <enum name="GL_MAX"/>
+                <enum name="GL_ZERO"/>
+                <enum name="GL_ONE"/>
+                <enum name="GL_SRC_COLOR"/>
+                <enum name="GL_ONE_MINUS_SRC_COLOR"/>
+                <enum name="GL_DST_COLOR"/>
+                <enum name="GL_ONE_MINUS_DST_COLOR"/>
+                <enum name="GL_SRC_ALPHA"/>
+                <enum name="GL_ONE_MINUS_SRC_ALPHA"/>
+                <enum name="GL_DST_ALPHA"/>
+                <enum name="GL_ONE_MINUS_DST_ALPHA"/>
+                <enum name="GL_CONSTANT_COLOR"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_COLOR"/>
+                <enum name="GL_CONSTANT_ALPHA"/>
+                <enum name="GL_ONE_MINUS_CONSTANT_ALPHA"/>
+                <enum name="GL_SRC_ALPHA_SATURATE"/>
+                <command name="glEnableiOES"/>
+                <command name="glDisableiOES"/>
+                <command name="glBlendEquationiOES"/>
+                <command name="glBlendEquationSeparateiOES"/>
+                <command name="glBlendFunciOES"/>
+                <command name="glBlendFuncSeparateiOES"/>
+                <command name="glColorMaskiOES"/>
+                <command name="glIsEnablediOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_draw_elements_base_vertex" supported="gles2">
+            <require>
+                <command name="glDrawElementsBaseVertexOES"/>
+                <command name="glDrawRangeElementsBaseVertexOES" comment="Supported only if OpenGL ES 3.0 is supported"/>
+                <command name="glDrawElementsInstancedBaseVertexOES" comment="Supported only if OpenGL ES 3.0 is supported"/>
+                <command name="glMultiDrawElementsBaseVertexEXT" comment="Supported only if GL_EXT_multi_draw_arrays is supported"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_draw_texture" supported="gles1">
+            <require>
+                <enum name="GL_TEXTURE_CROP_RECT_OES"/>
+                <command name="glDrawTexsOES"/>
+                <command name="glDrawTexiOES"/>
+                <command name="glDrawTexxOES"/>
+                <command name="glDrawTexsvOES"/>
+                <command name="glDrawTexivOES"/>
+                <command name="glDrawTexxvOES"/>
+                <command name="glDrawTexfOES"/>
+                <command name="glDrawTexfvOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_element_index_uint" supported="gles1|gles2">
+            <require>
+                <enum name="GL_UNSIGNED_INT"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_extended_matrix_palette" supported="gles1"/>
+        <extension name="GL_OES_fbo_render_mipmap" supported="gles1|gles2"/>
+        <extension name="GL_OES_fixed_point" supported="gl|gles1">
+            <require>
+                <enum name="GL_FIXED_OES"/>
+                <command name="glAlphaFuncxOES"/>
+                <command name="glClearColorxOES"/>
+                <command name="glClearDepthxOES"/>
+                <command name="glClipPlanexOES"/>
+                <command name="glColor4xOES"/>
+                <command name="glDepthRangexOES"/>
+                <command name="glFogxOES"/>
+                <command name="glFogxvOES"/>
+                <command name="glFrustumxOES"/>
+                <command name="glGetClipPlanexOES"/>
+                <command name="glGetFixedvOES"/>
+                <command name="glGetTexEnvxvOES"/>
+                <command name="glGetTexParameterxvOES"/>
+                <command name="glLightModelxOES"/>
+                <command name="glLightModelxvOES"/>
+                <command name="glLightxOES"/>
+                <command name="glLightxvOES"/>
+                <command name="glLineWidthxOES"/>
+                <command name="glLoadMatrixxOES"/>
+                <command name="glMaterialxOES"/>
+                <command name="glMaterialxvOES"/>
+                <command name="glMultMatrixxOES"/>
+                <command name="glMultiTexCoord4xOES"/>
+                <command name="glNormal3xOES"/>
+                <command name="glOrthoxOES"/>
+                <command name="glPointParameterxvOES"/>
+                <command name="glPointSizexOES"/>
+                <command name="glPolygonOffsetxOES"/>
+                <command name="glRotatexOES"/>
+                <command name="glScalexOES"/>
+                <command name="glTexEnvxOES"/>
+                <command name="glTexEnvxvOES"/>
+                <command name="glTexParameterxOES"/>
+                <command name="glTexParameterxvOES"/>
+                <command name="glTranslatexOES"/>
+            </require>
+            <require api="gles1" comment="Entry points not in the extension spec, but in the Khronos glext.h. Included for backward compatibility.">
+                <command name="glGetLightxvOES"/>
+                <command name="glGetMaterialxvOES"/>
+                <command name="glPointParameterxOES"/>
+                <command name="glSampleCoveragexOES"/>
+            </require>
+            <require api="gl" comment="Entry points in the extension spec, but not the Khronos glext.h. Correspond to GL-only features it's unlikely were ever implemented against ES 1.x.">
+                <command name="glAccumxOES"/>
+                <command name="glBitmapxOES"/>
+                <command name="glBlendColorxOES"/>
+                <command name="glClearAccumxOES"/>
+                <command name="glColor3xOES"/>
+                <command name="glColor3xvOES"/>
+                <command name="glColor4xvOES"/>
+                <command name="glConvolutionParameterxOES"/>
+                <command name="glConvolutionParameterxvOES"/>
+                <command name="glEvalCoord1xOES"/>
+                <command name="glEvalCoord1xvOES"/>
+                <command name="glEvalCoord2xOES"/>
+                <command name="glEvalCoord2xvOES"/>
+                <command name="glFeedbackBufferxOES"/>
+                <command name="glGetConvolutionParameterxvOES"/>
+                <command name="glGetHistogramParameterxvOES"/>
+                <command name="glGetLightxOES"/>
+                <command name="glGetMapxvOES"/>
+                <command name="glGetMaterialxOES"/>
+                <command name="glGetPixelMapxv"/>
+                <command name="glGetTexGenxvOES"/>
+                <command name="glGetTexLevelParameterxvOES"/>
+                <command name="glIndexxOES"/>
+                <command name="glIndexxvOES"/>
+                <command name="glLoadTransposeMatrixxOES"/>
+                <command name="glMap1xOES"/>
+                <command name="glMap2xOES"/>
+                <command name="glMapGrid1xOES"/>
+                <command name="glMapGrid2xOES"/>
+                <command name="glMultTransposeMatrixxOES"/>
+                <command name="glMultiTexCoord1xOES"/>
+                <command name="glMultiTexCoord1xvOES"/>
+                <command name="glMultiTexCoord2xOES"/>
+                <command name="glMultiTexCoord2xvOES"/>
+                <command name="glMultiTexCoord3xOES"/>
+                <command name="glMultiTexCoord3xvOES"/>
+                <command name="glMultiTexCoord4xvOES"/>
+                <command name="glNormal3xvOES"/>
+                <command name="glPassThroughxOES"/>
+                <command name="glPixelMapx"/>
+                <command name="glPixelStorex"/>
+                <command name="glPixelTransferxOES"/>
+                <command name="glPixelZoomxOES"/>
+                <command name="glPrioritizeTexturesxOES"/>
+                <command name="glRasterPos2xOES"/>
+                <command name="glRasterPos2xvOES"/>
+                <command name="glRasterPos3xOES"/>
+                <command name="glRasterPos3xvOES"/>
+                <command name="glRasterPos4xOES"/>
+                <command name="glRasterPos4xvOES"/>
+                <command name="glRectxOES"/>
+                <command name="glRectxvOES"/>
+                <command name="glTexCoord1xOES"/>
+                <command name="glTexCoord1xvOES"/>
+                <command name="glTexCoord2xOES"/>
+                <command name="glTexCoord2xvOES"/>
+                <command name="glTexCoord3xOES"/>
+                <command name="glTexCoord3xvOES"/>
+                <command name="glTexCoord4xOES"/>
+                <command name="glTexCoord4xvOES"/>
+                <command name="glTexGenxOES"/>
+                <command name="glTexGenxvOES"/>
+                <command name="glVertex2xOES"/>
+                <command name="glVertex2xvOES"/>
+                <command name="glVertex3xOES"/>
+                <command name="glVertex3xvOES"/>
+                <command name="glVertex4xOES"/>
+                <command name="glVertex4xvOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_fragment_precision_high" supported="gles2">
+            <require>
+            </require>
+        </extension>
+        <extension name="GL_OES_framebuffer_object" supported="gles1">
+            <require>
+                <enum name="GL_NONE_OES"/>
+                <enum name="GL_FRAMEBUFFER_OES"/>
+                <enum name="GL_RENDERBUFFER_OES"/>
+                <enum name="GL_RGBA4_OES"/>
+                <enum name="GL_RGB5_A1_OES"/>
+                <enum name="GL_RGB565_OES"/>
+                <enum name="GL_DEPTH_COMPONENT16_OES"/>
+                <enum name="GL_RENDERBUFFER_WIDTH_OES"/>
+                <enum name="GL_RENDERBUFFER_HEIGHT_OES"/>
+                <enum name="GL_RENDERBUFFER_INTERNAL_FORMAT_OES"/>
+                <enum name="GL_RENDERBUFFER_RED_SIZE_OES"/>
+                <enum name="GL_RENDERBUFFER_GREEN_SIZE_OES"/>
+                <enum name="GL_RENDERBUFFER_BLUE_SIZE_OES"/>
+                <enum name="GL_RENDERBUFFER_ALPHA_SIZE_OES"/>
+                <enum name="GL_RENDERBUFFER_DEPTH_SIZE_OES"/>
+                <enum name="GL_RENDERBUFFER_STENCIL_SIZE_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_OES"/>
+                <enum name="GL_COLOR_ATTACHMENT0_OES"/>
+                <enum name="GL_DEPTH_ATTACHMENT_OES"/>
+                <enum name="GL_STENCIL_ATTACHMENT_OES"/>
+                <enum name="GL_FRAMEBUFFER_COMPLETE_OES"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_OES"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_OES"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_OES"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_FORMATS_OES"/>
+                <enum name="GL_FRAMEBUFFER_UNSUPPORTED_OES"/>
+                <enum name="GL_FRAMEBUFFER_BINDING_OES"/>
+                <enum name="GL_RENDERBUFFER_BINDING_OES"/>
+                <enum name="GL_MAX_RENDERBUFFER_SIZE_OES"/>
+                <enum name="GL_INVALID_FRAMEBUFFER_OPERATION_OES"/>
+                <command name="glIsRenderbufferOES"/>
+                <command name="glBindRenderbufferOES"/>
+                <command name="glDeleteRenderbuffersOES"/>
+                <command name="glGenRenderbuffersOES"/>
+                <command name="glRenderbufferStorageOES"/>
+                <command name="glGetRenderbufferParameterivOES"/>
+                <command name="glIsFramebufferOES"/>
+                <command name="glBindFramebufferOES"/>
+                <command name="glDeleteFramebuffersOES"/>
+                <command name="glGenFramebuffersOES"/>
+                <command name="glCheckFramebufferStatusOES"/>
+                <command name="glFramebufferRenderbufferOES"/>
+                <command name="glFramebufferTexture2DOES"/>
+                <command name="glGetFramebufferAttachmentParameterivOES"/>
+                <command name="glGenerateMipmapOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_geometry_point_size" supported="gles2"/>
+        <extension name="GL_OES_geometry_shader" supported="gles2">
+            <require>
+                <enum name="GL_GEOMETRY_SHADER_OES"/>
+                <enum name="GL_GEOMETRY_SHADER_BIT_OES"/>
+                <enum name="GL_GEOMETRY_LINKED_VERTICES_OUT_OES"/>
+                <enum name="GL_GEOMETRY_LINKED_INPUT_TYPE_OES"/>
+                <enum name="GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES"/>
+                <enum name="GL_GEOMETRY_SHADER_INVOCATIONS_OES"/>
+                <enum name="GL_LAYER_PROVOKING_VERTEX_OES"/>
+                <enum name="GL_LINES_ADJACENCY_OES"/>
+                <enum name="GL_LINE_STRIP_ADJACENCY_OES"/>
+                <enum name="GL_TRIANGLES_ADJACENCY_OES"/>
+                <enum name="GL_TRIANGLE_STRIP_ADJACENCY_OES"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES"/>
+                <enum name="GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES"/>
+                <enum name="GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES"/>
+                <enum name="GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES"/>
+                <enum name="GL_FIRST_VERTEX_CONVENTION_OES"/>
+                <enum name="GL_LAST_VERTEX_CONVENTION_OES"/>
+                <enum name="GL_UNDEFINED_VERTEX_OES"/>
+                <enum name="GL_PRIMITIVES_GENERATED_OES"/>
+                <enum name="GL_FRAMEBUFFER_DEFAULT_LAYERS_OES"/>
+                <enum name="GL_MAX_FRAMEBUFFER_LAYERS_OES"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES"/>
+                <enum name="GL_REFERENCED_BY_GEOMETRY_SHADER_OES"/>
+                <command name="glFramebufferTextureOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_get_program_binary" supported="gles2">
+            <require>
+                <enum name="GL_PROGRAM_BINARY_LENGTH_OES"/>
+                <enum name="GL_NUM_PROGRAM_BINARY_FORMATS_OES"/>
+                <enum name="GL_PROGRAM_BINARY_FORMATS_OES"/>
+                <command name="glGetProgramBinaryOES"/>
+                <command name="glProgramBinaryOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_gpu_shader5" supported="gles2"/>
+        <extension name="GL_OES_mapbuffer" supported="gles1|gles2">
+            <require>
+                <enum name="GL_WRITE_ONLY_OES"/>
+                <enum name="GL_BUFFER_ACCESS_OES"/>
+                <enum name="GL_BUFFER_MAPPED_OES"/>
+                <enum name="GL_BUFFER_MAP_POINTER_OES"/>
+                <command name="glMapBufferOES"/>
+                <command name="glUnmapBufferOES"/>
+                <command name="glGetBufferPointervOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_matrix_get" supported="gles1">
+            <require>
+                <enum name="GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+                <enum name="GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+                <enum name="GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_matrix_palette" supported="gles1">
+            <require>
+                <enum name="GL_MAX_VERTEX_UNITS_OES"/>
+                <enum name="GL_MAX_PALETTE_MATRICES_OES"/>
+                <enum name="GL_MATRIX_PALETTE_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_OES"/>
+                <enum name="GL_CURRENT_PALETTE_MATRIX_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_SIZE_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_TYPE_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_STRIDE_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_POINTER_OES"/>
+                <enum name="GL_MATRIX_INDEX_ARRAY_BUFFER_BINDING_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_SIZE_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_TYPE_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_STRIDE_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_POINTER_OES"/>
+                <enum name="GL_WEIGHT_ARRAY_BUFFER_BINDING_OES"/>
+                <command name="glCurrentPaletteMatrixOES"/>
+                <command name="glLoadPaletteFromModelViewMatrixOES"/>
+                <command name="glMatrixIndexPointerOES"/>
+                <command name="glWeightPointerOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_packed_depth_stencil" supported="gles1|gles2">
+            <require>
+                <enum name="GL_DEPTH_STENCIL_OES"/>
+                <enum name="GL_UNSIGNED_INT_24_8_OES"/>
+                <enum name="GL_DEPTH24_STENCIL8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_point_size_array" supported="gles1">
+            <require>
+                <enum name="GL_POINT_SIZE_ARRAY_OES"/>
+                <enum name="GL_POINT_SIZE_ARRAY_TYPE_OES"/>
+                <enum name="GL_POINT_SIZE_ARRAY_STRIDE_OES"/>
+                <enum name="GL_POINT_SIZE_ARRAY_POINTER_OES"/>
+                <enum name="GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES"/>
+                <command name="glPointSizePointerOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_point_sprite" supported="gles1">
+            <require>
+                <enum name="GL_POINT_SPRITE_OES"/>
+                <enum name="GL_COORD_REPLACE_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_primitive_bounding_box" supported="gles2">
+            <require>
+                <enum name="GL_PRIMITIVE_BOUNDING_BOX_OES"/>
+                <command name="glPrimitiveBoundingBoxOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_query_matrix" supported="gl|gles1">
+            <require>
+                <command name="glQueryMatrixxOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_read_format" supported="gl|gles1">
+            <require>
+                <enum name="GL_IMPLEMENTATION_COLOR_READ_TYPE_OES"/>
+                <enum name="GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_required_internalformat" supported="gles1|gles2">
+            <require>
+                <enum name="GL_ALPHA8_OES"/>
+                <enum name="GL_DEPTH_COMPONENT16_OES"/>
+                <enum name="GL_DEPTH_COMPONENT24_OES"/>
+                <enum name="GL_DEPTH24_STENCIL8_OES"/>
+                <enum name="GL_DEPTH_COMPONENT32_OES"/>
+                <enum name="GL_LUMINANCE4_ALPHA4_OES"/>
+                <enum name="GL_LUMINANCE8_ALPHA8_OES"/>
+                <enum name="GL_LUMINANCE8_OES"/>
+                <enum name="GL_RGBA4_OES"/>
+                <enum name="GL_RGB5_A1_OES"/>
+                <enum name="GL_RGB565_OES"/>
+                <enum name="GL_RGB8_OES"/>
+                <enum name="GL_RGBA8_OES"/>
+                <enum name="GL_RGB10_EXT"/>
+                <enum name="GL_RGB10_A2_EXT"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_rgb8_rgba8" supported="gles1|gles2|glsc2">
+            <require>
+                <enum name="GL_RGB8_OES"/>
+                <enum name="GL_RGBA8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_sample_shading" supported="gles2">
+            <require>
+                <command name="glMinSampleShadingOES"/>
+                <enum name="GL_SAMPLE_SHADING_OES"/>
+                <enum name="GL_MIN_SAMPLE_SHADING_VALUE_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_sample_variables" supported="gles2"/>
+        <extension name="GL_OES_shader_image_atomic" supported="gles2"/>
+        <extension name="GL_OES_shader_io_blocks" supported="gles2"/>
+        <extension name="GL_OES_shader_multisample_interpolation" supported="gles2">
+            <require>
+                <enum name="GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_OES"/>
+                <enum name="GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_OES"/>
+                <enum name="GL_FRAGMENT_INTERPOLATION_OFFSET_BITS_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_single_precision" supported="gl|gles1">
+            <require>
+                <command name="glClearDepthfOES"/>
+                <command name="glClipPlanefOES"/>
+                <command name="glDepthRangefOES"/>
+                <command name="glFrustumfOES"/>
+                <command name="glGetClipPlanefOES"/>
+                <command name="glOrthofOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_standard_derivatives" supported="gles2|glsc2">
+            <require>
+                <enum name="GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_stencil1" supported="gles1|gles2">
+            <require>
+                <enum name="GL_STENCIL_INDEX1_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_stencil4" supported="gles1|gles2">
+            <require>
+                <enum name="GL_STENCIL_INDEX4_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_stencil8" supported="gles1">
+            <require>
+                <enum name="GL_STENCIL_INDEX8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_stencil_wrap" supported="gles1">
+            <require>
+                <enum name="GL_INCR_WRAP_OES"/>
+                <enum name="GL_DECR_WRAP_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_surfaceless_context" supported="gles1|gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_UNDEFINED_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_tessellation_point_size" supported="gles2"/>
+        <extension name="GL_OES_tessellation_shader" supported="gles2">
+            <require>
+                <enum name="GL_PATCHES_OES"/>
+                <enum name="GL_PATCH_VERTICES_OES"/>
+                <enum name="GL_TESS_CONTROL_OUTPUT_VERTICES_OES"/>
+                <enum name="GL_TESS_GEN_MODE_OES"/>
+                <enum name="GL_TESS_GEN_SPACING_OES"/>
+                <enum name="GL_TESS_GEN_VERTEX_ORDER_OES"/>
+                <enum name="GL_TESS_GEN_POINT_MODE_OES"/>
+                <enum name="GL_TRIANGLES"/>
+                <enum name="GL_ISOLINES_OES"/>
+                <enum name="GL_QUADS_OES"/>
+                <enum name="GL_EQUAL"/>
+                <enum name="GL_FRACTIONAL_ODD_OES"/>
+                <enum name="GL_FRACTIONAL_EVEN_OES"/>
+                <enum name="GL_CCW"/>
+                <enum name="GL_CW"/>
+                <enum name="GL_MAX_PATCH_VERTICES_OES"/>
+                <enum name="GL_MAX_TESS_GEN_LEVEL_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_PATCH_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES"/>
+                <enum name="GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES"/>
+                <enum name="GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES"/>
+                <enum name="GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES"/>
+                <enum name="GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES"/>
+                <enum name="GL_IS_PER_PATCH_OES"/>
+                <enum name="GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES"/>
+                <enum name="GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES"/>
+                <enum name="GL_TESS_CONTROL_SHADER_OES"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_OES"/>
+                <enum name="GL_TESS_CONTROL_SHADER_BIT_OES"/>
+                <enum name="GL_TESS_EVALUATION_SHADER_BIT_OES"/>
+                <command name="glPatchParameteriOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_3D" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_WRAP_R_OES"/>
+                <enum name="GL_TEXTURE_3D_OES"/>
+                <enum name="GL_TEXTURE_BINDING_3D_OES"/>
+                <enum name="GL_MAX_3D_TEXTURE_SIZE_OES"/>
+                <enum name="GL_SAMPLER_3D_OES"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES"/>
+                <command name="glTexImage3DOES"/>
+                <command name="glTexSubImage3DOES"/>
+                <command name="glCopyTexSubImage3DOES"/>
+                <command name="glCompressedTexImage3DOES"/>
+                <command name="glCompressedTexSubImage3DOES"/>
+                <command name="glFramebufferTexture3DOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_border_clamp" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_BORDER_COLOR_OES"/>
+                <enum name="GL_CLAMP_TO_BORDER_OES"/>
+                <command name="glTexParameterIivOES"/>
+                <command name="glTexParameterIuivOES"/>
+                <command name="glGetTexParameterIivOES"/>
+                <command name="glGetTexParameterIuivOES"/>
+                <command name="glSamplerParameterIivOES"/>
+                <command name="glSamplerParameterIuivOES"/>
+                <command name="glGetSamplerParameterIivOES"/>
+                <command name="glGetSamplerParameterIuivOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_buffer" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_BUFFER_OES"/>
+                <enum name="GL_TEXTURE_BUFFER_BINDING_OES"/>
+                <enum name="GL_MAX_TEXTURE_BUFFER_SIZE_OES"/>
+                <enum name="GL_TEXTURE_BINDING_BUFFER_OES"/>
+                <enum name="GL_TEXTURE_BUFFER_DATA_STORE_BINDING_OES"/>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_OES"/>
+                <enum name="GL_SAMPLER_BUFFER_OES"/>
+                <enum name="GL_INT_SAMPLER_BUFFER_OES"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_BUFFER_OES"/>
+                <enum name="GL_IMAGE_BUFFER_OES"/>
+                <enum name="GL_INT_IMAGE_BUFFER_OES"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_BUFFER_OES"/>
+                <enum name="GL_TEXTURE_BUFFER_OFFSET_OES"/>
+                <enum name="GL_TEXTURE_BUFFER_SIZE_OES"/>
+                <command name="glTexBufferOES"/>
+                <command name="glTexBufferRangeOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_compression_astc" supported="gles2" comment="API is identical to GL_KHR_texture_compression_astc_hdr extension">
+            <require>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_12x12_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_3x3x3_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x3x3_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x4x3_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_4x4x4_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x4x4_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x5x4_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_5x5x5_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x5x5_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x6x5_OES"/>
+                <enum name="GL_COMPRESSED_RGBA_ASTC_6x6x6_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES"/>
+                <enum name="GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_cube_map" supported="gles1">
+            <require>
+                <enum name="GL_NORMAL_MAP_OES"/>
+                <enum name="GL_REFLECTION_MAP_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_OES"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES"/>
+                <enum name="GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES"/>
+                <enum name="GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES"/>
+                <enum name="GL_TEXTURE_GEN_MODE_OES"/>
+                <enum name="GL_TEXTURE_GEN_STR_OES"/>
+                <command name="glTexGenfOES"/>
+                <command name="glTexGenfvOES"/>
+                <command name="glTexGeniOES"/>
+                <command name="glTexGenivOES"/>
+                <command name="glTexGenxOES"/>
+                <command name="glTexGenxvOES"/>
+                <command name="glGetTexGenfvOES"/>
+                <command name="glGetTexGenivOES"/>
+                <command name="glGetTexGenxvOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_cube_map_array" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES"/>
+                <enum name="GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_IMAGE_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_INT_IMAGE_CUBE_MAP_ARRAY_OES"/>
+                <enum name="GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_env_crossbar" supported="gles1"/>
+        <extension name="GL_OES_texture_float" supported="gles2">
+            <require>
+                <enum name="GL_FLOAT"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_float_linear" supported="gles2"/>
+        <extension name="GL_OES_texture_half_float" supported="gles2">
+            <require>
+                <enum name="GL_HALF_FLOAT_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_half_float_linear" supported="gles2"/>
+        <extension name="GL_OES_texture_mirrored_repeat" supported="gles1">
+            <require>
+                <enum name="GL_MIRRORED_REPEAT_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_npot" supported="gles1|gles2"/>
+        <extension name="GL_OES_texture_stencil8" supported="gles2">
+            <require>
+                <enum name="GL_STENCIL_INDEX_OES"/>
+                <enum name="GL_STENCIL_INDEX8_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_storage_multisample_2d_array" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES"/>
+                <enum name="GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES"/>
+                <enum name="GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+                <enum name="GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+                <enum name="GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES"/>
+                <command name="glTexStorage3DMultisampleOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_texture_view" supported="gles2">
+            <require>
+                <enum name="GL_TEXTURE_VIEW_MIN_LEVEL_OES"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LEVELS_OES"/>
+                <enum name="GL_TEXTURE_VIEW_MIN_LAYER_OES"/>
+                <enum name="GL_TEXTURE_VIEW_NUM_LAYERS_OES"/>
+                <enum name="GL_TEXTURE_IMMUTABLE_LEVELS"/>
+                <command name="glTextureViewOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_vertex_array_object" supported="gles1|gles2">
+            <require>
+                <enum name="GL_VERTEX_ARRAY_BINDING_OES"/>
+                <command name="glBindVertexArrayOES"/>
+                <command name="glDeleteVertexArraysOES"/>
+                <command name="glGenVertexArraysOES"/>
+                <command name="glIsVertexArrayOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_vertex_half_float" supported="gles2">
+            <require>
+                <enum name="GL_HALF_FLOAT_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_vertex_type_10_10_10_2" supported="gles2">
+            <require>
+                <enum name="GL_UNSIGNED_INT_10_10_10_2_OES"/>
+                <enum name="GL_INT_10_10_10_2_OES"/>
+            </require>
+        </extension>
+        <extension name="GL_OES_viewport_array" supported="gles2">
+            <require>
+                <enum name="GL_SCISSOR_BOX"/>
+                <enum name="GL_VIEWPORT"/>
+                <enum name="GL_DEPTH_RANGE"/>
+                <enum name="GL_SCISSOR_TEST"/>
+                <enum name="GL_MAX_VIEWPORTS_OES"/>
+                <enum name="GL_VIEWPORT_SUBPIXEL_BITS_OES"/>
+                <enum name="GL_VIEWPORT_BOUNDS_RANGE_OES"/>
+                <enum name="GL_VIEWPORT_INDEX_PROVOKING_VERTEX_OES"/>
+                <command name="glViewportArrayvOES"/>
+                <command name="glViewportIndexedfOES"/>
+                <command name="glViewportIndexedfvOES"/>
+                <command name="glScissorArrayvOES"/>
+                <command name="glScissorIndexedOES"/>
+                <command name="glScissorIndexedvOES"/>
+                <command name="glDepthRangeArrayfvOES"/>
+                <command name="glDepthRangeIndexedfOES"/>
+                <command name="glGetFloati_vOES"/>
+                <command name="glEnableiOES"/>
+                <command name="glDisableiOES"/>
+                <command name="glIsEnablediOES"/>
+            </require>
+        </extension>
+        <extension name="GL_OML_interlace" supported="gl">
+            <require>
+                <enum name="GL_INTERLACE_OML"/>
+                <enum name="GL_INTERLACE_READ_OML"/>
+            </require>
+        </extension>
+        <extension name="GL_OML_resample" supported="gl">
+            <require>
+                <enum name="GL_PACK_RESAMPLE_OML"/>
+                <enum name="GL_UNPACK_RESAMPLE_OML"/>
+                <enum name="GL_RESAMPLE_REPLICATE_OML"/>
+                <enum name="GL_RESAMPLE_ZERO_FILL_OML"/>
+                <enum name="GL_RESAMPLE_AVERAGE_OML"/>
+                <enum name="GL_RESAMPLE_DECIMATE_OML"/>
+            </require>
+        </extension>
+        <extension name="GL_OML_subsample" supported="gl">
+            <require>
+                <enum name="GL_FORMAT_SUBSAMPLE_24_24_OML"/>
+                <enum name="GL_FORMAT_SUBSAMPLE_244_244_OML"/>
+            </require>
+        </extension>
+        <extension name="GL_OVR_multiview" supported="gl|glcore|gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR"/>
+                <enum name="GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR"/>
+                <enum name="GL_MAX_VIEWS_OVR"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR"/>
+                <command name="glFramebufferTextureMultiviewOVR"/>
+            </require>
+        </extension>
+        <extension name="GL_OVR_multiview2" supported="gl|glcore|gles2"/>
+        <extension name="GL_OVR_multiview_multisampled_render_to_texture" supported="gles2">
+            <require>
+                <command name="glFramebufferTextureMultisampleMultiviewOVR"/>
+            </require>
+        </extension>
+        <extension name="GL_PGI_misc_hints" supported="gl">
+            <require>
+                <enum name="GL_PREFER_DOUBLEBUFFER_HINT_PGI"/>
+                <enum name="GL_CONSERVE_MEMORY_HINT_PGI"/>
+                <enum name="GL_RECLAIM_MEMORY_HINT_PGI"/>
+                <enum name="GL_NATIVE_GRAPHICS_HANDLE_PGI"/>
+                <enum name="GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI"/>
+                <enum name="GL_NATIVE_GRAPHICS_END_HINT_PGI"/>
+                <enum name="GL_ALWAYS_FAST_HINT_PGI"/>
+                <enum name="GL_ALWAYS_SOFT_HINT_PGI"/>
+                <enum name="GL_ALLOW_DRAW_OBJ_HINT_PGI"/>
+                <enum name="GL_ALLOW_DRAW_WIN_HINT_PGI"/>
+                <enum name="GL_ALLOW_DRAW_FRG_HINT_PGI"/>
+                <enum name="GL_ALLOW_DRAW_MEM_HINT_PGI"/>
+                <enum name="GL_STRICT_DEPTHFUNC_HINT_PGI"/>
+                <enum name="GL_STRICT_LIGHTING_HINT_PGI"/>
+                <enum name="GL_STRICT_SCISSOR_HINT_PGI"/>
+                <enum name="GL_FULL_STIPPLE_HINT_PGI"/>
+                <enum name="GL_CLIP_NEAR_HINT_PGI"/>
+                <enum name="GL_CLIP_FAR_HINT_PGI"/>
+                <enum name="GL_WIDE_LINE_HINT_PGI"/>
+                <enum name="GL_BACK_NORMALS_HINT_PGI"/>
+                <command name="glHintPGI"/>
+            </require>
+        </extension>
+        <extension name="GL_PGI_vertex_hints" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_DATA_HINT_PGI"/>
+                <enum name="GL_VERTEX_CONSISTENT_HINT_PGI"/>
+                <enum name="GL_MATERIAL_SIDE_HINT_PGI"/>
+                <enum name="GL_MAX_VERTEX_HINT_PGI"/>
+                <enum name="GL_COLOR3_BIT_PGI"/>
+                <enum name="GL_COLOR4_BIT_PGI"/>
+                <enum name="GL_EDGEFLAG_BIT_PGI"/>
+                <enum name="GL_INDEX_BIT_PGI"/>
+                <enum name="GL_MAT_AMBIENT_BIT_PGI"/>
+                <enum name="GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI"/>
+                <enum name="GL_MAT_DIFFUSE_BIT_PGI"/>
+                <enum name="GL_MAT_EMISSION_BIT_PGI"/>
+                <enum name="GL_MAT_COLOR_INDEXES_BIT_PGI"/>
+                <enum name="GL_MAT_SHININESS_BIT_PGI"/>
+                <enum name="GL_MAT_SPECULAR_BIT_PGI"/>
+                <enum name="GL_NORMAL_BIT_PGI"/>
+                <enum name="GL_TEXCOORD1_BIT_PGI"/>
+                <enum name="GL_TEXCOORD2_BIT_PGI"/>
+                <enum name="GL_TEXCOORD3_BIT_PGI"/>
+                <enum name="GL_TEXCOORD4_BIT_PGI"/>
+                <enum name="GL_VERTEX23_BIT_PGI"/>
+                <enum name="GL_VERTEX4_BIT_PGI"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_alpha_test" supported="gles2">
+            <require>
+                <enum name="GL_ALPHA_TEST_QCOM"/>
+                <enum name="GL_ALPHA_TEST_FUNC_QCOM"/>
+                <enum name="GL_ALPHA_TEST_REF_QCOM"/>
+                <command name="glAlphaFuncQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_binning_control" supported="gles2">
+            <require>
+                <enum name="GL_BINNING_CONTROL_HINT_QCOM"/>
+                <enum name="GL_CPU_OPTIMIZED_QCOM"/>
+                <enum name="GL_GPU_OPTIMIZED_QCOM"/>
+                <enum name="GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_driver_control" supported="gles1|gles2">
+            <require>
+                <command name="glGetDriverControlsQCOM"/>
+                <command name="glGetDriverControlStringQCOM"/>
+                <command name="glEnableDriverControlQCOM"/>
+                <command name="glDisableDriverControlQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_extended_get" supported="gles1|gles2">
+            <require>
+                <enum name="GL_TEXTURE_WIDTH_QCOM"/>
+                <enum name="GL_TEXTURE_HEIGHT_QCOM"/>
+                <enum name="GL_TEXTURE_DEPTH_QCOM"/>
+                <enum name="GL_TEXTURE_INTERNAL_FORMAT_QCOM"/>
+                <enum name="GL_TEXTURE_FORMAT_QCOM"/>
+                <enum name="GL_TEXTURE_TYPE_QCOM"/>
+                <enum name="GL_TEXTURE_IMAGE_VALID_QCOM"/>
+                <enum name="GL_TEXTURE_NUM_LEVELS_QCOM"/>
+                <enum name="GL_TEXTURE_TARGET_QCOM"/>
+                <enum name="GL_TEXTURE_OBJECT_VALID_QCOM"/>
+                <enum name="GL_STATE_RESTORE"/>
+                <command name="glExtGetTexturesQCOM"/>
+                <command name="glExtGetBuffersQCOM"/>
+                <command name="glExtGetRenderbuffersQCOM"/>
+                <command name="glExtGetFramebuffersQCOM"/>
+                <command name="glExtGetTexLevelParameterivQCOM"/>
+                <command name="glExtTexObjectStateOverrideiQCOM"/>
+                <command name="glExtGetTexSubImageQCOM"/>
+                <command name="glExtGetBufferPointervQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_extended_get2" supported="gles1|gles2">
+            <require>
+                <command name="glExtGetShadersQCOM"/>
+                <command name="glExtGetProgramsQCOM"/>
+                <command name="glExtIsProgramBinaryQCOM"/>
+                <command name="glExtGetProgramBinarySourceQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_framebuffer_foveated" supported="gles2">
+            <require>
+                <enum name="GL_FOVEATION_ENABLE_BIT_QCOM"/>
+                <enum name="GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM"/>
+                <command name="glFramebufferFoveationConfigQCOM"/>
+                <command name="glFramebufferFoveationParametersQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_texture_foveated" supported="gles2">
+            <require>
+                <enum name="GL_FOVEATION_ENABLE_BIT_QCOM"/>
+                <enum name="GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM"/>
+                <enum name="GL_TEXTURE_FOVEATED_FEATURE_BITS_QCOM"/>
+                <enum name="GL_TEXTURE_FOVEATED_MIN_PIXEL_DENSITY_QCOM"/>
+                <enum name="GL_TEXTURE_FOVEATED_FEATURE_QUERY_QCOM"/>
+                <enum name="GL_TEXTURE_FOVEATED_NUM_FOCAL_POINTS_QUERY_QCOM"/>
+                <enum name="GL_FRAMEBUFFER_INCOMPLETE_FOVEATION_QCOM"/>
+                <command name="glTextureFoveationParametersQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_texture_foveated_subsampled_layout" supported="gles2">
+            <require>
+                <enum name="GL_FOVEATION_SUBSAMPLED_LAYOUT_METHOD_BIT_QCOM"/>
+                <enum name="GL_MAX_SHADER_SUBSAMPLED_IMAGE_UNITS_QCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_perfmon_global_mode" supported="gles1|gles2">
+            <require>
+                <enum name="GL_PERFMON_GLOBAL_MODE_QCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_shader_framebuffer_fetch_noncoherent" supported="gles2">
+            <require>
+                <enum name="GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM"/>
+                <command name="glFramebufferFetchBarrierQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_shader_framebuffer_fetch_rate" supported="gles2">
+        </extension>
+        <extension name="GL_QCOM_tiled_rendering" supported="gles1|gles2">
+            <require>
+                <enum name="GL_COLOR_BUFFER_BIT0_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT1_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT2_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT3_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT4_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT5_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT6_QCOM"/>
+                <enum name="GL_COLOR_BUFFER_BIT7_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT0_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT1_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT2_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT3_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT4_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT5_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT6_QCOM"/>
+                <enum name="GL_DEPTH_BUFFER_BIT7_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT0_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT1_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT2_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT3_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT4_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT5_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT6_QCOM"/>
+                <enum name="GL_STENCIL_BUFFER_BIT7_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT0_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT1_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT2_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT3_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT4_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT5_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT6_QCOM"/>
+                <enum name="GL_MULTISAMPLE_BUFFER_BIT7_QCOM"/>
+                <command name="glStartTilingQCOM"/>
+                <command name="glEndTilingQCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_writeonly_rendering" supported="gles1|gles2">
+            <require>
+                <enum name="GL_WRITEONLY_RENDERING_QCOM"/>
+            </require>
+        </extension>
+        <extension name="GL_QCOM_YUV_texture_gather" supported="gles2">
+        </extension>
+        <extension name="GL_REND_screen_coordinates" supported="gl">
+            <require>
+                <enum name="GL_SCREEN_COORDINATES_REND"/>
+                <enum name="GL_INVERTED_SCREEN_W_REND"/>
+            </require>
+        </extension>
+        <extension name="GL_S3_s3tc" supported="gl">
+            <require>
+                <enum name="GL_RGB_S3TC"/>
+                <enum name="GL_RGB4_S3TC"/>
+                <enum name="GL_RGBA_S3TC"/>
+                <enum name="GL_RGBA4_S3TC"/>
+                <enum name="GL_RGBA_DXT5_S3TC"/>
+                <enum name="GL_RGBA4_DXT5_S3TC"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_detail_texture" supported="gl">
+            <require>
+                <enum name="GL_DETAIL_TEXTURE_2D_SGIS"/>
+                <enum name="GL_DETAIL_TEXTURE_2D_BINDING_SGIS"/>
+                <enum name="GL_LINEAR_DETAIL_SGIS"/>
+                <enum name="GL_LINEAR_DETAIL_ALPHA_SGIS"/>
+                <enum name="GL_LINEAR_DETAIL_COLOR_SGIS"/>
+                <enum name="GL_DETAIL_TEXTURE_LEVEL_SGIS"/>
+                <enum name="GL_DETAIL_TEXTURE_MODE_SGIS"/>
+                <enum name="GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS"/>
+                <command name="glDetailTexFuncSGIS"/>
+                <command name="glGetDetailTexFuncSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_fog_function" supported="gl">
+            <require>
+                <enum name="GL_FOG_FUNC_SGIS"/>
+                <enum name="GL_FOG_FUNC_POINTS_SGIS"/>
+                <enum name="GL_MAX_FOG_FUNC_POINTS_SGIS"/>
+                <command name="glFogFuncSGIS"/>
+                <command name="glGetFogFuncSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_generate_mipmap" supported="gl">
+            <require>
+                <enum name="GL_GENERATE_MIPMAP_SGIS"/>
+                <enum name="GL_GENERATE_MIPMAP_HINT_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_multisample" supported="gl">
+            <require>
+                <enum name="GL_MULTISAMPLE_SGIS"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_MASK_SGIS"/>
+                <enum name="GL_SAMPLE_ALPHA_TO_ONE_SGIS"/>
+                <enum name="GL_SAMPLE_MASK_SGIS"/>
+                <enum name="GL_1PASS_SGIS"/>
+                <enum name="GL_2PASS_0_SGIS"/>
+                <enum name="GL_2PASS_1_SGIS"/>
+                <enum name="GL_4PASS_0_SGIS"/>
+                <enum name="GL_4PASS_1_SGIS"/>
+                <enum name="GL_4PASS_2_SGIS"/>
+                <enum name="GL_4PASS_3_SGIS"/>
+                <enum name="GL_SAMPLE_BUFFERS_SGIS"/>
+                <enum name="GL_SAMPLES_SGIS"/>
+                <enum name="GL_SAMPLE_MASK_VALUE_SGIS"/>
+                <enum name="GL_SAMPLE_MASK_INVERT_SGIS"/>
+                <enum name="GL_SAMPLE_PATTERN_SGIS"/>
+                <command name="glSampleMaskSGIS"/>
+                <command name="glSamplePatternSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_pixel_texture" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_TEXTURE_SGIS"/>
+                <enum name="GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS"/>
+                <enum name="GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS"/>
+                <enum name="GL_PIXEL_GROUP_COLOR_SGIS"/>
+                <command name="glPixelTexGenParameteriSGIS"/>
+                <command name="glPixelTexGenParameterivSGIS"/>
+                <command name="glPixelTexGenParameterfSGIS"/>
+                <command name="glPixelTexGenParameterfvSGIS"/>
+                <command name="glGetPixelTexGenParameterivSGIS"/>
+                <command name="glGetPixelTexGenParameterfvSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_point_line_texgen" supported="gl">
+            <require>
+                <enum name="GL_EYE_DISTANCE_TO_POINT_SGIS"/>
+                <enum name="GL_OBJECT_DISTANCE_TO_POINT_SGIS"/>
+                <enum name="GL_EYE_DISTANCE_TO_LINE_SGIS"/>
+                <enum name="GL_OBJECT_DISTANCE_TO_LINE_SGIS"/>
+                <enum name="GL_EYE_POINT_SGIS"/>
+                <enum name="GL_OBJECT_POINT_SGIS"/>
+                <enum name="GL_EYE_LINE_SGIS"/>
+                <enum name="GL_OBJECT_LINE_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_point_parameters" supported="gl">
+            <require>
+                <enum name="GL_POINT_SIZE_MIN_SGIS"/>
+                <enum name="GL_POINT_SIZE_MAX_SGIS"/>
+                <enum name="GL_POINT_FADE_THRESHOLD_SIZE_SGIS"/>
+                <enum name="GL_DISTANCE_ATTENUATION_SGIS"/>
+                <command name="glPointParameterfSGIS"/>
+                <command name="glPointParameterfvSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_sharpen_texture" supported="gl">
+            <require>
+                <enum name="GL_LINEAR_SHARPEN_SGIS"/>
+                <enum name="GL_LINEAR_SHARPEN_ALPHA_SGIS"/>
+                <enum name="GL_LINEAR_SHARPEN_COLOR_SGIS"/>
+                <enum name="GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS"/>
+                <command name="glSharpenTexFuncSGIS"/>
+                <command name="glGetSharpenTexFuncSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture4D" supported="gl">
+            <require>
+                <enum name="GL_PACK_SKIP_VOLUMES_SGIS"/>
+                <enum name="GL_PACK_IMAGE_DEPTH_SGIS"/>
+                <enum name="GL_UNPACK_SKIP_VOLUMES_SGIS"/>
+                <enum name="GL_UNPACK_IMAGE_DEPTH_SGIS"/>
+                <enum name="GL_TEXTURE_4D_SGIS"/>
+                <enum name="GL_PROXY_TEXTURE_4D_SGIS"/>
+                <enum name="GL_TEXTURE_4DSIZE_SGIS"/>
+                <enum name="GL_TEXTURE_WRAP_Q_SGIS"/>
+                <enum name="GL_MAX_4D_TEXTURE_SIZE_SGIS"/>
+                <enum name="GL_TEXTURE_4D_BINDING_SGIS"/>
+                <command name="glTexImage4DSGIS"/>
+                <command name="glTexSubImage4DSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_border_clamp" supported="gl">
+            <require>
+                <enum name="GL_CLAMP_TO_BORDER_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_color_mask" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COLOR_WRITEMASK_SGIS"/>
+                <command name="glTextureColorMaskSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_edge_clamp" supported="gl">
+            <require>
+                <enum name="GL_CLAMP_TO_EDGE_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_filter4" supported="gl">
+            <require>
+                <enum name="GL_FILTER4_SGIS"/>
+                <enum name="GL_TEXTURE_FILTER4_SIZE_SGIS"/>
+                <command name="glGetTexFilterFuncSGIS"/>
+                <command name="glTexFilterFuncSGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_lod" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_MIN_LOD_SGIS"/>
+                <enum name="GL_TEXTURE_MAX_LOD_SGIS"/>
+                <enum name="GL_TEXTURE_BASE_LEVEL_SGIS"/>
+                <enum name="GL_TEXTURE_MAX_LEVEL_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIS_texture_select" supported="gl">
+            <require>
+                <enum name="GL_DUAL_ALPHA4_SGIS"/>
+                <enum name="GL_DUAL_ALPHA8_SGIS"/>
+                <enum name="GL_DUAL_ALPHA12_SGIS"/>
+                <enum name="GL_DUAL_ALPHA16_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE4_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE8_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE12_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE16_SGIS"/>
+                <enum name="GL_DUAL_INTENSITY4_SGIS"/>
+                <enum name="GL_DUAL_INTENSITY8_SGIS"/>
+                <enum name="GL_DUAL_INTENSITY12_SGIS"/>
+                <enum name="GL_DUAL_INTENSITY16_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE_ALPHA4_SGIS"/>
+                <enum name="GL_DUAL_LUMINANCE_ALPHA8_SGIS"/>
+                <enum name="GL_QUAD_ALPHA4_SGIS"/>
+                <enum name="GL_QUAD_ALPHA8_SGIS"/>
+                <enum name="GL_QUAD_LUMINANCE4_SGIS"/>
+                <enum name="GL_QUAD_LUMINANCE8_SGIS"/>
+                <enum name="GL_QUAD_INTENSITY4_SGIS"/>
+                <enum name="GL_QUAD_INTENSITY8_SGIS"/>
+                <enum name="GL_DUAL_TEXTURE_SELECT_SGIS"/>
+                <enum name="GL_QUAD_TEXTURE_SELECT_SGIS"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_async" supported="gl">
+            <require>
+                <enum name="GL_ASYNC_MARKER_SGIX"/>
+                <command name="glAsyncMarkerSGIX"/>
+                <command name="glFinishAsyncSGIX"/>
+                <command name="glPollAsyncSGIX"/>
+                <command name="glGenAsyncMarkersSGIX"/>
+                <command name="glDeleteAsyncMarkersSGIX"/>
+                <command name="glIsAsyncMarkerSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_async_histogram" supported="gl">
+            <require>
+                <enum name="GL_ASYNC_HISTOGRAM_SGIX"/>
+                <enum name="GL_MAX_ASYNC_HISTOGRAM_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_async_pixel" supported="gl">
+            <require>
+                <enum name="GL_ASYNC_TEX_IMAGE_SGIX"/>
+                <enum name="GL_ASYNC_DRAW_PIXELS_SGIX"/>
+                <enum name="GL_ASYNC_READ_PIXELS_SGIX"/>
+                <enum name="GL_MAX_ASYNC_TEX_IMAGE_SGIX"/>
+                <enum name="GL_MAX_ASYNC_DRAW_PIXELS_SGIX"/>
+                <enum name="GL_MAX_ASYNC_READ_PIXELS_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_blend_alpha_minmax" supported="gl">
+            <require>
+                <enum name="GL_ALPHA_MIN_SGIX"/>
+                <enum name="GL_ALPHA_MAX_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_calligraphic_fragment" supported="gl">
+            <require>
+                <enum name="GL_CALLIGRAPHIC_FRAGMENT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_clipmap" supported="gl">
+            <require>
+                <enum name="GL_LINEAR_CLIPMAP_LINEAR_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_CENTER_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_FRAME_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_OFFSET_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX"/>
+                <enum name="GL_TEXTURE_CLIPMAP_DEPTH_SGIX"/>
+                <enum name="GL_MAX_CLIPMAP_DEPTH_SGIX"/>
+                <enum name="GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX"/>
+                <enum name="GL_NEAREST_CLIPMAP_NEAREST_SGIX"/>
+                <enum name="GL_NEAREST_CLIPMAP_LINEAR_SGIX"/>
+                <enum name="GL_LINEAR_CLIPMAP_NEAREST_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_convolution_accuracy" supported="gl">
+            <require>
+                <enum name="GL_CONVOLUTION_HINT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_depth_pass_instrument" supported="gl"/>
+        <extension name="GL_SGIX_depth_texture" supported="gl">
+            <require>
+                <enum name="GL_DEPTH_COMPONENT16_SGIX"/>
+                <enum name="GL_DEPTH_COMPONENT24_SGIX"/>
+                <enum name="GL_DEPTH_COMPONENT32_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_flush_raster" supported="gl">
+            <require>
+                <command name="glFlushRasterSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_fog_offset" supported="gl">
+            <require>
+                <enum name="GL_FOG_OFFSET_SGIX"/>
+                <enum name="GL_FOG_OFFSET_VALUE_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_fragment_lighting" supported="gl" comment="Incomplete extension">
+            <require>
+                <enum name="GL_FRAGMENT_LIGHTING_SGIX"/>
+                <enum name="GL_FRAGMENT_COLOR_MATERIAL_SGIX"/>
+                <enum name="GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX"/>
+                <enum name="GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX"/>
+                <enum name="GL_MAX_FRAGMENT_LIGHTS_SGIX"/>
+                <enum name="GL_MAX_ACTIVE_LIGHTS_SGIX"/>
+                <enum name="GL_CURRENT_RASTER_NORMAL_SGIX"/>
+                <enum name="GL_LIGHT_ENV_MODE_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT0_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT1_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT2_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT3_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT4_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT5_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT6_SGIX"/>
+                <enum name="GL_FRAGMENT_LIGHT7_SGIX"/>
+                <command name="glFragmentColorMaterialSGIX"/>
+                <command name="glFragmentLightfSGIX"/>
+                <command name="glFragmentLightfvSGIX"/>
+                <command name="glFragmentLightiSGIX"/>
+                <command name="glFragmentLightivSGIX"/>
+                <command name="glFragmentLightModelfSGIX"/>
+                <command name="glFragmentLightModelfvSGIX"/>
+                <command name="glFragmentLightModeliSGIX"/>
+                <command name="glFragmentLightModelivSGIX"/>
+                <command name="glFragmentMaterialfSGIX"/>
+                <command name="glFragmentMaterialfvSGIX"/>
+                <command name="glFragmentMaterialiSGIX"/>
+                <command name="glFragmentMaterialivSGIX"/>
+                <command name="glGetFragmentLightfvSGIX"/>
+                <command name="glGetFragmentLightivSGIX"/>
+                <command name="glGetFragmentMaterialfvSGIX"/>
+                <command name="glGetFragmentMaterialivSGIX"/>
+                <command name="glLightEnviSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_framezoom" supported="gl">
+            <require>
+                <enum name="GL_FRAMEZOOM_SGIX"/>
+                <enum name="GL_FRAMEZOOM_FACTOR_SGIX"/>
+                <enum name="GL_MAX_FRAMEZOOM_FACTOR_SGIX"/>
+                <command name="glFrameZoomSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_igloo_interface" supported="gl">
+            <require>
+                <command name="glIglooInterfaceSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_instruments" supported="gl">
+            <require>
+                <enum name="GL_INSTRUMENT_BUFFER_POINTER_SGIX"/>
+                <enum name="GL_INSTRUMENT_MEASUREMENTS_SGIX"/>
+                <command name="glGetInstrumentsSGIX"/>
+                <command name="glInstrumentsBufferSGIX"/>
+                <command name="glPollInstrumentsSGIX"/>
+                <command name="glReadInstrumentsSGIX"/>
+                <command name="glStartInstrumentsSGIX"/>
+                <command name="glStopInstrumentsSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_interlace" supported="gl">
+            <require>
+                <enum name="GL_INTERLACE_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_ir_instrument1" supported="gl">
+            <require>
+                <enum name="GL_IR_INSTRUMENT1_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_list_priority" supported="gl">
+            <require>
+                <enum name="GL_LIST_PRIORITY_SGIX"/>
+                <command name="glGetListParameterfvSGIX"/>
+                <command name="glGetListParameterivSGIX"/>
+                <command name="glListParameterfSGIX"/>
+                <command name="glListParameterfvSGIX"/>
+                <command name="glListParameteriSGIX"/>
+                <command name="glListParameterivSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_pixel_texture" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_TEX_GEN_SGIX"/>
+                <enum name="GL_PIXEL_TEX_GEN_MODE_SGIX"/>
+                <command name="glPixelTexGenSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_pixel_tiles" supported="gl">
+            <require>
+                <enum name="GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX"/>
+                <enum name="GL_PIXEL_TILE_CACHE_INCREMENT_SGIX"/>
+                <enum name="GL_PIXEL_TILE_WIDTH_SGIX"/>
+                <enum name="GL_PIXEL_TILE_HEIGHT_SGIX"/>
+                <enum name="GL_PIXEL_TILE_GRID_WIDTH_SGIX"/>
+                <enum name="GL_PIXEL_TILE_GRID_HEIGHT_SGIX"/>
+                <enum name="GL_PIXEL_TILE_GRID_DEPTH_SGIX"/>
+                <enum name="GL_PIXEL_TILE_CACHE_SIZE_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_polynomial_ffd" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_DEFORMATION_BIT_SGIX"/>
+                <enum name="GL_GEOMETRY_DEFORMATION_BIT_SGIX"/>
+                <enum name="GL_GEOMETRY_DEFORMATION_SGIX"/>
+                <enum name="GL_TEXTURE_DEFORMATION_SGIX"/>
+                <enum name="GL_DEFORMATIONS_MASK_SGIX"/>
+                <enum name="GL_MAX_DEFORMATION_ORDER_SGIX"/>
+                <command name="glDeformationMap3dSGIX"/>
+                <command name="glDeformationMap3fSGIX"/>
+                <command name="glDeformSGIX"/>
+                <command name="glLoadIdentityDeformationMapSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_reference_plane" supported="gl">
+            <require>
+                <enum name="GL_REFERENCE_PLANE_SGIX"/>
+                <enum name="GL_REFERENCE_PLANE_EQUATION_SGIX"/>
+                <command name="glReferencePlaneSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_resample" supported="gl">
+            <require>
+                <enum name="GL_PACK_RESAMPLE_SGIX"/>
+                <enum name="GL_UNPACK_RESAMPLE_SGIX"/>
+                <enum name="GL_RESAMPLE_REPLICATE_SGIX"/>
+                <enum name="GL_RESAMPLE_ZERO_FILL_SGIX"/>
+                <enum name="GL_RESAMPLE_DECIMATE_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_scalebias_hint" supported="gl">
+            <require>
+                <enum name="GL_SCALEBIAS_HINT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_shadow" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COMPARE_SGIX"/>
+                <enum name="GL_TEXTURE_COMPARE_OPERATOR_SGIX"/>
+                <enum name="GL_TEXTURE_LEQUAL_R_SGIX"/>
+                <enum name="GL_TEXTURE_GEQUAL_R_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_shadow_ambient" supported="gl">
+            <require>
+                <enum name="GL_SHADOW_AMBIENT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_sprite" supported="gl">
+            <require>
+                <enum name="GL_SPRITE_SGIX"/>
+                <enum name="GL_SPRITE_MODE_SGIX"/>
+                <enum name="GL_SPRITE_AXIS_SGIX"/>
+                <enum name="GL_SPRITE_TRANSLATION_SGIX"/>
+                <enum name="GL_SPRITE_AXIAL_SGIX"/>
+                <enum name="GL_SPRITE_OBJECT_ALIGNED_SGIX"/>
+                <enum name="GL_SPRITE_EYE_ALIGNED_SGIX"/>
+                <command name="glSpriteParameterfSGIX"/>
+                <command name="glSpriteParameterfvSGIX"/>
+                <command name="glSpriteParameteriSGIX"/>
+                <command name="glSpriteParameterivSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_subsample" supported="gl">
+            <require>
+                <enum name="GL_PACK_SUBSAMPLE_RATE_SGIX"/>
+                <enum name="GL_UNPACK_SUBSAMPLE_RATE_SGIX"/>
+                <enum name="GL_PIXEL_SUBSAMPLE_4444_SGIX"/>
+                <enum name="GL_PIXEL_SUBSAMPLE_2424_SGIX"/>
+                <enum name="GL_PIXEL_SUBSAMPLE_4242_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_tag_sample_buffer" supported="gl">
+            <require>
+                <command name="glTagSampleBufferSGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_texture_add_env" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_ENV_BIAS_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_texture_coordinate_clamp" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_MAX_CLAMP_S_SGIX"/>
+                <enum name="GL_TEXTURE_MAX_CLAMP_T_SGIX"/>
+                <enum name="GL_TEXTURE_MAX_CLAMP_R_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_texture_lod_bias" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_LOD_BIAS_S_SGIX"/>
+                <enum name="GL_TEXTURE_LOD_BIAS_T_SGIX"/>
+                <enum name="GL_TEXTURE_LOD_BIAS_R_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_texture_multi_buffer" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_MULTI_BUFFER_HINT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_texture_scale_bias" supported="gl">
+            <require>
+                <enum name="GL_POST_TEXTURE_FILTER_BIAS_SGIX"/>
+                <enum name="GL_POST_TEXTURE_FILTER_SCALE_SGIX"/>
+                <enum name="GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX"/>
+                <enum name="GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_vertex_preclip" supported="gl">
+            <require>
+                <enum name="GL_VERTEX_PRECLIP_SGIX"/>
+                <enum name="GL_VERTEX_PRECLIP_HINT_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_ycrcb" supported="gl">
+            <require>
+                <enum name="GL_YCRCB_422_SGIX"/>
+                <enum name="GL_YCRCB_444_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGIX_ycrcb_subsample" supported="gl"/>
+        <extension name="GL_SGIX_ycrcba" supported="gl">
+            <require>
+                <enum name="GL_YCRCB_SGIX"/>
+                <enum name="GL_YCRCBA_SGIX"/>
+            </require>
+        </extension>
+        <extension name="GL_SGI_color_matrix" supported="gl">
+            <require>
+                <enum name="GL_COLOR_MATRIX_SGI"/>
+                <enum name="GL_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+                <enum name="GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_RED_SCALE_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_RED_BIAS_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI"/>
+            </require>
+        </extension>
+        <extension name="GL_SGI_color_table" supported="gl">
+            <require>
+                <enum name="GL_COLOR_TABLE_SGI"/>
+                <enum name="GL_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+                <enum name="GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+                <enum name="GL_PROXY_COLOR_TABLE_SGI"/>
+                <enum name="GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI"/>
+                <enum name="GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI"/>
+                <enum name="GL_COLOR_TABLE_SCALE_SGI"/>
+                <enum name="GL_COLOR_TABLE_BIAS_SGI"/>
+                <enum name="GL_COLOR_TABLE_FORMAT_SGI"/>
+                <enum name="GL_COLOR_TABLE_WIDTH_SGI"/>
+                <enum name="GL_COLOR_TABLE_RED_SIZE_SGI"/>
+                <enum name="GL_COLOR_TABLE_GREEN_SIZE_SGI"/>
+                <enum name="GL_COLOR_TABLE_BLUE_SIZE_SGI"/>
+                <enum name="GL_COLOR_TABLE_ALPHA_SIZE_SGI"/>
+                <enum name="GL_COLOR_TABLE_LUMINANCE_SIZE_SGI"/>
+                <enum name="GL_COLOR_TABLE_INTENSITY_SIZE_SGI"/>
+                <command name="glColorTableSGI"/>
+                <command name="glColorTableParameterfvSGI"/>
+                <command name="glColorTableParameterivSGI"/>
+                <command name="glCopyColorTableSGI"/>
+                <command name="glGetColorTableSGI"/>
+                <command name="glGetColorTableParameterfvSGI"/>
+                <command name="glGetColorTableParameterivSGI"/>
+            </require>
+        </extension>
+        <extension name="GL_SGI_texture_color_table" supported="gl">
+            <require>
+                <enum name="GL_TEXTURE_COLOR_TABLE_SGI"/>
+                <enum name="GL_PROXY_TEXTURE_COLOR_TABLE_SGI"/>
+            </require>
+        </extension>
+        <extension name="GL_SUNX_constant_data" supported="gl">
+            <require>
+                <enum name="GL_UNPACK_CONSTANT_DATA_SUNX"/>
+                <enum name="GL_TEXTURE_CONSTANT_DATA_SUNX"/>
+                <command name="glFinishTextureSUNX"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_convolution_border_modes" supported="gl">
+            <require>
+                <enum name="GL_WRAP_BORDER_SUN"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_global_alpha" supported="gl">
+            <require>
+                <enum name="GL_GLOBAL_ALPHA_SUN"/>
+                <enum name="GL_GLOBAL_ALPHA_FACTOR_SUN"/>
+                <command name="glGlobalAlphaFactorbSUN"/>
+                <command name="glGlobalAlphaFactorsSUN"/>
+                <command name="glGlobalAlphaFactoriSUN"/>
+                <command name="glGlobalAlphaFactorfSUN"/>
+                <command name="glGlobalAlphaFactordSUN"/>
+                <command name="glGlobalAlphaFactorubSUN"/>
+                <command name="glGlobalAlphaFactorusSUN"/>
+                <command name="glGlobalAlphaFactoruiSUN"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_mesh_array" supported="gl">
+            <require>
+                <enum name="GL_QUAD_MESH_SUN"/>
+                <enum name="GL_TRIANGLE_MESH_SUN"/>
+                <command name="glDrawMeshArraysSUN"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_slice_accum" supported="gl">
+            <require>
+                <enum name="GL_SLICE_ACCUM_SUN"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_triangle_list" supported="gl">
+            <require>
+                <enum name="GL_RESTART_SUN"/>
+                <enum name="GL_REPLACE_MIDDLE_SUN"/>
+                <enum name="GL_REPLACE_OLDEST_SUN"/>
+                <enum name="GL_TRIANGLE_LIST_SUN"/>
+                <enum name="GL_REPLACEMENT_CODE_SUN"/>
+                <enum name="GL_REPLACEMENT_CODE_ARRAY_SUN"/>
+                <enum name="GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN"/>
+                <enum name="GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN"/>
+                <enum name="GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN"/>
+                <enum name="GL_R1UI_V3F_SUN"/>
+                <enum name="GL_R1UI_C4UB_V3F_SUN"/>
+                <enum name="GL_R1UI_C3F_V3F_SUN"/>
+                <enum name="GL_R1UI_N3F_V3F_SUN"/>
+                <enum name="GL_R1UI_C4F_N3F_V3F_SUN"/>
+                <enum name="GL_R1UI_T2F_V3F_SUN"/>
+                <enum name="GL_R1UI_T2F_N3F_V3F_SUN"/>
+                <enum name="GL_R1UI_T2F_C4F_N3F_V3F_SUN"/>
+                <command name="glReplacementCodeuiSUN"/>
+                <command name="glReplacementCodeusSUN"/>
+                <command name="glReplacementCodeubSUN"/>
+                <command name="glReplacementCodeuivSUN"/>
+                <command name="glReplacementCodeusvSUN"/>
+                <command name="glReplacementCodeubvSUN"/>
+                <command name="glReplacementCodePointerSUN"/>
+            </require>
+        </extension>
+        <extension name="GL_SUN_vertex" supported="gl">
+            <require>
+                <command name="glColor4ubVertex2fSUN"/>
+                <command name="glColor4ubVertex2fvSUN"/>
+                <command name="glColor4ubVertex3fSUN"/>
+                <command name="glColor4ubVertex3fvSUN"/>
+                <command name="glColor3fVertex3fSUN"/>
+                <command name="glColor3fVertex3fvSUN"/>
+                <command name="glNormal3fVertex3fSUN"/>
+                <command name="glNormal3fVertex3fvSUN"/>
+                <command name="glColor4fNormal3fVertex3fSUN"/>
+                <command name="glColor4fNormal3fVertex3fvSUN"/>
+                <command name="glTexCoord2fVertex3fSUN"/>
+                <command name="glTexCoord2fVertex3fvSUN"/>
+                <command name="glTexCoord4fVertex4fSUN"/>
+                <command name="glTexCoord4fVertex4fvSUN"/>
+                <command name="glTexCoord2fColor4ubVertex3fSUN"/>
+                <command name="glTexCoord2fColor4ubVertex3fvSUN"/>
+                <command name="glTexCoord2fColor3fVertex3fSUN"/>
+                <command name="glTexCoord2fColor3fVertex3fvSUN"/>
+                <command name="glTexCoord2fNormal3fVertex3fSUN"/>
+                <command name="glTexCoord2fNormal3fVertex3fvSUN"/>
+                <command name="glTexCoord2fColor4fNormal3fVertex3fSUN"/>
+                <command name="glTexCoord2fColor4fNormal3fVertex3fvSUN"/>
+                <command name="glTexCoord4fColor4fNormal3fVertex4fSUN"/>
+                <command name="glTexCoord4fColor4fNormal3fVertex4fvSUN"/>
+                <command name="glReplacementCodeuiVertex3fSUN"/>
+                <command name="glReplacementCodeuiVertex3fvSUN"/>
+                <command name="glReplacementCodeuiColor4ubVertex3fSUN"/>
+                <command name="glReplacementCodeuiColor4ubVertex3fvSUN"/>
+                <command name="glReplacementCodeuiColor3fVertex3fSUN"/>
+                <command name="glReplacementCodeuiColor3fVertex3fvSUN"/>
+                <command name="glReplacementCodeuiNormal3fVertex3fSUN"/>
+                <command name="glReplacementCodeuiNormal3fVertex3fvSUN"/>
+                <command name="glReplacementCodeuiColor4fNormal3fVertex3fSUN"/>
+                <command name="glReplacementCodeuiColor4fNormal3fVertex3fvSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fVertex3fSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fVertex3fvSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN"/>
+                <command name="glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN"/>
+            </require>
+        </extension>
+        <extension name="GL_VIV_shader_binary" supported="gles2">
+            <require>
+                <enum name="GL_SHADER_BINARY_VIV"/>
+            </require>
+        </extension>
+        <extension name="GL_WIN_phong_shading" supported="gl">
+            <require>
+                <enum name="GL_PHONG_WIN"/>
+                <enum name="GL_PHONG_HINT_WIN"/>
+            </require>
+        </extension>
+        <extension name="GL_WIN_specular_fog" supported="gl">
+            <require>
+                <enum name="GL_FOG_SPECULAR_TEXTURE_WIN"/>
+            </require>
+        </extension>
+        <extension name="GL_EXT_texture_shadow_lod" supported="gl|glcore|gles2"/>
+    </extensions>
+</registry>
diff --git a/tools/memory/asan/blacklist.txt b/tools/memory/asan/blacklist.txt
new file mode 100644
index 0000000..35fa055
--- /dev/null
+++ b/tools/memory/asan/blacklist.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
diff --git a/tools/roll-release b/tools/roll-release
deleted file mode 100755
index 2f39f11..0000000
--- a/tools/roll-release
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2021 The Tint Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e # Fail on any error.
-
-if [ ! -x "$(which go)" ] ; then
-    echo "error: go needs to be on \$PATH to use $0"
-    exit 1
-fi
-
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd )"
-ROOT_DIR="$( cd "${SCRIPT_DIR}/.." >/dev/null 2>&1 && pwd )"
-BINARY="${SCRIPT_DIR}/bin/roll-release"
-
-# Rebuild the binary.
-# Note, go caches build artifacts, so this is quick for repeat calls
-pushd "${SCRIPT_DIR}/src/cmd/roll-release" > /dev/null
-    go build -o "${BINARY}" main.go
-popd > /dev/null
-
-"${BINARY}" "$@"
diff --git a/tools/src/cmd/roll-release/main.go b/tools/src/cmd/roll-release/main.go
deleted file mode 100644
index cb6e261..0000000
--- a/tools/src/cmd/roll-release/main.go
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2021 The Tint Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// roll-release is a tool to roll changes in Tint release branches into Dawn,
-// and create new Tint release branches.
-//
-// See showUsage() for more information
-package main
-
-import (
-	"encoding/hex"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-
-	"dawn.googlesource.com/tint/tools/src/gerrit"
-	"github.com/go-git/go-git/v5"
-	"github.com/go-git/go-git/v5/config"
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/transport"
-	git_http "github.com/go-git/go-git/v5/plumbing/transport/http"
-	"github.com/go-git/go-git/v5/storage/memory"
-)
-
-const (
-	toolName            = "roll-release"
-	gitCommitMsgHookURL = "https://gerrit-review.googlesource.com/tools/hooks/commit-msg"
-	tintURL             = "https://dawn.googlesource.com/tint"
-	dawnURL             = "https://dawn.googlesource.com/dawn"
-	tintSubdirInDawn    = "third_party/tint"
-	branchPrefix        = "chromium/"
-	branchLegacyCutoff  = 4664 // Branch numbers < than this are ignored
-)
-
-type branches = map[string]plumbing.Hash
-
-func main() {
-	if err := run(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
-
-func showUsage() {
-	fmt.Printf(`
-%[1]v is a tool to synchronize Dawn's release branches with Tint.
-
-%[1]v will scan the release branches of both Dawn and Tint, and will:
-* Create new Gerrit changes to roll new release branch changes from Tint into
-  Dawn.
-* Find and create missing Tint release branches, using the git hash of Tint in
-  the DEPS file of the Dawn release branch.
-
-%[1]v does not depend on the current state of the Tint checkout, nor will it
-make any changes to the local checkout.
-
-usage:
-  %[1]v
-`, toolName)
-	flag.PrintDefaults()
-	fmt.Println(``)
-	os.Exit(1)
-}
-
-func run() error {
-	dry := false
-	flag.BoolVar(&dry, "dry", false, "perform a dry run")
-	flag.Usage = showUsage
-	flag.Parse()
-
-	// This tool uses a mix of 'go-git' and the command line git.
-	// go-git has the benefit of keeping the git information entirely in-memory,
-	// but has issues working with chromium's tools and gerrit.
-	// To create new release branches in Tint, we use 'go-git', so we need to
-	// dig out the username and password.
-	var auth transport.AuthMethod
-	if user, pass := gerrit.LoadCredentials(); user != "" {
-		auth = &git_http.BasicAuth{Username: user, Password: pass}
-	} else {
-		return fmt.Errorf("failed to fetch git credentials")
-	}
-
-	// Using in-memory repos, find all the tint and dawn release branches
-	log.Println("Inspecting dawn and tint release branches...")
-	var tint, dawn *git.Repository
-	var tintBranches, dawnBranches branches
-	for _, r := range []struct {
-		name     string
-		url      string
-		repo     **git.Repository
-		branches *branches
-	}{
-		{"tint", tintURL, &tint, &tintBranches},
-		{"dawn", dawnURL, &dawn, &dawnBranches},
-	} {
-		repo, err := git.Init(memory.NewStorage(), nil)
-		if err != nil {
-			return fmt.Errorf("failed to create %v in-memory repo: %w", r.name, err)
-		}
-		remote, err := repo.CreateRemote(&config.RemoteConfig{
-			Name: "origin",
-			URLs: []string{r.url},
-		})
-		if err != nil {
-			return fmt.Errorf("failed to add %v remote: %w", r.name, err)
-		}
-		refs, err := remote.List(&git.ListOptions{})
-		if err != nil {
-			return fmt.Errorf("failed to fetch %v branches: %w", r.name, err)
-		}
-		branches := branches{}
-		for _, ref := range refs {
-			if !ref.Name().IsBranch() {
-				continue
-			}
-			name := ref.Name().Short()
-			if strings.HasPrefix(name, branchPrefix) {
-				branches[name] = ref.Hash()
-			}
-		}
-		*r.repo = repo
-		*r.branches = branches
-	}
-
-	// Find the release branches found in dawn, which are missing in tint.
-	// Find the release branches in dawn that are behind HEAD of the
-	// corresponding branch in tint.
-	log.Println("Scanning dawn DEPS...")
-	type roll struct {
-		from, to plumbing.Hash
-	}
-	tintBranchesToCreate := branches{}      // branch name -> tint hash
-	dawnBranchesToRoll := map[string]roll{} // branch name -> roll
-	for name := range dawnBranches {
-		if isBranchBefore(name, branchLegacyCutoff) {
-			continue // Branch is earlier than we're interested in
-		}
-		deps, err := getDEPS(dawn, name)
-		if err != nil {
-			return err
-		}
-		depsTintHash, err := parseTintFromDEPS(deps)
-		if err != nil {
-			return err
-		}
-
-		if tintBranchHash, found := tintBranches[name]; found {
-			if tintBranchHash != depsTintHash {
-				dawnBranchesToRoll[name] = roll{from: depsTintHash, to: tintBranchHash}
-			}
-		} else {
-			tintBranchesToCreate[name] = depsTintHash
-		}
-	}
-
-	if dry {
-		tasks := []string{}
-		for name, sha := range tintBranchesToCreate {
-			tasks = append(tasks, fmt.Sprintf("Create Tint release branch '%v' @ %v", name, sha))
-		}
-		for name, roll := range dawnBranchesToRoll {
-			tasks = append(tasks, fmt.Sprintf("Roll Dawn release branch '%v' from %v to %v", name, roll.from, roll.to))
-		}
-		sort.Strings(tasks)
-		fmt.Printf("%v was run with --dry. Run without --dry to:\n", toolName)
-		for _, task := range tasks {
-			fmt.Println(" >", task)
-		}
-		return nil
-	}
-
-	didSomething := false
-	if n := len(tintBranchesToCreate); n > 0 {
-		log.Println("Creating", n, "release branches in tint...")
-
-		// In order to create the branches, we need to know what the DEPS
-		// hashes are referring to. Perform an in-memory fetch of tint's main
-		// branch.
-		if _, err := fetch(tint, "main"); err != nil {
-			return err
-		}
-
-		for name, sha := range tintBranchesToCreate {
-			log.Println("Creating branch", name, "@", sha, "...")
-
-			// Pushing a branch by SHA does not work, so we need to create a
-			// local branch first. See https://github.com/go-git/go-git/issues/105
-			src := plumbing.NewHashReference(plumbing.NewBranchReferenceName(name), sha)
-			if err := tint.Storer.SetReference(src); err != nil {
-				return fmt.Errorf("failed to create temporary branch: %w", err)
-			}
-
-			dst := plumbing.NewBranchReferenceName(name)
-			refspec := config.RefSpec(src.Name() + ":" + dst)
-			err := tint.Push(&git.PushOptions{
-				RefSpecs: []config.RefSpec{refspec},
-				Progress: os.Stdout,
-				Auth:     auth,
-			})
-			if err != nil && err != git.NoErrAlreadyUpToDate {
-				return fmt.Errorf("failed to push branch: %w", err)
-			}
-		}
-		didSomething = true
-	}
-
-	if n := len(dawnBranchesToRoll); n > 0 {
-		log.Println("Rolling", n, "release branches in dawn...")
-
-		// Fetch the change-id hook script
-		commitMsgHookResp, err := http.Get(gitCommitMsgHookURL)
-		if err != nil {
-			return fmt.Errorf("failed to fetch the git commit message hook from '%v': %w", gitCommitMsgHookURL, err)
-		}
-		commitMsgHook, err := ioutil.ReadAll(commitMsgHookResp.Body)
-		if err != nil {
-			return fmt.Errorf("failed to fetch the git commit message hook from '%v': %w", gitCommitMsgHookURL, err)
-		}
-
-		for name, roll := range dawnBranchesToRoll {
-			log.Println("Rolling branch", name, "from tint", roll.from, "to", roll.to, "...")
-			dir, err := ioutil.TempDir("", "dawn-roll")
-			if err != nil {
-				return err
-			}
-			defer os.RemoveAll(dir)
-
-			// Clone dawn into dir
-			if err := call(dir, "git", "clone", "--depth", "1", "-b", name, dawnURL, "."); err != nil {
-				return fmt.Errorf("failed to clone dawn branch %v: %w", name, err)
-			}
-
-			// Copy the Change-Id hook into the dawn directory
-			gitHooksDir := filepath.Join(dir, ".git", "hooks")
-			if err := os.MkdirAll(gitHooksDir, 0777); err != nil {
-				return fmt.Errorf("failed create commit hooks directory: %w", err)
-			}
-			if err := ioutil.WriteFile(filepath.Join(gitHooksDir, "commit-msg"), commitMsgHook, 0777); err != nil {
-				return fmt.Errorf("failed install commit message hook: %w", err)
-			}
-
-			// Clone tint into third_party directory of dawn
-			tintDir := filepath.Join(dir, tintSubdirInDawn)
-			if err := os.MkdirAll(tintDir, 0777); err != nil {
-				return fmt.Errorf("failed to create directory %v: %w", tintDir, err)
-			}
-			if err := call(tintDir, "git", "clone", "-b", name, tintURL, "."); err != nil {
-				return fmt.Errorf("failed to clone tint hash %v: %w", roll.from, err)
-			}
-
-			// Checkout tint at roll.from
-			if err := call(tintDir, "git", "checkout", roll.from); err != nil {
-				return fmt.Errorf("failed to checkout tint at %v: %w", roll.from, err)
-			}
-
-			// Use roll-dep to roll tint to roll.to
-			if err := call(dir, "roll-dep", "--ignore-dirty-tree", fmt.Sprintf("--roll-to=%s", roll.to), tintSubdirInDawn); err != nil {
-				return err
-			}
-
-			// Push the change to gerrit
-			if err := call(dir, "git", "push", "origin", "HEAD:refs/for/"+name); err != nil {
-				return fmt.Errorf("failed to push roll to gerrit: %w", err)
-			}
-		}
-		didSomething = true
-	}
-
-	if !didSomething {
-		log.Println("Everything up to date")
-	} else {
-		log.Println("Done")
-	}
-	return nil
-}
-
-// returns true if the branch name contains a branch number less than 'version'
-func isBranchBefore(name string, version int) bool {
-	n, err := strconv.Atoi(strings.TrimPrefix(name, branchPrefix))
-	if err != nil {
-		return false
-	}
-	return n < version
-}
-
-// call invokes the executable 'exe' with the given arguments in the working
-// directory 'dir'.
-func call(dir, exe string, args ...interface{}) error {
-	s := make([]string, len(args))
-	for i, a := range args {
-		s[i] = fmt.Sprint(a)
-	}
-	cmd := exec.Command(exe, s...)
-	cmd.Dir = dir
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("%v returned %v", cmd, err)
-	}
-	return nil
-}
-
-// getDEPS returns the content of the DEPS file for the given branch.
-func getDEPS(r *git.Repository, branch string) (string, error) {
-	hash, err := fetch(r, branch)
-	if err != nil {
-		return "", err
-	}
-	commit, err := r.CommitObject(hash)
-	if err != nil {
-		return "", fmt.Errorf("failed to fetch commit: %w", err)
-	}
-	tree, err := commit.Tree()
-	if err != nil {
-		return "", fmt.Errorf("failed to fetch tree: %w", err)
-	}
-	deps, err := tree.File("DEPS")
-	if err != nil {
-		return "", fmt.Errorf("failed to find DEPS: %w", err)
-	}
-	return deps.Contents()
-}
-
-// fetch performs a git-fetch of the given branch into 'r', returning the
-// fetched branch's hash.
-func fetch(r *git.Repository, branch string) (plumbing.Hash, error) {
-	src := plumbing.NewBranchReferenceName(branch)
-	dst := plumbing.NewRemoteReferenceName("origin", branch)
-	err := r.Fetch(&git.FetchOptions{
-		RefSpecs: []config.RefSpec{config.RefSpec("+" + src + ":" + dst)},
-	})
-	if err != nil {
-		return plumbing.Hash{}, fmt.Errorf("failed to fetch branch %v: %w", branch, err)
-	}
-	ref, err := r.Reference(plumbing.ReferenceName(dst), true)
-	if err != nil {
-		return plumbing.Hash{}, fmt.Errorf("failed to resolve branch %v: %w", branch, err)
-	}
-	return ref.Hash(), nil
-}
-
-var reDEPSTintVersion = regexp.MustCompile("tint@([0-9a-fA-F]*)")
-
-// parseTintFromDEPS returns the tint hash from the DEPS file content 'deps'
-func parseTintFromDEPS(deps string) (plumbing.Hash, error) {
-	m := reDEPSTintVersion.FindStringSubmatch(deps)
-	if len(m) != 2 {
-		return plumbing.Hash{}, fmt.Errorf("failed to find tint hash in DEPS")
-	}
-	b, err := hex.DecodeString(m[1])
-	if err != nil {
-		return plumbing.Hash{}, fmt.Errorf("failed to find parse tint hash in DEPS: %w", err)
-	}
-	var h plumbing.Hash
-	copy(h[:], b)
-	return h, nil
-}
diff --git a/tools/src/container/container.go b/tools/src/container/container.go
new file mode 100644
index 0000000..9958757
--- /dev/null
+++ b/tools/src/container/container.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package set implements a basic generic containers
+package container
diff --git a/tools/src/container/container_test.go b/tools/src/container/container_test.go
new file mode 100644
index 0000000..064ae03
--- /dev/null
+++ b/tools/src/container/container_test.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container_test
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func expectEq(t *testing.T, name string, got, expect interface{}) {
+	t.Helper()
+	if diff := cmp.Diff(got, expect); diff != "" {
+		t.Errorf("%v:\n%v", name, diff)
+	}
+}
diff --git a/tools/src/container/key.go b/tools/src/container/key.go
new file mode 100644
index 0000000..ea843b7
--- /dev/null
+++ b/tools/src/container/key.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container
+
+// key is the constraint for container keys.
+// As Map and Set sort before returning a slice, the constraint is equivalent to
+// the constraints.Ordered in x/exp, instead of 'comparable':
+// https://cs.opensource.google/go/x/exp/+/master:constraints/constraints.go
+type key interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 |
+		~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | ~string
+}
diff --git a/tools/src/container/map.go b/tools/src/container/map.go
new file mode 100644
index 0000000..6b8acdc
--- /dev/null
+++ b/tools/src/container/map.go
@@ -0,0 +1,62 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container
+
+import "sort"
+
+// Map is a generic unordered map, which wrap's go's builtin 'map'.
+// K is the map key, which must match the 'key' constraint.
+// V is the map value, which can be any type.
+type Map[K key, V any] map[K]V
+
+// Returns a new empty map
+func NewMap[K key, V any]() Map[K, V] {
+	return make(Map[K, V])
+}
+
+// Add adds an item to the map.
+func (m Map[K, V]) Add(k K, v V) {
+	m[k] = v
+}
+
+// Remove removes an item from the map
+func (m Map[K, V]) Remove(item K) {
+	delete(m, item)
+}
+
+// Contains returns true if the map contains the given item
+func (m Map[K, V]) Contains(item K) bool {
+	_, found := m[item]
+	return found
+}
+
+// Keys returns the sorted keys of the map as a slice
+func (m Map[K, V]) Keys() []K {
+	out := make([]K, 0, len(m))
+	for v := range m {
+		out = append(out, v)
+	}
+	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
+	return out
+}
+
+// Values returns the values of the map sorted by key
+func (m Map[K, V]) Values() []V {
+	out := make([]V, 0, len(m))
+	for _, k := range m.Keys() {
+		out = append(out, m[k])
+	}
+	return out
+}
diff --git a/tools/src/container/map_test.go b/tools/src/container/map_test.go
new file mode 100644
index 0000000..292b428
--- /dev/null
+++ b/tools/src/container/map_test.go
@@ -0,0 +1,114 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container_test
+
+import (
+	"testing"
+
+	"dawn.googlesource.com/dawn/tools/src/container"
+)
+
+func TestNewMap(t *testing.T) {
+	m := container.NewMap[string, int]()
+	expectEq(t, "len(m)", len(m), 0)
+}
+
+func TestMapAdd(t *testing.T) {
+	m := container.NewMap[string, int]()
+	m.Add("c", 3)
+	expectEq(t, "len(m)", len(m), 1)
+	expectEq(t, `m["a"]`, m["a"], 0)
+	expectEq(t, `m["b"]`, m["b"], 0)
+	expectEq(t, `m["c"]`, m["c"], 3)
+
+	m.Add("a", 1)
+	expectEq(t, "len(m)", len(m), 2)
+	expectEq(t, `m["a"]`, m["a"], 1)
+	expectEq(t, `m["b"]`, m["b"], 0)
+	expectEq(t, `m["c"]`, m["c"], 3)
+
+	m.Add("b", 2)
+	expectEq(t, "len(m)", len(m), 3)
+	expectEq(t, `m["a"]`, m["a"], 1)
+	expectEq(t, `m["b"]`, m["b"], 2)
+	expectEq(t, `m["c"]`, m["c"], 3)
+}
+
+func TestMapRemove(t *testing.T) {
+	m := container.NewMap[string, int]()
+	m.Add("a", 1)
+	m.Add("b", 2)
+	m.Add("c", 3)
+
+	m.Remove("c")
+	expectEq(t, "len(m)", len(m), 2)
+	expectEq(t, `m["a"]`, m["a"], 1)
+	expectEq(t, `m["b"]`, m["b"], 2)
+	expectEq(t, `m["c"]`, m["c"], 0)
+
+	m.Remove("a")
+	expectEq(t, "len(m)", len(m), 1)
+	expectEq(t, `m["a"]`, m["a"], 0)
+	expectEq(t, `m["b"]`, m["b"], 2)
+	expectEq(t, `m["c"]`, m["c"], 0)
+
+	m.Remove("b")
+	expectEq(t, "len(m)", len(m), 0)
+	expectEq(t, `m["a"]`, m["a"], 0)
+	expectEq(t, `m["b"]`, m["b"], 0)
+	expectEq(t, `m["c"]`, m["c"], 0)
+}
+
+func TestMapContains(t *testing.T) {
+	m := container.NewMap[string, int]()
+	m.Add("c", 3)
+	expectEq(t, `m.Contains("a")`, m.Contains("a"), false)
+	expectEq(t, `m.Contains("b")`, m.Contains("b"), false)
+	expectEq(t, `m.Contains("c")`, m.Contains("c"), true)
+
+	m.Add("a", 1)
+	expectEq(t, `m.Contains("a")`, m.Contains("a"), true)
+	expectEq(t, `m.Contains("b")`, m.Contains("b"), false)
+	expectEq(t, `m.Contains("c")`, m.Contains("c"), true)
+
+	m.Add("b", 2)
+	expectEq(t, `m.Contains("a")`, m.Contains("a"), true)
+	expectEq(t, `m.Contains("b")`, m.Contains("b"), true)
+	expectEq(t, `m.Contains("c")`, m.Contains("c"), true)
+}
+
+func TestMapKeys(t *testing.T) {
+	m := container.NewMap[string, int]()
+	m.Add("c", 3)
+	expectEq(t, `m.Keys()`, m.Keys(), []string{"c"})
+
+	m.Add("a", 1)
+	expectEq(t, `m.Keys()`, m.Keys(), []string{"a", "c"})
+
+	m.Add("b", 2)
+	expectEq(t, `m.Keys()`, m.Keys(), []string{"a", "b", "c"})
+}
+
+func TestMapValues(t *testing.T) {
+	m := container.NewMap[string, int]()
+	m.Add("c", 1)
+	expectEq(t, `m.Values()`, m.Values(), []int{1})
+
+	m.Add("a", 2)
+	expectEq(t, `m.Values()`, m.Values(), []int{2, 1})
+
+	m.Add("b", 3)
+	expectEq(t, `m.Values()`, m.Values(), []int{2, 3, 1})
+}
diff --git a/tools/src/container/set.go b/tools/src/container/set.go
new file mode 100644
index 0000000..c48a649
--- /dev/null
+++ b/tools/src/container/set.go
@@ -0,0 +1,100 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container
+
+import "sort"
+
+// Set is a generic unordered set, which wrap's go's builtin 'map'.
+// T is the set key, which must match the 'key' constraint.
+type Set[T key] map[T]struct{}
+
+// Returns a new set with the give items
+func NewSet[T key](items ...T) Set[T] {
+	out := make(Set[T])
+	for _, item := range items {
+		out.Add(item)
+	}
+	return out
+}
+
+// Clone returns a new Set populated with s
+func (s Set[T]) Clone() Set[T] {
+	out := make(Set[T], len(s))
+	for item := range s {
+		out.Add(item)
+	}
+	return out
+}
+
+// Add adds an item to the set.
+func (s Set[T]) Add(item T) {
+	s[item] = struct{}{}
+}
+
+// AddAll adds all the items of o to the set.
+func (s Set[T]) AddAll(o Set[T]) {
+	for item := range o {
+		s.Add(item)
+	}
+}
+
+// Remove removes an item from the set
+func (s Set[T]) Remove(item T) {
+	delete(s, item)
+}
+
+// RemoveAll removes all the items of o from the set.
+func (s Set[T]) RemoveAll(o Set[T]) {
+	for item := range o {
+		s.Remove(item)
+	}
+}
+
+// Contains returns true if the set contains the given item
+func (s Set[T]) Contains(item T) bool {
+	_, found := s[item]
+	return found
+}
+
+// Contains returns true if the set contains all the items in o
+func (s Set[T]) ContainsAll(o Set[T]) bool {
+	for item := range o {
+		if !s.Contains(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Intersection returns true if the set contains all the items in o
+func (s Set[T]) Intersection(o Set[T]) Set[T] {
+	out := NewSet[T]()
+	for item := range o {
+		if s.Contains(item) {
+			out.Add(item)
+		}
+	}
+	return out
+}
+
+// List returns the sorted entries of the set as a slice
+func (s Set[T]) List() []T {
+	out := make([]T, 0, len(s))
+	for v := range s {
+		out = append(out, v)
+	}
+	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
+	return out
+}
diff --git a/tools/src/container/set_test.go b/tools/src/container/set_test.go
new file mode 100644
index 0000000..ff1e28f
--- /dev/null
+++ b/tools/src/container/set_test.go
@@ -0,0 +1,145 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package container_test
+
+import (
+	"testing"
+
+	"dawn.googlesource.com/dawn/tools/src/container"
+)
+
+func TestNewEmptySet(t *testing.T) {
+	s := container.NewSet[string]()
+	expectEq(t, "len(s)", len(s), 0)
+}
+
+func TestNewSet(t *testing.T) {
+	s := container.NewSet("c", "a", "b")
+	expectEq(t, "len(s)", len(s), 3)
+}
+
+func TestSetList(t *testing.T) {
+	s := container.NewSet("c", "a", "b")
+	expectEq(t, "s.List()", s.List(), []string{"a", "b", "c"})
+}
+
+func TestSetClone(t *testing.T) {
+	a := container.NewSet("c", "a", "b")
+	b := a.Clone()
+	a.Remove("a")
+	expectEq(t, "b.List()", b.List(), []string{"a", "b", "c"})
+}
+
+func TestSetAdd(t *testing.T) {
+	s := container.NewSet[string]()
+	s.Add("c")
+	expectEq(t, "len(s)", len(s), 1)
+	expectEq(t, "s.List()", s.List(), []string{"c"})
+
+	s.Add("a")
+	expectEq(t, "len(s)", len(s), 2)
+	expectEq(t, "s.List()", s.List(), []string{"a", "c"})
+
+	s.Add("b")
+	expectEq(t, "len(s)", len(s), 3)
+	expectEq(t, "s.List()", s.List(), []string{"a", "b", "c"})
+}
+
+func TestSetRemove(t *testing.T) {
+	s := container.NewSet("c", "a", "b")
+	s.Remove("c")
+	expectEq(t, "len(s)", len(s), 2)
+	expectEq(t, "s.List()", s.List(), []string{"a", "b"})
+
+	s.Remove("a")
+	expectEq(t, "len(s)", len(s), 1)
+	expectEq(t, "s.List()", s.List(), []string{"b"})
+
+	s.Remove("b")
+	expectEq(t, "len(s)", len(s), 0)
+	expectEq(t, "s.List()", s.List(), []string{})
+}
+
+func TestSetContains(t *testing.T) {
+	s := container.NewSet[string]()
+	s.Add("c")
+	expectEq(t, `m.Contains("a")`, s.Contains("a"), false)
+	expectEq(t, `s.Contains("b")`, s.Contains("b"), false)
+	expectEq(t, `s.Contains("c")`, s.Contains("c"), true)
+
+	s.Add("a")
+	expectEq(t, `s.Contains("a")`, s.Contains("a"), true)
+	expectEq(t, `s.Contains("b")`, s.Contains("b"), false)
+	expectEq(t, `s.Contains("c")`, s.Contains("c"), true)
+
+	s.Add("b")
+	expectEq(t, `s.Contains("a")`, s.Contains("a"), true)
+	expectEq(t, `s.Contains("b")`, s.Contains("b"), true)
+	expectEq(t, `s.Contains("c")`, s.Contains("c"), true)
+}
+
+func TestSetContainsAll(t *testing.T) {
+	S := container.NewSet[string]
+
+	s := container.NewSet[string]()
+	s.Add("c")
+	expectEq(t, `s.ContainsAll("a")`, s.ContainsAll(S("a")), false)
+	expectEq(t, `s.ContainsAll("b")`, s.ContainsAll(S("b")), false)
+	expectEq(t, `s.ContainsAll("c")`, s.ContainsAll(S("c")), true)
+	expectEq(t, `s.ContainsAll("a", "b")`, s.ContainsAll(S("a", "b")), false)
+	expectEq(t, `s.ContainsAll("b", "c")`, s.ContainsAll(S("b", "c")), false)
+	expectEq(t, `s.ContainsAll("c", "a")`, s.ContainsAll(S("c", "a")), false)
+	expectEq(t, `s.ContainsAll("c", "a", "b")`, s.ContainsAll(S("c", "a", "b")), false)
+
+	s.Add("a")
+	expectEq(t, `s.ContainsAll("a")`, s.ContainsAll(S("a")), true)
+	expectEq(t, `s.ContainsAll("b")`, s.ContainsAll(S("b")), false)
+	expectEq(t, `s.ContainsAll("c")`, s.ContainsAll(S("c")), true)
+	expectEq(t, `s.ContainsAll("a", "b")`, s.ContainsAll(S("a", "b")), false)
+	expectEq(t, `s.ContainsAll("b", "c")`, s.ContainsAll(S("b", "c")), false)
+	expectEq(t, `s.ContainsAll("c", "a")`, s.ContainsAll(S("c", "a")), true)
+	expectEq(t, `s.ContainsAll("c", "a", "b")`, s.ContainsAll(S("c", "a", "b")), false)
+
+	s.Add("b")
+	expectEq(t, `s.ContainsAll("a")`, s.ContainsAll(S("a")), true)
+	expectEq(t, `s.ContainsAll("b")`, s.ContainsAll(S("b")), true)
+	expectEq(t, `s.ContainsAll("c")`, s.ContainsAll(S("c")), true)
+	expectEq(t, `s.ContainsAll("a", "b")`, s.ContainsAll(S("a", "b")), true)
+	expectEq(t, `s.ContainsAll("b", "c")`, s.ContainsAll(S("b", "c")), true)
+	expectEq(t, `s.ContainsAll("c", "a")`, s.ContainsAll(S("c", "a")), true)
+	expectEq(t, `s.ContainsAll("c", "a", "b")`, s.ContainsAll(S("c", "a", "b")), true)
+}
+
+func TestSetIntersection(t *testing.T) {
+	a := container.NewSet(1, 3, 4, 6)
+	b := container.NewSet(2, 3, 4, 5)
+
+	i := a.Intersection(b)
+	expectEq(t, `i.List()`, i.List(), []int{3, 4})
+}
+
+func TestSetAddAll(t *testing.T) {
+	s := container.NewSet[string]()
+	s.AddAll(container.NewSet("c", "a"))
+	expectEq(t, "len(s)", len(s), 2)
+	expectEq(t, "s.List()", s.List(), []string{"a", "c"})
+}
+
+func TestSetRemoveAll(t *testing.T) {
+	s := container.NewSet("c", "a", "b")
+	s.RemoveAll(container.NewSet("c", "a"))
+	expectEq(t, "len(s)", len(s), 1)
+	expectEq(t, "s.List()", s.List(), []string{"b"})
+}
diff --git a/tools/src/cts/query/query.go b/tools/src/cts/query/query.go
new file mode 100644
index 0000000..46efa71
--- /dev/null
+++ b/tools/src/cts/query/query.go
@@ -0,0 +1,362 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package query provides helpers for parsing and mutating WebGPU CTS queries.
+//
+// The full query syntax is described at:
+// https://github.com/gpuweb/cts/blob/main/docs/terms.md#queries
+//
+// Note that this package supports a superset of the official CTS query syntax,
+// as this package permits parsing and printing of queries that do not end in a
+// wildcard, whereas the CTS requires that all queries end in wildcards unless
+// they identify a specific test.
+// For example, the following queries are considered valid by this  package, but
+// would be rejected by the CTS:
+// `suite`, `suite:file`, `suite:file,file`, `suite:file,file:test`.
+//
+// This relaxation is intentional as the Query type is used for constructing and
+// reducing query trees, and always requiring a wildcard adds unnecessary
+// complexity.
+package query
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Query represents a WebGPU test query
+// Example queries:
+//    'suite'
+//    'suite:*'
+//    'suite:file'
+//    'suite:file,*'
+//    'suite:file,file'
+//    'suite:file,file,*'
+//    'suite:file,file,file:test'
+//    'suite:file,file,file:test:*'
+//    'suite:file,file,file:test,test:case;*'
+type Query struct {
+	Suite string
+	Files string
+	Tests string
+	Cases string
+}
+
+// Target is the target of a query, either a Suite, File, Test or Case.
+type Target int
+
+// Enumerators of Target
+const (
+	// The query targets a suite
+	Suite Target = iota
+	// The query targets one or more files
+	Files
+	// The query targets one or more tests
+	Tests
+	// The query targets one or more test cases
+	Cases
+
+	TargetCount
+)
+
+// Format writes the Target to the fmt.State
+func (l Target) Format(f fmt.State, verb rune) {
+	switch l {
+	case Suite:
+		fmt.Fprint(f, "suite")
+	case Files:
+		fmt.Fprint(f, "files")
+	case Tests:
+		fmt.Fprint(f, "tests")
+	case Cases:
+		fmt.Fprint(f, "cases")
+	default:
+		fmt.Fprint(f, "<invalid>")
+	}
+}
+
+// Delimiter constants used by the query format
+const (
+	TargetDelimiter = ":"
+	FileDelimiter   = ","
+	TestDelimiter   = ","
+	CaseDelimiter   = ";"
+)
+
+// Parse parses a query string
+func Parse(s string) Query {
+	parts := strings.Split(s, TargetDelimiter)
+	q := Query{}
+	switch len(parts) {
+	default:
+		q.Cases = strings.Join(parts[3:], TargetDelimiter)
+		fallthrough
+	case 3:
+		q.Tests = parts[2]
+		fallthrough
+	case 2:
+		q.Files = parts[1]
+		fallthrough
+	case 1:
+		q.Suite = parts[0]
+	}
+	return q
+}
+
+// AppendFiles returns a new query with the strings appended to the 'files'
+func (q Query) AppendFiles(f ...string) Query {
+	if len(f) > 0 {
+		if q.Files == "" {
+			q.Files = strings.Join(f, FileDelimiter)
+		} else {
+			q.Files = q.Files + FileDelimiter + strings.Join(f, FileDelimiter)
+		}
+	}
+	return q
+}
+
+// SplitFiles returns the separated 'files' part of the query
+func (q Query) SplitFiles() []string {
+	if q.Files != "" {
+		return strings.Split(q.Files, FileDelimiter)
+	}
+	return nil
+}
+
+// AppendTests returns a new query with the strings appended to the 'tests'
+func (q Query) AppendTests(t ...string) Query {
+	if len(t) > 0 {
+		if q.Tests == "" {
+			q.Tests = strings.Join(t, TestDelimiter)
+		} else {
+			q.Tests = q.Tests + TestDelimiter + strings.Join(t, TestDelimiter)
+		}
+	}
+	return q
+}
+
+// SplitTests returns the separated 'tests' part of the query
+func (q Query) SplitTests() []string {
+	if q.Tests != "" {
+		return strings.Split(q.Tests, TestDelimiter)
+	}
+	return nil
+}
+
+// AppendCases returns a new query with the strings appended to the 'cases'
+func (q Query) AppendCases(c ...string) Query {
+	if len(c) > 0 {
+		if q.Cases == "" {
+			q.Cases = strings.Join(c, CaseDelimiter)
+		} else {
+			q.Cases = q.Cases + CaseDelimiter + strings.Join(c, CaseDelimiter)
+		}
+	}
+	return q
+}
+
+// SplitCases returns the separated 'cases' part of the query
+func (q Query) SplitCases() []string {
+	if q.Cases != "" {
+		return strings.Split(q.Cases, CaseDelimiter)
+	}
+	return nil
+}
+
+// Case parameters is a map of parameter name to parameter value
+type CaseParameters map[string]string
+
+// CaseParameters returns all the case parameters of the query
+func (q Query) CaseParameters() CaseParameters {
+	if q.Cases != "" {
+		out := CaseParameters{}
+		for _, c := range strings.Split(q.Cases, CaseDelimiter) {
+			idx := strings.IndexRune(c, '=')
+			if idx < 0 {
+				out[c] = ""
+			} else {
+				k, v := c[:idx], c[idx+1:]
+				out[k] = v
+			}
+		}
+		return out
+	}
+	return nil
+}
+
+// Append returns the query with the additional strings appended to the target
+func (q Query) Append(t Target, n ...string) Query {
+	switch t {
+	case Files:
+		return q.AppendFiles(n...)
+	case Tests:
+		return q.AppendTests(n...)
+	case Cases:
+		return q.AppendCases(n...)
+	}
+	panic("invalid target")
+}
+
+// Target returns the target of the query
+func (q Query) Target() Target {
+	if q.Files != "" {
+		if q.Tests != "" {
+			if q.Cases != "" {
+				return Cases
+			}
+			return Tests
+		}
+		return Files
+	}
+	return Suite
+}
+
+// IsWildcard returns true if the query ends with a wildcard
+func (q Query) IsWildcard() bool {
+	switch q.Target() {
+	case Suite:
+		return q.Suite == "*"
+	case Files:
+		return strings.HasSuffix(q.Files, "*")
+	case Tests:
+		return strings.HasSuffix(q.Tests, "*")
+	case Cases:
+		return strings.HasSuffix(q.Cases, "*")
+	}
+	panic("invalid target")
+}
+
+// String returns the query formatted as a string
+func (q Query) String() string {
+	sb := strings.Builder{}
+	sb.WriteString(q.Suite)
+	if q.Files != "" {
+		sb.WriteString(TargetDelimiter)
+		sb.WriteString(q.Files)
+		if q.Tests != "" {
+			sb.WriteString(TargetDelimiter)
+			sb.WriteString(q.Tests)
+			if q.Cases != "" {
+				sb.WriteString(TargetDelimiter)
+				sb.WriteString(q.Cases)
+			}
+		}
+	}
+	return sb.String()
+}
+
+// Compare compares the relative order of q and o, returning:
+//  -1 if q should come before o
+//   1 if q should come after o
+//   0 if q and o are identical
+func (q Query) Compare(o Query) int {
+	for _, cmp := range []struct{ a, b string }{
+		{q.Suite, o.Suite},
+		{q.Files, o.Files},
+		{q.Tests, o.Tests},
+		{q.Cases, o.Cases},
+	} {
+		if cmp.a < cmp.b {
+			return -1
+		}
+		if cmp.a > cmp.b {
+			return 1
+		}
+	}
+
+	return 0
+}
+
+// Contains returns true if q is a superset of o
+func (q Query) Contains(o Query) bool {
+	if q.Suite != o.Suite {
+		return false
+	}
+	{
+		a, b := q.SplitFiles(), o.SplitFiles()
+		for i, f := range a {
+			if f == "*" {
+				return true
+			}
+			if i >= len(b) || b[i] != f {
+				return false
+			}
+		}
+		if len(a) < len(b) {
+			return false
+		}
+	}
+	{
+		a, b := q.SplitTests(), o.SplitTests()
+		for i, f := range a {
+			if f == "*" {
+				return true
+			}
+			if i >= len(b) || b[i] != f {
+				return false
+			}
+		}
+		if len(a) < len(b) {
+			return false
+		}
+	}
+	{
+		a, b := q.CaseParameters(), o.CaseParameters()
+		for key, av := range a {
+			if bv, found := b[key]; found && av != bv {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Callback function for Query.Walk()
+//   q is the query for the current segment.
+//   t is the target of the query q.
+//   n is the name of the new segment.
+type WalkCallback func(q Query, t Target, n string) error
+
+// Walk calls 'f' for each suite, file, test segment, and calls f once for all
+// cases. If f returns an error then walking is immediately terminated and the
+// error is returned.
+func (q Query) Walk(f WalkCallback) error {
+	p := Query{Suite: q.Suite}
+
+	if err := f(p, Suite, q.Suite); err != nil {
+		return err
+	}
+
+	for _, file := range q.SplitFiles() {
+		p = p.AppendFiles(file)
+		if err := f(p, Files, file); err != nil {
+			return err
+		}
+	}
+
+	for _, test := range q.SplitTests() {
+		p = p.AppendTests(test)
+		if err := f(p, Tests, test); err != nil {
+			return err
+		}
+	}
+
+	if q.Cases != "" {
+		if err := f(q, Cases, q.Cases); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/tools/src/cts/query/query_test.go b/tools/src/cts/query/query_test.go
new file mode 100644
index 0000000..e817e93
--- /dev/null
+++ b/tools/src/cts/query/query_test.go
@@ -0,0 +1,855 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package query_test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"dawn.googlesource.com/dawn/tools/src/cts/query"
+	"github.com/google/go-cmp/cmp"
+)
+
+var Q = query.Parse
+
+func TestTargetFormat(t *testing.T) {
+	type Test struct {
+		target query.Target
+		expect string
+	}
+
+	for _, test := range []Test{
+		{query.Suite, "suite"},
+		{query.Files, "files"},
+		{query.Tests, "tests"},
+		{query.Cases, "cases"},
+		{query.Target(-1), "<invalid>"},
+	} {
+		s := strings.Builder{}
+		_, err := fmt.Fprint(&s, test.target)
+		if err != nil {
+			t.Errorf("Fprint() returned %v", err)
+			continue
+		}
+		if diff := cmp.Diff(s.String(), test.expect); diff != "" {
+			t.Errorf("Fprint('%v')\n%v", test.target, diff)
+		}
+	}
+}
+
+func TestAppendFiles(t *testing.T) {
+	type Test struct {
+		base   query.Query
+		files  []string
+		expect query.Query
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), []string{}, Q("suite")},
+		{Q("suite"), []string{"x"}, Q("suite:x")},
+		{Q("suite"), []string{"x", "y"}, Q("suite:x,y")},
+		{Q("suite:a"), []string{}, Q("suite:a")},
+		{Q("suite:a"), []string{"x"}, Q("suite:a,x")},
+		{Q("suite:a"), []string{"x", "y"}, Q("suite:a,x,y")},
+		{Q("suite:a,b"), []string{}, Q("suite:a,b")},
+		{Q("suite:a,b"), []string{"x"}, Q("suite:a,b,x")},
+		{Q("suite:a,b"), []string{"x", "y"}, Q("suite:a,b,x,y")},
+		{Q("suite:a,b:c"), []string{}, Q("suite:a,b:c")},
+		{Q("suite:a,b:c"), []string{"x"}, Q("suite:a,b,x:c")},
+		{Q("suite:a,b:c"), []string{"x", "y"}, Q("suite:a,b,x,y:c")},
+		{Q("suite:a,b:c,d"), []string{}, Q("suite:a,b:c,d")},
+		{Q("suite:a,b:c,d"), []string{"x"}, Q("suite:a,b,x:c,d")},
+		{Q("suite:a,b:c,d"), []string{"x", "y"}, Q("suite:a,b,x,y:c,d")},
+		{Q("suite:a,b:c,d:e"), []string{}, Q("suite:a,b:c,d:e")},
+		{Q("suite:a,b:c,d:e"), []string{"x"}, Q("suite:a,b,x:c,d:e")},
+		{Q("suite:a,b:c,d:e"), []string{"x", "y"}, Q("suite:a,b,x,y:c,d:e")},
+		{Q("suite:a,b:c,d:e;f"), []string{}, Q("suite:a,b:c,d:e;f")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x"}, Q("suite:a,b,x:c,d:e;f")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x", "y"}, Q("suite:a,b,x,y:c,d:e;f")},
+	} {
+		got := test.base.AppendFiles(test.files...)
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.AppendFiles(%v)\n%v", test.base, test.files, diff)
+		}
+	}
+}
+
+func TestSplitFiles(t *testing.T) {
+	type Test struct {
+		query  query.Query
+		expect []string
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), nil},
+		{Q("suite:a"), []string{"a"}},
+		{Q("suite:a,b"), []string{"a", "b"}},
+		{Q("suite:a,b:c"), []string{"a", "b"}},
+		{Q("suite:a,b:c,d"), []string{"a", "b"}},
+		{Q("suite:a,b:c,d:e"), []string{"a", "b"}},
+		{Q("suite:a,b:c,d:e;f"), []string{"a", "b"}},
+	} {
+		got := test.query.SplitFiles()
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.SplitFiles()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestAppendTests(t *testing.T) {
+	type Test struct {
+		base   query.Query
+		files  []string
+		expect query.Query
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), []string{}, Q("suite")},
+		{Q("suite"), []string{"x"}, Q("suite::x")},
+		{Q("suite"), []string{"x", "y"}, Q("suite::x,y")},
+		{Q("suite:a"), []string{}, Q("suite:a")},
+		{Q("suite:a"), []string{"x"}, Q("suite:a:x")},
+		{Q("suite:a"), []string{"x", "y"}, Q("suite:a:x,y")},
+		{Q("suite:a,b"), []string{}, Q("suite:a,b")},
+		{Q("suite:a,b"), []string{"x"}, Q("suite:a,b:x")},
+		{Q("suite:a,b"), []string{"x", "y"}, Q("suite:a,b:x,y")},
+		{Q("suite:a,b:c"), []string{}, Q("suite:a,b:c")},
+		{Q("suite:a,b:c"), []string{"x"}, Q("suite:a,b:c,x")},
+		{Q("suite:a,b:c"), []string{"x", "y"}, Q("suite:a,b:c,x,y")},
+		{Q("suite:a,b:c,d"), []string{}, Q("suite:a,b:c,d")},
+		{Q("suite:a,b:c,d"), []string{"x"}, Q("suite:a,b:c,d,x")},
+		{Q("suite:a,b:c,d"), []string{"x", "y"}, Q("suite:a,b:c,d,x,y")},
+		{Q("suite:a,b:c,d:e"), []string{}, Q("suite:a,b:c,d:e")},
+		{Q("suite:a,b:c,d:e"), []string{"x"}, Q("suite:a,b:c,d,x:e")},
+		{Q("suite:a,b:c,d:e"), []string{"x", "y"}, Q("suite:a,b:c,d,x,y:e")},
+		{Q("suite:a,b:c,d:e;f"), []string{}, Q("suite:a,b:c,d:e;f")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x"}, Q("suite:a,b:c,d,x:e;f")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x", "y"}, Q("suite:a,b:c,d,x,y:e;f")},
+	} {
+		got := test.base.AppendTests(test.files...)
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.AppendTests(%v)\n%v", test.base, test.files, diff)
+		}
+	}
+}
+
+func TestSplitTests(t *testing.T) {
+	type Test struct {
+		query query.Query
+		tests []string
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), nil},
+		{Q("suite:a"), nil},
+		{Q("suite:a,b"), nil},
+		{Q("suite:a,b:c"), []string{"c"}},
+		{Q("suite:a,b:c,d"), []string{"c", "d"}},
+		{Q("suite:a,b:c,d:e"), []string{"c", "d"}},
+		{Q("suite:a,b:c,d:e;f"), []string{"c", "d"}},
+	} {
+		got := test.query.SplitTests()
+		if diff := cmp.Diff(got, test.tests); diff != "" {
+			t.Errorf("'%v'.SplitTests()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestAppendCases(t *testing.T) {
+	type Test struct {
+		base   query.Query
+		cases  []string
+		expect query.Query
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), []string{}, Q("suite")},
+		{Q("suite"), []string{"x"}, Q("suite:::x")},
+		{Q("suite"), []string{"x", "y"}, Q("suite:::x;y")},
+		{Q("suite:a"), []string{}, Q("suite:a")},
+		{Q("suite:a"), []string{"x"}, Q("suite:a::x")},
+		{Q("suite:a"), []string{"x", "y"}, Q("suite:a::x;y")},
+		{Q("suite:a,b"), []string{}, Q("suite:a,b")},
+		{Q("suite:a,b"), []string{"x"}, Q("suite:a,b::x")},
+		{Q("suite:a,b"), []string{"x", "y"}, Q("suite:a,b::x;y")},
+		{Q("suite:a,b:c"), []string{}, Q("suite:a,b:c")},
+		{Q("suite:a,b:c"), []string{"x"}, Q("suite:a,b:c:x")},
+		{Q("suite:a,b:c"), []string{"x", "y"}, Q("suite:a,b:c:x;y")},
+		{Q("suite:a,b:c,d"), []string{}, Q("suite:a,b:c,d")},
+		{Q("suite:a,b:c,d"), []string{"x"}, Q("suite:a,b:c,d:x")},
+		{Q("suite:a,b:c,d"), []string{"x", "y"}, Q("suite:a,b:c,d:x;y")},
+		{Q("suite:a,b:c,d:e"), []string{}, Q("suite:a,b:c,d:e")},
+		{Q("suite:a,b:c,d:e"), []string{"x"}, Q("suite:a,b:c,d:e;x")},
+		{Q("suite:a,b:c,d:e"), []string{"x", "y"}, Q("suite:a,b:c,d:e;x;y")},
+		{Q("suite:a,b:c,d:e;f"), []string{}, Q("suite:a,b:c,d:e;f")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x"}, Q("suite:a,b:c,d:e;f;x")},
+		{Q("suite:a,b:c,d:e;f"), []string{"x", "y"}, Q("suite:a,b:c,d:e;f;x;y")},
+	} {
+		got := test.base.AppendCases(test.cases...)
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.AppendCases(%v)\n%v", test.base, test.cases, diff)
+		}
+	}
+}
+
+func TestAppend(t *testing.T) {
+	type Subtest struct {
+		target query.Target
+		expect query.Query
+	}
+	type Test struct {
+		base    query.Query
+		strings []string
+		subtest []Subtest
+	}
+	for _, test := range []Test{
+		{
+			Q("suite"), []string{}, []Subtest{
+				{query.Files, Q("suite")},
+				{query.Tests, Q("suite")},
+				{query.Cases, Q("suite")},
+			},
+		}, {
+			Q("suite"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:x")},
+				{query.Tests, Q("suite::x")},
+				{query.Cases, Q("suite:::x")},
+			},
+		}, {
+			Q("suite"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:x,y")},
+				{query.Tests, Q("suite::x,y")},
+				{query.Cases, Q("suite:::x;y")},
+			},
+		}, {
+			Q("suite:a"), []string{}, []Subtest{
+				{query.Files, Q("suite:a")},
+				{query.Tests, Q("suite:a")},
+				{query.Cases, Q("suite:a")},
+			},
+		}, {
+			Q("suite:a"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,x")},
+				{query.Tests, Q("suite:a:x")},
+				{query.Cases, Q("suite:a::x")},
+			},
+		}, {
+			Q("suite:a"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,x,y")},
+				{query.Tests, Q("suite:a:x,y")},
+				{query.Cases, Q("suite:a::x;y")},
+			},
+		}, {
+			Q("suite:a,b"), []string{}, []Subtest{
+				{query.Files, Q("suite:a,b")},
+				{query.Tests, Q("suite:a,b")},
+				{query.Cases, Q("suite:a,b")},
+			},
+		}, {
+			Q("suite:a,b"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,b,x")},
+				{query.Tests, Q("suite:a,b:x")},
+				{query.Cases, Q("suite:a,b::x")},
+			},
+		}, {
+			Q("suite:a,b"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,b,x,y")},
+				{query.Tests, Q("suite:a,b:x,y")},
+				{query.Cases, Q("suite:a,b::x;y")},
+			},
+		}, {
+			Q("suite:a,b:c"), []string{}, []Subtest{
+				{query.Files, Q("suite:a,b:c")},
+				{query.Tests, Q("suite:a,b:c")},
+				{query.Cases, Q("suite:a,b:c")},
+			},
+		}, {
+			Q("suite:a,b:c"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,b,x:c")},
+				{query.Tests, Q("suite:a,b:c,x")},
+				{query.Cases, Q("suite:a,b:c:x")},
+			},
+		}, {
+			Q("suite:a,b:c"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,b,x,y:c")},
+				{query.Tests, Q("suite:a,b:c,x,y")},
+				{query.Cases, Q("suite:a,b:c:x;y")},
+			},
+		}, {
+			Q("suite:a,b:c,d"), []string{}, []Subtest{
+				{query.Files, Q("suite:a,b:c,d")},
+				{query.Tests, Q("suite:a,b:c,d")},
+				{query.Cases, Q("suite:a,b:c,d")},
+			},
+		}, {
+			Q("suite:a,b:c,d"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,b,x:c,d")},
+				{query.Tests, Q("suite:a,b:c,d,x")},
+				{query.Cases, Q("suite:a,b:c,d:x")},
+			},
+		}, {
+			Q("suite:a,b:c,d"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,b,x,y:c,d")},
+				{query.Tests, Q("suite:a,b:c,d,x,y")},
+				{query.Cases, Q("suite:a,b:c,d:x;y")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e"), []string{}, []Subtest{
+				{query.Files, Q("suite:a,b:c,d:e")},
+				{query.Tests, Q("suite:a,b:c,d:e")},
+				{query.Cases, Q("suite:a,b:c,d:e")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,b,x:c,d:e")},
+				{query.Tests, Q("suite:a,b:c,d,x:e")},
+				{query.Cases, Q("suite:a,b:c,d:e;x")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,b,x,y:c,d:e")},
+				{query.Tests, Q("suite:a,b:c,d,x,y:e")},
+				{query.Cases, Q("suite:a,b:c,d:e;x;y")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e;f"), []string{}, []Subtest{
+				{query.Files, Q("suite:a,b:c,d:e;f")},
+				{query.Tests, Q("suite:a,b:c,d:e;f")},
+				{query.Cases, Q("suite:a,b:c,d:e;f")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e;f"), []string{"x"}, []Subtest{
+				{query.Files, Q("suite:a,b,x:c,d:e;f")},
+				{query.Tests, Q("suite:a,b:c,d,x:e;f")},
+				{query.Cases, Q("suite:a,b:c,d:e;f;x")},
+			},
+		}, {
+			Q("suite:a,b:c,d:e;f"), []string{"x", "y"}, []Subtest{
+				{query.Files, Q("suite:a,b,x,y:c,d:e;f")},
+				{query.Tests, Q("suite:a,b:c,d,x,y:e;f")},
+				{query.Cases, Q("suite:a,b:c,d:e;f;x;y")},
+			},
+		},
+	} {
+		for _, subtest := range test.subtest {
+			got := test.base.Append(subtest.target, test.strings...)
+			if diff := cmp.Diff(got, subtest.expect); diff != "" {
+				t.Errorf("'%v'.Append(%v, %v)\n%v", test.base, subtest.target, test.base.Files, diff)
+			}
+		}
+	}
+}
+
+func TestSplitCases(t *testing.T) {
+	type Test struct {
+		query  query.Query
+		expect []string
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), nil},
+		{Q("suite:a"), nil},
+		{Q("suite:a,b"), nil},
+		{Q("suite:a,b:c"), nil},
+		{Q("suite:a,b:c,d"), nil},
+		{Q("suite:a,b:c,d:e"), []string{"e"}},
+		{Q("suite:a,b:c,d:e;f"), []string{"e", "f"}},
+	} {
+		got := test.query.SplitCases()
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.SplitCases()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestCaseParameters(t *testing.T) {
+	type Test struct {
+		query  query.Query
+		expect query.CaseParameters
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), nil},
+		{Q("suite:a"), nil},
+		{Q("suite:a,b"), nil},
+		{Q("suite:a,b:c"), nil},
+		{Q("suite:a,b:c,d"), nil},
+		{Q("suite:a,b:c,d:e"), query.CaseParameters{"e": ""}},
+		{Q("suite:a,b:c,d:e;f"), query.CaseParameters{"e": "", "f": ""}},
+		{Q("suite:a,b:c,d:e=f;g=h"), query.CaseParameters{"e": "f", "g": "h"}},
+	} {
+		got := test.query.CaseParameters()
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.CaseParameters()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestTarget(t *testing.T) {
+	type Test struct {
+		query  query.Query
+		expect query.Target
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), query.Suite},
+		{Q("suite:*"), query.Files},
+		{Q("suite:a"), query.Files},
+		{Q("suite:a,*"), query.Files},
+		{Q("suite:a,b"), query.Files},
+		{Q("suite:a,b:*"), query.Tests},
+		{Q("suite:a,b:c"), query.Tests},
+		{Q("suite:a,b:c,*"), query.Tests},
+		{Q("suite:a,b:c,d"), query.Tests},
+		{Q("suite:a,b:c,d:*"), query.Cases},
+		{Q("suite:a,b:c,d:e"), query.Cases},
+		{Q("suite:a,b:c,d:e;*"), query.Cases},
+		{Q("suite:a,b:c,d:e;f"), query.Cases},
+		{Q("suite:a,b:c,d:e;f;*"), query.Cases},
+	} {
+		got := test.query.Target()
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.Target()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestIsWildcard(t *testing.T) {
+	type Test struct {
+		query  query.Query
+		expect bool
+	}
+
+	for _, test := range []Test{
+		{Q("suite"), false},
+		{Q("suite:*"), true},
+		{Q("suite:a"), false},
+		{Q("suite:a,*"), true},
+		{Q("suite:a,b"), false},
+		{Q("suite:a,b:*"), true},
+		{Q("suite:a,b:c"), false},
+		{Q("suite:a,b:c,*"), true},
+		{Q("suite:a,b:c,d"), false},
+		{Q("suite:a,b:c,d:*"), true},
+		{Q("suite:a,b:c,d:e"), false},
+		{Q("suite:a,b:c,d:e;*"), true},
+		{Q("suite:a,b:c,d:e;f"), false},
+		{Q("suite:a,b:c,d:e;f;*"), true},
+	} {
+		got := test.query.IsWildcard()
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.IsWildcard()\n%v", test.query, diff)
+		}
+	}
+}
+
+func TestParsePrint(t *testing.T) {
+	type Test struct {
+		in     string
+		expect query.Query
+	}
+
+	for _, test := range []Test{
+		{
+			"a",
+			query.Query{
+				Suite: "a",
+			},
+		}, {
+			"a:*",
+			query.Query{
+				Suite: "a",
+				Files: "*",
+			},
+		}, {
+			"a:b",
+			query.Query{
+				Suite: "a",
+				Files: "b",
+			},
+		}, {
+			"a:b,*",
+			query.Query{
+				Suite: "a",
+				Files: "b,*",
+			},
+		}, {
+			"a:b:*",
+			query.Query{
+				Suite: "a",
+				Files: "b",
+				Tests: "*",
+			},
+		}, {
+			"a:b,c",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+			},
+		}, {
+			"a:b,c:*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "*",
+			},
+		}, {
+			"a:b,c:d",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d",
+			},
+		}, {
+			"a:b,c:d,*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,*",
+			},
+		}, {
+			"a:b,c:d,e",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+			},
+		}, {
+			"a:b,c:d,e,*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e,*",
+			},
+		}, {
+			"a:b,c:d,e:*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: "*",
+			},
+		}, {
+			"a:b,c:d,e:f=g",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: "f=g",
+			},
+		}, {
+			"a:b,c:d,e:f=g;*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: "f=g;*",
+			},
+		}, {
+			"a:b,c:d,e:f=g;h=i",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: "f=g;h=i",
+			},
+		}, {
+			"a:b,c:d,e:f=g;h=i;*",
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: "f=g;h=i;*",
+			},
+		}, {
+			`a:b,c:d,e:f={"x": 1, "y": 2}`,
+			query.Query{
+				Suite: "a",
+				Files: "b,c",
+				Tests: "d,e",
+				Cases: `f={"x": 1, "y": 2}`,
+			},
+		},
+	} {
+		parsed := query.Parse(test.in)
+		if diff := cmp.Diff(test.expect, parsed); diff != "" {
+			t.Errorf("query.Parse('%v')\n%v", test.in, diff)
+		}
+		str := test.expect.String()
+		if diff := cmp.Diff(test.in, str); diff != "" {
+			t.Errorf("query.String('%v')\n%v", test.in, diff)
+		}
+	}
+}
+
+func TestCompare(t *testing.T) {
+	type Test struct {
+		a, b   query.Query
+		expect int
+	}
+
+	for _, test := range []Test{
+		{Q("a"), Q("a"), 0},
+		{Q("a:*"), Q("a"), 1},
+		{Q("a:*"), Q("a:*"), 0},
+		{Q("a:*"), Q("b:*"), -1},
+		{Q("a:*"), Q("a:b,*"), -1},
+		{Q("a:b,*"), Q("a:b"), 1},
+		{Q("a:b,*"), Q("a:b,*"), 0},
+		{Q("a:b,*"), Q("a:c,*"), -1},
+		{Q("a:b,c,*"), Q("a:b,*"), 1},
+		{Q("a:b,c,*"), Q("a:b,c,*"), 0},
+		{Q("a:b,c,d,*"), Q("a:b,c,*"), 1},
+		{Q("a:b,c,*"), Q("a:b,c:d,*"), 1},
+		{Q("a:b,c:*"), Q("a:b,c,d,*"), -1},
+		{Q("a:b,c:d,*"), Q("a:b,c:d,*"), 0},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,*"), 1},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,e,*"), 0},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:e,f,*"), -1},
+		{Q("a:b:c:d;*"), Q("a:b:c:d;*"), 0},
+		{Q("a:b:c:d;e=1;*"), Q("a:b:c:d;*"), 1},
+		{Q("a:b:c:d;e=2;*"), Q("a:b:c:d;e=1;*"), 1},
+		{Q("a:b:c:d;e=1;f=2;*"), Q("a:b:c:d;*"), 1},
+	} {
+		if got, expect := test.a.Compare(test.b), test.expect; got != expect {
+			t.Errorf("('%v').Compare('%v')\nexpect: %+v\ngot:    %+v", test.a, test.b, expect, got)
+		}
+		// Check opposite order
+		if got, expect := test.b.Compare(test.a), -test.expect; got != expect {
+			t.Errorf("('%v').Compare('%v')\nexpect: %+v\ngot:    %+v", test.b, test.a, expect, got)
+		}
+	}
+}
+
+func TestContains(t *testing.T) {
+	type Test struct {
+		a, b   query.Query
+		expect bool
+	}
+
+	for _, test := range []Test{
+		{Q("a"), Q("a"), true},
+		{Q("a"), Q("b"), false},
+		{Q("a:*"), Q("a:*"), true},
+		{Q("a:*"), Q("a:b"), true},
+		{Q("a:*"), Q("b"), false},
+		{Q("a:*"), Q("b:c"), false},
+		{Q("a:*"), Q("b:*"), false},
+		{Q("a:*"), Q("a:b,*"), true},
+		{Q("a:b,*"), Q("a:*"), false},
+		{Q("a:b,*"), Q("a:b"), true},
+		{Q("a:b,*"), Q("a:c"), false},
+		{Q("a:b,*"), Q("a:b,*"), true},
+		{Q("a:b,*"), Q("a:c,*"), false},
+		{Q("a:b,c"), Q("a:b,c,d"), false},
+		{Q("a:b,c"), Q("a:b,c:d"), false},
+		{Q("a:b,c,*"), Q("a:b,*"), false},
+		{Q("a:b,c,*"), Q("a:b,c"), true},
+		{Q("a:b,c,*"), Q("a:b,d"), false},
+		{Q("a:b,c,*"), Q("a:b,c,*"), true},
+		{Q("a:b,c,*"), Q("a:b,c,d,*"), true},
+		{Q("a:b,c,*"), Q("a:b,c:d,*"), true},
+		{Q("a:b,c:*"), Q("a:b,c,d,*"), false},
+		{Q("a:b,c:d"), Q("a:b,c:d,e"), false},
+		{Q("a:b,c:d,*"), Q("a:b,c:d"), true},
+		{Q("a:b,c:d,*"), Q("a:b,c:e"), false},
+		{Q("a:b,c:d,*"), Q("a:b,c:d,*"), true},
+		{Q("a:b,c:d,*"), Q("a:b,c:d,e,*"), true},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,e"), true},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:e,e"), false},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,f"), false},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,e,*"), true},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:e,f,*"), false},
+		{Q("a:b,c:d,e,*"), Q("a:b,c:d,*"), false},
+		{Q("a:b:c:d;*"), Q("a:b:c:d;*"), true},
+		{Q("a:b:c:d;*"), Q("a:b:c:d,e;*"), true},
+		{Q("a:b:c:d;*"), Q("a:b:c:d;e=1;*"), true},
+		{Q("a:b:c:d;*"), Q("a:b:c:d;e=1;*"), true},
+		{Q("a:b:c:d;*"), Q("a:b:c:d;e=1;f=2;*"), true},
+		{Q("a:b:c:d;e=1;*"), Q("a:b:c:d;*"), true},
+		{Q("a:b:c:d;e=1;f=2;*"), Q("a:b:c:d;*"), true},
+		{Q("a:b:c:d;e=1;*"), Q("a:b:c:d;e=2;*"), false},
+		{Q("a:b:c:d;e=2;*"), Q("a:b:c:d;e=1;*"), false},
+		{Q("a:b:c:d;e;*"), Q("a:b:c:d;e=1;*"), false},
+	} {
+		if got := test.a.Contains(test.b); got != test.expect {
+			t.Errorf("('%v').Contains('%v')\nexpect: %+v\ngot:    %+v", test.a, test.b, test.expect, got)
+		}
+	}
+}
+
+func TestWalk(t *testing.T) {
+	type Segment struct {
+		Query  query.Query
+		Target query.Target
+		Name   string
+	}
+	type Test struct {
+		query  query.Query
+		expect []Segment
+	}
+
+	for _, test := range []Test{
+		{
+			Q("suite"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+			}},
+		{
+			Q("suite:*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:*"), query.Files, "*"},
+			}},
+		{
+			Q("suite:a"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+			}},
+		{
+			Q("suite:a,*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,*"), query.Files, "*"},
+			}},
+		{
+			Q("suite:a,b"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+			}},
+		{
+			Q("suite:a,b:*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:*"), query.Tests, "*"},
+			}},
+		{
+			Q("suite:a,b:c"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+			}},
+		{
+			Q("suite:a,b:c,*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,*"), query.Tests, "*"},
+			}},
+		{
+			Q("suite:a,b:c,d"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+			}},
+		{
+			Q("suite:a,b:c,d:*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+				{Q("suite:a,b:c,d:*"), query.Cases, "*"},
+			}},
+		{
+			Q("suite:a,b:c,d:e"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+				{Q("suite:a,b:c,d:e"), query.Cases, "e"},
+			}},
+		{
+			Q("suite:a,b:c,d:e;*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+				{Q("suite:a,b:c,d:e;*"), query.Cases, "e;*"},
+			}},
+		{
+			Q("suite:a,b:c,d:e;f"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+				{Q("suite:a,b:c,d:e;f"), query.Cases, "e;f"},
+			}},
+		{
+			Q("suite:a,b:c,d:e;f;*"), []Segment{
+				{Q("suite"), query.Suite, "suite"},
+				{Q("suite:a"), query.Files, "a"},
+				{Q("suite:a,b"), query.Files, "b"},
+				{Q("suite:a,b:c"), query.Tests, "c"},
+				{Q("suite:a,b:c,d"), query.Tests, "d"},
+				{Q("suite:a,b:c,d:e;f;*"), query.Cases, "e;f;*"},
+			}},
+	} {
+		got := []Segment{}
+		err := test.query.Walk(func(q query.Query, t query.Target, n string) error {
+			got = append(got, Segment{q, t, n})
+			return nil
+		})
+		if err != nil {
+			t.Errorf("'%v'.Walk() returned %v", test.query, err)
+			continue
+		}
+		if diff := cmp.Diff(got, test.expect); diff != "" {
+			t.Errorf("'%v'.Walk()\n%v", test.query, diff)
+		}
+	}
+}
+
+type TestError struct{}
+
+func (TestError) Error() string { return "test error" }
+
+func TestWalkErrors(t *testing.T) {
+	for _, fq := range []query.Query{
+		Q("suite"),
+		Q("suite:*"),
+		Q("suite:a"),
+		Q("suite:a,*"),
+		Q("suite:a,b"),
+		Q("suite:a,b:*"),
+		Q("suite:a,b:c"),
+		Q("suite:a,b:c,*"),
+		Q("suite:a,b:c,d"),
+		Q("suite:a,b:c,d:*"),
+		Q("suite:a,b:c,d:e"),
+		Q("suite:a,b:c,d:e;*"),
+		Q("suite:a,b:c,d:e;f"),
+		Q("suite:a,b:c,d:e;f;*"),
+	} {
+		expect := TestError{}
+		got := fq.Walk(func(q query.Query, t query.Target, n string) error {
+			if q == fq {
+				return expect
+			}
+			return nil
+		})
+		if diff := cmp.Diff(got, expect); diff != "" {
+			t.Errorf("'%v'.Walk()\n%v", fq, diff)
+		}
+	}
+}
diff --git a/tools/src/go.mod b/tools/src/go.mod
index cbb7157..c55a6ee 100644
--- a/tools/src/go.mod
+++ b/tools/src/go.mod
@@ -1,14 +1,7 @@
-module dawn.googlesource.com/tint/tools/src
+module dawn.googlesource.com/dawn/tools/src
 
-go 1.16
+go 1.18
 
-require (
-	github.com/andygrunwald/go-gerrit v0.0.0-20210709065208-9d38b0be0268
-	github.com/fatih/color v1.10.0
-	github.com/go-git/go-git/v5 v5.4.2
-	github.com/sergi/go-diff v1.2.0
-	github.com/shirou/gopsutil v3.21.11+incompatible
-	github.com/tklauser/go-sysconf v0.3.9 // indirect
-	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/net v0.0.0-20210614182718-04defd469f4e
-)
+require github.com/google/go-cmp v0.5.6
+
+require golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
diff --git a/tools/src/go.sum b/tools/src/go.sum
index 9929b0c..58b75a4 100644
--- a/tools/src/go.sum
+++ b/tools/src/go.sum
@@ -1,130 +1,5 @@
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
-github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
-github.com/andygrunwald/go-gerrit v0.0.0-20210709065208-9d38b0be0268 h1:7gokoTWteZhP1t2f0OzrFFXlyL8o0+b0r4ZaRV9PXOs=
-github.com/andygrunwald/go-gerrit v0.0.0-20210709065208-9d38b0be0268/go.mod h1:aqcjwEnmLLSalFNYR0p2ttnEXOVVRctIzsUMHbEcruU=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
-github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
-github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
-github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
-github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
-github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
-github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
-github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
-github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 h1:ikCpsnYR+Ew0vu99XlDp55lGgDJdIMx3f4a18jfse/s=
-golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/webgpu-cts/BUILD.gn b/webgpu-cts/BUILD.gn
new file mode 100644
index 0000000..a0ecb67
--- /dev/null
+++ b/webgpu-cts/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: This file is intentionally not used by any other BUILD.gn in Dawn.
+# Instead, Chromium depends directly on this file to build the WebGPU CTS.
+# Scripts called from this file assume Dawn is checked out inside Chromium.
+
+# This needs to be copied to the output directory since the CTS tests also
+# serve resources that are copied into it.
+copy("webgpu_cts_resources") {
+  testonly = true
+  sources = [
+    "test_page.html",
+    "test_runner.js",
+  ]
+  outputs = [ "$target_gen_dir/{{source_file_part}}" ]
+}
+
+group("webgpu-cts") {
+  testonly = true
+  data_deps = [
+    ":webgpu_cts_resources",
+    "../third_party/gn/webgpu-cts",
+  ]
+  data = [
+    "//third_party/node/",
+    "./scripts",
+    "./expectations.txt",
+    "../third_party/webgpu-cts/src",
+    "../third_party/webgpu-cts/node.tsconfig.json",
+    "../third_party/webgpu-cts/tsconfig.json",
+  ]
+}
diff --git a/webgpu-cts/README.md b/webgpu-cts/README.md
new file mode 100644
index 0000000..cc4d86d
--- /dev/null
+++ b/webgpu-cts/README.md
@@ -0,0 +1,20 @@
+# Running the WebGPU CTS Locally with Chrome
+
+Running the WebGPU CTS locally with Chrome requires a Chromium checkout.
+
+Follow [these instructions](https://www.chromium.org/developers/how-tos/get-the-code/) for checking out
+and building Chrome. You'll also need to build the `telemetry_gpu_integration_test` target.
+
+At the root of a Chromium checkout, run:
+`./content/test/gpu/run_gpu_integration_test.py webgpu_cts --browser=exact --browser-executable=path/to/your/chrome-executable`
+
+If you don't want to build Chrome, you can still run the CTS, by passing the path to an existing Chrome executable to the `--browser-executable` argument. You should still build the `telemetry_gpu_integration_test` target to support all harness
+functionality.
+
+Useful command-line arguments:
+ - `-l`: List all tests that would be run.
+ - `--test-filter`: Filter tests. Run `--help` for more information.
+ - `--help`: See more options.
+ - `--passthrough --show-stdout`: Show browser output. See also `--browser-logging-verbosity`.
+ - `--extra-browser-args`: Pass extra args to the browser executable.
+ - `--is-backend-validation`: Enable backend validation. TODO: rename this to `--backend-validation`.
diff --git a/webgpu-cts/expectations.txt b/webgpu-cts/expectations.txt
new file mode 100644
index 0000000..9beec38
--- /dev/null
+++ b/webgpu-cts/expectations.txt
@@ -0,0 +1,79 @@
+# THIS FILE IS AUTOGENERATED. DO NOT MANUALLY EDIT.
+# SEE //content/test/gpu/process_generated_webgpu_expectations.py
+# BEGIN TAG HEADER (autogenerated, see validate_tag_consistency.py)
+# OS
+# tags: [ android android-lollipop android-marshmallow android-nougat
+#             android-pie android-r android-s
+#         chromeos
+#         fuchsia
+#         linux ubuntu
+#         mac bigsur catalina lion highsierra mac-10.12 mojave monterey
+#             mountainlion sierra
+#         win win7 win8 win10 ]
+# Devices
+# tags: [ android-nexus-5 android-nexus-5x android-pixel-2 android-pixel-4
+#             android-pixel-6 android-shield-android-tv
+#         chromeos-board-amd64-generic chromeos-board-kevin chromeos-board-eve
+#         fuchsia-board-astro fuchsia-board-sherlock fuchsia-board-qemu-x64 ]
+# Platform
+# tags: [ desktop
+#         mobile ]
+# Browser
+# tags: [ android-chromium android-webview-instrumentation
+#         debug debug-x64
+#         release release-x64
+#         fuchsia-chrome web-engine-shell ]
+# GPU
+# tags: [ amd amd-0x6613 amd-0x679e amd-0x6821 amd-0x7340
+#         apple apple-apple-m1 apple-angle-metal-renderer:-apple-m1
+#         arm
+#         google google-0xffff
+#         intel intel-0xa2e intel-0xd26 intel-0xa011 intel-0x3e92 intel-0x3e9b
+#               intel-0x5912
+#         nvidia nvidia-0xfe9 nvidia-0x1cb3 nvidia-0x2184
+#         qualcomm ]
+# Decoder
+# tags: [ passthrough no-passthrough ]
+# ANGLE Backend
+# tags: [ angle-disabled
+#         angle-d3d9 angle-d3d11
+#         angle-metal
+#         angle-opengl angle-opengles
+#         angle-swiftshader
+#         angle-vulkan ]
+# Skia Renderer
+# tags: [ skia-renderer-dawn
+#         skia-renderer-disabled
+#         skia-renderer-gl
+#         skia-renderer-vulkan ]
+# SwiftShader
+# tags: [ swiftshader-gl no-swiftshader-gl ]
+# Driver
+# tags: [ mesa_lt_19.1 ]
+# ASan
+# tags: [ asan no-asan ]
+# Display Server
+# tags: [ display-server-wayland display-server-x ]
+# OOP-Canvas
+# tags: [ oop-c no-oop-c ]
+# WebGPU Backend Validation
+# tags: [ dawn-backend-validation dawn-no-backend-validation ]
+# results: [ Failure RetryOnFailure Skip Slow ]
+# END TAG HEADER
+
+# This will be slowly removed over time as more tests are moved from the web
+# test harness to the Telemetry harness.
+webgpu:api,validation,* [ Skip ]
+webgpu:api,operation,* [ Skip ]
+worker_webgpu:api,validation,* [ Skip ]
+worker_webgpu:api,operation,* [ Skip ]
+webgpu:shader,* [ Skip ]
+webgpu:util,* [ Skip ]
+webgpu:web_platform,* [ Skip ]
+
+# Not skipped are:
+# webgpu:examples:*
+# webgpu:idl,*
+
+# BEGIN AUTOGENERATED EXPECTATIONS
+
diff --git a/webgpu-cts/scripts/add_query_to_expectation_file.py b/webgpu-cts/scripts/add_query_to_expectation_file.py
new file mode 100755
index 0000000..3597369
--- /dev/null
+++ b/webgpu-cts/scripts/add_query_to_expectation_file.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Script for easily adding expectations to expectations.txt
+
+Converts one or more WebGPU CTS queries into one or more individual expectations
+and appends them to the end of the file.
+"""
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+
+import dir_paths
+
+LIST_SCRIPT_PATH = os.path.join(dir_paths.webgpu_cts_scripts_dir, 'list.py')
+TRANSPILE_DIR = os.path.join(dir_paths.dawn_dir, '.node_transpile_work_dir')
+EXPECTATION_FILE_PATH = os.path.join(dir_paths.dawn_dir, 'webgpu-cts',
+                                     'expectations.txt')
+
+
+def expand_query(query):
+    cmd = [
+        sys.executable,
+        LIST_SCRIPT_PATH,
+        '--js-out-dir',
+        TRANSPILE_DIR,
+        '--query',
+        query,
+    ]
+    p = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
+    return p.stdout.decode('utf-8').splitlines()
+
+
+def generate_expectations(queries, tags, results, bug):
+    tags = '[ %s ] ' % ' '.join(tags) if tags else ''
+    results = ' [ %s ]' % ' '.join(results)
+    bug = bug + ' ' if bug else ''
+    content = ''
+    for q in queries:
+        test_names = expand_query(q)
+        if not test_names:
+            logging.warning('Did not get any test names for query %s', q)
+        for tn in test_names:
+            content += '{bug}{tags}{test}{results}\n'.format(bug=bug,
+                                                             tags=tags,
+                                                             test=tn,
+                                                             results=results)
+    with open(EXPECTATION_FILE_PATH, 'a') as outfile:
+        outfile.write(content)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=('Converts one or more WebGPU CTS queries into one or '
+                     'more individual expectations and appends them to the '
+                     'end of expectations.txt'))
+    parser.add_argument('-b',
+                        '--bug',
+                        help='The bug link to associate with the expectations')
+    parser.add_argument('-t',
+                        '--tag',
+                        action='append',
+                        default=[],
+                        dest='tags',
+                        help=('A tag to restrict the expectation to. Can be '
+                              'specified multiple times.'))
+    parser.add_argument('-r',
+                        '--result',
+                        action='append',
+                        default=[],
+                        dest='results',
+                        required=True,
+                        help=('An expected result for the expectation. Can be '
+                              'specified multiple times, although a single '
+                              'result is the most common usage.'))
+    parser.add_argument('-q',
+                        '--query',
+                        action='append',
+                        default=[],
+                        dest='queries',
+                        help=('A CTS query to expand into expectations. Can '
+                              'be specified multiple times.'))
+    args = parser.parse_args()
+    generate_expectations(args.queries, args.tags, args.results, args.bug)
diff --git a/webgpu-cts/scripts/compile_src.py b/webgpu-cts/scripts/compile_src.py
new file mode 100755
index 0000000..b08c0a7
--- /dev/null
+++ b/webgpu-cts/scripts/compile_src.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import sys
+
+from dir_paths import webgpu_cts_root_dir, node_dir
+from tsc_ignore_errors import run_tsc_ignore_errors
+
+try:
+    old_sys_path = sys.path
+    sys.path = [node_dir] + sys.path
+
+    from node import RunNode
+finally:
+    sys.path = old_sys_path
+
+
+def compile_src(out_dir):
+    # First, clean the output directory so deleted files are pruned from old builds.
+    shutil.rmtree(out_dir)
+
+    run_tsc_ignore_errors([
+        '--project',
+        os.path.join(webgpu_cts_root_dir, 'tsconfig.json'),
+        '--outDir',
+        out_dir,
+        '--noEmit',
+        'false',
+        '--noEmitOnError',
+        'false',
+        '--declaration',
+        'false',
+        '--sourceMap',
+        'false',
+        '--target',
+        'ES2017',
+    ])
+
+
+def compile_src_for_node(out_dir, additional_args=None, clean=True):
+    additional_args = additional_args or []
+    if clean:
+        # First, clean the output directory so deleted files are pruned from old builds.
+        shutil.rmtree(out_dir)
+
+    args = [
+        '--project',
+        os.path.join(webgpu_cts_root_dir, 'node.tsconfig.json'),
+        '--outDir',
+        out_dir,
+        '--noEmit',
+        'false',
+        '--noEmitOnError',
+        'false',
+        '--declaration',
+        'false',
+        '--sourceMap',
+        'false',
+        '--target',
+        'ES6',
+    ]
+    args.extend(additional_args)
+
+    run_tsc_ignore_errors(args)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 2:
+        print('Usage: compile_src.py GEN_DIR')
+        sys.exit(1)
+
+    gen_dir = sys.argv[1]
+
+    # Compile the CTS src.
+    compile_src(os.path.join(gen_dir, 'src'))
+    compile_src_for_node(os.path.join(gen_dir, 'src-node'))
+
+    # Run gen_listings.js to overwrite the dummy src/webgpu/listings.js created
+    # from transpiling src/
+    RunNode([
+        os.path.join(gen_dir, 'src-node', 'common', 'tools',
+                     'gen_listings.js'),
+        '--no-validate',
+        os.path.join(gen_dir, 'src'),
+        os.path.join(gen_dir, 'src-node', 'webgpu'),
+    ])
diff --git a/webgpu-cts/scripts/dir_paths.py b/webgpu-cts/scripts/dir_paths.py
new file mode 100644
index 0000000..8e2b37e
--- /dev/null
+++ b/webgpu-cts/scripts/dir_paths.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+webgpu_cts_scripts_dir = os.path.dirname(os.path.abspath(__file__))
+dawn_dir = os.path.dirname(os.path.dirname(webgpu_cts_scripts_dir))
+dawn_third_party_dir = os.path.join(dawn_dir, 'third_party')
+gn_webgpu_cts_dir = os.path.join(dawn_third_party_dir, 'gn', 'webgpu-cts')
+webgpu_cts_root_dir = os.path.join(dawn_third_party_dir, 'webgpu-cts')
+chromium_third_party_dir = None
+node_dir = None
+
+_possible_chromium_third_party_dir = os.path.dirname(
+    os.path.dirname(dawn_third_party_dir))
+_possible_node_dir = os.path.join(_possible_chromium_third_party_dir, 'node')
+if os.path.exists(_possible_node_dir):
+    chromium_third_party_dir = _possible_chromium_third_party_dir
+    node_dir = _possible_node_dir
diff --git a/webgpu-cts/scripts/gen_ts_dep_lists.py b/webgpu-cts/scripts/gen_ts_dep_lists.py
new file mode 100755
index 0000000..5862ab2
--- /dev/null
+++ b/webgpu-cts/scripts/gen_ts_dep_lists.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import sys
+
+from dir_paths import gn_webgpu_cts_dir, webgpu_cts_root_dir
+from tsc_ignore_errors import run_tsc_ignore_errors
+
+src_prefix = webgpu_cts_root_dir.replace('\\', '/') + '/'
+
+
+def get_ts_sources():
+    # This will output all the source files in the form:
+    # "/absolute/path/to/file.ts"
+    # The path is always Unix-style.
+    # It will also output many Typescript errors since the build doesn't download the .d.ts
+    # dependencies.
+    stdout = run_tsc_ignore_errors([
+        '--project',
+        os.path.join(webgpu_cts_root_dir, 'tsconfig.json'), '--listFiles',
+        '--declaration', 'false', '--sourceMap', 'false'
+    ])
+
+    lines = [l.decode() for l in stdout.splitlines()]
+    return [
+        line[len(src_prefix):] for line in lines
+        if line.startswith(src_prefix + 'src/')
+    ]
+
+
+def get_resource_files():
+    files = os.listdir(os.path.join(webgpu_cts_root_dir, 'src', 'resources'))
+    files.sort()
+    return files
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--check',
+                        action='store_true',
+                        help='Check that the output file is up to date.')
+    parser.add_argument('--stamp', help='Stamp file to write after success.')
+    args = parser.parse_args()
+
+    ts_sources = [x + '\n' for x in get_ts_sources()]
+    ts_sources_txt = os.path.join(gn_webgpu_cts_dir, 'ts_sources.txt')
+
+    resource_files = [x + '\n' for x in get_resource_files()]
+    resource_files_txt = os.path.join(gn_webgpu_cts_dir, 'resource_files.txt')
+
+    if args.check:
+        with open(ts_sources_txt, 'r') as f:
+            txt = f.readlines()
+            if (txt != ts_sources):
+                raise RuntimeError(
+                    '%s is out of date. Please re-run //third_party/dawn/third_party/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
+                    % ts_sources_txt)
+        with open(resource_files_txt, 'r') as f:
+            if (f.readlines() != resource_files):
+                raise RuntimeError(
+                    '%s is out of date. Please re-run //third_party/dawn/third_party/webgpu-cts/scripts/gen_ts_dep_lists.py\n'
+                    % resource_files_txt)
+    else:
+        with open(ts_sources_txt, 'w') as f:
+            f.writelines(ts_sources)
+        with open(resource_files_txt, 'w') as f:
+            f.writelines(resource_files)
+
+    if args.stamp:
+        with open(args.stamp, 'w') as f:
+            f.write('')
diff --git a/webgpu-cts/scripts/list.py b/webgpu-cts/scripts/list.py
new file mode 100755
index 0000000..d63beb4
--- /dev/null
+++ b/webgpu-cts/scripts/list.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+import tempfile
+
+from dir_paths import node_dir
+
+from compile_src import compile_src_for_node
+
+
+def list_testcases(query, js_out_dir=None):
+    if js_out_dir is None:
+        js_out_dir = tempfile.mkdtemp()
+        delete_js_out_dir = True
+    else:
+        delete_js_out_dir = False
+
+    try:
+        logging.info('WebGPU CTS: Transpiling tools...')
+        compile_src_for_node(js_out_dir, [
+            '--incremental', '--tsBuildInfoFile',
+            os.path.join(js_out_dir, 'build.tsbuildinfo')
+        ],
+                             clean=False)
+
+        old_sys_path = sys.path
+        try:
+            sys.path = old_sys_path + [node_dir]
+            from node import RunNode
+        finally:
+            sys.path = old_sys_path
+
+        return RunNode([
+            os.path.join(js_out_dir, 'common', 'runtime', 'cmdline.js'), query,
+            '--list'
+        ])
+    finally:
+        if delete_js_out_dir:
+            shutil.rmtree(js_out_dir)
+
+
+# List all testcases matching a test query.
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--query', default='webgpu:*', help='WebGPU CTS Query')
+    parser.add_argument(
+        '--js-out-dir',
+        default=None,
+        help='Output directory for intermediate compiled JS sources')
+    args = parser.parse_args()
+
+    print(list_testcases(args.query, args.js_out_dir))
diff --git a/webgpu-cts/scripts/tsc_ignore_errors.py b/webgpu-cts/scripts/tsc_ignore_errors.py
new file mode 100755
index 0000000..14b8455
--- /dev/null
+++ b/webgpu-cts/scripts/tsc_ignore_errors.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+#
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import sys
+import os
+
+from dir_paths import node_dir
+
+try:
+    old_sys_path = sys.path
+    sys.path = [node_dir] + sys.path
+
+    from node import GetBinaryPath as get_node_binary_path
+finally:
+    sys.path = old_sys_path
+
+tsc = os.path.join(node_dir, 'node_modules', 'typescript', 'lib', 'tsc.js')
+
+
+def run_tsc_ignore_errors(args):
+    cmd = [get_node_binary_path(), tsc] + args
+    process = subprocess.Popen(cmd,
+                               cwd=os.getcwd(),
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+
+    stdout, stderr = process.communicate()
+
+    # Typecheck errors go in stdout, not stderr. If we see something in stderr, raise an error.
+    if len(stderr):
+        raise RuntimeError('tsc \'%s\' failed\n%s' % (' '.join(cmd), stderr))
+
+    return stdout
+
+
+if __name__ == '__main__':
+    run_tsc_ignore_errors(sys.argv[1:])
diff --git a/webgpu-cts/test_page.html b/webgpu-cts/test_page.html
new file mode 100644
index 0000000..89ef8086
--- /dev/null
+++ b/webgpu-cts/test_page.html
@@ -0,0 +1,24 @@
+<!doctype html>
+<!-- Copyright 2022 The Dawn Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. -->
+
+<title>WebGPU CTS</title>
+<meta charset=utf-8>
+<link rel=help href='https://gpuweb.github.io/gpuweb/'>
+
+<script type=module>
+    import { setBaseResourcePath } from '/gen/third_party/dawn/third_party/webgpu-cts/src/common/framework/resources.js';
+    setBaseResourcePath('/gen/third_party/dawn/third_party/webgpu-cts/resources');
+</script>
+<script type=module src=./test_runner.js></script>
diff --git a/webgpu-cts/test_runner.js b/webgpu-cts/test_runner.js
new file mode 100644
index 0000000..3f4eea2
--- /dev/null
+++ b/webgpu-cts/test_runner.js
@@ -0,0 +1,65 @@
+// Copyright 2022 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import { DefaultTestFileLoader } from '../third_party/webgpu-cts/src/common/internal/file_loader.js';
+import { prettyPrintLog } from '../third_party/webgpu-cts/src/common/internal/logging/log_message.js';
+import { Logger } from '../third_party/webgpu-cts/src/common/internal/logging/logger.js';
+import { parseQuery } from '../third_party/webgpu-cts/src/common/internal/query/parseQuery.js';
+
+import { TestWorker } from '../third_party/webgpu-cts/src/common/runtime/helper/test_worker.js';
+
+var socket;
+
+async function setupWebsocket(port) {
+  socket = new WebSocket('ws://127.0.0.1:' + port)
+  socket.addEventListener('message', runCtsTestViaSocket);
+}
+
+async function runCtsTestViaSocket(event) {
+  let input = JSON.parse(event.data);
+  runCtsTest(input['q'], input['w']);
+}
+
+async function runCtsTest(query, use_worker) {
+  const workerEnabled = use_worker;
+  const worker = workerEnabled ? new TestWorker(false) : undefined;
+
+  const loader = new DefaultTestFileLoader();
+  const filterQuery = parseQuery(query);
+  const testcases = await loader.loadCases(filterQuery);
+
+  const expectations = [];
+
+  const log = new Logger();
+
+  for (const testcase of testcases) {
+    const name = testcase.query.toString();
+
+    const wpt_fn = async () => {
+      const [rec, res] = log.record(name);
+      if (worker) {
+        await worker.run(rec, name, expectations);
+      } else {
+        await testcase.run(rec, expectations);
+      }
+
+      socket.send(JSON.stringify({'s': res.status,
+                                  'l': (res.logs ?? []).map(prettyPrintLog)}));
+    };
+    await wpt_fn();
+  }
+}
+
+window.runCtsTest = runCtsTest;
+window.setupWebsocket = setupWebsocket